query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, 19–20 items) | metadata (dict) |
---|---|---|---|
Returns the index corresponding to the given class label.
|
def lookup_class_idx(self,label):
return self.class_labels[label]
|
[
"def get_class_index(label):\n if isinstance(label,str) is False:\n basic.outputlogMessage('input label must be a string')\n assert(False)\n length = len(class_label)\n for i in range(0,length):\n if label.lower()==class_label[i]:\n return i\n #if not found\n basic.outputlogMessage('class label: %s not found in the class list'%label)\n assert(False)\n return False",
"def index_of(self, label: str) -> int:\n return self.label_dict[label]",
"def get_idx(self, label):\n idx = self.tok2idx[label]\n return idx",
"def labelIndex(self, label):\n for idx, taskDef in enumerate(self):\n if taskDef.label == label:\n return idx\n return -1",
"def class_str_to_index(self, obj_label: EnnosObjectLabel):\n if obj_label.label in self.dataset_config.classes:\n return self.dataset_config.classes.index(obj_label.label) + 1\n\n raise ValueError('Invalid class string {}, not in {}'.format(obj_label.label, self.dataset_config.classes))",
"def class_to_index(self, class_name):\n return self.classes.index(class_name)",
"def get_index(self, label):\n if label in self.labels:\n return self.labels.index(label)\n else:\n self.labels.append(label)\n return self.labels.index(label)",
"def get_class_label(index):\n if isinstance(index,str):\n index = int(index)\n # print(type(index))\n if index < len(class_label):\n return class_label[index]\n basic.outputlogMessage('class index: %d not found in the class list' % index)\n assert (False)\n return False",
"def label_index(self):\n return self._label_index",
"def fromLabel(name):\n return Data.labels.index(name)",
"def encode_label(self, label: str) -> int:\n return self.class_map[label]",
"def gen_idx_byclass(labels):\n # print(\"in gen_idx_byclass...\")\n from collections import Counter\n classes = Counter(labels).keys() # obtain a list of classes\n idx_byclass = {}\n\n for class_label in classes:\n # Find samples of this class:\n class_idx = [] # indices for samples that belong to this class\n for idx in range(len(labels)):\n if labels[idx] == class_label:\n class_idx.append(idx)\n idx_byclass[class_label] = class_idx\n\n return idx_byclass",
"def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)",
"def label_of(self, index: int) -> str:\n return self.index_dict[index]",
"def _get_class_index_in_target(target_format: DetectionTargetsFormat) -> int:\n if isinstance(target_format, ConcatenatedTensorFormat):\n return target_format.indexes[LabelTensorSliceItem.NAME][0]\n elif isinstance(target_format, DetectionTargetsFormat):\n return get_class_index_in_target(target_format)\n else:\n raise NotImplementedError(\n f\"{target_format} is not supported. Supported formats are: {ConcatenatedTensorFormat.__name__}, {DetectionTargetsFormat.__name__}\"\n )",
"def classify_label(classes, label):\n for cl in classes:\n for name in classes[cl][\"names\"]:\n if name.lower() == label.lower():\n return cl\n raise Exception(\"Unknown Label Class: %s\" % label)",
"def getClassKey(self, className=\"\"):\n if className == \"\":\n className = self.getClassName()\n return self.classList.index(className)",
"def api_get_class_index(word_class):\r\n p_validate_argument('word_class', word_class, str)\r\n\r\n if word_class in WordClass.WORD_CLASS_DICTIONARY:\r\n return WordClass.WORD_CLASS_DICTIONARY[word_class]\r\n else:\r\n raise APIException('不存在的词类名。')",
"def class_text_to_int(row_label):\n if row_label == 'person':\n return 1\n else:\n None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Applies a function mapping to each element in the feature data.
|
def apply_fn(self,fn):
self.check_Data()
for split,data_ in self.processed_data.items():
x = data_['x']
x = np.array([fn(xi) for xi in x])
data_['x'] = x
|
[
"def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)",
"def map(self, function):\n pass",
"def map(self, function):\n return FunctionalWrapper(map(function, self.data))",
"def apply_to_all(map_fn, s):\n return [map_fn(x) for x in s]",
"def map_function(x):",
"def mapData(self, function, data):\n self.results = list(map(function, list(data)))\n return self",
"def map(self, map_function, *map_arguments) -> None:\n\n elements = []\n self.__get_sorted_elements(self.__root, elements)\n\n for element in elements:\n map_function(element, *map_arguments)",
"def apply(self, fn, column_label):\n return [fn(v) for v in self[column_label]]",
"def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)",
"def map(self, func):\n return List(map(func, self))",
"def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])",
"def applyToEach(L, f):\n for i in range(len(L)):\n L[i] = f(L[i])",
"def map(self, func):\n return torch.stack([func(x) for x in self.keys])",
"def map(function, iterable):\n\n return [function(x) for x in iterable]",
"def applyToEach(l, f):\n for i in range(len(L)):\n L[i]=f(L[i])",
"def map(self, fn, *side_inputs, **options):\n return transforms.map(self, fn, *side_inputs, **options)",
"def _map_fn(self):\n raise NotImplementedError",
"def map_functions(x, functions):\n res = []\n for func in functions:\n res.append(map(func,x))\n return res",
"def transform(func, data):\n out = []\n for r in data:\n out.append(func(r))\n return out"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a new MLP using the nn.Sequential class. Returns the constructed nn.Sequential model.
|
def generate(self):
components = []
components.append(nn.Linear(self.n_features,self.hidden_sizes[0]))
self._activation(components,self.activation)
self._dropout(components,self.dropout)
for i in range(1,len(self.hidden_sizes)):
components.append(nn.Linear(self.hidden_sizes[i-1],self.hidden_sizes[i]))
self._activation(components,self.activation)
self._dropout(components,self.dropout)
components.append(nn.Linear(self.hidden_sizes[-1],self.n_classes))
mlp = nn.Sequential(*components)
num_params = sum(p.numel() for p in mlp.parameters() if p.requires_grad)
print("Created MLP with "+str(num_params)+" learnable params")
return mlp
|
[
"def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])",
"def mlpModel(input1_shape, layers=[4]):\n model = Sequential()\n last_idx = len(layers) - 1\n for (idx, num_units) in enumerate(layers):\n activation_name = 'relu'\n if idx == last_idx:\n activation_name = 'softmax'\n if idx == 0:\n model.add(Dense(input_shape = input1_shape, units = num_units, activation=activation_name))\n else:\n model.add(Dropout(0.5))\n model.add(Dense(units = num_units, activation=activation_name))\n \n model.compile(optimizer = 'adam', loss='binary_crossentropy',metrics=['acc', precision])\n print(model.summary())\n return model",
"def __init__(self):\n self.model = MLP(\n hidden_layer_sizes=(500, 500, 250, 100, 200),\n activation=\"relu\",\n solver=\"adam\",\n )",
"def create_model(num_vars, num_categs, hidden_dims, actfn=None):\n num_outputs = max(1, num_categs)\n num_inputs = num_vars\n actfn = get_activation_function(actfn)\n\n mask = InputMask(None)\n if num_categs > 0:\n pre_layers = EmbedLayer(num_vars=num_vars,\n num_categs=num_categs,\n hidden_dim=hidden_dims[0],\n input_mask=mask,\n sparse_embeds=(num_vars >= 50))\n num_inputs = pre_layers.hidden_dim\n pre_layers = [pre_layers, actfn()]\n else:\n pre_layers = mask\n\n mlps = MultivarMLP(input_dims=num_inputs,\n hidden_dims=hidden_dims,\n output_dims=num_outputs,\n extra_dims=[num_vars],\n actfn=actfn,\n pre_layers=pre_layers)\n return mlps",
"def mlp1(out_size):\n \n def res(**kwargs):\n dense = kwargs[\"dense\"]\n before = kwargs[\"before\"]\n act = kwargs[\"act\"]\n after = kwargs[\"after\"]\n dropout = kwargs[\"dropout\"]\n \n pipeline = [\n # Initial preprocessing of input.\n Functional(flatten),\n after((3072,)),\n \n dense(3072, 3000),\n *whole_act((3000,), kwargs),\n dropout(0.5, \"1d\")\n ]\n # Many dense layers. Dropout after each dense layer.\n for i in range(10):\n ins = 3000 - 200*i\n outs = ins - 200\n pipeline.extend([\n dense(ins, outs),\n *whole_act((outs,), kwargs)\n ])\n if i % 2 == 1:\n pipeline.append(dropout(0.5, \"1d\"))\n for i in range(9):\n ins = 1000 - 100*i\n outs = ins - 100\n pipeline.extend([\n dense(ins, outs),\n *whole_act((outs,), kwargs),\n ])\n if i % 2 == 1:\n pipeline.append(dropout(0.5, \"1d\"))\n \n # Final dense layer, with gain 1.\n pipeline.extend([\n dfl_dense(100, out_size),\n before((out_size,))\n ])\n \n return nn.Sequential(*squash(pipeline))\n \n res.__name__ = \"mlp1\"\n return res",
"def build(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(self.num_hidden1, init='lecun_uniform', input_shape=(self.num_inputs,)))\n\t\tmodel.add(Activation('relu'))\n\n\t\tmodel.add(Dense(self.num_hidden2, init='lecun_uniform'))\n\t\tmodel.add(Activation('relu'))\n\n\t\tmodel.add(Dense(self.num_output, init='lecun_uniform'))\n\t\tmodel.add(Activation('linear'))\n\n\t\trms = RMSprop(lr=self.lr)\n\t\tmodel.compile(loss='mse', optimizer=rms)\n\t\tself.model = model",
"def generate_nn_problem(layer_sizes, num_examples, add_noise=False):\n # reversed because this is a generative model\n data_model = build_seq_model(reversed(layer_sizes))\n\n out_data = np.random.normal(size=(num_examples, layer_sizes[-1]))\n in_data = data_model.predict(out_data)\n # independent variables determine dependent variables\n\n if add_noise:\n noise = np.random.normal(size=in_data.shape) * 0.5\n in_data = in_data + noise\n\n return in_data, out_data",
"def create_neural_network():\n model = Sequential()\n model.add(LSTM(32, input_shape=(4, 45))) # 4 time-steps and 45 features\n model.add(Dense(64))\n model.add(Activation('tanh'))\n model.add(Dense(units=45)) # 45 is the number of class\n model.add(Activation('softmax')) # Output the density of probability\n\n model.compile(optimizer=adam(lr=0.001, decay=1e-6),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n model.summary()\n print(\"Creation of the Neural Network is finished.\")\n return model",
"def mlp_model(train, layers=(100,), window_size=5):\n # generate a window\n window = mlp_window_selector(train, window_size)\n # interpolate new data\n train_x = mlp_input_mapper(train[0], window)\n train_y = mlp_input_mapper(train[1], window)\n # generate model\n model = MLPRegressor(hidden_layer_sizes=tuple(layers))\n # fit model with new rounded data\n model.fit(train_x, train_y)\n # return model and window\n return (model, window)",
"def mlp(self):\n raise NotImplementedError",
"def __init__(self, hidden_layer_sizes, activation='relu', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Multi-layer Perceptron\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.activation = activation\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=self.reg, max_iter=1000, \n random_state=self.random_state)",
"def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model",
"def build_graph(self):\n return nn.Sequential(\n nn.Linear(self.input_dim, self.hidden_dim),\n self.hidden_activation,\n nn.Linear(self.hidden_dim, self.n_classes_))",
"def generate_homography_nn_adam(self):\n # Create the NN\n self.set_optimizer_adam()\n self.set_callback(utils.lr_callback)\n self.build_model()\n self.compile()",
"def create_probabilistic_nn(feature_names, target_names, hidden_units, name = 'PNN'):\n return create_deterministic_nn(feature_names, target_names, hidden_units,\n name = name, out = 'P')",
"def forward_prop_mlp(particle,task,score,X_train,Y_train,X_val,Y_val):\n try:\n decodedparams = pspso.decode_parameters(particle)\n modelparameters = {**pspso.defaultparams,**decodedparams} \n model=Sequential()\n model.add(Dense(int(modelparameters['neurons']), input_dim=X_train.shape[1], activation=modelparameters['hiddenactivation']))#particle,task='regression',score='rmse',X_train,Y_train,X_val,Y_val\n model.add(Dense(1, activation=modelparameters['activation']))#kernel_initializer='lecun_uniform',bias_initializer='zeros'\n model.compile(loss=modelparameters['loss'], optimizer=modelparameters['optimizer'], metrics=modelparameters['metrics'])\n model.optimizer.learning_rate=modelparameters['learning_rate']\n #checkpoint=ModelCheckpoint('mlp.h5',monitor='val_loss',verbose=pspso.verbose,save_best_only=True,mode=mode)\n es = EarlyStopping(monitor='val_loss', mode=modelparameters['mode'], verbose=pspso.verbose,patience=pspso.early_stopping)\n #callbacks_list=[checkpoint,es] \n callbacks_list=[es] \n history=model.fit(X_train,\n Y_train,\n batch_size=modelparameters['batch_size'],\n epochs=modelparameters['epochs'],\n shuffle=modelparameters['shuffle'],\n validation_data=(X_val,Y_val),\n callbacks=callbacks_list,\n verbose=pspso.verbose)\n #model.load_weights('mlp.h5')\n #model.compile(loss=loss, optimizer=modelparameters['optimizer'], metrics=metrics)\n return pspso.predict(model,'mlp',task, score,X_val,Y_val),model,history\n \n except Exception as e:\n print(\"An exception occured in MLP training.\")\n print(e)\n return None,None",
"def build_graph(self):\n return nn.Sequential(\n nn.Linear(self.input_dim, self.hidden_dim),\n self.hidden_activation,\n nn.Linear(self.hidden_dim, self.output_dim))",
"def MLP_NN_train(features, labels, ds):\n\n MLP_NN_Classifier = MLPClassifier(batch_size=500)\n MLP_NN_Classifier.fit(features, labels)\n save_classifier(MLP_NN_Classifier, \"ds\" + ds + \"MLP_NN_Classifier.pkl\")",
"def build_mlp(input_, config):\n current_input = input_\n print(current_input)\n for i in range(len(config['fcc_layers']) - 1):\n current_input = tf.keras.layers.Dense(\n units=config['fcc_layers'][i], activation='tanh',\n name='fcc_layer_{}'.format(i + 1))(current_input)\n current_input = tf.keras.layers.Dropout(\n rate=config['dropout'], name='dropout_{}'.format(i + 1))(current_input)\n cascade_embedding_layer = tf.keras.layers.Dense(\n units=config['fcc_layers'][-1], activation='tanh',\n name='cascade_embedding_layer')(current_input)\n cascade_embedding_layer_do = tf.keras.layers.Dropout(\n rate=config['dropout'],\n name='cascade_embedding_dropout')(cascade_embedding_layer)\n prediction_layer = tf.keras.layers.Dense(\n units=1, activation='sigmoid',\n name='prediction_layer')(cascade_embedding_layer_do)\n return cascade_embedding_layer, prediction_layer"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a new activation function and adds it to the list of components.
|
def _activation(self,components,activation):
if activation == "ReLU":
components.append(nn.ReLU())
elif activation == "Sigmoid":
components.append(nn.Sigmoid())
else:
raise Exception("Invalid activation fn: "+activation)
|
[
"def construct_activation_function(self):\n # Add the activation function\n if not self.activation_function is None:\n # Check if it is a string\n if isinstance(self.activation_function, str):\n activation_function = get_activation_function_by_name(\n self.activation_function\n )()\n else:\n assert isinstance(self.activation_function, ActivationFunction)\n activation_function = self.activation_function\n # Plot the function above the rest of the layer\n self.activation_function = activation_function\n self.add(self.activation_function)",
"def create_activation_function(op_type: Op) -> ActivationFunction:\n act = ActivationFunction(op_type)\n if op_type == Op.Relu:\n act.min = 0.0\n elif op_type == Op.Relu6:\n act.min = 0.0\n act.max = 6.0\n elif op_type == Op.ReluN1To1:\n act.min = -1.0\n act.max = 1.0\n elif op_type == Op.Tanh:\n act.min = -1.0\n act.max = 1.0\n elif op_type == Op.Sigmoid:\n act.min = 0.0\n act.max = 1.0\n return act",
"def encoder_activation_func(num_layer):\n ec_funct = []\n for i in range(num_layer):\n ec_funct.append('relu')\n ec_funct.append('softmax')\n\n return ec_funct",
"def initialiseActivationFunctions(self):\n\n\t\t###uniform for output units\n\t\tif self._outputActivationFunctions == None or self._outputActivationDerivatives == None:\t\n\t\n\t\t\tself._outputActivationFunctions = []\n\t\t\tself._outputActivationDerivatives = []\n\n\t\t\tactFunc = lambda x : x\n\t\t\tdActFunc = lambda x : 1.0\n\t\n\t\t\tfor i in range(self.nOutputs):\n\t\t\t\t\n\t\t\t\tself._outputActivationFunctions.append(actFunc)\n\t\t\t\tself._outputActivationDerivatives.append(dActFunc)\n\n\t\t\tself._outputActivationFunctions = np.array(self._outputActivationFunctions)\n\t\t\tself._outputActivationDerivatives = np.array(self._outputActivationDerivatives)\n\t\t\t\n\n\t\tif self._hiddenActivationFunctions == None or self._hiddenActivationDerivatives == None:\n\n\t\t\tself._hiddenActivationFunctions = []\n\t\t\tself._hiddenActivationDerivatives = []\n\n\t\t\tfor i in range(self.nHiddenLayers):\n\n\t\t\t\tfTemp = []\n\t\t\t\tdTemp = []\n\t\t\t\t\n\t\t\t\t#Make the default sigmoid the one suggested in LeCun et al 1998\n\t\t\t\ttwist = 0.01\n\t\t\t\ta = 1.7159\n\t\t\t\tc = 2.0/3.0\n\n\t\t\t\tactFunc = lambda x : a*np.tanh(c*x) + twist*x\n\t\t\t\tdActFunc = lambda x : twist + a*c*(1.0 - (np.tanh(c*x)**2.0))\n\n#\t\t\t\tactFunc = lambda x : np.tanh(x)\n#\t\t\t\tdActFunc = lambda x : 1.0 - np.tanh(x)**2.0\n\n\t\t\t\t#plus all of the bias\n\t\t\t\tfor j in range(self.nUnitsPerLayer+1):\n\t\t\t\t\t\n\t\t\t\t\tfTemp.append(actFunc)\n\t\t\t\t\tdTemp.append(dActFunc)\n\t\t\t\t\n\t\t\t\tself._hiddenActivationFunctions.append(fTemp)\n\t\t\t\tself._hiddenActivationDerivatives.append(dTemp)\n\t\t\t\n\t\t\tself._hiddenActivationFunctions = np.array(self._hiddenActivationFunctions)\n\t\t\tself._hiddenActivationDerivatives = np.array(self._hiddenActivationDerivatives)",
"def __init_activation(self, activation):\n\n if activation is None: #This is necessary if activation functions are included in blocks\n fn = lambda x: x #i.e. just return input\n elif activation == \"lrelu\":\n fn = \"lrelu\"\n elif activation == \"relu\":\n fn = \"relu\"\n elif activation == \"prelu\":\n fn = \"prelu\" #defer until NNBuilder()\n elif activation == \"GDN\":\n fn = \"GDN\"\n else:\n raise NotImplemtedError(\"Activation function must be in {`relu`, `lrelu`, `prelu`, `GDN`}\")\n self.activation = fn\n self.act_fn = lambda x: x #i.e. does nothing",
"def get_activation_function(func_name):\n return {\n 'linear': lambda x: x,\n 'relu': lambda x: x * (x > 0),\n 'elu': lambda x: x * (x >= 0) + (T.exp(x) - 1) * (x < 0),\n 'softmax': T.nnet.softmax,\n 'tanh': T.tanh,\n 'log_softmax': log_softmax,\n 'sigmoid': T.nnet.sigmoid\n }[func_name]",
"def register_activations(model: onnx_pb.ModelProto, activation_names: List):\n for act_name in activation_names:\n _ = add_hook_to_get_activation(model, act_name)",
"def addFunction(self, func):\n self.__functions.append(func)",
"def get_activation_fn(activation: str) -> Callable:\n if activation == 'relu':\n return F.relu\n elif activation == 'gelu':\n return gelu\n elif activation == 'gelu_fast':\n deprecation_warning('--activation-fn=gelu_fast has been renamed to gelu_accurate')\n return gelu_accurate\n elif activation == 'gelu_accurate':\n return gelu_accurate\n elif activation == 'tanh':\n return torch.tanh\n elif activation == 'linear':\n return lambda x: x\n else:\n raise RuntimeError(\"--activation-fn {} not supported\".format(activation))",
"def activation_func(activation:str):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]",
"def create_by_name(name):\n name = name.lower()\n if name == \"sigmoid\":\n return SigmoidActivation()\n if name == \"relu\":\n return ReluActivation()\n if name == \"linear\":\n return LinearActivation()\n\n raise NameError(\"Activation function with name \\\"\" + name + \"\\\" not found!\")",
"def generate_activation(self, layers: List[str], concept: Concept):\n\n if concept.name not in self.concept2av_map:\n self.concept2av_map[concept.name] = defaultdict(list)\n\n def forward_hook_wrapper(layer_name):\n def forward_hook(module, inp, out=None):\n out = torch.reshape(out, (out.shape[0], -1))\n # TODO: it would be better to use an iterator here (T69119305)\n # out.shape = NxF, N=concept batch size, F = feature size\n self.concept2av_map[concept.name][layer_name].append(out.detach())\n\n return forward_hook\n\n hooks = []\n for layer in layers:\n layer_module = self.get_module_from_name(layer)\n hooks.append(\n layer_module.register_forward_hook(forward_hook_wrapper(layer))\n )\n\n for examples in concept.data_iter:\n self.model(examples)\n\n for hook in hooks:\n hook.remove()",
"def linear_activation_forward(A_prev, W, b, activation):\n pass",
"def create_gp_activation(original_function):\n expected_f = sp.integrate.quad(\n lambda x: sp.stats.norm.pdf(x) * original_function(x),\n -onp.infty,\n onp.infty\n )[0]\n expected_f2 = sp.integrate.quad(\n lambda x: sp.stats.norm.pdf(x) * original_function(x)**2,\n -onp.infty,\n onp.infty\n )[0]\n scalar_derivative = jax.grad(original_function)\n expected_fp2 = sp.integrate.quad(\n lambda x: sp.stats.norm.pdf(x) * scalar_derivative(x)**2,\n -onp.infty,\n onp.infty\n )[0]\n scale_factor = 1. / onp.sqrt(expected_fp2)\n quadratic_equation = [\n 1., 2. * scale_factor * expected_f, scale_factor**2 * expected_f2 - 1.\n ]\n offset = min(onp.roots(quadratic_equation))\n\n def nruter(x):\n return scale_factor * original_function(x) + offset\n\n return nruter",
"def __activation__(self,inputs):\n activation=0\n for counter in range(self.num_inputs):\n activation+=self.weights[counter]*inputs[counter]\n return self.__sigmoid__(activation)\n #return activation+self.bias",
"def convert_activation(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n act_type = attrs[\"act_type\"]\n\n # Creating a dictionary here, but if this titlecase pattern\n # mxnet_name.title()\n act_types = {\n \"tanh\": \"Tanh\",\n \"relu\": \"Relu\",\n \"sigmoid\": \"Sigmoid\",\n \"softrelu\": \"Softplus\",\n \"softsign\": \"Softsign\"\n }\n\n act_name = act_types.get(act_type)\n if act_name:\n node = onnx.helper.make_node(\n act_name,\n input_nodes,\n [name],\n name=name\n )\n else:\n raise AttributeError(\n \"Activation %s not implemented or recognized in the converter\" % act_type\n )\n\n return [node]",
"def add_function(self, ty, name):\r\n return Function.new(self, ty, name)",
"def activation(self, summatory_activation):\n # Этот метод необходимо реализовать\n \n return self.activation_function(summatory_activation)",
"def compute_internal_activation(self) -> None:\n self.U = self.F * (1 + self.beta * self.L)\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a dropout object to the list of components.
|
def _dropout(self,components,dropout=None):
if dropout is not None:
components.append(nn.Dropout(dropout))
|
[
"def add(self, component) -> None:\n pass",
"def addComponent(self,component):\r\n self.append(component)",
"def add(self, component):\n self.components.add(component)",
"def add_depot(self, depot):\n self.destination_list.append(depot)",
"def add_component(self, componentInstance):\n\n #print \"Componet being added to %s entity.\"%(self._sName)\n #print componentInstance\n \n self._dComponents[componentInstance.get_name()] = componentInstance\n\n #These if statements will save a pointer of the same variable as in dComponents if True.\n\n if componentInstance.get_updateable():\n self._lUpdatables.append(componentInstance)\n\n if componentInstance.is_view_drawable():\n self._lViewDrawables.append(componentInstance)\n\n elif componentInstance.is_screen_drawable():\n self._lScreenDrawables.append(componentInstance)",
"def add_output(self, item):\n assert type(item) is output, \"Item must be of type output\"\n self.outputlist.append(item)",
"def add_component(self, lib_component):\n comp_name = lib_component.name\n try:\n comp = self.__component_list[comp_name]\n except KeyError:\n self.__component_list[comp_name] = lib_component",
"def addComponent(self):\n\n # Collect new component information\n self.comp_name = input(\"Component Name: \")\n self.comp_desig = input(\"Designation: \")\n\n self.des_var_count = input(\"How many design variables? \")\n self.des_vars = self.defineVariables(self.des_var_count,\"design\")\n\n self.calc_var_count = input(\"How many calculated variables? \")\n self.calc_vars = self.defineVariables(self.calc_var_count,\"calculated\")\n\n self.stress_func_count = input(\"How many stress functions? \")\n self.stress_funcs = self.defineVariables(self.stress_func_count,\"stress\")\n\n self.misc_var_count = input(\"How many misc vars? \")\n self.misc_vars = self.defineVariables(self.misc_var_count,\"misc\")\n\n # Write information to component_list file in correct format\n\n\n self.refereshComponents()",
"def add_dut(self):\n pass",
"def addDropzone( self, dropzone ):\n self._dropzones.append(dropzone)",
"def changeDropout(self,dropout):\n self.dropout = dropout",
"def add(self, *components):\n for component in components:\n if component.container is not None:\n component.container.remove(component)\n component.container = self\n self._components.extend(components)",
"def InitDrops(self):\r\n\r\n # get sample and drop info out of jtray\r\n try:\r\n drop = self.dbBackend.GetItems('Drop')[0]\r\n sample = self.dbBackend.GetItems('Sample')[0]\r\n except IndexError:\r\n # this happens when no drop is available => only screen files\r\n return\r\n # create new drop component\r\n\tsampleComponent = self.dbBackend.GetNewItem(\"DropComponent\",self.screen)\r\n # transfer sample information\r\n sampleComponent.SetProperty('Description', sample.GetProperty('Description'))\r\n sampleComponent.SetProperty('Concentration', sample.GetProperty('SampleConcentration'))\r\n sampleComponent.SetProperty('Volume', drop.GetProperty('SampleVol'))\r\n sampleComponent.SetProperty('Buffer', sample.GetProperty('SampleBuffer'))\r\n sampleComponent.SetProperty('Remarks', \"\")\r\n ## transfer drop solution information\r\n solComponent = self.dbBackend.GetNewItem(\"DropComponent\",self.screen)\r\n solComponent.SetProperty('Description', \"Screen Solution\")\r\n solComponent.SetProperty('Concentration', \"0\")\r\n solComponent.SetProperty('Buffer', \"\")\r\n solComponent.SetProperty('Volume', drop.GetProperty(\"ResVol\"))\r\n solComponent.SetProperty('Remarks', \"\")\r\n drop.Delete()\r\n sample.Delete()\r\n \r\n for i in range(self.noWells):\r\n drop = self.dbBackend.GetNewItem(\"Drop\",self.screen)\r\n self.screen.AddChild(drop)\r\n drop.SetProperty(\"Position\", i)\r\n drop.SetProperty(\"DropNr\", 0)\r\n sampleComponent.parent = drop\r\n solComponent.parent = drop\r\n drop.AddChild(sampleComponent.GetCopy())\r\n drop.AddChild(solComponent.GetCopy())\r\n self.drops[(drop.GetProperty(\"Position\"), drop.GetProperty(\"DropNr\"))] = drop",
"def buttonAdd_Clicked( self, event ):\n\t\tid = DM.FixedIndex(self._combos[self._treasureIndex].GetSelection())\n\t\tif id is not None and id >= DM.FixedIndex(0):\n\t\t\tqty = self.spinCtrlQuantity.GetValue()\n\t\t\tprob = self.spinCtrlProbability.GetValue()\n\t\t\ttreasure = (id, prob, qty)\n\t\t\tself.Treasure[self._treasureIndex].append(treasure)\n\t\t\tself.refreshTreasureList()",
"def add(self, component):\r\n\t\tcomponent_class = component.__class__\r\n\t\tfor c in self.components_array:\r\n\t\t\tif c.__class__ == component_class:\r\n\t\t\t\tdel self.components_array[c]\r\n\t\t\t\tbreak\r\n\r\n\t\tcomponent_class_index = ComponentType.get_index_for(component_class)\r\n\t\tself.components[component_class_index] = component\r\n\t\tself.components_array.append(component)\r\n\r\n\t\tself.component_bits.set(component_class_index)\r\n\r\n\t\tself.component_added(self)\r\n\t\treturn self",
"def _addOutlet(self, outlet, other): \n self._outlets.append(outlet)\n if self._type == 2 and other._type == 1:\n self._reservoirs.append(other)",
"def onTreeSpanningComponentAfterAddOrMove(ob, event):\n if not IObjectRemovedEvent.providedBy(event):\n affected_objects = ObjectsAffectedBySpanningComponent(ob)\n affected_objects.index_affected_objects()",
"def append(self, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.append(panel(obj))\n self.objects = new_objects",
"def __add_pilot(self, evt):\n dlg = PilotForm(self)\n try:\n while True:\n try:\n if dlg.ShowModal() == wx.ID_OK:\n record = dlg.GetData()\n session.add(record)\n session.commit()\n self.pilot_items.append(record)\n self.__init_combo_pilot()\n self.combo_pilot.SetSelection(\n self.pilot_items.index(record))\n break\n except Exception, e:\n session.rollback()\n error_message_dialog(self, PILOT_INSERT_ERROR, e)\n finally:\n dlg.Destroy()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Splits a DataFrame into 3 distinct DataFrames based on the given percentages and returns a dict of the data.
|
def split_data(text_df,splits=None,rand_perm=True):
if splits is None:
splits = {'train':0.6,'val':0.1,'test':0.3}
if np.round(np.sum(list(splits.values())),4) != 1:
raise Exception("Split percentages do not sum to 1")
size = len(text_df)
if rand_perm:
perm_idx = np.random.permutation(size)
else:
perm_idx = np.arange(size)
text_df = text_df.iloc[perm_idx,:]
all_data = dict()
keys = list(splits.keys())
pct = list(splits.values())
count = np.round(np.array(pct) * size).astype(np.int32)
split_idx = np.cumsum(count)[:-1]
data_list = np.split(text_df,split_idx,axis=0)
all_data = {keys[i]:data for i,data in enumerate(data_list)}
return all_data
|
[
"def split_data(df_data, clusters):\n\n if clusters is None:\n\n return {0: df_data}\n\n return {\n k: df_data.loc[clusters.index[clusters == k]]\n for k in clusters.unique()\n }",
"def split_train_dev_set(df, percent=0.2):\n train = []\n dev = []\n for k, g in df.groupby(\"sender\")[\"mid\", \"recipients\"]:\n n_msg = g.shape[0]\n n_dev = int(n_msg * percent)\n g = g.sort_values(\"date\")\n g_train = g[:-n_dev]\n g_dev = g[-n_dev:]\n train.append(g_train)\n dev.append(g_dev)\n # concat all dataframe\n df_train = pd.concat(train, axis=0).sort_index()\n df_dev = pd.concat(dev, axis=0).sort_index()\n return df_train, df_dev",
"def split_percentiles_pediatrics(df):\n df.rename(columns={\"ageyears\": \"age\", \"sex\": \"Sex\"}, inplace=True)\n cols = [\"Sex\", \"agedays\", \"age\"]\n\n ht_cols = cols.copy()\n ht_cols.extend([col for col in df.columns if \"s_ht_p\" in col])\n df_ht = df[ht_cols]\n df_ht.columns = [c.replace(\"s_ht_p\", \"P\") for c in df_ht]\n\n wt_cols = cols.copy()\n wt_cols.extend([col for col in df.columns if \"s_wt_p\" in col])\n df_wt = df[wt_cols]\n df_wt.columns = [c.replace(\"s_wt_p\", \"P\") for c in df_wt]\n\n bmi_cols = cols.copy()\n bmi_cols.extend([col for col in df.columns if \"s_bmi_p\" in col])\n df_bmi = df[bmi_cols]\n df_bmi.columns = [c.replace(\"s_bmi_p\", \"P\") for c in df_bmi]\n\n return (df_ht, df_wt, df_bmi)",
"def split_data_3_folds(all_data):\n \n shuffle(all_data)\n split_1 = int(2 * len(all_data)/5)\n split_2 = 2 * split_1\n \n return [all_data[:split_1], all_data[split_1:split_2], all_data[split_2:]]",
"def split_by_percentage(data, percentage) -> tuple:\n try:\n percentage = int(round(percentage*len(data)))\n return(data[percentage:], data[:percentage])\n except Exception as error:\n print(f\"Error: split_by_percentage([...], {percentage}) -> {error}\")",
"def split(df, group):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby(group)\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]",
"def subset_df(df: pd.DataFrame) -> dict:\n prct10 = int(round(len(df) * 10 / 100, 0))\n dict_nb = {}\n deb = 0\n fin = prct10\n dict_nb[\"df1\"] = df.iloc[deb:fin, :]\n deb = fin\n dixieme = 10 * prct10\n reste = (len(df) - dixieme)\n fin_reste = len(df) + 1\n for i in range(2, 11):\n fin = (i * prct10 + 1)\n dict_nb[\"df\" + str(i)] = df.iloc[deb:fin, :]\n if reste > 0:\n dict_nb[\"reste\"] = df.iloc[fin: fin_reste, :]\n deb = fin\n\n return dict_nb",
"def split_train_test_by_percentage(dataset, train_percentage=0.8):\n train_length = int(len(dataset) * train_percentage)\n return torch.utils.data.random_split(dataset, (train_length, len(dataset) - train_length))",
"def split_data(data, labels, proportion):\n size = data.shape[0]\n np.random.seed(69)\n s = np.random.permutation(size)\n split_idx = int(proportion * size)\n return data[s[:split_idx]], data[s[split_idx:]], labels[s[:split_idx]], labels[s[split_idx:]]",
"def split_dataset(dataset, train_percentage, valid_percentage):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[:, :-1], dataset[:, -1],\n train_size=train_percentage + valid_percentage,\n test_size=1-(train_percentage + valid_percentage))\n\n valid_x = train_x[int(np.ceil(train_percentage * len(dataset))):]\n valid_y = train_y[int(np.ceil(train_percentage * len(dataset))):]\n\n return train_x, valid_x, test_x, train_y, valid_y, test_y",
"def _split_by_filename(\n df: pd.DataFrame):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby('filename')\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]",
"def split(self, percent = 0.8):\n idx = round(len(self.label)*percent)\n \n # create a new dataset\n new_dataset = copy.deepcopy(self)\n new_dataset.volume = self.volume[idx:]\n new_dataset.label = self.label[idx:]\n\n # update self\n self.volume = self.volume[:idx]\n self.label = self.label[:idx]\n\n return new_dataset",
"def split(df):\n import matplotlib.pyplot as plt \n import metapack as mp\n import pandas as pd\n import numpy as np\n\n o_cols = []\n n_cols = []\n empty = []\n const = [] \n\n for cn in df.columns:\n c = df[cn]\n\n nu = c.nunique()\n\n if nu == 0:\n empty.append(cn)\n elif nu == 1:\n const.append(cn)\n elif c.dtype == np.dtype('O'):\n o_cols.append(cn)\n else:\n n_cols.append(cn)\n\n return df[o_cols], df[n_cols], df[const].drop_duplicates(), empty",
"def split_data(data, labels, proportion):\n size = data.shape[0]\n np.random.seed(42)\n s = np.random.permutation(size)\n split_idx = int(proportion * size)\n return (data[s[:split_idx]], data[s[split_idx:]], labels[s[:split_idx]], labels[s[split_idx:]])",
"def split_dataset(dataset: torch.utils.data.Dataset, split_perc: float = 0.20):\n assert (split_perc >= 0.0) and (split_perc <= 1.0), (\n f\"FATAL ERROR: invalid split_perc value {split_perc}.\" f\"Expecting float >= 0.0 and <= 1.0\"\n )\n\n if split_perc > 0.0:\n num_recs = len(dataset)\n train_count = int((1.0 - split_perc) * num_recs)\n test_count = num_recs - train_count\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_count, test_count])\n return train_dataset, test_dataset\n else:\n return dataset, None",
"def create_splits(self) -> dict[str, pd.DataFrame]:\n if self.data is None:\n raise ValueError('Data is not loaded yet. run \"FolkTables.load_data\"')\n splits = {}\n if self.split_type == \"random\":\n remainder_df = self.data.copy()\n original_size = remainder_df.shape[0]\n for key, value in self.splits.items():\n adjusted_frac = (original_size / remainder_df.shape[0]) * value\n sample = remainder_df.sample(frac=adjusted_frac, random_state=self.seed)\n splits[key] = sample\n sample_indexes = sample.index\n remainder_df = remainder_df.drop(sample_indexes)\n\n elif self.split_type == \"predefined\":\n for key, value in zip([\"train\", \"validation\", \"test\"], self.data):\n splits[key] = value\n\n return splits",
"def create_data_sets(file_location, percentage_int=75):\n\n # Original source of 'adult.data' no longer available. Changed from using httplib2 to opening a local file.\n try:\n local_file = open(file_location, 'r')\n except FileNotFoundError as error:\n print(error)\n sys.exit()\n\n data_set_tmp_list = [line.strip() for line in local_file]\n\n data_set_csv_list = [line for line in data_set_tmp_list if line != '']\n\n requested_records_int = len(data_set_csv_list) // 100 * percentage_int\n\n higher_income_records_list = []\n lower_income_records_list = []\n test_records_list = []\n\n record_count = 0\n\n for record in data_set_csv_list:\n \n try:\n if ' ?' in record:\n raise Exception('Record with invalid values found:', record)\n except Exception:\n continue\n\n temp_value_list = record.split(',')\n \n record_value_list = [value.strip().lower() for value in temp_value_list]\n\n if record_count >= requested_records_int:\n test_records_list.append(tuple(record_value_list))\n elif record_value_list[-1] == '>50k':\n higher_income_records_list.append(tuple(record_value_list))\n record_count += 1\n else:\n lower_income_records_list.append(tuple(record_value_list))\n record_count += 1\n\n return higher_income_records_list, lower_income_records_list, test_records_list",
"def get_data_per_categories(data, categories):\n\n countries, continents, incomes, world = categories\n\n df_countries = data[data.location.isin(countries)]\n df_continents = data[data.location.isin(continents)]\n df_incomes = data[data.location.isin(incomes)]\n df_world = data[data.location.isin(world)]\n\n return df_countries, df_continents, df_incomes, df_world",
"def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 9,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads an English > French text file and filters the lines based on the given filter_fn. If filter_fn is None, the default filter will be used.
|
def filter_nmt_file(filename,filter_fn=None):
if filter_fn is None:
filter_fn = lambda en : en.lower().startswith('i am') or \
en.lower().startswith('he is') or \
en.lower().startswith('she is') or \
en.lower().startswith('they are') or \
en.lower().startswith('you are') or \
en.lower().startswith('we are')
filtered_lines = []
with open(filename) as file:
lines = file.readlines()
for line in lines:
text = line.split('\t')
en = text[0]
fra = text[1]
if filter_fn(en):
filtered_lines.append(en.lower() + '\t' + fra.lower())
return filtered_lines
|
[
"def on_filter_process(self, filter_terms=None):\n if not filter_terms:\n filter_terms = self.ui_filterLine.text()\n # break up the string term based common separator characters\n terms = lists.fragment(terms=filter_terms, splits=list(' ,'),\n clean=True)\n # group up common terms based on their starting character. We should\n # get 5 groups: includes, excludes, required, starts, ends\n groupings = lists.grouping(items=terms,\n searchTerms=[[t] for t in '+-!<>'])\n files = self.var_files\n if groupings:\n # if we have an extra group, it means there was no matching\n # character and we can assume those to include terms\n if len(groupings) == 6:\n groupings[0].extend(groupings.pop())\n includes, excludes, required, starts, ends = groupings\n # filter the list of files\n files = lists.filter(self.var_files, includes=includes,\n excludes=excludes, required=required,\n starts=starts, ends=ends)\n self.var_files_filtered = files\n self.ui_fileView.on_file_process(files)",
"def test_filter_fn( self ):\n def filter_ts( string ):\n if string.lower().startswith( 't' ):\n return None\n return string\n ( contents, provider, data ) = self.contents_provider_and_data( filter_fn=filter_ts )\n # no block fns here, so will parse as lines\n self.assertEqual( data, [ ['One'], ['ABCD'], ['ABCD'], ['EFGH'] ] )\n self.assertCounters( provider, 4, 4, 4 )",
"def filter(fn):\n fn.is_filter = True\n return fn",
"def set_filter(self, filter_fn):\n\n # pylint: disable=W0143\n if self.filter_fn == filter_fn:\n return\n\n if isinstance(filter_fn, NumericFilter):\n # Link the text variable to the filter\n filter_fn.text_var = self.text_var\n\n def _filter_cb(event=None):\n return filter_fn(event)\n\n self.bind(\"<KeyPress>\", _filter_cb)",
"def load_filter_file(filter_file):\n df = pd.DataFrame()\n # if\n if filter_file:\n with open(filter_file,'rb') as csvfile:\n df = pd.read_csv(csvfile)\n return df",
"def test_filter_sff_file(self):\r\n\r\n try:\r\n fh = open(self.tiny_test)\r\n except IOError:\r\n self.fail(\r\n \"Could not open test file %s. Skipping test\" %\r\n self.tiny_test)\r\n\r\n # With no filters all flowgram should be in out file\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = []\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 114)\r\n\r\n # With good filters some should survive\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 100, 300)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 112)\r\n\r\n # With strong filters nothing should be in\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 0, 0)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n self.assertEqual(l, 0)",
"def pipeline(file):\n # special processing is performed to avoid sentence boundaries after abbrevs\n doc = nlp(text_processing.preprocess_text_ents(file))\n grid = get_grid(doc)\n distrib = get_distrib(grid, doc)\n return get_feats(distrib)",
"def fileFiltRecGen(filePath, filt, delim = \",\"):\n\twith open(filePath, \"r\") as fp:\n\t\tfor line in fp:\t\n\t\t\tline = line[:-1]\n\t\t\tif delim is not None:\n\t\t\t\tline = line.split(delim)\n\t\t\tif filt(line):\n\t\t\t\tyield line",
"def filter_display(self):\n filter_text_data = self.line_edit.text()\n\n # if user has not entered any string, the whole file is shown\n if not filter_text_data:\n self.display.setPlainText(''.join(self.textfile_data))\n return\n\n # otherwise only the rows with the string in them are shown\n self.display.setPlainText(''.join([\n i for i in self.textfile_data\n if filter_text_data.lower() in i.lower()\n ]))",
"def tail_filter_and_process(fname, filterfunc, chan,\n handlerfunc=default_handler, **kwargs):\n sys.stderr.write(\"Reading log: \"+str(fname)+\"\\n\")\n with open(fname, \"r\") as f:\n f.seek(0, 2) # Go to EOF\n log = None\n while True:\n try:\n chan.get(timeout=0 if log != \"\" else 1)\n break\n except Timeout:\n pass\n\n log = f.readline().strip()\n if not log or not filterfunc(log):\n continue\n # send(\"calling\", str(handlerfunc))\n handlerfunc(log, **kwargs)\n chan.put(False)",
"def TextFilter(_event=None):\n return None",
"def ascii_to_filter(filename, filter_name=None, detector=None, temperature=None, \n filter_type=None, wcol=0, tcol=None, **kwargs):\n strg = \"Reading a MiriFilter model from an ASCII file \"\n strg += \"is not longer supported.\"\n raise NotImplementedError(strg)",
"def test_filter_func(self):\n LEXER.input('filter(lambda: x == 2)')\n self.checks_tokens([\n 'FILTER', 'LPAREN', 'LAMBDA', 'COL', 'ID', 'EQ',\n 'NUMBER', 'RPAREN'])",
"def get_file_filter(cls, file_filter, filter, mask, ext, path):\n filter.value, mask.value, ext.value, path.value = gxapi_cy.WrapGUI._get_file_filter(GXContext._get_tls_geo(), file_filter, filter.value.encode(), mask.value.encode(), ext.value.encode(), path.value)",
"def filter(self, fn, filter_fields=None):\n if filter_fields is None:\n filter_fields = [('awc_name', 'awcs'), ('block', 'blocks')]\n for key, field in filter_fields:\n keys = self.filter_data.get(field, []) \n if keys and fn(key) not in keys:\n raise InvalidRow",
"def applyTextFilter(info):\n filterInstance = TextFilter(info.getTextFilter())\n tempTDoc = info.getTDocument()\n tempEDoc = info.getEDocument()\n \n for doc in tempTDoc:\n filterInstance.apply(doc)\n \n for doc in tempEDoc:\n filterInstance.apply(doc)\n \n print('Text filters applied successfully!')",
"def LoadSourceFilter(coverable_file_name):\n \n with open(coverable_file_name, \"r\") as cov_file:\n file_list = [line.strip() for line in cov_file.readlines()]\n return SourceFilter(file_list)",
"def filter(ctx: click.Context):\n vcf: Reader = vcfpy.Reader.from_path(ctx.obj[\"vcf_file\"])\n filter_settings: Dict[str, Dict] = SV_FILTER_SETTINGS[\"tiddit_tumor_normal\"]\n\n # Update VCF header\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_T_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in tumor, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_N_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in normal, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"normal_variant\"),\n (\"Description\", \"AF_T_MAX == 0 and ctg_t == False\"),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_normal_allele_frequency']['filter']}\"),\n (\n \"Description\",\n f\"AF_N_MAX > {filter_settings['max_normal_allele_frequency']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_tin_fraction']['filter']}\"),\n (\n \"Description\",\n f\"(AF_N_MAX / AF_T_MAX) > {filter_settings['max_tin_fraction']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"in_normal\"),\n (\"Description\", \"ctg_n == True and AF_N_MAX == 0 and AF_T_MAX <= 0.25\"),\n ]\n )\n )\n\n writer = vcfpy.Writer.from_path(\"/dev/stdout\", vcf.header)\n\n # Set soft filters for variants based on presence in the normal sample\n for variant in vcf:\n variant_info: dict = variant.INFO\n\n # Collect evidence of variant in tumor and normal sample\n evidence_dict: dict = get_tumor_normal_evidence(variant_info)\n allele_frequency_tumor: float = evidence_dict[\"tumor_max_af\"]\n allele_frequency_normal: float = evidence_dict[\"normal_max_af\"]\n tumor_has_contig: bool = evidence_dict[\"tumor_has_contig\"]\n normal_has_contig: bool = evidence_dict[\"normal_has_contig\"]\n\n # Add AF_MAX to info field\n variant.INFO[\"AF_T_MAX\"] = [round(allele_frequency_tumor, 4)]\n variant.INFO[\"AF_N_MAX\"] = [round(allele_frequency_normal, 4)]\n\n # Set filter statuses\n if allele_frequency_tumor == 0 and not tumor_has_contig:\n variant.add_filter(\"normal_variant\")\n writer.write_record(variant)\n continue\n\n # Regardless of CTG, set filter if AF_T / AF_N > max_tin_fraction\n normal_tumor_af_ratio = (\n float(allele_frequency_normal / allele_frequency_tumor)\n if allele_frequency_tumor > 0\n else 0\n )\n if normal_tumor_af_ratio > filter_settings[\"max_tin_fraction\"][\"value\"]:\n variant.add_filter(\"high_normal_af_fraction\")\n\n # Set filter if AF_N > 0.25\n if (\n allele_frequency_normal\n > filter_settings[\"max_normal_allele_frequency\"][\"value\"]\n ):\n variant.add_filter(\"high_normal_af\")\n\n # Set filter if CTG_N = True, AF_N is 0 and AF_T is below 0.25\n if (\n normal_has_contig\n and allele_frequency_normal == 0\n and allele_frequency_tumor <= 0.25\n ):\n variant.add_filter(\"in_normal\")\n\n writer.write_record(variant)",
"def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of lines of English/French text, creates a DataFrame with train/val/test split labels.
|
def create_nmt_data(text,train_pct=0.7,val_pct=0.15):
if train_pct + val_pct >= 1:
raise Exception("train_pct + val_pct must be < 1.0")
source = []
target = []
for line in text:
text = line.split('\t')
source.append(text[0])
target.append(text[1])
text_df = pd.DataFrame({'source_language':source,'target_language':target})
text_df['split'] = 'train'
text_df = text_df.sample(frac=1).reset_index(drop=True)
idx = int(len(text_df)*train_pct)
text_df.loc[:idx,'split'] = 'train'
idx2 = idx + int(len(text_df)*val_pct)
text_df.loc[idx:idx2,'split'] = 'val'
text_df.loc[idx2:,'split'] = 'test'
return text_df
|
[
"def ucf_read_train_test_split(self, path):\n # get the test train split txt file\n train = []\n test = []\n for (dirpath, dirnames, filenames) in os.walk(path):\n train += [os.path.join(path, file) for file in filenames if file.startswith('trainlist')]\n test += [os.path.join(path, file) for file in filenames if file.startswith('testlist')]\n train.sort()\n test.sort()\n\n # read test train data name and label from the txt file\n train_data_labels = []\n test_data_labels = []\n for tra, test in zip(train, test):\n with open(tra) as f:\n names_labels = f.readlines()\n data = [line.split(' ')[0].split('/')[-1].split('.')[0] for line in names_labels]\n label = [line.split(' ')[0].split('/')[0] for line in names_labels]\n train_data_labels.append({'data': data, 'label': label})\n with open(test) as f:\n names_labels = f.readlines()\n data = [line.split('/')[-1].split('.')[0] for line in names_labels]\n label = [line.split('/')[0] for line in names_labels]\n test_data_labels.append({'data': data, 'label': label})\n return train_data_labels, test_data_labels",
"def make_language_detector_dataset(html_filepaths, min_length=30):\n texts, language_labels, article_names = [], [], []\n for html_filepath in html_filepaths:\n language_label = wikipedia_language(html_filepath)\n article_name = html_filepath.parent.name\n article = WikipediaArticle(html_filepath.read_bytes(),\n encoding=\"utf-8\")\n text = article.get_main_text()\n for short_text in split_paragraphs(text, min_length):\n texts.append(short_text)\n language_labels.append(language_label)\n article_names.append(article_name)\n return texts, language_labels, article_names",
"def textFeature(mode):\r\n \r\n classlist = ['negative', 'positive']\r\n data = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n df1 = pd.DataFrame()\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n text = open(review, 'r', encoding='utf8').read()\r\n df = pd.DataFrame({'File': [title], 'Text': [text], 'Label': [label]}).set_index('File')\r\n df1 = df1.append(df)\r\n data = data.append(df1)\r\n \r\n return data",
"def sentences_to_dataset(group):\r\n df = pd.DataFrame(columns=['form', 'sentence_1', 'sentence_2', 'label'])\r\n sentences = group['sentences'].split('||')\r\n count = 0\r\n for i in range(len(sentences)-1):\r\n df.loc[count] = [group['form'],\r\n clean(sentences[i]),\r\n clean(sentences[i + 1]),\r\n 1]\r\n\r\n count += 1\r\n return df",
"def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df",
"def load_dataset(train_path, test_path, tokenizer):\n train_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=train_path,\n block_size=128)\n\n test_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=test_path,\n block_size=128)\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer, mlm=False,\n )\n return train_dataset, test_dataset, data_collator",
"def load_text_and_label(data_file):\n # load data from file\n\n # splite by word\n dfRaw = pd.read_csv(data_file)\n dfRec = dfRaw[['Review Text', 'Recommended IND']].dropna()\n pos_examples = dfRec[dfRec['Recommended IND'] == 1]['Review Text'].tolist()\n neg_examples = dfRec[dfRec['Recommended IND'] == 0]['Review Text'].tolist()\n\n x_text = pos_examples + neg_examples\n x_text = np.array([clean_str(sentence) for sentence in x_text])\n # generate label (y)\n pos_labels = [[0,1] for _ in pos_examples]\n neg_labels = [[1,0] for _ in neg_examples]\n y = np.array(pos_labels + neg_labels)\n return [x_text, y]",
"def predict_logits_table(self, text_list):\n if isinstance(text_list[0], Instance): # dataset\n dfres = pd.DataFrame(list(map(self.predict_on_sentence, text_list)))\n dfres.index = [x.index for x in text_list]\n dfres_logit_lists = dfres['logit']\n elif isinstance(text_list[0], str):\n text_list = pd.Series(text_list)\n dfres_logit_lists = text_list.apply(lambda x: self.predict(x)['tag_logits'])\n #dfres_logits = pd.concat({str(kk):vv for kk,vv in dfres_logits.map(pd.DataFrame).items()})\n\n dfres_logits = self.logit_list_to_table(dfres_logit_lists)\n return dfres_logits",
"def create_lm_dataset(opt, logger=None):\n # Using spacy to tokenize text\n spacy_en = spacy.load('en')\n # Add <unk> special case is due to wiki text which has raw <unk>\n spacy_en.tokenizer.add_special_case(\"<unk>\", [{ORTH: \"<unk>\"}])\n\n def tokenize(text):\n \"\"\"tokenize sentence\"\"\"\n return [item.text for item in spacy_en.tokenizer(text)]\n\n is_lower = True\n if opt.data_type == \"ptb\":\n is_lower = False\n TEXT = torchtext.data.Field(\n sequential=True,\n tokenize=tokenize,\n lower=is_lower\n )\n\n resources_dir = os.path.expanduser(opt.resources_dir)\n if opt.data_type == \"wiki3\":\n train, valid, test = torchtext.datasets.WikiText103.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"wiki2\":\n train, valid, test = torchtext.datasets.WikiText2.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"ptb\":\n train, valid, test = torchtext.datasets.PennTreebank.splits(\n text_field=TEXT,\n root=resources_dir\n )\n\n if logger:\n logger.info(f\"train token: {len(train.examples[0].text)}\")\n logger.info(f\"test token: {len(test.examples[0].text)}\")\n logger.info(f\"valid token: {len(valid.examples[0].text)}\")\n\n device = torch.device(opt.device)\n if opt.input_vector is not None:\n opt.input_vector = os.path.expanduser(opt.input_vector)\n head, tail = os.path.split(opt.input_vector)\n torchtext_vectors = torchtext.vocab.Vectors(name=tail, cache=head)\n torchtext_vectors.vectors.to(device)\n # print(f\"len: {len(torchtext_vectors.stoi)}\")\n # print(f\"size: {torchtext_vectors.vectors.size()}\")\n # Here the list of list is to simulate the real dataset\n # where first dim is sentence and second is word.\n limited_train = [[word] for word in torchtext_vectors.stoi.keys()]\n TEXT.build_vocab(limited_train, vectors=torchtext_vectors)\n else:\n TEXT.build_vocab(train)\n\n train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(\n (train, valid, test),\n batch_size=opt.batch_size,\n bptt_len=opt.bptt_len,\n device=device,\n repeat=False\n )\n return (TEXT, train_iter, test_iter, val_iter)",
"def build_dataset(path,language):\n \n language_set = []\n \n #Reading the data text files in unicode\n with codecs.open(path,\"r\",\"utf-8\") as filep:\n \n for i,line in enumerate(filep):\n \n line = preprocessing(line) #preprocessing on data\n language_set.append(line)\n \n return language_set #individual language set",
"def pre_process_df(train_data, test_data):\n train_data[\"text\"] = train_data[\"sentence1\"] + \", \" + train_data[\"sentence2\"] # noqa\n test_data[\"text\"] = test_data[\"sentence1\"] + \", \" + test_data[\"sentence2\"]\n train_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n test_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n train_data = train_data[[\"text\", \"label\"]]\n test_data = test_data[[\"text\", \"label\"]]\n simple_pre_process_text_df(train_data)\n simple_pre_process_text_df(test_data)\n return train_data, test_data",
"def _preprocess_wiki_tokens(split: str) -> lmp.dataset.LanguageModelDataset:\n if not isinstance(split, str):\n raise TypeError('`split` must be an instance of `str`.')\n\n file_path = os.path.join(f'{lmp.path.DATA_PATH}', f'wiki.{split}.tokens')\n\n if not os.path.exists(file_path):\n raise FileNotFoundError(f'file {file_path} does not exist.')\n\n with open(file_path, 'r', encoding='utf8') as input_file:\n data = input_file.read()\n\n # Split based on section pattern.\n data = re.split(r' \\n( =){1,3} .+ (= ){1,3}\\n ', data)\n data = list(filter(\n lambda sample: sample.strip()\n and not re.match(r'( =){1,3}', sample)\n and not re.match(r'(= ){1,3}', sample),\n data\n ))\n\n # Normalized by unicode NFKC.\n data = [unicodedata.normalize('NFKC', sample) for sample in data]\n\n # Convert all new lines and consecutive whitespace into single whitespace.\n data = [re.sub(r'\\s+', ' ', sample) for sample in data]\n\n # Strip leading and trailing whitespaces.\n data = [sample.strip() for sample in data]\n\n return lmp.dataset.LanguageModelDataset(batch_sequences=data)",
"def hmdb_read_train_test_split(self, path):\n # read file names according to the split number from 1 to 3\n action_splits = []\n for (dirpath, dirnames, filenames) in os.walk(path):\n for i in range(1, 4, 1):\n action_splits.append(\n sorted([os.path.join(path, f) for f in filenames if f.split('.')[0].endswith(str(i))]))\n\n # fetch the data and labels for all 3 splits\n train_data_labels = []\n test_data_labels = []\n for s in action_splits:\n train_data = []\n train_label = []\n test_data = []\n test_label = []\n for a in s:\n with open(a) as f:\n name_labels = f.readlines()\n train = [line.split(' ')[0].split('.')[0] for line in name_labels if\n line.rstrip().split(' ')[-1] == '1']\n train_data += train\n train_label += [a.split('/')[-1][:a.split('/')[-1].index('test')-1] for i in range(len(train))]\n test = [line.split(' ')[0].split('.')[0] for line in name_labels if\n line.rstrip().split(' ')[-1] == '2']\n test_data += test\n test_label += [a.split('/')[-1][:a.split('/')[-1].index('test')-1] for i in range(len(test))]\n train_data_labels.append({'data': train_data, 'label': np.array(train_label)})\n test_data_labels.append({'data': test_data, 'label': np.array(test_label)})\n return train_data_labels, test_data_labels",
"def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data",
"def create_article_dataset(record_list, dataset_dir, sess,\n validation_size=10,\n eval_every=100,\n input_feature='text',\n max_input_sequence_length=Article.max_text+2,\n target_feature='short_description',\n max_target_sequence_length=Article.max_short_description+2,\n hparams=None):\n input_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=os.path.join(dataset_dir, '{}_vocab.txt'.format(input_feature)),\n num_oov_buckets=1,\n default_value=3)\n target_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=os.path.join(dataset_dir, '{}_vocab.txt'.format(target_feature)),\n num_oov_buckets=1,\n default_value=3)\n lookup_table = tf.contrib.lookup.index_to_string_table_from_file(\n os.path.join(dataset_dir, '{}_vocab.txt'.format(target_feature)),\n default_value='<U>')\n\n input_lookup_table = tf.contrib.lookup.index_to_string_table_from_file(\n os.path.join(dataset_dir, '{}_vocab.txt'.format(input_feature)),\n default_value='<U>')\n\n tf.tables_initializer().run(session=sess)\n\n dataset = tf.data.Dataset.from_tensor_slices(record_list)\n\n def _read_npy(filename):\n parsed = np.load(filename.decode('utf-8'))\n data = parsed.item()\n i = list(map(lambda x: x.decode('utf-8'), data[input_feature].tolist()))\n i_l = data['{}_length'.format(input_feature)]\n t = list(map(lambda x: x.decode('utf-8'), data[target_feature].tolist()))\n t_l = data['{}_length'.format(target_feature)]\n return i, i_l, t, t_l\n\n def next_example(input_feature, input_sequence_length, target_feature, target_sequence_length):\n input_sequence_length.set_shape([1])\n input_feature.set_shape([max_input_sequence_length])\n target_sequence_length.set_shape([1])\n target_feature.set_shape([max_target_sequence_length])\n\n feature_input_sequences = {\n 'input_sequence_length': tf.cast([max_input_sequence_length], tf.int32),\n }\n feature_target_sequences = {\n 'target_sequence_length': tf.cast([max_target_sequence_length], tf.int32),\n }\n\n feature_target_sequences['target'] = target_table.lookup(target_feature)\n feature_input_sequences['input'] = input_table.lookup(input_feature)\n return feature_input_sequences, feature_target_sequences\n\n dataset = dataset.map(lambda filename: tf.py_func(_read_npy, [filename], [tf.string, tf.int64, tf.string, tf.int64]))\n dataset = dataset.map(next_example)\n dataset = dataset.shuffle(buffer_size=hparams.shuffle_buffer_size)\n\n def training_set(dataset):\n iterator = dataset.make_initializable_iterator()\n iterator.initializer.run(session=sess)\n def train():\n return iterator.get_next()\n return train\n\n def validation_set(dataset):\n iterator = dataset.make_initializable_iterator()\n iterator.initializer.run(session=sess)\n def validate():\n return iterator.get_next()\n return validate\n\n dataset = dataset.batch(hparams.batch_size)\n validation_dataset = dataset.take(validation_size)\n training_dataset = dataset.repeat(hparams.epochs)\n training_dataset = training_dataset.shuffle(buffer_size=hparams.shuffle_buffer_size)\n train = training_set(training_dataset)\n valid = validation_set(validation_dataset)\n input_vocab_size = input_table.size().eval(session=sess)\n target_vocab_size = target_table.size().eval(session=sess)\n\n return train, valid, (input_vocab_size, target_vocab_size), lookup_table, input_lookup_table",
"def read_dataset_from_list(self, lineLst):\n data = []\n for line in lineLst:\n if self.sos != '':\n data.append(self.sos)\n for word in line:\n word = self.replace_special_chars(word)\n _word = word\n if self.unit == \"oracle\":\n if \"+\" in word:\n # double check\n if word.startswith(\"word\") and len(word.split('+'))>1 \\\n and len(word.split('+')[0].split(\":\"))>1:\n _word = word.split('+')[0].split(\":\")[1]\n else:\n continue\n if self.unit == \"morpheme\":\n _word = re.sub(\"@@\", \"\", word)\n if not self.is_hyperlink(_word.lower()) and len(_word) <= 100:\n data.append(word)\n if self.eos != '':\n data.append(self.eos)\n return data",
"def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test",
"def create_train_validation(df, train_percentage=0.8):\n train = df.sample(frac=train_percentage).sort_index()\n validation = df.drop(train.index).sort_index()\n return train.iloc[:,0:3], validation.iloc[:,0:3], pd.DataFrame(train.iloc[:,-1]), pd.DataFrame(validation.iloc[:,-1])",
"def get_features_and_labels(df, columns, other=False, min_samples=5, min_sample_alltx=10,\n n_samples_staff=96,n_samples_dep=127, n_samples_stu=300, n_samples_cls=96):\n if(other):\n df = df.copy()\n else:\n df = df.copy()[df['class']!='other'] #Não utiliza classe outros\n \n index_to_drop = set(find_nan(df, 'all_text'))\n index_to_keep = set(df.index)-index_to_drop\n df = df.loc[list(index_to_keep)] #Retira objetos com nan em all_text\n df.sort_index(inplace=True)\n df = df.reset_index(drop=True) \n \n \n x = 100\n classes = set(df.values.T[0])-set(['staff', 'department', 'student'])\n df_train = df[df['class']=='staff'].sample(n_samples_staff, random_state=x)\n df_train = df_train.append(df[df['class']=='department'].sample(n_samples_dep, random_state=x))\n df_train = df_train.append(df[df['class']=='student'].sample(n_samples_stu, random_state=x))\n for cl in list(classes):\n df_train = df_train.append(df[df['class']==cl].sample(n_samples_cls, random_state=x))\n \n df_test = df.loc[set(df.index)-set(df_train.index)].reset_index(drop=True) #DataFrame de teste\n \n df_train = df_train.reset_index(drop=True) #DataFrame de treino\n\n X_train = pd.DataFrame()\n X_test = pd.DataFrame()\n\n for column in columns:\n if(column == 'all_text'):\n \n n_samples = df_train.shape[0]\n vectorizer = TfidfVectorizer(stop_words='english', max_df=0.9,min_df=min_sample_alltx/n_samples)\n features_column = vectorizer.fit_transform(df_train[column])\n features_column = pd.DataFrame(features_column.toarray())\n \n features_column_test = vectorizer.transform(df_test[column])\n features_column_test = pd.DataFrame(features_column_test.toarray())\n \n \n elif(column == 'h3' or column== 'h1' or column=='a'or column=='h2'or column=='title'or column=='li' or column=='hs'):\n corpus = df_train.loc[:,column].fillna('')\n corpus_test = df_test.loc[:,column].fillna('')\n\n \n n_samples = df_train.shape[0]\n vectorizer = TfidfVectorizer(stop_words='english',max_df=0.9 , min_df=min_samples/n_samples)\n features_column = vectorizer.fit_transform(corpus)\n features_column = pd.DataFrame(features_column.toarray())\n \n features_column_test = vectorizer.transform(corpus_test)\n features_column_test = pd.DataFrame(features_column_test.toarray())\n \n \n else:\n features_column = df_train[column].fillna(0)\n features_column = (features_column-features_column.min())/(features_column.max()-features_column.min())\n \n features_column_test = df_test[column].fillna(0)\n features_column_test = (features_column_test-features_column.min())/(features_column.max()-features_column.min())\n # https://pandas.pydata.org/pandas-docs/stable/merging.html\n # Documentação de como o concat funciona\n X_train = pd.concat([X_train,features_column], axis=1)\n X_test = pd.concat([X_test,features_column_test], axis=1)\n \n X_train = X_train.fillna(0)\n X_test = X_test.fillna(0)\n \n \n y_train = df_train['class']\n y_test = df_test['class']\n \n return X_train, X_test, y_train, y_test"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads a GloVe word embedding text file and generates a DataFrame with the embeddings.
|
import numpy as np
import pandas as pd

def process_glove_data(filename):
    # Collect the vocabulary and the raw embedding vectors in parallel lists.
    word_list = []
    embed_list = []
    with open(filename, encoding="utf8") as file:
        lines = file.readlines()
        for line in lines:
            # Each GloVe line is: <word> <float_1> ... <float_d>
            toks = line.split(' ')
            word_list.append(toks[0])
            vec = [float(tok) for tok in toks[1:]]
            embed_list.append(vec)
    # Stack the vectors into a matrix and index the rows by the lower-cased words.
    embed = np.array(embed_list, dtype=float)
    embed_df = pd.DataFrame(embed, index=word_list)
    embed_df.index = embed_df.index.str.lower()
    return embed_df
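A minimal usage sketch of the function above (the file name glove.6B.50d.txt is an assumed local path, not part of the record): load the embeddings and look up a single word vector.

embed_df = process_glove_data("glove.6B.50d.txt")  # assumed local GloVe file
print(embed_df.shape)        # (vocab_size, 50) for the 50-dimensional file
print(embed_df.loc["king"])  # the 50 embedding values for one word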
|
[
"def read_glove_source(self):\n embeddings = []\n word2vec = {}\n idx2word = []\n with open(self.source) as file:\n lines = file.readlines()\n for line in lines:\n data = line.split()\n word = data[0]\n vector = np.asarray(data[1:], dtype='float32')\n embeddings.append(vector)\n idx2word.append(word)\n word2vec[word] = vector\n return embeddings, word2vec, idx2word",
"def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict",
"def read_glove_embeddings(self,glove_filename):\n print('Reading embeddings from %s ...'%glove_filename)\n\n embedding_matrix = (rand(self.lexicon_size,self.embedding_size) - 0.5)/10.0 #uniform init [-0.05,0.05]\n\n istream = open(glove_filename)\n for line in istream:\n values = line.split()\n word = values[0]\n widx = self.word_codes.get(word)\n if widx != None:\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_matrix[widx] = coefs\n istream.close()\n print('done.')\n\n return embedding_matrix",
"def load_text(file: Union[str, bytes, int, PathLike]) -> Embeddings:\n words = []\n vecs = []\n with open(file) as inf:\n for line in inf:\n line = line.strip().split()\n words.append(line[0])\n vecs.append(line[1:])\n matrix = np.array(vecs, dtype=np.float32)\n norms = np.linalg.norm(matrix, axis=1)\n matrix /= np.expand_dims(norms, axis=1)\n return Embeddings(storage=NdArray(matrix),\n norms=Norms(norms),\n vocab=SimpleVocab(words))",
"def load_glove_embeddings():\n #if you are running on the CSE machines, you can load the glove data from here\n #data = open(\"/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n\n word_index_dict = {}\n word_index_dict['UNK'] = 0\n embeddings = np.ndarray(shape=(500001, batch_size), dtype='float32')\n embeddings_list = []\n i = 1\n for line in data:\n load_array = line.split()\n # Sets the word to the 0th value in array\n word = load_array[0] \n # Other values are the assigned index\n values = np.asarray(load_array[1:], dtype='float32')\n # Put values in row of array\n embeddings[i] = values\n # E.g. word_index_dict[\"the\"] = 0\n word_index_dict[word] = i\n i = i+1\n data.close()\n return embeddings, word_index_dict",
"def _load_embedding_from_txt(self, file_path, vocab):\n word_to_emb = {}\n with codecs.open(file_path, encoding='utf-8') as f:\n for line in f:\n first_line = line.split('\\t')\n break\n emb_dim = len(first_line[1].split(' '))\n with codecs.open(file_path, encoding='utf-8') as f:\n for line in f:\n split = line.split('\\t')\n word = split[0]\n try:\n embedding = list(map(float, split[1].split(' ')))\n except:\n continue\n word_to_emb[word] = embedding\n return self._load_embedding(vocab,word_to_emb,emb_dim)",
"def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n #if you are running on the CSE machines, you can load the glove data from here\n #data = open(\"/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = list()\n word_index_dict = dict()\n idx = 0\n for words in data:\n wordList = words.split()\n word_index_dict[wordList[0]] = idx\n word_embedding_vector = list()\n for word in wordList[1:]:\n word_embedding_vector.append(np.float32(word)) \n embeddings.append(word_embedding_vector)\n idx += 1\n \n word_index_dict['UNK'] = idx\n embeddings.append([np.float32(0)]*len(embeddings[0]))\n #transfer the list to array\n embeddings = np.asarray(embeddings)\n return embeddings, word_index_dict",
"def load_textdims(file: Union[str, bytes, int, PathLike]) -> Embeddings:\n words = []\n with open(file) as inf:\n rows, cols = next(inf).split()\n matrix = np.zeros((int(rows), int(cols)), dtype=np.float32)\n for i, line in enumerate(inf):\n line = line.strip().split()\n words.append(line[0])\n matrix[i] = line[1:]\n norms = np.linalg.norm(matrix, axis=1)\n matrix /= np.expand_dims(norms, axis=1)\n return Embeddings(storage=NdArray(matrix),\n norms=Norms(norms),\n vocab=SimpleVocab(words))",
"def load_embed_txt(embed_file):\n emb_dict = dict()\n emb_size = None\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(embed_file, 'rb')) as f:\n for line in f:\n tokens = line.strip().split(\" \")\n word = tokens[0]\n vec = list(map(float, tokens[1:]))\n emb_dict[word] = vec\n if emb_size:\n assert emb_size == len(vec), \"All embedding size should be same.\"\n else:\n emb_size = len(vec)\n return emb_dict, emb_size",
"def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()",
"def gen_embedding(path):\r\n word_emb = {}\r\n with open(path, encoding='utf-8') as f:\r\n for line in tqdm(f):\r\n values = line.split()\r\n word_emb[values[0]] = np.asarray(values[1:], dtype='float32')\r\n return word_emb",
"def loadEmbedding(filename):\n \n if filename == \"glove\":\n embeddingFile = \"C:/Users/A/Desktop/FYP/Embeddings/glove.840B.300d.txt\"\n elif filename == \"fasttext\":\n embeddingFile = \"C:/Users/A/Desktop/FYP/Embeddings/crawl-300d-2M.vec\"\n elif filename == \"word2vec\":\n word2vecDict = word2vec.KeyedVectors.load_word2vec_format(\"C:/Users/A/Desktop/FYP/Embeddings/GoogleNews-vectors-negative300.bin\", binary=True)\n \n embeddings_index = {}\n if filename == \"glove\" or filename == \"fasttext\":\n with open(embeddingFile, encoding = \"utf-8\") as f:\n for line in f:\n values = line.rstrip().rsplit(\" \")\n word = values[0]\n # dtype : data-type, optional. By default, the data-type is inferred from the input data.\n coefs = np.asarray(values[1:], dtype = \"float32\")\n embeddings_index[word] = coefs\n \n else:\n for word in word2vecDict.wv.vocab:\n embeddings_index[word] = word2vecDict.word_vec(word)\n \n print (\"Total word vectors = \", len(embeddings_index)) \n return embeddings_index",
"def load_or_create_dataset_word2vec(filename, text_samples, vocabulary_size=VOCABULARY_SIZE):\n filename_vocabulary = '{}_{}'.format(filename, vocabulary_size)\n filename_dict = '{}_dict'.format(filename_vocabulary)\n filename_count = '{}_count'.format(filename_vocabulary)\n filename_tsv = '{}.tsv'.format(filename_vocabulary)\n if not os.path.exists(os.path.join(DIR_DATA_WORD2VEC, filename_vocabulary)):\n text_lines = []\n for text_sample in text_samples:\n sentences = re.split('\\n|\\s\\.\\s', text_sample.lower())\n for sentence in sentences:\n words = sentence.split()\n if len(words) > 0:\n words.append('.')\n words = list([word.strip().lower() for word in words])\n text_lines.append(words)\n symbols_count = group_count(text_lines)\n symbols_ordered_by_count = sorted(symbols_count.items(), key=lambda x: x[1], reverse=True)\n total_symbols = len(symbols_ordered_by_count)\n print('Total symbols: {}'.format(total_symbols))\n print('Vocabulary size: {}'.format(vocabulary_size))\n unknown_symbols = symbols_ordered_by_count[vocabulary_size - 1:]\n known_symbols = symbols_ordered_by_count[:vocabulary_size - 1]\n symbols_dict = { }\n for symbol, _ in unknown_symbols:\n symbols_dict[symbol] = 0\n counter = 1\n for symbol, _ in known_symbols:\n symbols_dict[symbol] = counter\n counter += 1\n encoded_text = []\n\n words_count = 0\n for sentence in text_lines:\n words_count += len(sentence)\n encoded_sentence = []\n for word in sentence:\n encoded_sentence.append(symbols_dict[word])\n if len(encoded_sentence) > 0:\n encoded_text.append(encoded_sentence)\n print('Total sentences: {}'.format(len(text_lines)))\n print('Total words: {}'.format(words_count))\n print('words/sentences: {}'.format(float(words_count) / float(len(text_lines))))\n\n with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_dict), 'w', encoding='utf8') as f:\n for symbol in sorted(symbols_dict.keys()):\n f.write(u'{} {}\\n'.format(symbol, symbols_dict[symbol]))\n with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_vocabulary), 'w',\n encoding='utf8') as f:\n for sentence in encoded_text:\n f.write(u' '.join(str(word) for word in sentence))\n f.write(u'\\n')\n with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_count), 'w', encoding='utf8') as f:\n for symbol, count in symbols_ordered_by_count:\n f.write(u'{} = {}\\n'.format(symbol, count))\n with io.open(os.path.join(DIR_DATA_WORD2VEC, filename_tsv), 'w', encoding='utf8') as f:\n f.write(u'word\\tcount\\tid\\n')\n f.write(u'_UNKOWN_\\t{}\\t0\\n'.format(len(unknown_symbols)))\n pos = 1\n for symbol, count in known_symbols:\n f.write(u'{}\\t{}\\t{}\\n'.format(symbol, count, pos))\n pos += 1\n\n return load_word2vec_data(filename)",
"def load_embeddings(filename):\n count = 0\n matrix = []\n word_map = {}\n with open(filename, encoding=\"utf8\") as f:\n # with open(filename) as f:\n for line in f:\n line = line.strip()\n items = line.split()\n word = items[0]\n rest = items[1:]\n # print(\"word:\", word)\n word_map[word] = count\n count += 1\n\n rest = list(map(float, rest))\n matrix.append(rest)\n matrix = np.array(matrix)\n return word_map, matrix",
"def load_embedding(self):\n vocab = []\n embedding = []\n with codecs.open(FLAGS.embedding_path,'r',encoding='utf8') as file:\n for line in file.readlines():\n row = line.strip().split(' ')\n vocab.append(row[0])\n embedding.append([ float(i) for i in row[1:]])\n self._embedding = embedding\n self._embedding_dimension = len(embedding[0])\n self._word2id = {'<UNK>':seq_match_seq.UNK_ID}\n for v in vocab:\n self._word2id[v] = len(self._word2id)\n self._vocab = self._word2id.keys()",
"def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim",
"def load_glove_vectors():\n\n glove_home = '../assets/glove/'\n src_filename = os.path.join(glove_home, 'glove.6B.50d.txt')\n reader = csv.reader(open(src_filename), delimiter=' ', quoting=csv.QUOTE_NONE) \n \n word_to_index = {}\n embedding_mat = []\n\n counter = 0\n for line in reader:\n word_to_index[line[0]] = counter\n vec = np.array(list(map(float, line[1: ])))\n embedding_mat.append(vec)\n counter += 1\n\n return word_to_index, embedding_mat",
"def load_word2vec(file: Union[str, bytes, int, PathLike]) -> Embeddings:\n words = []\n with open(file, 'rb') as inf:\n rows, cols = map(int, inf.readline().decode(\"utf-8\").split())\n matrix = np.zeros((rows, cols), dtype=np.float32)\n for row in range(rows):\n word = []\n while True:\n byte = inf.read(1)\n if byte == b' ':\n break\n if byte == b'':\n raise EOFError\n if byte != b'\\n':\n word.append(byte)\n word = b''.join(word).decode('utf-8')\n words.append(word)\n vec = inf.read(cols * matrix.itemsize)\n matrix[row] = np.frombuffer(vec, dtype=np.float32)\n norms = np.linalg.norm(matrix, axis=1)\n matrix /= np.expand_dims(norms, axis=1)\n return Embeddings(storage=NdArray(matrix),\n norms=Norms(norms),\n vocab=SimpleVocab(words))",
"def load_embed_text(embed_file):\n \n emb_dict = dict()\n emb_size = None\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(embed_file, \"rb\")) as f:\n for line in f:\n tokens = line.strip().split(\" \")\n word = tokens[0]\n vec = list(map(float, tokens[1:]))\n emb_dict[word] = vec\n if emb_size:\n assert emb_size == len(vec), \"All embeddings should be same size\"\n else:\n emb_size = len(vec)\n return emb_dict, emb_size"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date.
|
def earn_dividends(self, cash_dividends, stock_dividends):
for cash_dividend in cash_dividends:
self._dirty_stats = True # only mark dirty if we pay a dividend
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
div_owed = self.positions[cash_dividend.instrument].earn_dividend(
cash_dividend,
)
try:
self._unpaid_dividends[cash_dividend.pay_date].append(div_owed)
except KeyError:
self._unpaid_dividends[cash_dividend.pay_date] = [div_owed]
for stock_dividend in stock_dividends:
self._dirty_stats = True # only mark dirty if we pay a dividend
div_owed = self.positions[
stock_dividend.instrument
].earn_stock_dividend(stock_dividend)
try:
self._unpaid_stock_dividends[stock_dividend.pay_date].append(
div_owed,
)
except KeyError:
self._unpaid_stock_dividends[stock_dividend.pay_date] = [
div_owed,
]
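The try/except blocks above simply append each owed payment to a per-pay-date list, creating the list on first use. Below is a standalone sketch of that bookkeeping pattern, with plain dicts standing in for the dividend and position objects (both are assumptions for illustration only).

from collections import defaultdict

unpaid = defaultdict(list)  # pay_date -> list of owed payments

# Hypothetical cash dividends whose ex_date is the next trading day.
dividends = [
    {"instrument": "AAPL", "pay_date": "2024-02-15", "amount": 24.0},
    {"instrument": "MSFT", "pay_date": "2024-02-15", "amount": 30.0},
]

for div in dividends:
    # In the real ledger, Position.earn_dividend() computes the owed amount;
    # here it is read directly from the hypothetical record.
    unpaid[div["pay_date"]].append({"amount": div["amount"]})

print(unpaid["2024-02-15"])  # both payments queued under the same pay date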
|
[
"def _calculate_next_dividend(self, symbols):\n yahoo_financials = YahooFinancials(symbols)\n logging.debug(\"[_calculate_next_dividend] Fetching get_exdividend_date\")\n data = yahoo_financials.get_exdividend_date()\n logging.debug(\"[_calculate_next_dividend] Finished fetching get_exdividend_date\")\n next_div_dates = {}\n for symbol in symbols:\n dividends = self._next_div(data, symbol)\n next_div_dates[symbol] = dividends\n\n return next_div_dates",
"def pay_dividends(self, next_trading_day):\n net_cash_payment = 0.0\n\n try:\n payments = self._unpaid_dividends[next_trading_day]\n # Mark these dividends as paid by dropping them from our unpaid\n del self._unpaid_dividends[next_trading_day]\n except KeyError:\n payments = []\n\n # representing the fact that we're required to reimburse the owner of\n # the stock for any dividends paid while borrowing.\n for payment in payments:\n net_cash_payment += payment['amount']\n\n # Add stock for any stock dividends paid. Again, the values here may\n # be negative in the case of short positions.\n try:\n stock_payments = self._unpaid_stock_dividends[next_trading_day]\n except KeyError:\n stock_payments = []\n\n for stock_payment in stock_payments:\n payment_instrument = stock_payment['payment_instrument']\n share_count = stock_payment['share_count']\n # note we create a Position for stock dividend if we don't\n # already own the instrument\n if payment_instrument in self.positions:\n position = self.positions[payment_instrument]\n else:\n position = self.positions[payment_instrument] = Position(\n payment_instrument,\n )\n\n position.amount += share_count\n\n return net_cash_payment",
"def demo_dividends(self):\n dividends = pd.DataFrame(index=range(10))\n dividends['symbol'] = ['MSFT', 'AAPL', 'CVX', 'XOM', 'BND', 'CAT', 'BA', 'TIF', 'BAC', 'JPM']\n dividends['position'] = 100\n dividends['amount'] = [20, 30, 40, 50, 60, 70, 80, 90, 100, 110]\n dividends['rate'] = dividends['amount'] / dividends['position']\n dividends['paid_at'] = pd.Timestamp('2019-01-15', tz='UTC')\n dividends['payable_date'] = pd.Timestamp('2019-01-02', tz='UTC')\n return dividends",
"def list_dividends(\n self,\n ticker: Optional[str] = None,\n ticker_lt: Optional[str] = None,\n ticker_lte: Optional[str] = None,\n ticker_gt: Optional[str] = None,\n ticker_gte: Optional[str] = None,\n ex_dividend_date: Optional[Union[str, date]] = None,\n ex_dividend_date_lt: Optional[Union[str, date]] = None,\n ex_dividend_date_lte: Optional[Union[str, date]] = None,\n ex_dividend_date_gt: Optional[Union[str, date]] = None,\n ex_dividend_date_gte: Optional[Union[str, date]] = None,\n record_date: Optional[Union[str, date]] = None,\n record_date_lt: Optional[Union[str, date]] = None,\n record_date_lte: Optional[Union[str, date]] = None,\n record_date_gt: Optional[Union[str, date]] = None,\n record_date_gte: Optional[Union[str, date]] = None,\n declaration_date: Optional[Union[str, date]] = None,\n declaration_date_lt: Optional[Union[str, date]] = None,\n declaration_date_lte: Optional[Union[str, date]] = None,\n declaration_date_gt: Optional[Union[str, date]] = None,\n declaration_date_gte: Optional[Union[str, date]] = None,\n pay_date: Optional[Union[str, date]] = None,\n pay_date_lt: Optional[Union[str, date]] = None,\n pay_date_lte: Optional[Union[str, date]] = None,\n pay_date_gt: Optional[Union[str, date]] = None,\n pay_date_gte: Optional[Union[str, date]] = None,\n frequency: Optional[Union[int, Frequency]] = None,\n cash_amount: Optional[float] = None,\n cash_amount_lt: Optional[float] = None,\n cash_amount_lte: Optional[float] = None,\n cash_amount_gt: Optional[float] = None,\n cash_amount_gte: Optional[float] = None,\n dividend_type: Optional[Union[str, DividendType]] = None,\n limit: Optional[int] = None,\n sort: Optional[Union[str, Sort]] = None,\n order: Optional[Union[str, Order]] = None,\n params: Optional[Dict[str, Any]] = None,\n raw: bool = False,\n options: Optional[RequestOptionBuilder] = None,\n ) -> Union[Iterator[Dividend], HTTPResponse]:\n url = \"/v3/reference/dividends\"\n\n return self._paginate(\n path=url,\n params=self._get_params(self.list_dividends, locals()),\n raw=raw,\n deserializer=Dividend.from_dict,\n options=options,\n )",
"def get_stock_dividends(self, sid, trading_days):\n\n if self._adjustment_reader is None:\n return []\n\n if len(trading_days) == 0:\n return []\n\n start_dt = trading_days[0].value / 1e9\n end_dt = trading_days[-1].value / 1e9\n\n dividends = self._adjustment_reader.conn.execute(\n \"SELECT declared_date, ex_date, pay_date, payment_sid, ratio, \"\n \"record_date, sid FROM stock_dividend_payouts \"\n \"WHERE sid = ? AND ex_date > ? AND pay_date < ?\",\n (int(sid), start_dt, end_dt,)\n ).fetchall()\n\n dividend_info = []\n for dividend_tuple in dividends:\n dividend_info.append({\n \"declared_date\": pd.Timestamp(dividend_tuple[0], unit=\"s\"),\n \"ex_date\": pd.Timestamp(dividend_tuple[1], unit=\"s\"),\n \"pay_date\": pd.Timestamp(dividend_tuple[2], unit=\"s\"),\n \"payment_sid\": dividend_tuple[3],\n \"ratio\": dividend_tuple[4],\n \"record_date\": pd.Timestamp(dividend_tuple[5], unit=\"s\"),\n \"sid\": dividend_tuple[6],\n })\n\n return dividend_info",
"def calculate_due_payments(self):\n\n # variables are renamed to make the math more explicit\n p = self.principal\n d = self.daily_interest_rate\n k = len(self.return_days)\n\n return [\n b * ((1 + d) ** (n - m) - 1) + p / k\n for b, n, m in zip(\n self.balance[:-1],\n self.return_days, [0] + self.return_days[:-1]\n )\n ]",
"def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")",
"def _dividend_profit(self, dividends, prices):\n dividend_profit = 0\n for i in range(len(dividends)):\n dividend = dividends[i]\n price = prices[i]\n dividend_profit += price * dividend / 100\n return dividend_profit",
"def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)",
"def add_model_return(self):\n\n\t\t# start with $10k\n\t\tSTART_CASH = float(10000)\n\n\t\t# preallocate numpy arrays. index 1 corresponds to day 0.\n\t\tnumel = len(self.symbol_seq)\n\t\tself.invested_value = np.zeros(numel+1)\n\t\tself.invested_shares = np.zeros(numel+1)\n\t\tself.dividend_value = np.zeros(numel+1)\n\t\tself.cash_after_sell = np.zeros(numel+1)\n\t\tself.cash_before_buy_after_div = np.zeros(numel+1)\n\t\tself.cash_after_buy_and_div = np.zeros(numel+1)\n\n\t\tself.cash_after_buy_and_div[0] = START_CASH\n\n\t\t# loop through days seems easiest for now ...\n\t\tfor i in range(len(self.symbol_seq)):\n\n\t\t\t# get today's dividend\n\t\t\tself.dividend_value[i+1] = (self.invested_shares[i] * \n\t\t\t\tself.stock_objs[self.symbol_seq[i]].dividend[i])\n\n\t\t\tself.cash_after_sell[i+1] = self.cash_after_buy_and_div[i]\n\n\t\t\t# get and add sell value if sell is signaled\n\t\t\tif i > 0:\n\t\t\t\tsell_signal = not (self.symbol_seq[i-1] == self.symbol_seq[i])\n\t\t\telse:\n\t\t\t\tsell_signal = False\n\n\t\t\tif sell_signal:\n\t\t\t\tsell_value = (self.invested_shares[i] * \n\t\t\t\t\tself.stock_objs[self.symbol_seq[i-1]].open_value[i])\n\t\t\telse:\n\t\t\t\tsell_value = 0\n\n\t\t\tself.cash_after_sell[i+1] += sell_value\n\t\t\t\n\t\t\tself.cash_before_buy_after_div[i+1] = (self.cash_after_sell[i+1] + \n\t\t\t\tself.dividend_value[i+1])\n\n\t\t\tcurrent_value = self.stock_objs[self.symbol_seq[i]].open_value[i]\n\t\t\tshares_to_buy = math.floor(\n\t\t\t\tself.cash_before_buy_after_div[i+1] / current_value)\n\n\t\t\tself.invested_shares[i+1] = self.invested_shares[i] + shares_to_buy\n\t\t\tself.invested_value[i+1] = self.invested_shares[i+1] * current_value\n\n\t\t\t# adjust remaining cash\n\t\t\tself.cash_after_buy_and_div[i+1] = (self.cash_before_buy_after_div[i+1] - \n\t\t\t\t(shares_to_buy * current_value))\n\n\t\tself.end_of_day_value = self.cash_after_buy_and_div + self.invested_value",
"def payOnlyInterests(self, only_percent_pairs, last_date):\n current_dept = 0\n for date in self.report_dates.values():\n pay_interest_date = lastDayNextMonth(date)\n if pay_interest_date > last_date: # exit , next payments will be with payInterestsWithPrincipal\n return\n\n for debt_date, value in only_percent_pairs:\n if date == debt_date:\n current_dept += value # calculate total dept at current date\n\n debt_interest = current_dept * self.debt_rate / 12 # calculate monthly interest\n self.debt_percents[pay_interest_date] += debt_interest # increasing debt_percents [date] += percent payments for cur date\n self.debt_rest_payments_principal[date] += current_dept",
"def depletion_calc(inidep, swhc, df):\n\n # dates = df.index.to_list()\n # deficit = df['deficit'].to_list()\n et = df['eta']\n precip = df['precip']\n\n depletion = []\n # recharge/runoff i.e. depletion greater than zero\n rr = []\n depletion_condition = None\n for i, e, p in zip(range(len(et)), et, precip):\n\n # todays depletion\n d = e - p\n\n if i == 0:\n # the initial depletion\n day_one_depletion = inidep + d\n depletion.append(day_one_depletion)\n depletion_condition = day_one_depletion\n rr.append(0)\n else:\n depletion_condition += d\n if depletion_condition <= 0:\n # recharge is any negative depletion\n rr.append(abs(depletion_condition))\n # the depletion is capped at zero since the positive depletion goes to rr\n depletion_condition = 0\n else:\n rr.append(0)\n # the depletion cannot get smaller than the theoretical SWHC\n if depletion_condition >= swhc:\n depletion_condition = swhc\n depletion.append(depletion_condition)\n\n return depletion, rr\n\n # === old mistaken version ===\n # dep_list = []\n # temp = None\n # for i, d in enumerate(deficit):\n # if i == 0:\n # # the initial depletion\n # day_one_depletion = inidep + d\n # temp = day_one_depletion\n # dep_list.append(temp)\n # else:\n # running_dep = temp + d\n # # a negative depletion is runoff or recharge\n # if running_dep <= 0:\n # running_dep = 0.0\n # # if greater than swhc, we'll just assume the forest died?\n # if running_dep >= swhc:\n # running_dep = swhc\n # dep_list.append(running_dep)\n # temp = running_dep\n #\n # return dep_list",
"def __handle_dividend(self, resp):\n\n self.fxn = self.getfxn()\n\n try:\n r = resp['resp'].json()\n\n\n except Exception:\n self.error('Dividend response was invalid (error on resp.json()) - No effect')\n return None\n\n\n else:\n\n try:\n raw = r['chart']['result'][0]['events']['dividends'] # ['splits']\n\n\n except Exception:\n\n # self.error('Dividend extraction failed (error in parsing resp.json()) - No effect')\n return None\n\n\n else:\n\n\n keys = ['dateu', 'datec', 'amount']\n\n values = [\n np.array([raw[x]['date'] for x in sorted(raw)]),\n self.__h_create_datec([raw[x]['date'] for x in sorted(raw)]),\n np.array([raw[x]['amount'] for x in sorted(raw)])\n ]\n\n to_return = pd.DataFrame(dict(zip(keys,values)))\n to_return = to_return.sort_values(by=['dateu'],ascending=True)\n\n\n return to_return",
"def revision_to_cashflows(rev, end_date):\n end_date = rev.end_date or end_date\n if not end_date:\n end_date = next_month(date.today(),1)\n\n start_date = rev.start_date\n\n delta_r = (end_date - start_date).days \n\n revision_date_list = [start_date + timedelta(days=x) for x in range(0, delta_r+1)]\n\n result = []\n\n month_year_list = [(d.year, d.month) for d in revision_date_list]\n\n #generate-list-of-tuples (year,month, days-in-month, full-month) from-list-of-dates\n\n year_month_ndays_full = [(k[0],k[1],v , True if monthrange(k[0], k[1])[1] == v else False) for k,v in Counter(month_year_list).iteritems()]\n\n\n for d in year_month_ndays_full:\n\n date_info = date(d[0],d[1],1)\n #if full month\n if d[3] == True:\n result.append(Cashflow(date_info, -float(rev.rent), _('RENT '),rev.tag ))\n if rev.provision != 0:\n result.append(Cashflow(date_info, -float(rev.provision), _('PROVISION '),rev.tag ))\n\n #if partial month, divide by days of month rented\n else:\n #generate start date of rent/provision\n # if start_date != date_info and start_date.month == date_info.month:\n # date_info = start_date\n #\n\t\t\t#Cashflow = namedtuple('Cashflow', ['date', 'amount', 'description', 'tag'])\n \n daysinmonth = monthrange(date_info.year, date_info.month)[1]\n rented_days = d[2]\n #partialrent = round((rev.rent/daysinmonth*rented_days),2)\n partialrent = rev.rent/daysinmonth*rented_days\n\n result.append(Cashflow(date_info, -round(partialrent,2),\\\n ungettext(\"RENT for %(day)s day in partial month\",\\\n \"RENT for %(day)s days in in partial month\", rented_days) % {'day': str(rented_days)},rev.tag))\n \n if rev.provision != 0:\n #partialprovision = round((rev.provision/daysinmonth*rented_days),2)\n partialprovision = rev.provision/daysinmonth*rented_days\n\n result.append(Cashflow(date_info, -round(partialprovision,2), \\\n ungettext(\"PROVISION for %(day)s day in partial month\",\\\n \"PROVISION for %(day)s days in in partial month\", rented_days) % {'day': str(rented_days)},rev.tag))\n return result",
"def _expiration_days_2_expiry(self):\n # TODO: This is an approximation that assumes there is only one day between expiration date and last day of\n # contract\n # exp_cols = [col for col in self.raw_tsm_df.columns if 'exp' in col]\n # expiries = self.raw_tsm_df[exp_cols].fillna(0).astype(int).apply(pd.to_datetime,\n # format='%Y%m%d',\n # errors='coerce')\n # expiry_dates = expiries.subtract(self.raw_tsm_df.index, axis=0)\n # # Dates in TSM are last trading day so add one day for expiration\n # expiry_dates = expiry_dates + pd.Timedelta(days=1)\n # expiry_days = pd.concat([expiry_dates[cols].dt.days for cols in expiry_dates.columns], axis=1)\n\n exp_cols = [col for col in self.raw_tsm_df.columns if 'exp' in col]\n expiries = self.raw_tsm_df[exp_cols].fillna(0).astype(int).apply(pd.to_datetime,\n format='%Y%m%d',\n errors='coerce')\n # Dates in TSM are last trading day so add one day for expiration\n expiry_list = [expiries[cols].add(pd.Timedelta(days=1)) for cols in expiries.columns]\n num_bus_days = [np.busday_count(item.index.values.astype('<M8[D]'), item.values.astype('<M8[D]')) for item in\n expiry_list[:-1]]\n num_bus_days = pd.DataFrame(index=expiries.index, data=np.transpose(num_bus_days), columns=expiries.columns[:-1])\n return num_bus_days",
"def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio",
"def fill_prices_using_dates(ls_ls_prices, ls_ls_dates, ls_master_dates):\n dict_corrections = {}\n dict_errors = []\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n for day_ind, price in enumerate(ls_prices):\n if price != price:\n relative_day = 0\n while (day_ind + relative_day < len(ls_master_dates)-1) and\\\n (ls_ls_prices[indiv_ind][day_ind + relative_day] !=\\\n ls_ls_prices[indiv_ind][day_ind + relative_day]):\n relative_day += 1\n next_valid_date = ls_ls_dates[indiv_ind][day_ind + relative_day]\n # if next_valid_date is not None (end of series full of None)\n if next_valid_date and next_valid_date != '--':\n try:\n # could have bad info in date (check with regex?)\n next_valid_date_int = int(u'20%s%s%s' %(next_valid_date[6:],\n next_valid_date[3:5],\n next_valid_date[:2]))\n # next date must be the same or anterior to the current date\n if next_valid_date_int <= int(ls_master_dates[day_ind]):\n ls_ls_prices[indiv_ind][day_ind] = ls_ls_prices[indiv_ind][day_ind + relative_day]\n dict_corrections.setdefault(indiv_ind, []).append(day_ind)\n except:\n dict_errors.setdefault(indiv_ind, []).append(day_ind)\n return (ls_ls_prices, dict_corrections, dict_errors)",
"def enterprise_value(income_statement, cashflow_statement, balance_statement, period, discount_rate, earnings_growth_rate, cap_ex_growth_rate, perpetual_growth_rate):\n # XXX: statements are returned as historical list, 0 most recent\n if income_statement[0]['EBIT']:\n ebit = float(income_statement[0]['EBIT'])\n else:\n ebit = float(input(f\"EBIT missing. Enter EBIT on {income_statement[0]['date']} or skip: \"))\n tax_rate = float(income_statement[0]['Income Tax Expense']) / \\\n float(income_statement[0]['Earnings before Tax'])\n non_cash_charges = float(cashflow_statement[0]['Depreciation & Amortization'])\n cwc = (float(balance_statement[0]['Total assets']) - float(balance_statement[0]['Total non-current assets'])) - \\\n (float(balance_statement[1]['Total assets']) - float(balance_statement[1]['Total non-current assets']))\n cap_ex = float(cashflow_statement[0]['Capital Expenditure'])\n discount = discount_rate\n\n flows = []\n\n # Now let's iterate through years to calculate FCF, starting with most recent year\n print('Forecasting flows for {} years out, starting at {}.'.format(period, income_statement[0]['date']),\n ('\\n DFCF | EBIT | D&A | CWC | CAP_EX | '))\n for yr in range(1, period+1): \n\n # increment each value by growth rate\n ebit = ebit * (1 + (yr * earnings_growth_rate))\n non_cash_charges = non_cash_charges * (1 + (yr * earnings_growth_rate))\n cwc = cwc * 0.7 # TODO: evaluate this cwc rate? 0.1 annually?\n cap_ex = cap_ex * (1 + (yr * cap_ex_growth_rate)) \n\n # discount by WACC\n flow = ulFCF(ebit, tax_rate, non_cash_charges, cwc, cap_ex)\n PV_flow = flow/((1 + discount)**yr)\n flows.append(PV_flow)\n\n print(str(int(income_statement[0]['date'][0:4]) + yr) + ' ',\n '%.2E' % Decimal(PV_flow) + ' | ',\n '%.2E' % Decimal(ebit) + ' | ',\n '%.2E' % Decimal(non_cash_charges) + ' | ',\n '%.2E' % Decimal(cwc) + ' | ',\n '%.2E' % Decimal(cap_ex) + ' | ')\n\n NPV_FCF = sum(flows)\n \n # now calculate terminal value using perpetual growth rate\n final_cashflow = flows[-1] * (1 + perpetual_growth_rate)\n TV = final_cashflow/(discount - perpetual_growth_rate)\n NPV_TV = TV/(1+discount)**(1+period)\n\n return NPV_TV+NPV_FCF",
"def get_returns(self, start_date=None, end_date=None, stocks=None):\n if stocks is None:\n stocks = self.stocks\n\n if start_date is None:\n start_date = self.dates[0]\n\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n dates_to_check = self.dates[self.dates.index(start_date): self.dates.index(end_date) + 1]\n\n stock_money = []\n\n for date in dates_to_check:\n stock_money += [self.get_day_returns(stocks, date)]\n\n stock_money = pd.DataFrame({\"stock value\": stock_money}).set_index([self.dates])\n\n return_info = join_features(stock_money, self.cash)\n return_info['value'] = return_info['cash'] + return_info['stock value']\n\n return return_info"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends.
|
def pay_dividends(self, next_trading_day):
net_cash_payment = 0.0
try:
payments = self._unpaid_dividends[next_trading_day]
        # Mark these dividends as paid by dropping them from our unpaid dividends.
del self._unpaid_dividends[next_trading_day]
except KeyError:
payments = []
    # Payments may be negative (in the case of short positions), representing
    # the fact that we're required to reimburse the owner of the stock for any
    # dividends paid while borrowing.
for payment in payments:
net_cash_payment += payment['amount']
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
stock_payments = self._unpaid_stock_dividends[next_trading_day]
except KeyError:
stock_payments = []
for stock_payment in stock_payments:
payment_instrument = stock_payment['payment_instrument']
share_count = stock_payment['share_count']
# note we create a Position for stock dividend if we don't
# already own the instrument
if payment_instrument in self.positions:
position = self.positions[payment_instrument]
else:
position = self.positions[payment_instrument] = Position(
payment_instrument,
)
position.amount += share_count
return net_cash_payment
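Taken together with earn_dividends above, the flow is: queue owed payments under their pay_date on the ex-date, then drain that bucket on the pay date. A compressed, self-contained sketch of that cycle (dates and amounts are illustrative assumptions):

unpaid = {}  # pay_date -> [{'amount': ...}, ...]

# Ex-date: earn_dividends() queues the owed cash under its pay_date.
unpaid.setdefault("2024-02-15", []).append({"amount": 24.0})

# Pay date: pay_dividends() pops that bucket and sums the amounts into a
# single net cash adjustment (which may be negative for short positions).
payments = unpaid.pop("2024-02-15", [])
net_cash_payment = sum(p["amount"] for p in payments)
print(net_cash_payment)  # 24.0 credited to portfolio cash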
|
[
"def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow",
"def get_cash(self):\n\n\t\tpass",
"def test_discounted_payment_below_debit(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(600), # debited (600) + adjustment (0) = invoiced (600)\n paid=A(-500),\n credited=A(-500), # payment (-500) + adjustment (0) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )",
"def calcPaidInCapitalPart(self, part):\n capital_share = (1 - self.debt_share)\n capital_value = capital_share * part * self.investments #calc investment value in $\n\n if self.paid_in_rest > 0: #if we have not paid all sum\n\n if capital_value > self.paid_in_rest:\n paid_from_initial_capital = self.paid_in_rest\n self.paid_in_rest = 0 #we dont need to pay more\n else:\n paid_from_initial_capital = capital_value\n self.paid_in_rest -= capital_value #decreses rest payments\n else:\n paid_from_initial_capital = 0\n\n return capital_value, paid_from_initial_capital",
"def earn_dividends(self, cash_dividends, stock_dividends):\n for cash_dividend in cash_dividends:\n self._dirty_stats = True # only mark dirty if we pay a dividend\n\n # Store the earned dividends so that they can be paid on the\n # dividends' pay_dates.\n div_owed = self.positions[cash_dividend.instrument].earn_dividend(\n cash_dividend,\n )\n try:\n self._unpaid_dividends[cash_dividend.pay_date].append(div_owed)\n except KeyError:\n self._unpaid_dividends[cash_dividend.pay_date] = [div_owed]\n\n for stock_dividend in stock_dividends:\n self._dirty_stats = True # only mark dirty if we pay a dividend\n\n div_owed = self.positions[\n stock_dividend.instrument\n ].earn_stock_dividend(stock_dividend)\n try:\n self._unpaid_stock_dividends[stock_dividend.pay_date].append(\n div_owed,\n )\n except KeyError:\n self._unpaid_stock_dividends[stock_dividend.pay_date] = [\n div_owed,\n ]",
"def test_split_payment_with_discount_and_adjustment(self):\n debit_jobs(\n [\n (self.job, A(480), Entry.FLAT_DEBIT),\n (self.job2, A(480), Entry.WORK_DEBIT),\n ]\n )\n self.assertEquals(A(480), self.job2.account.balance)\n self.assert_balances(promised=A(960), balance=A(480), invoiced=A(480))\n credit_jobs(\n [\n (self.job, A(440), A(0), A(40)), # adjusted\n (self.job2, A(460), A(20), A(0)), # discounted\n ],\n D(900),\n )\n self.assert_balances(\n bank=A(900, 0, 0),\n debited=A(480),\n invoiced=A(440), # debited (480) + adjustment (-40) = invoiced (440)\n paid=A(-440),\n credited=A(-480), # payment (-440) + adjustment (-40) = credited (-480)\n partial=A(900).net_amount,\n tax=A(900).tax_amount,\n )\n self.assert_balances(\n bank=A(900, 0, 0),\n debited=A(480),\n invoiced=A(480), # debited (480) + adjustment (0) = invoiced (480)\n paid=A(-480),\n credited=A(-480), # payment (-480) + adjustment (0) = credited (-480)\n partial=A(900).net_amount,\n tax=A(900).tax_amount,\n switch_to_job=self.job2,\n )",
"def test_discounted_payment_matching_debit(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n debited=A(500),\n invoiced=A(500), # debited (500) + adjustment (0) = invoiced (500)\n paid=A(-500),\n credited=A(-500), # payment (-500) + adjustment (0) = credited (-500)\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )",
"def cash_income(df):\n return (df.aftertax_income -\n (1 - tc.HOUSING_CASH_SHARE) * df.housing_ben -\n (1 - tc.MCAID_CASH_SHARE) * df.mcaid_ben -\n (1 - tc.MCARE_CASH_SHARE) * df.mcare_ben -\n (1 - tc.OTHER_CASH_SHARE) * df.other_ben -\n (1 - tc.SNAP_CASH_SHARE) * df.snap_ben -\n (1 - tc.SSI_CASH_SHARE) * df.ssi_ben -\n (1 - tc.TANF_CASH_SHARE) * df.tanf_ben -\n (1 - tc.VET_CASH_SHARE) * df.vet_ben -\n (1 - tc.WIC_CASH_SHARE) * df.wic_ben)",
"def get_cash(self):\n return self.cash",
"def getCash(self, includeShort=True):\n raise NotImplementedError()",
"def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )",
"def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()",
"def demo_dividends(self):\n dividends = pd.DataFrame(index=range(10))\n dividends['symbol'] = ['MSFT', 'AAPL', 'CVX', 'XOM', 'BND', 'CAT', 'BA', 'TIF', 'BAC', 'JPM']\n dividends['position'] = 100\n dividends['amount'] = [20, 30, 40, 50, 60, 70, 80, 90, 100, 110]\n dividends['rate'] = dividends['amount'] / dividends['position']\n dividends['paid_at'] = pd.Timestamp('2019-01-15', tz='UTC')\n dividends['payable_date'] = pd.Timestamp('2019-01-02', tz='UTC')\n return dividends",
"def calculate_due_payments(self):\n\n # variables are renamed to make the math more explicit\n p = self.principal\n d = self.daily_interest_rate\n k = len(self.return_days)\n\n return [\n b * ((1 + d) ** (n - m) - 1) + p / k\n for b, n, m in zip(\n self.balance[:-1],\n self.return_days, [0] + self.return_days[:-1]\n )\n ]",
"def profit(self, prices, dividends):\n assert len(prices) == len(dividends) + 1\n capital_profit = prices[-1] - prices[0]\n dividend_profit = self._dividend_profit(dividends, prices)\n return capital_profit + dividend_profit",
"def test_adjusted_payment_matching_invoice(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n debited=A(500),\n invoiced=A(480), # debited (500) + adjustment (-20) = invoiced (480)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )",
"def _vcash(totmoney, totcftable, cashobj):\n cashl = []\n cashl.append(totmoney + totcftable.iloc[0].cash)\n for i in range(len(totcftable) - 1):\n date = totcftable.iloc[i + 1].date\n delta = totcftable.iloc[i + 1].cash\n if delta < 0:\n cashl.append(\n myround(\n delta\n / cashobj.price[cashobj.price[\"date\"] <= date].iloc[-1].netvalue\n )\n )\n else:\n cashl.append(delta)\n datadict = {\"date\": totcftable.loc[:, \"date\"], \"mf\": cashl}\n return pd.DataFrame(data=datadict)",
"def test_payment(self):\n debit_jobs([(self.job, A(480), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(480),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )",
"def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Force a computation of the current portfolio state.
|
def update_portfolio(self):
if not self._dirty_portfolio:
return
portfolio = self._portfolio
pt = self.position_tracker
portfolio.positions = pt.get_positions()
position_stats = pt.stats
portfolio.positions_value = position_value = (
position_stats.net_value
)
portfolio.positions_exposure = position_stats.net_exposure
self._cash_flow(self._get_payout_total(pt.positions))
start_value = portfolio.portfolio_value
# update the new starting value
portfolio.portfolio_value = end_value = portfolio.cash + position_value
pnl = end_value - start_value
if start_value != 0:
returns = pnl / start_value
else:
returns = 0.0
portfolio.pnl += pnl
portfolio.returns = (
(1 + portfolio.returns) *
(1 + returns) -
1
)
# the portfolio has been fully synced
self._dirty_portfolio = False
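The return update at the end of this method compounds the period return into the running total geometrically, (1 + total) * (1 + period) - 1, rather than adding them. A minimal standalone sketch of that step (not part of the record above; chain_returns and the sample numbers are illustrative):

def chain_returns(period_returns):
    """Compound a sequence of period returns into one cumulative return."""
    total = 0.0
    for r in period_returns:
        # Same update as in update_portfolio: (1 + total) * (1 + r) - 1
        total = (1.0 + total) * (1.0 + r) - 1.0
    return total

# +1% followed by -0.5% compounds to about +0.495%, not +0.5%
print(chain_returns([0.01, -0.005]))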
|
[
"def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )\n )\n\n self.logger.info(\n '(%s) Funds subscribed to portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id,\n round(self.starting_cash, 2),\n round(self.starting_cash, 2)\n )\n )",
"def _ExecuteBeforeSolve(self):\n pass",
"def portfolio_manager(portfolio_start, portfolio_now,volume,price,gain):\n hysteresis = 1.2 # get each from ML Model\n base = 100.00\n outer_scale = 1.2\n inner_scale = 100.00\n \n total_price = volume * price \n gain_percentage = gain / (total_price)\n\n portfolio_percentage = (hysteresis - outer_scale*(base ** -(inner_scale*gain_percentage)))\n #check out this equation on wolfram alpha with the query: (1.2 - 1.2*(100 ^ -(100*x))) from 0 to .005\n #x axis is percentage gain, y is portfolio percentage that can be used\n\n acceptable_total_price = portfolio_percentage * portfolio_start\n \n portfolio_currently_invested = portfolio_start - portfolio_now\n acceptable_total_price -= portfolio_currently_invested #account for money already invested. if you have 100 $, and we are willing to invest 10, but we've already invested 6, we are only willing to invest 4.\n \n if(acceptable_total_price <= 0):\n return 0\n \n if(acceptable_total_price > total_price): #if we're willing to invest more than is possible at current volume, invest all\n return volume\n #else, invest the amount we're willing to based on the percentage\n acceptable_volume = int(acceptable_total_price / price)\n \n return acceptable_volume",
"def updateState(self):\n self.state = self.microgridPolicy.computeState();",
"def portfolio(self):\n self.update_portfolio()\n return self._immutable_portfolio",
"def track_portfolio(self, p):\n\n global st_refresh_thread\n\n if self.terminate:\n return\n\n p.refresh()\n\n self.lock.acquire()\n self.active_portfolio = p\n self.display_portfolio(p)\n self.lock.release()\n\n if not self.refresh_thread:\n thr_args = list()\n thr_args.append(self)\n self.refresh_thread = threading.Thread(target=st_refresh_thread,\n args=thr_args)\n self.refresh_thread.start()",
"def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()",
"def update_current_state(self) -> NoReturn:\n self._currentState = self.state(self._currentPosition, self._currentTemperature,\n self._currentTotE, self._currentTotPot, self._currentTotKin,\n self._currentForce, self._currentVelocities)",
"def update_portfolio_on_market(self, market: MarketEvent):\n self._portfolio.update_market_value(market)",
"def _ExecuteAfterSolve(self):\n pass",
"def portfolioRiskUpdated(data):\n update = AdjustRisk(data)\n df = pd.DataFrame(index=update['Unnamed: 0'].values)\n df['nominal'] = update['nominal'].values\n df['pricePaid'] = update['price'].values \n df['weights'] = (update['MinCVaR'].values) / sum(update['MinCVaR'].values) # new weights according to Update and ensures it is 100%\n df['notionalStart'] = sum(df['nominal'] * df['pricePaid'])\n df['oldLiquidity'] = update['liquid'].values\n stocks = list(df.index.values)\n df['priceToday'] = update['lastPrice'].values\n df['notionalToday'] = sum(df['priceToday'] * df['nominal'])\n df['PnLpercent'] = df['notionalToday'] / df['notionalStart']\n df['PnLpercentEach'] = df['priceToday'] / df['pricePaid']\n # En nuevo nominal sumamos el resultado obtenido mas el remanente liquido para reinvertir, siendo nuestro total disponible\n df['nominalNew'] = ((df['weights'] * (df['notionalToday'] + df['oldLiquidity'])) // df['priceToday']) # nuevo nominal\n df['adjust'] = df['nominalNew'] - df['nominal'] # ajuste nominal\n df['percentReb'] = (df['nominalNew'] * df['priceToday']) / sum(df['nominalNew'] * df['priceToday'])\n # Columnas vinculantes para conectar mes anterior con el proximo ya armado\n df['notionalRebalance'] = sum(df['nominalNew'] * df['priceToday'])\n df['liquidityToReinvest'] = (df['notionalToday'] + df['oldLiquidity']) - df['notionalRebalance']\n return df",
"def __update_portfolio_handler(self, msg):\n pass",
"def freeze(self):\n self.temperature = IceCream.freezing_point # (2) Set to freezing point",
"def apply_coupled(self) -> NoReturn:\r\n if self.system.step % self._tau == 0:\r\n new_current_position, new_current_velocity = self.apply(current_position=self.system._currentPosition)\r\n self.system._currentPosition += new_current_position\r\n self.system._currentForce += new_current_velocity",
"def precalculate():\n pass",
"def _update_state_from_traj(self) -> NoReturn:\n self.currentState = self.state(**self.trajectory.iloc[-1].to_dict())\n self._update_current_vars_from_current_state()\n return",
"def __compute(self, states) -> None: # pylint: disable=dangerous-default-value\n if callable(self.compute):\n self.compute(self, states) # pylint: disable=not-callable",
"def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2",
"def apply(self, gameState):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the current portfolio. Notes: this is cached; repeated access will not recompute the portfolio until the portfolio may have changed.
|
def portfolio(self):
self.update_portfolio()
return self._immutable_portfolio
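The property is a thin wrapper over the dirty-flag cache shown in the previous record: recomputation happens only when the flag is set. A minimal sketch of the same lazy-caching pattern in isolation (class and attribute names here are illustrative, not the library's API):

class CachedValue:
    def __init__(self, compute):
        self._compute = compute  # callable that rebuilds the value
        self._value = None
        self._dirty = True       # force the first computation

    def invalidate(self):
        # Mark the cache stale; the next access recomputes.
        self._dirty = True

    @property
    def value(self):
        if self._dirty:
            self._value = self._compute()
            self._dirty = False  # fully synced until the next invalidate()
        return self._value

cache = CachedValue(lambda: sum(range(1_000_000)))
print(cache.value)  # computed once
print(cache.value)  # served from the cache, no recomputation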
|
[
"def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n position_stats.net_value\n )\n portfolio.positions_exposure = position_stats.net_exposure\n self._cash_flow(self._get_payout_total(pt.positions))\n\n start_value = portfolio.portfolio_value\n\n # update the new starting value\n portfolio.portfolio_value = end_value = portfolio.cash + position_value\n\n pnl = end_value - start_value\n if start_value != 0:\n returns = pnl / start_value\n else:\n returns = 0.0\n\n portfolio.pnl += pnl\n portfolio.returns = (\n (1 + portfolio.returns) *\n (1 + returns) -\n 1\n )\n\n # the portfolio has been fully synced\n self._dirty_portfolio = False",
"def get_portfolio_object(self):\n return self.__get_portfolio_object(self.portfolio_name, self.portfolio_user)",
"def get_portfolio_pnl(self):\n\n return self._portfolio",
"def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks",
"def current_portfolio_weights(self) -> 'pd.Series[float]':\n position_values = pd.Series({\n asset: (\n position.last_sale_price *\n position.amount *\n asset.price_multiplier\n )\n for asset, position in self.positions.items()\n }, dtype=\"float64\")\n return position_values / self.portfolio_value",
"def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()",
"def portfolio():\n\n portf = db.execute(\n \"SELECT stock, SUM(shares) as totshares FROM history WHERE id = :id GROUP BY stock HAVING totshares > 0\", id=session[\"user_id\"])\n\n curprice = {}\n totstocks = 0\n\n for banco in portf:\n curprice[banco[\"stock\"]] = lookup(banco[\"stock\"])\n totstocks += curprice[banco[\"stock\"]][\"price\"] * banco[\"totshares\"]\n\n cashnow = float(db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])[0][\"cash\"])\n grandtot = float(cashnow + totstocks)\n\n return render_template(\"/portfolio.html\", portf=portf, curprice=curprice, cashnow=cashnow, totstocks=totstocks, grandtot=grandtot)",
"def portfolio_value(stocks, alloc, base):\n\n # Calculate % return\n stock_alloc = alloc * stocks\n portfolio_cumulative = stock_alloc.sum(axis=1)\n\n # Portfolio Value\n p_value = portfolio_cumulative * base\n\n return p_value",
"def portfolio_manager(portfolio_start, portfolio_now,volume,price,gain):\n hysteresis = 1.2 # get each from ML Model\n base = 100.00\n outer_scale = 1.2\n inner_scale = 100.00\n \n total_price = volume * price \n gain_percentage = gain / (total_price)\n\n portfolio_percentage = (hysteresis - outer_scale*(base ** -(inner_scale*gain_percentage)))\n #check out this equation on wolfram alpha with the query: (1.2 - 1.2*(100 ^ -(100*x))) from 0 to .005\n #x axis is percentage gain, y is portfolio percentage that can be used\n\n acceptable_total_price = portfolio_percentage * portfolio_start\n \n portfolio_currently_invested = portfolio_start - portfolio_now\n acceptable_total_price -= portfolio_currently_invested #account for money already invested. if you have 100 $, and we are willing to invest 10, but we've already invested 6, we are only willing to invest 4.\n \n if(acceptable_total_price <= 0):\n return 0\n \n if(acceptable_total_price > total_price): #if we're willing to invest more than is possible at current volume, invest all\n return volume\n #else, invest the amount we're willing to based on the percentage\n acceptable_volume = int(acceptable_total_price / price)\n \n return acceptable_volume",
"def get_portfolio(user_id, portfolio_id):",
"def get_portfolio(self):\n \n self.accountUser.check_auth()\n\n response = self.accountUser.oauth.get(self.portfolio_url)\n response.raise_for_status()\n return xmltodict.parse(response.text)",
"def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()",
"def __get_portfolio_object(self, name, user):\n portfolio = self.__get_object_portfolio_bulk(name, user)\n if portfolio is None:\n portfolio = self.__get_object_portfolio_bulk(name, user, \"portfolio_update\")\n if portfolio is None:\n portfolio = self.db_tool.session.query(Portfolio) \\\n .outerjoin(Orders)\\\n .join(Stock)\\\n .filter(name == Portfolio.name) \\\n .filter(user == Portfolio.user).first()\n self.bulk_data[\"portfolio_update\"].append(portfolio)\n return portfolio",
"def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)",
"def compute_portfolio_return(weights, alphas, betas, market_return):\n portfolio_return = sum([(betas[i] * market_return + alphas[i]) * weights[i] for i in range(len(weights))])\n return portfolio_return",
"def portfolio(self,userid):\n #资金查询\n self.query_order(630002,userid)\n \"\"\"\n 这里应该留一个时间差出来?\n \"\"\"\n self.sync()\n df=self.df[\"fund_report\"].tail(1).reset_index()\n rec=df.iloc[0].to_dict()\n inoutcash=rec[\"cashin\"]-rec[\"cashout\"]\n avlcash=rec[\"AvlFund\"] # 可用资金\n lockedcash=rec[\"LockedFund\"] # 冻结资金\n #margin=rec[\"AvlFund\"]\n positions,shortpositions=self.position(userid)\n longpositions=positions\n #totalvalue=rec[\"AssetValue\"]\n ref=\"ZS\"\n p = Portfolio(inout_cash=inoutcash,available_cash=avlcash,locked_cash=lockedcash,\n positions=positions,long_positions=longpositions,\n total_value=totalvalue,ref=ref,raw=rec)\n\n return p",
"def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio",
"def _obtain_broker_portfolio_total_equity(self):\n return self.broker.get_portfolio_total_equity(self.broker_portfolio_id)",
"def getPortfolioValue(self, start_t, t):\n sum_tmp=0\n for item in self.portfolio.keys():\n if \"DJI_\" in item:\n t_tmp=datetime.strftime(pd.date_range(end=t,periods=1,freq='B')[0],'%Y-%m-%d')\n price=universe.get_price_in_currency(item,t_tmp,'CAD')\n elif 'rf_rate' in item:\n price=universe.get_security(item).get_cc_return(start_t,t) \n else:\n price=universe.get_price_in_currency(item,t,'CAD')\n #price=universe.get_security(item).price[t]\n amount=self.portfolio[item]\n sum_tmp=sum_tmp+price*amount\n \n return sum_tmp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Override fields on ``self.account``.
|
def override_account_fields(self,
settled_cash=not_overridden,
accrued_interest=not_overridden,
buying_power=not_overridden,
equity_with_loan=not_overridden,
total_positions_value=not_overridden,
total_positions_exposure=not_overridden,
regt_equity=not_overridden,
regt_margin=not_overridden,
initial_margin_requirement=not_overridden,
maintenance_margin_requirement=not_overridden,
available_funds=not_overridden,
excess_liquidity=not_overridden,
cushion=not_overridden,
day_trades_remaining=not_overridden,
leverage=not_overridden,
net_leverage=not_overridden,
net_liquidation=not_overridden):
# mark that the portfolio is dirty to override the fields again
self._dirty_account = True
self._account_overrides = kwargs = {
k: v for k, v in locals().items() if v is not not_overridden
}
del kwargs['self']
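The method collects only the arguments the caller actually passed by comparing each parameter against the not_overridden sentinel and filtering locals(); deleting 'self' afterwards leaves a dict of genuine overrides. A minimal sketch of that idiom outside the class (the field names below are illustrative):

not_overridden = object()  # unique sentinel: distinguishes "not passed" from None

def collect_overrides(settled_cash=not_overridden,
                      buying_power=not_overridden,
                      leverage=not_overridden):
    # At this point locals() holds only the parameters; keep those that were supplied.
    return {k: v for k, v in locals().items() if v is not not_overridden}

print(collect_overrides(buying_power=10_000.0))  # {'buying_power': 10000.0}
print(collect_overrides(settled_cash=None))      # None counts as a real override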
|
[
"def fix_account(self, account):\n pass",
"def account(self, account):\n\n self._account = account",
"def onAccountUpdate(self, data):\n pass",
"def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()",
"def get_account_details(self):\n pass",
"def put_account(self, account):\n \n pass",
"def update_account_data(self):\n self.ensure_one()\n getattr(self, '%s_update_account_data' % self.provider, lambda: None)()",
"def __init__(self, client, account_id):\n\n super(AccountsMixin, self).__init__(client)\n self._account_id = account_id",
"def save_account(self, *args, **kwargs):\n pass",
"def user_account(self, user_account):\n self._user_account = user_account",
"def load_account(self):\n pass",
"def _set_additional_fields(self, po):\n pass",
"def set_accounts(self, trx):\n other_account_id = self.get_account(trx['summary'])\n if Decimal(trx['amount']) < 0:\n trx['credit'] = self.main_account_id\n trx['debit'] = other_account_id\n else:\n trx['debit'] = self.main_account_id\n trx['credit'] = other_account_id\n trx['amount'] = str(abs(Decimal(trx['amount'])))",
"def _account(self) -> Account:\n if isinstance(self._node_cached_account, Account):\n return self._node_cached_account\n account = Account.retrieve(\n session=self.entity.session,\n entity=self.entity,\n account_id=self.account_id\n )\n self._node_cached_account = account\n return account",
"def account_no(self, account_no):\n self._account_no = account_no",
"def account_amount(self, account_amount):\n\n self._account_amount = account_amount",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def disable_account(self):\n pass",
"def account_code(self, account_code):\n self._account_code = account_code"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called when the partition's reference count reaches zero. If the partition contains a temporary file which is not referenced by any other partition, the temporary file is removed from disk. If the partition contains a non-temporary file which is not referenced by any other partition, the file is closed.
|
def __del__(self):
# subarray = getattr(self, '_subarray', None)
subarray = self._subarray
# If the subarray is unique it will have 2 references to
# it plus 1 within this method, making 3. If it has more
# than 3 references to it then it is not unique.
if getrefcount is not None:
self._decrement_file_counter()
if subarray is None or getrefcount(subarray) > 3:
return
else:
# getrefcount has itself been deleted or is in the process
# of being torn down
return
_partition_file = getattr(subarray, "_partition_file", None)
if _partition_file is not None:
# This partition contains a temporary file which is not
# referenced by any other partition on this process, so if
# there are no lock files present remove the file from
# disk.
_remove_temporary_files(_partition_file)
else:
try:
if FileArray is not None and isinstance(subarray, FileArray):
try:
filename = subarray.get_filename()
except Exception:
filename = None
if self.file_counter.get(filename, 999) <= 0:
# This partition contains a non-temporary file
# which is not referenced by any other
# partitions, so close the file.
subarray.close()
except Exception:
# If we're here then it is likely that FileArray has been
# torn down, so just do nothing.
pass
# --- End: if
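The uniqueness test above leans on how sys.getrefcount counts references: the object's own holders plus the temporary reference created by the call itself. A small illustration of that counting, separate from the partition code (exact numbers can differ between CPython versions, so treat it as a sketch):

from sys import getrefcount

data = [1, 2, 3]
# One reference held by `data`, plus the temporary one made by the call.
print(getrefcount(data))  # typically 2

alias = data              # a second named reference
print(getrefcount(data))  # typically 3

del alias
print(getrefcount(data))  # back to 2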
|
[
"def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))",
"def __del__(self):\n if self.has_temp_file:\n logging.warning('Temp file {tf} is still present, will be cleaned up as S3FileTransfer {sft} is destroyed'\n .format(tf=self.temp_file, sft=str(self)))\n self.cleanup_temp_file()",
"def destroy(self) -> None:\n if self.temp_file is not None:\n self.temp_file.close()",
"def release_file_handle(self):\n pass",
"def abort(self):\r\n f = self.file\r\n if f:\r\n self.idx = None\r\n self.file = None\r\n f.close()\r\n os.unlink(self.filename + '.pack')",
"def _blob_tpc_abort(self):\n while self.dirty_oids:\n oid, serial = self.dirty_oids.pop()\n clean = self.fshelper.getBlobFilename(oid, serial)\n if os.path.exists(clean):\n remove_committed(clean)",
"def destroy_temp_file(self) -> None:\n self.thumbnail.destroy()",
"def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1",
"def __del__(self):\n\n self.tempfile.close()",
"def __del__(self):\n if self.locked:\n self.unlock()\n self._file.close()",
"def __purge_old_files(self):\n\n chkpts = self.checkpointer.sorted_checkpoints()\n p_chkpts = []\n e_chkpts = []\n for c in chkpts:\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.PERIODIC_PREFIX):\n p_chkpts.append(c)\n\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.EPOCH_PREFIX):\n e_chkpts.append(c)\n\n # Delete periodic checkpoints\n if self.max_files is not None and len(p_chkpts) > self.max_files:\n for c in p_chkpts[self.max_files:]:\n LOG.debug(\"CheckpointingCallback deleting {}\".format(c))\n self.checkpointer.delete(c)\n\n # Delete older epochs\n if self.max_epochs is not None and len(e_chkpts) > self.max_epochs:\n for c in e_chkpts[self.max_epochs:]:\n LOG.debug(\"CheckpointingCallback deleting (epoch) {}\".format(c))\n self.checkpointer.delete(c)",
"def test_file_unused(self):\n try:\n with get_temp_file() as (fd, name):\n pass\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)",
"def __purge_old_files(self):\n\n chkpts = self.checkpointer.sorted_checkpoints()\n p_chkpts = []\n e_chkpts = []\n for c in chkpts:\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.PERIODIC_PREFIX):\n p_chkpts.append(c)\n\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.EPOCH_PREFIX):\n e_chkpts.append(c)\n\n # Delete periodic checkpoints\n if self.max_files is not None and len(p_chkpts) > self.max_files:\n for c in p_chkpts[self.max_files:]:\n log.debug(\"CheckpointingCallback deleting {}\".format(c))\n self.checkpointer.delete(c)\n\n # Delete older epochs\n if self.max_epochs is not None and len(e_chkpts) > self.max_epochs:\n for c in e_chkpts[self.max_epochs:]:\n log.debug(\"CheckpointingCallback deleting (epoch) {}\".format(c))\n self.checkpointer.delete(c)",
"def test_removing_already_deleted_file_with_retry(self):\n filepath = os.path.join(self.tempdir, *stubs.files[1])\n\n thread = FileOpenCloseThread(filepath, .8, True)\n thread.start()\n\n # Wait a bit so we can be sure the file has been opened and gets deleted\n # while remove() waits for the next retry\n time.sleep(.5)\n mozfile.remove(filepath)\n thread.join()\n\n # Check deletion was successful\n self.assertFalse(os.path.exists(filepath))",
"def release_files(self) -> None:\n\n headroom_needed = self._headroom - self._lfs.quota_remaining(self.node.root)\n\n # Nothing to do\n if headroom_needed <= 0:\n return\n\n def _async(task, node, lfs, headroom_needed):\n total_files = 0\n total_bytes = 0\n\n # loop through file copies until we've released enough\n # (or we run out of files)\n for copy in (\n ArchiveFileCopy.select()\n .where(\n ArchiveFileCopy.node == node,\n ArchiveFileCopy.has_file == \"Y\",\n ArchiveFileCopy.ready == True,\n )\n .order_by(ArchiveFileCopy.last_update)\n ):\n # Skip unarchived files\n if not lfs.hsm_archived(copy.path):\n continue\n\n log.debug(\n f\"releasing file copy {copy.path} \"\n f\"[={pretty_bytes(copy.file.size_b)}] on node {self.node.name}\"\n )\n lfs.hsm_release(copy.path)\n # Update copy record immediately\n ArchiveFileCopy.update(ready=False).where(\n ArchiveFileCopy.id == copy.id\n ).execute()\n total_files += 1\n total_bytes += copy.file.size_b\n if total_bytes >= headroom_needed:\n break\n log.info(\n f\"released {pretty_bytes(total_bytes)} in \"\n f\"{total_files} files on node {self.node.name}\"\n )\n return\n\n # Do the rest asynchronously\n Task(\n func=_async,\n queue=self._queue,\n key=self.node.name,\n args=(self.node, self._lfs, headroom_needed),\n name=f\"Node {self.node.name}: HSM release {pretty_bytes(headroom_needed)}\",\n )",
"def flow_file_chunk_delete(sender, instance, **kwargs):\n instance.file.delete(False)",
"def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()",
"def cleanup():\n if DEBUG_FILEHANDLE:\n DEBUG_FILEHANDLE.close()",
"def Close(self):\n super(CPIOArchiveFile, self).Close()\n self._file_entries = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add i to the count of subarrays referencing the file of this partition's subarray. Only do this if self._subarray is an instance of FileArray, but not a temporary FileArray.
|
def _add_to_file_counter(self, i):
# subarray = getattr(self, '_subarray', None)
subarray = self._subarray
if subarray is None:
return
try:
if isinstance(subarray, FileArray) and not isinstance(
subarray, CachedArray
):
try:
filename = subarray.get_filename()
except Exception:
filename = None
if filename is None:
return
file_counter = self.file_counter
# count = file_counter.get(filename, 0)
# file_counter[filename] = count + i
# if file_counter[filename] <= 0:
count = file_counter.get(filename, 0) + i
if count <= 0:
# Remove the file from the dictionary if its count has
# dropped to zero
file_counter.pop(filename, None)
else:
file_counter[filename] = count
except Exception:
# If we're here then it is likely that FileArray has been
# torn down, so just do nothing.
pass
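The counter is just a dict keyed by filename whose entry is dropped as soon as its count falls to zero or below, so file_counter only ever lists files that are still referenced. A minimal standalone sketch of that bookkeeping (names and the sample filename are illustrative):

file_counter = {}

def add_to_counter(filename, i):
    """Add i to filename's count; drop the entry once it reaches zero."""
    count = file_counter.get(filename, 0) + i
    if count <= 0:
        file_counter.pop(filename, None)
    else:
        file_counter[filename] = count

add_to_counter("data.nc", +1)
add_to_counter("data.nc", +1)
print(file_counter)  # {'data.nc': 2}
add_to_counter("data.nc", -1)
add_to_counter("data.nc", -1)
print(file_counter)  # {} -- last reference gone, entry removed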
|
[
"def append_subint_array(self,table):\n fits_to_append = F.FITS(table)",
"def iterappend(self, arrayiterable):\n if self._accessmode != 'r+':\n raise OSError(f\"Accesmode should be 'r+' \"\n f\"(now is '{self._accessmode}')\")\n if not hasattr(arrayiterable, '__iter__'):\n raise TypeError(\"'arrayiterable' is not iterable\")\n self.check_arraywriteable()\n arrayiterable = iter(arrayiterable)\n if np.product(self._shape) == 0:\n # numpy cannot write to a fd of an empty file.\n # Hence we overwrite the file. It is not beautiful but it works.\n array = self._checkarrayforappend(next(arrayiterable))\n array.tofile(str(self._datapath))\n self._update_len(lenincrease=array.shape[0])\n with self._open_array() as (v, fd):\n oldshape = v.shape\n lenincrease = 0\n try:\n for array in arrayiterable:\n lenincrease += self._append(array=array, fd=fd)\n except Exception as exception:\n if fd.closed:\n fd = open(file=self._datapath, mode=self._accessmode)\n fd.flush()\n self._update_len(lenincrease=lenincrease)\n fd.truncate(self._size * self._dtype.itemsize)\n fd.close()\n s = f\"{exception}\\nAppending of data did not (completely) \" \\\n f\"succeed. Shape of array was {oldshape} and is now \" \\\n f\"{self._shape} after an increase in length \" \\\n f\"(along first dimension) of {lenincrease}.\"\n raise AppendDataError(s)\n self._update_len(lenincrease=lenincrease)",
"def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def add(self, i: int, v: int) -> None:\n while i < self.size:\n self.tree[i] += v\n i += self._lsb(i)",
"def add_subsegment(self, subsegment):\n super().add_subsegment(subsegment)\n self.increment()",
"def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)",
"def _increment_file_counter(self):\n self._add_to_file_counter(1)",
"def setSubRequestFiles( self, ind, rType, files ):\n if not self.subRequests.has_key( rType ):\n return S_ERROR( \"No requests of type specified found.\" )\n elif len( self.subRequests[rType] ) < ind:\n return S_ERROR( \"Subrequest index is out of range.\" )\n else:\n if not self.subRequests[rType][ind].has_key( 'Files' ):\n # Make deep copy\n self.subRequests[rType][ind]['Files'] = copy.deepcopy( files )\n else:\n for fDict in files:\n self.subRequests[rType][ind]['Files'].append( copy.deepcopy( fDict ) )\n return S_OK()",
"def write_num_subsections(self):\n\n start = self.tell()\n self.write_bytes(b'\\x00' * 4)\n try:\n yield\n finally:\n with self:\n self.seek(start)\n self.write_uint(self.num_subsections)",
"def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):\n\n traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]\n field = traj_grp[field_path]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check the field to make sure it is not empty\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n else:\n # make sure the new data has the right dimensions against\n # the shape it already has\n assert field_data.shape[1:] == field.shape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] = field_data",
"def add_subsegment(self, subsegment):\n super().add_subsegment(subsegment)\n self.parent_segment.increment()",
"def append_filepath(self, filepath):\n idx = len(self.t_sect['filepaths'])\n self.t_sect['filepaths'].append(filepath)\n return idx",
"def _create_data_array(self):\n for i in range(len(self.bucket_array)):\n for j in range(self.bucket_array[i]):\n self.data_array.append(i)\n self.size = len(self.data_array)",
"def write_sub_4(self):\n self.subIndex[constants.sub_4_genre_albums].offset = (\n self.db_file.tell())\n self.subIndex[constants.sub_4_genre_albums].size = 8\n self.subIndex[constants.sub_4_genre_albums].count = (\n len(self.genreIndex) - 1)\n\n entry_offset = 0\n for giEntry in self.genreIndex[1:]:\n self.db_file.write(\n struct.pack(\n \"<HHHH\",\n giEntry.number,\n entry_offset,\n giEntry.number_of_albums,\n 0x0000))\n entry_offset += giEntry.number_of_albums",
"def attach(self, i, t1):\n\n if not isinstance(t1, ArrayBinaryTree):\n raise TreeException(\"The tree types are not the same.\")\n\n if not type(self) is type(t1):\n raise TreeException(\"The tree types are not the same.\")\n\n if not self.isLeaf(i):\n raise TreeException(\"The index is not a Leaf.\")\n\n # No need to call validatePosition, cos this would have been done when testing if it is leaf\n self.__attachPreOrder(t1, i, t1.root())\n\n t1.clear()",
"def write_all_sub_indices(self):\n\n # remember where we are.\n temp_offset_1 = self.db_file.tell()\n\n # Write a filler for the relative offset to the first table\n self.db_file.write(struct.pack(\"<I\", 0x00000000))\n\n # Write the sub index entries (blank at this stage)\n self.write_sub_index()\n\n # self.subIndex[constants.sub_0_genre_performers].offset = \\\n # self.db_file.tell()\n self.write_sub_0()\n\n # self.subIndex[constants.sub_1_genre_performer_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_1()\n\n # self.subIndex[constants.sub_2_genre_performer_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_2()\n\n # self.subIndex[constants.sub_3_genre_ordered_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_3()\n\n # self.subIndex[constants.sub_4_genre_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_4()\n\n # self.subIndex[constants.sub_5_genre_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_5()\n\n # self.subIndex[constants.sub_6_genre_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_6()\n\n # self.subIndex[constants.sub_7_performer_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_7()\n\n # self.subIndex[constants.sub_8_performer_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_8()\n\n # self.subIndex[constants.sub_9_performer_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_9()\n\n # self.subIndex[constants.sub_10_genre_performers].offset = \\\n # self.db_file.tell()\n self.write_sub_10()\n\n # self.subIndex[constants.sub_11_genre_performer_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_11()\n\n # self.subIndex[constants.sub_12_genre_ordered_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_12()\n\n # Remeber where we are\n temp_offset_2 = self.db_file.tell()\n\n # Go back to the start\n self.db_file.seek(temp_offset_1)\n\n # Write the offset to the first table\n self.db_file.write(\n struct.pack(\n \"<I\",\n self.subIndex[constants.sub_0_genre_performers].offset -\n temp_offset_1))\n\n # Write the real data now\n self.write_sub_index()\n\n # Go to the end\n self.db_file.seek(temp_offset_2)",
"def update_subvarga(self, subvarga):\n\t\tself.subvarga = subvarga\n\t\tself.subvargaNum += 1",
"def extend(self, i):\n for x in i:\n self.add(x)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
|
def _increment_file_counter(self):
self._add_to_file_counter(1)
|
[
"def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass",
"def _decrement_file_counter(self):\n self._add_to_file_counter(-1)",
"def fileCount(self):\n pass",
"def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return",
"def obs_file_count(self):\n return self._obs_file_count",
"def next_file(self): \n if not self.cnt == len(self.files) -1:\n self.cnt +=1\n self.current_file = self.files[self.cnt]",
"def set_next_idx(self):\r\n if self.idx_of_file_indices <= len(self.file_indices)-1:\r\n self.idx_of_file_indices += 1\r\n self.file_idx = self.file_indices[self.idx_of_file_indices]",
"def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def on_disk(self):\n return isinstance(self._subarray, FileArray)",
"def get_num_files(self):\r\n return self.nfile",
"def fileCounter(directory):",
"def n_files(self):\n return len(self._library)",
"def evio_files_count(self):\n # the last file is something like: hd_rawdata_011410_055.evio\n if not self.evio_files:\n return None\n last_file = self.evio_last_file\n u_pos = last_file.rfind('_')\n d_pos = last_file.rfind('.')\n # noinspection PyBroadException\n try:\n return int(last_file[u_pos + 1:d_pos]) + 1\n except:\n log.warning(Lf(\"Can't parse file index for '{}' file\", last_file))\n return None",
"def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()",
"def obs_file_count(self, obs_file_count):\n self._obs_file_count = obs_file_count",
"def fileCount(self):\n return sum(len(f) for f in self.files)",
"def embeddedFileCount(self):\n return len(self.embeddedFileNames())",
"def _add_file_progress(self, status_message):\n file_url_string = status_message.source_url.url_string\n if file_url_string not in self._tracked_file_progress:\n self._tracked_file_progress[file_url_string] = 0\n\n known_progress = self._tracked_file_progress[file_url_string]\n # status_message.processed_bytes includes bytes from past messages.\n self._processed_bytes += status_message.processed_bytes - known_progress\n\n if status_message.finished:\n self._tracked_file_progress[file_url_string] = -1\n self._completed_files += 1\n else:\n self._tracked_file_progress[file_url_string] = (\n status_message.processed_bytes)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
|
def _decrement_file_counter(self):
self._add_to_file_counter(-1)
|
[
"def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def _increment_file_counter(self):\n self._add_to_file_counter(1)",
"def obs_file_count(self):\n return self._obs_file_count",
"def fileCount(self):\n pass",
"def file_number(self):\n return self._file_number",
"def file_close(self):\n if self.on_disk:\n self._subarray.close()",
"def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return",
"def evio_files_count(self):\n # the last file is something like: hd_rawdata_011410_055.evio\n if not self.evio_files:\n return None\n last_file = self.evio_last_file\n u_pos = last_file.rfind('_')\n d_pos = last_file.rfind('.')\n # noinspection PyBroadException\n try:\n return int(last_file[u_pos + 1:d_pos]) + 1\n except:\n log.warning(Lf(\"Can't parse file index for '{}' file\", last_file))\n return None",
"def on_disk(self):\n return isinstance(self._subarray, FileArray)",
"def get_num_files(self):\r\n return self.nfile",
"def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr",
"def nullify_file(self, index):\n self.image_scores[index] = 0",
"def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count",
"def current_file_number(self):\n file_number = get_value(\n database_name=Config.DATABASE_CHAINSTATE, key=b\"file_number\"\n )\n\n # If there is not a current file we'll start by 0\n if file_number is None or file_number == \"\" or file_number == b\"\":\n file_number = 0\n else:\n file_number = int.from_bytes(\n file_number, byteorder=\"little\", signed=False\n )\n\n file_name = get_current_file_name(blk_file_format(file_number))\n if get_blk_file_size(file_name) >= Config.MAX_FILE_SIZE:\n file_number += 1\n\n return file_number",
"def next_file(self): \n if not self.cnt == len(self.files) -1:\n self.cnt +=1\n self.current_file = self.files[self.cnt]",
"def nfiles(self):\n return len(self.table)",
"def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by 
any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass",
"def embeddedFileCount(self):\n return len(self.embeddedFileNames())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add the auxiliary mask to the config dictionary. Assumes that ``self.config`` already exists.
|
def _configure_auxiliary_mask(self, auxiliary_mask):
indices = self.indices
new = [
mask[
tuple(
[
(slice(None) if n == 1 else index)
for n, index in zip(mask.shape, indices)
]
)
]
for mask in auxiliary_mask
]
# # If the partition is to be parallelised then get rid of mask
# # components which are all False so the mask component does
# # not get copied to the child process
# if not config['serial']:
# new = [mask for mask in new if not mask.any()]
self.config["auxiliary_mask"] = new
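As a rough standalone sketch (with made-up mask shapes and partition indices, not the real Partition attributes), the subsetting above keeps slice(None) for size-1 mask dimensions so they continue to broadcast, and applies the partition's index everywhere else:
import numpy as np

# Hypothetical auxiliary mask that broadcasts along axis 0
mask = np.zeros((1, 4), dtype=bool)
mask[0, 2] = True

# Hypothetical partition indices into the master array
indices = (slice(0, 3), slice(1, 3))

subset = mask[
    tuple(
        slice(None) if n == 1 else index
        for n, index in zip(mask.shape, indices)
    )
]
print(subset.shape)  # (1, 2) -- axis 0 still broadcasts, axis 1 follows the partition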
|
[
"def hybrid_dict_mask(self, test=False, a='6', msg=msgs.m_hydi_atk):\n self.argv = self.build_args()\n mask = self.masks_file or self.mask\n if not mask:\n return\n try:\n self.argv.insert(0, mask)\n self.common_attack_pattern(test, a, msg)\n except IndexError:\n print(msgs.m_hydi_fail)\n return",
"def _updateMaskedValueSet():\n global masked_value_set\n for confName in controller.CONF:\n # Add all needed values to masked_value_set\n if (controller.getParamKeyValue(confName, \"MASK_INPUT\") == True):\n masked_value_set.add(controller.CONF[confName])",
"def apply_mask(self, mask):\n self.mask = mask",
"def AddToMask(self, index: int, mask: float) -> None:\n ...",
"def update_mask(self, mask):\n\n # Get general mask\n general_mask = self.general_mask\n\n # Complete with the input mask\n new_mask = (general_mask | mask)\n\n # Update attribute\n self.mask = new_mask\n\n # Correct i_bounds if it was not specified\n # self.update_i_bnds()\n\n # Re-compute weights\n self.weights, self.weights_k_idx = self.compute_weights()\n\n return",
"def update_mask(self):\r\n \r\n # Binary mask from ML detection\r\n if len(self.selected_ML_Index) > 0:\r\n # Delete items in dictionary that are not roi items\r\n roi_dict = self.selected_cells_infor_dict.copy()\r\n del_key_list=[]\r\n for key in roi_dict:\r\n print(key)\r\n if 'ROIitem' not in key:\r\n del_key_list.append(key)\r\n for key in del_key_list:\r\n del roi_dict[key]\r\n \r\n self.MLmask = ProcessImage.ROIitem2Mask(roi_dict, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n # Binary mask of added rois\r\n self.addedROIitemMask = ProcessImage.ROIitem2Mask(self.roi_list_freehandl_added, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n \r\n self.intergrate_into_final_mask()",
"def setProxPhotoMask(self, mask):\n\t\n self._write_byte_data(REG_CONFIG3, (self._read_byte_data(REG_CONFIG3) & ~0xf) | (mask & 0xf))",
"def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n # Getting all the filenames first prevents reopening the same file\n # for each science AD\n if illum_mask is None:\n illum_mask = [self._get_illum_mask_filename(ad) for ad in adinputs]\n\n for ad, illum in zip(*gt.make_lists(adinputs, illum_mask, force_ad=True)):\n if ad.phu.get(timestamp_key):\n log.warning('No changes will be made to {}, since it has '\n 'already been processed by addIllumMaskToDQ'.\n format(ad.filename))\n continue\n\n if illum is None:\n # So it can be zipped with the AD\n final_illum = [None] * len(ad)\n else:\n log.fullinfo(\"Using {} as illumination mask\".format(illum.filename))\n final_illum = gt.clip_auxiliary_data(ad, aux=illum, aux_type='bpm',\n return_dtype=DQ.datatype)\n\n for ext, illum_ext in zip(ad, final_illum):\n if illum_ext is not None:\n # Ensure we're only adding the unilluminated bit\n iext = np.where(illum_ext.data > 0, DQ.unilluminated,\n 0).astype(DQ.datatype)\n ext.mask = iext if ext.mask is None else ext.mask | iext\n\n # Timestamp and update filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=suffix, strip=True)\n\n return adinputs",
"def add_mask(self, col, val, mask):\n self._masks[col][val] = mask",
"def setup_mask(self, d25scale): \n\n logger = logging.getLogger(name=\"ShotSensitivity\")\n \n # see if this is a bad shot\n #print(\"Bad shot from \", self.conf.badshot)\n badshot = loadtxt(self.conf.badshot, dtype=int)\n badtpshots = loadtxt(self.conf.lowtpshots, dtype=int)\n if (self.shotid in badshot) or (self.shotid in badtpshots):\n logger.warn(\"Shot is in bad. Making mask zero everywhere\")\n self.badshot = True\n else:\n self.badshot = False\n \n # set up bad amps\n logger.info(\"Bad amps from {:s}\".format(self.conf.badamp))\n self.bad_amps = Table.read(self.conf.badamp)\n sel_shot = (self.bad_amps[\"shotid\"] == self.shotid)\n self.bad_amps = self.bad_amps[sel_shot]\n \n # set up galaxy mask\n logger.info(\"Galaxy mask from {:s}\".format(self.conf.rc3cat))\n galaxy_cat = Table.read(self.conf.rc3cat, format='ascii')\n gal_coords = SkyCoord(galaxy_cat['Coords'], frame='icrs')\n shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec,\n unit=\"deg\")\n sel_reg = where(shot_coords.separation(gal_coords) < 1.*u.deg)[0]\n\n self.gal_regions = []\n if len(sel_reg) > 0:\n for idx in sel_reg:\n self.gal_regions.append(create_gal_ellipse(galaxy_cat, \n row_index=idx, \n d25scale=d25scale))\n \n # set up meteor mask\n # check if there are any meteors in the shot:\n logger.info(\"Meteors from {:s}\".format(self.conf.meteor))\n self.met_tab = Table.read(self.conf.meteor, format=\"ascii\")\n self.met_tab = self.met_tab[self.shotid == self.met_tab[\"shotid\"]]",
"def _set_mask(self):\n # Do not work for CopyRaster, https://github.com/storm-fsv-cvut/model.smoderp2d.issues/46\n # dem_copy = os.path.join(self.data['temp'], 'dem_copy')\n dem_copy = self.storage.output_filepath('dem_copy')\n\n arcpy.CopyRaster_management(\n self._input_params['elevation'], dem_copy\n )\n\n # align computation region to DTM grid\n arcpy.env.snapRaster = self._input_params['elevation']\n\n dem_mask = self.storage.output_filepath('dem_mask')\n self.gp.Reclassify_sa(\n dem_copy, \"VALUE\", \"-100000 100000 1\", dem_mask, \"DATA\"\n )\n \n return dem_copy, dem_mask",
"def update_mask(self, new_atom):\n if self.Wf is not None: \n # convert last selected atom into an index\n # so far it will only be a frequency index\n# freq = int(new_atom.reduced_frequency * new_atom.fs)\n # The Ws matrix is assumed scaled to this frequency\n atom_idx_in_w = int(new_atom.reduced_frequency * self.scale)\n# print self.add_mask.shape, self.W.shape\n# self.add_mask += self.W[atom_idx_in_w,:]\n# self.add_mask = self.Wf[atom_idx_in_w,:]\n# # now tile it and add to the penalty mask term\n# add_term = np.tile(self.add_mask, self.frame_num) \n## add_term = add_term[:self.projs_matrix.shape[0]] \n# self.pen_mask += add_term\n# self.entropies = self._entropy(self.pen_mask)\n # replicate it only on neighboring frames: don't need to \n # penalize far from this point\n \n new_mask = self.Wf[atom_idx_in_w,:]\n trans_frame = int(new_atom.time_position / (self.scale/2))\n# print atom_idx_in_w, trans_frame\n # HEURISTIC HERE: SHOULD BE REPLACED BY A Wt matrix\n if self.Wt is not None:\n nb_tile_frames = self.Wt\n else:\n nb_tile_frames = int(np.log2(new_atom.length))\n \n# print nb_tile_frames, self.frame_num\n add_term = np.tile(new_mask, nb_tile_frames)\n start_pos = max(0, (trans_frame- nb_tile_frames/2)*(self.scale/2) ) \n L = min(add_term.shape[0], self.pen_mask.shape[0]-start_pos)\n# print nb_tile_frames, start_pos\n self.pen_mask[start_pos:start_pos+L] += add_term[:L]\n \n self.entropies[start_pos:start_pos+L] = self._entropy(self.pen_mask[start_pos:start_pos+L])",
"def prepareMask(self, mask):\n\n # Make sure that the mask has the same\n # number of voxels as the atlas image.\n # Use nearest neighbour interpolation\n # for resampling, as it is most likely\n # that the mask is binary.\n try:\n mask, xform = resample.resample(\n mask, self.shape[:3], dtype=np.float32, order=0)\n\n except ValueError:\n raise MaskError('Mask has wrong number of dimensions')\n\n # TODO allow non-aligned mask - as long as it overlaps\n # in world coordinates, it should be allowed\n if not fslimage.Image(mask, xform=xform).sameSpace(self):\n raise MaskError('Mask is not in the same space as atlas')\n\n return mask",
"def add_additional_configuration(self, namespace):\n pass",
"def _calculate_and_add_layer_of_task_config(self, param_task_config):\n # calculate the value of `Task.task_config` using it's definition\n # (check for all inheritance and find value for `task_config`\n param_task_config_value = self._build_parameter_value(param_task_config)\n if param_task_config_value.value:\n # Support two modes:\n # 1. Task.param_name:333\n # 2. {\"section\": {\"key\":\"value\"}}\n # dict parameter value can't have non string as a key\n param_task_config_value.value = parse_and_build_config_store(\n config_values=param_task_config_value.value,\n source=self._source_name(\"task_config\"),\n )\n # merging `Task.task_config` into current configuration\n # we are adding \"ultimate\" layer on top of all layers\n self.config.set_values(param_task_config_value.value)\n\n return param_task_config_value",
"def add_extra(self, entry, value):\n\n config_spec = vim.vm.ConfigSpec()\n self.logger.info(\"Adding/Updating extra config: {0} = {1}\".format(entry, value))\n opt = vim.option.OptionValue()\n opt.key = entry\n opt.value = value\n config_spec.extraConfig = [opt]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)",
"def extend_config(self, extend_config):\n self._extend_config = extend_config",
"def _update_config_w_custom(config, lane_info):\n config = copy.deepcopy(config)\n analysis_type = lane_info.get(\"analysis\", \"\")\n custom = config[\"custom_algorithms\"].get(analysis_type, None)\n if custom:\n for key, val in custom.iteritems():\n config[\"algorithm\"][key] = val\n return config",
"def _make_atten_mask_layer(self) -> keras.layers.Layer:\n return keras.layers.Lambda(\n lambda weight_mask: weight_mask[0] + (1.0 - weight_mask[1]) * -1e7,\n name=\"atten_mask\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if and only if the partition's subarray is in memory as opposed to on disk.
|
def in_memory(self):
return hasattr(self._subarray, "__array_interface__")
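A small illustration of the attribute test, using a stand-in class rather than the real file-backed array types: numpy arrays expose __array_interface__, while a lazy on-disk wrapper typically does not.
import numpy as np

class FakeFileArray:
    """Stand-in for an on-disk subarray wrapper (hypothetical)."""

print(hasattr(np.arange(3), "__array_interface__"))     # True  -> treated as in memory
print(hasattr(FakeFileArray(), "__array_interface__"))  # False -> treated as on disk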
|
[
"def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())",
"def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0",
"def on_disk(self):\n return isinstance(self._subarray, FileArray)",
"def has_shareable_memory(a):\r\n return _get_backing_memmap(a) is not None",
"def is_full(self):\n if len(self._page_map) >= self.memory_size:\n return True\n return False",
"def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()",
"def is_stored(mi):\n return any(mi[d] < max_mi[d] for d in range(self.dim))",
"def is_full(self):\n elements_in_sects = sum(\n map(opr.attrgetter(\"size\"), self.sects.values())\n )\n elements_in_total = fct.reduce(\n opr.mul, type(self).flatten_shape(self.shape), 1\n )\n res = elements_in_sects >= elements_in_total\n return res",
"def pageable(self):\n return maxSRAM(self.mem) <= self.dev.SRAM_PAGE_LEN",
"def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True",
"def full(self):\n\n return True if self.qsize() >= self.chunksize else False",
"def contains(self, offset):\n nmin = self.getoffset()\n nmax = nmin + self.blocksize()\n return (offset >= nmin) and (offset < nmax)",
"def is_full(self):\n return self.list_length >= len(self.the_array)",
"def is_full(self):\n return self.heap_size >= self.capacity",
"def _mem_heap(self):\n return False",
"def isSetSize(self):\n return _libsbml.Compartment_isSetSize(self)",
"def has_enough_memory(self, required_memory):\r\n if self._memory >= required_memory:\r\n return True\r\n return False",
"def has_vmem(self):\n return len(glob.glob('{}/*.vmem'.format(self.directory))) > 0",
"def is_partition(self):\n return self._is_partition"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if and only if the partition's subarray is on disk as opposed to in memory.
|
def on_disk(self):
return isinstance(self._subarray, FileArray)
|
[
"def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")",
"def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())",
"def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0",
"def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]",
"def is_partition(self):\n return self._is_partition",
"def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))",
"def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")",
"def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()",
"def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True",
"def _isInSegment(offset:int, segment:Elfile.ElfSegHeaderType) -> bool:\n\t\t\treturn (0 <= (offset - segment.offset) < segment.size)",
"def has_blobstore_file(self, filename):\n return filename in self.blobstore_files",
"def has_fs(self):\n return self.partitiondata[2] or self.existing_format",
"def partition_exists(partition):\n if \"/dev/\" in partition:\n partition = partition[len(\"/dev/\"):]\n\n exists = False\n with open(\"/proc/partitions\") as partitions:\n if partition in partitions.read():\n exists = True\n return exists",
"def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)",
"def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True",
"def exists(self):\r\n return self.filereferencedata != {}",
"def _is_size_bound(self, path):\n return path.suffix == \".bin\"",
"def hasIndexedStructure(rootPath: unicode) -> bool:\n ...",
"def isSlice(self):\r\n return self._wrap(type(self.obj) is type(slice))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The partition's subarray of data.
|
def subarray(self):
return self._subarray
|
[
"def partition(self, sep):\n return asarray(partition(self, sep))",
"def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)",
"def get_slice(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals\n # convert array_slice into into sub-slices of maximum contiguous blocks\n\n # Todo:\n # - parallelise reads and writes\n # - option 1. get memory rows in parallel and merge\n # - option 2. smarter byte range subsets depending on:\n # - data size\n # - data contiguity\n\n if self.enable_compression:\n return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)\n\n # truncate array_slice to shape\n # array_slice = [slice(max(0, s.start) - min(sh, s.stop)) for s, sh in zip(array_sliced, shape)]\n array_slice = [slice(max(0, s.start), min(sh, s.stop)) for s, sh in zip(array_slice, shape)]\n\n cdim = self.cdims(array_slice, shape)\n\n try:\n end = cdim[::-1].index(False)+1\n except ValueError:\n end = len(shape)\n\n start = len(shape) - end\n\n outer = array_slice[:-end]\n outer_ranges = [range(s.start, s.stop) for s in outer]\n outer_cells = list(product(*outer_ranges))\n blocks = list(zip(outer_cells, repeat(array_slice[start:])))\n item_size = np.dtype(dtype).itemsize\n\n results = []\n for cell, sub_range in blocks:\n # print(cell, sub_range)\n s3_start = (np.ravel_multi_index(cell+tuple([s.start for s in sub_range]), shape)) * item_size\n s3_end = (np.ravel_multi_index(cell+tuple([s.stop-1 for s in sub_range]), shape)+1) * item_size\n # print(s3_start, s3_end)\n data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)\n results.append((cell, sub_range, data))\n\n result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)\n offset = [s.start for s in array_slice]\n\n for cell, sub_range, data in results:\n t = [slice(x.start-o, x.stop-o) if isinstance(x, slice) else x-o for x, o in\n zip(cell+tuple(sub_range), offset)]\n if data.dtype != dtype:\n data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)\n result[t] = data.reshape([s.stop - s.start for s in sub_range])\n\n return result",
"def data_slice(data, start, end):\r\n data = data[start:end]\r\n print(\"Data slicing is in process...\")\r\n return(data)",
"def partition(data, indecies):\n\tsplitdata = [data[:indecies[0]]]\n\tsplitdata += [data[indecies[i-1]:indecies[i]] for i in range(1,len(indecies))]\n\tsplitdata.append(data[indecies[-1]:])\n\treturn splitdata",
"def get_slice(data,j,n):\n Length = data.Force.size\n n_per_float = Length/n\n _offset_per_curve = n_per_float\n data_per_curve = int(np.floor(n_per_float))\n offset = int(np.floor(j*_offset_per_curve))\n s = slice(offset,offset+data_per_curve,1)\n return s",
"def subarray(array, shape, center, fill=None):\n pass",
"def get_subvertex_slices(self, vertex):\n return self._subvertex_slices[vertex]",
"def rpartition(self, sep):\n return asarray(rpartition(self, sep))",
"def split_data(self, data):\n l = len(data)\n sec_len = int(np.ceil(l / self.num_break))\n return np.array_split(data, self.num_break)",
"def __getslice__(self, i, j):\n return self.dtrs[i:j]",
"def get_slice(self):\n # assert self._final_dimensions == tuple(self._current_dimensions)\n self._slice = []\n for axis in range(4):\n indices = self._get_slice_by_axis(axis)\n self._slice.append(slice(*indices))\n self._slice = tuple(self._slice)\n return self",
"def get_slice_mp(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals\n # pylint: disable=too-many-locals\n def work_get_slice(block, array_name, offset, s3_bucket, s3_key, shape, dtype):\n result = sa.attach(array_name)\n cell, sub_range = block\n\n item_size = np.dtype(dtype).itemsize\n s3_start = (np.ravel_multi_index(cell+tuple([s.start for s in sub_range]), shape)) * item_size\n s3_end = (np.ravel_multi_index(cell+tuple([s.stop-1 for s in sub_range]), shape)+1) * item_size\n data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)\n\n t = [slice(x.start-o, x.stop-o) if isinstance(x, slice) else x-o for x, o in\n zip(cell+tuple(sub_range), offset)]\n if data.dtype != dtype:\n data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)\n # data = data.reshape([s.stop - s.start for s in sub_range])\n\n result[t] = data.reshape([s.stop - s.start for s in sub_range])\n\n if self.enable_compression:\n return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)\n\n cdim = self.cdims(array_slice, shape)\n\n try:\n end = cdim[::-1].index(False)+1\n except ValueError:\n end = len(shape)\n\n start = len(shape) - end\n\n outer = array_slice[:-end]\n outer_ranges = [range(s.start, s.stop) for s in outer]\n outer_cells = list(product(*outer_ranges))\n blocks = list(zip(outer_cells, repeat(array_slice[start:])))\n offset = [s.start for s in array_slice]\n\n array_name = '_'.join(['S3AIO', str(uuid.uuid4()), str(os.getpid())])\n sa.create(array_name, shape=[s.stop - s.start for s in array_slice], dtype=dtype)\n shared_array = sa.attach(array_name)\n\n self.pool.map(work_get_slice, blocks, repeat(array_name), repeat(offset), repeat(s3_bucket),\n repeat(s3_key), repeat(shape), repeat(dtype))\n\n sa.delete(array_name)\n return shared_array",
"def getslice(data, dim, slicenr=None):\n\n if slicenr is None:\n slicenr = int(data.shape[dim] / 2)\n assert -1 < slicenr < data.shape[dim], f\"Index {slicenr} is out of range\"\n\n return np.take(data, slicenr, axis=dim)",
"def subdataset(self):\n return self._clip_metadata.get(\"subdataset\")",
"def getData(self, slice=None):\n\t\traise NotImplementedError",
"def get_partition(numNodes, numSubsets):\n\n nodes = np.arange(numNodes)\n np.random.shuffle(nodes)\n\n lenSplit = int(np.floor(numNodes / numSubsets))\n\n partitionData = []\n\n for part in range(0, numNodes, lenSplit):\n try:\n partitionData.append(nodes[part: part + lenSplit])\n except IndexError:\n partitionData.append(nodes[part: -1])\n\n return partitionData",
"def _get_slice(self, rank):\n ind = []\n buffer_size = []\n mpi_coords = self.mpi_topo.cartcomm.Get_coords(rank)\n # print('coords', rank, mpi_coords)\n for i in range(self.dim):\n ind.append(\n slice(self.region[i][mpi_coords[i]], self.region[i][mpi_coords[i] + 1])\n )\n buffer_size.append(\n self.region[i][mpi_coords[i] + 1] - self.region[i][mpi_coords[i]]\n )\n return ind[::-1], buffer_size[::-1]",
"def __getslice__( self, *args):\n return array.array.__getslice__(self, *args).tostring()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Change the axis names. The axis names are arbitrary, so mapping them to another arbitrary collection does not change the data array values, units, nor axis order.
|
def change_axis_names(self, axis_map):
axes = self.axes
# Partition axes
self.axes = [axis_map[axis] for axis in axes]
# Flipped axes
flip = self.flip
if flip:
self.flip = [axis_map[axis] for axis in flip]
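A minimal sketch, using a simple stand-in object rather than a real partition, of how an axis_map renames both `axes` and `flip` while leaving the data untouched:
class _PartitionStub:
    def __init__(self, axes, flip):
        self.axes = axes
        self.flip = flip

    def change_axis_names(self, axis_map):
        # Rename partition axes and any flipped axes with the same mapping
        self.axes = [axis_map[axis] for axis in self.axes]
        if self.flip:
            self.flip = [axis_map[axis] for axis in self.flip]

p = _PartitionStub(axes=["dim0", "dim1"], flip=["dim1"])
p.change_axis_names({"dim0": "time", "dim1": "lat"})
print(p.axes, p.flip)  # ['time', 'lat'] ['lat']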
|
[
"def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)",
"def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1",
"def set_axis_names(self, x=\"x\", y=\"y\"):\n self.ax.set_xlabel(x)\n self.ax.set_ylabel(y)",
"def set_index_names(self, names, axis=0):\n self.get_axis(axis).names = names",
"def axesNames(self, data, info):\n return []",
"def setAxis(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def _default_axis_names(n_dims):\n _DEFAULT_NAMES = (\"z\", \"y\", \"x\")\n return _DEFAULT_NAMES[-n_dims:]",
"def setAllAxisUnits(self,units): \n self.__axis_units__ = units",
"def setAxisUnits(self, dim, units): \n try:\n self.__axis_units__[dim] = units\n except IndexError:\n self.__axis_units__.append(units)",
"def set_index_name(self, name, axis=0):\n self.get_axis(axis).name = name",
"def setAllAxisLabels(self, labels):\n self.__axis_labels__ = labels",
"def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)",
"def process_custom_axes(axis_names):\n return axis_names.strip().strip(\"'\").strip('\"').split(',')",
"def axis_name(self):\n return self._axis_name",
"def setAxisNameColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'Name', axes)",
"def setAxisNameJustification(jus, axes='XYZ'):\n dislin.namjus(justdict[jus],axes)",
"def axesnames(self):\n return self._axesnames",
"def _handle_setup_axis(self, axis_args):\n axis_name = axis_args['name']\n axes_dict = self.server.axes\n\n if axis_name not in [name for name, _ in axes_dict.items()]:\n print \"Adding a new axis:\", axis_name\n axis_count = len(axes_dict)\n newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)\n axes_dict[axis_name] = newaxis\n axes_dict[axis_name].grid(True)\n axes_dict[axis_name].set_xlabel(axis_args['x_label'])\n axes_dict[axis_name].set_ylabel(axis_args['y_label'])\n # TODO: support *.set_title(\"Title\")\n if FLAGS.logy:\n axes_dict[axis_name].set_yscale('log', nonposy='clip')\n\n if axis_count != 0:\n # Resize other axes if the above wasn't the first.\n axis_count = len(axes_dict)\n for row,(name, _) in enumerate(axes_dict.items(), 1):\n print name, axis_count, row\n axes_dict[name].change_geometry(axis_count, 1, row)",
"def rename_dimension(self, ind, name):\n if not isinstance(ind, int):\n raise ValueError('Dimension must be an integer')\n if 0 > ind >= len(self.shape):\n raise ValueError('Dimension must be an integer between 0 and {}'\n ''.format(len(self.shape)-1))\n if not isinstance(name, str):\n raise ValueError('New Dimension name must be a string')\n delattr(self, self.axes[ind].name)\n self.axes[ind].name = name\n setattr(self, name, self.axes[ind])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Close the partition after it has been conformed. The partition should usually be closed after its `array` method has been called to prevent memory leaks. Closing the partition does one of the following, depending on the values of the partition's `!_original` attribute and on the keyword arguments.
|
def close(self, **kwargs):
config = getattr(self, "config", None)
if config is None:
return
if kwargs:
config.update(kwargs)
original = getattr(self, "_original", None)
logger.partitioning("Partition.close: original = {}".format(original))
if not original:
originally_on_disk = False
original_subarray = None
else:
originally_on_disk = not original.in_memory
original_subarray = original._subarray
config = self.config
logger.partitioning(" config = {}".format(config))
if config["serial"]:
# --------------------------------------------------------
# SERIAL
# --------------------------------------------------------
logger.partitioning(" serial")
if config["readonly"]:
logger.partitioning(" readonly=True")
if originally_on_disk:
logger.partitioning(" subarray originally on disk")
if config.get("to_disk", False):
# 1.1.1.1 The original subarray was on disk,
# we don't want to keep the current
# subarray in memory, and we are happy
# to discard any changes that may have
# been made to the subarray.
logger.partitioning(" 1.1.1.1 revert")
self.revert()
elif free_memory() <= cf_fm_threshold():
# 1.1.1.2 The original subarray was on disk,
# we are happy to keep the current
# subarray in memory, but there is not
# enough free memory to do so.
logger.partitioning(
" 1.1.1.2 revert ({} <= {})".format(
free_memory(), cf_fm_threshold()
)
)
self.revert()
else:
# 1.1.1.3 The original subarray was on disk
# and there is enough memory to keep
# the current subarray in memory
if config["unique_subarray"] and isinstance(
original_subarray, CachedArray
):
# The original subarray was a temporary
# file which is not referenced by any
# other partitions
_remove_temporary_files(
original_subarray._partition_file
)
del self.masked
logger.partitioning(
" 1.1.1.3 del masked ({} > {})".format(
free_memory(), cf_fm_threshold()
)
)
else:
logger.partitioning(" subarray originally in memory")
if config.get("to_disk", False):
# 1.1.2.1 Original subarray was in memory and
# we don't want to keep the current
# subarray in memory
logger.partitioning(" 1.1.2.1 to_disk")
self.to_disk(reopen=False)
elif free_memory() <= cf_fm_threshold():
# 1.1.2.2 Original subarray was in memory and
# unique but there is not enough
# memory to keep the current subarray
logger.partitioning(" 1.1.2.2 to_disk")
self.to_disk(reopen=False)
else:
# 1.1.2.3 Original subarray was in memory and
# unique and there is enough memory to
# keep the current subarray in memory
logger.partitioning(" 1.1.2.3 pass")
pass
else:
# config['readonly'] is False
if originally_on_disk:
if config.get("to_disk", False):
# 1.2.1.1 Original subarray was on disk and
# we don't want to keep the array
if config["unique_subarray"] and isinstance(
original_subarray, CachedArray
):
# Original subarray was a temporary file
# on disk which is not referenced by any
# other partitions
_remove_temporary_files(
original_subarray._partition_file
)
logger.partitioning(" 1.2.1.1 to_disk")
self.to_disk(reopen=False)
elif free_memory() <= cf_fm_threshold():
# 1.2.1.2 Original subarray was on disk but
# there is not enough memory to keep
# it
if config["unique_subarray"] and isinstance(
original_subarray, CachedArray
):
# Original subarray was a temporary file
# on disk which is not referenced by any
# other partitions
_remove_temporary_files(
original_subarray._partition_file
)
logger.partitioning(" 1.2.1.2 to_disk")
self.to_disk(reopen=False)
else:
# 1.2.1.3 Original subarray was on disk and
# there is enough memory to keep it
logger.partitioning(" 1.2.1.3 pass")
del self.masked
else:
if config.get("to_disk", False):
# 1.2.2.1 Original subarray was in memory but
# we don't want to keep it
logger.partitioning(" 1.2.2.1 to_disk")
self.to_disk(reopen=False)
elif free_memory() <= cf_fm_threshold():
# 1.2.2.2 Original subarray was in memory
# but there is not enough memory to
# keep it
logger.partitioning(" 1.2.2.2 to_disk")
self.to_disk(reopen=False)
else:
# 1.2.2.3 Original subarray was in memory and
# there is enough memory to keep it
logger.partitioning(" 1.2.2.3 del masked")
del self.masked
else:
logger.partitioning("Partition.close: parallel")
# --------------------------------------------------------
# PARALLEL
# --------------------------------------------------------
pass
# if hasattr(self, '_original'):
# del self._original
# print(hasattr(self, 'config')),
try:
del self.config
except AttributeError:
pass
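The branch structure above reduces, for the serial case, to a small decision table. The sketch below is a deliberate simplification (not the real method) that only mirrors the read-only three-way choice between reverting to the original on-disk subarray, writing the current subarray to disk, and keeping it in memory:
def close_decision(originally_on_disk, to_disk, enough_memory):
    # Simplified mirror of the readonly=True branch of Partition.close
    if originally_on_disk:
        if to_disk or not enough_memory:
            return "revert"           # cases 1.1.1.1 / 1.1.1.2
        return "keep in memory"       # case 1.1.1.3
    if to_disk or not enough_memory:
        return "write to disk"        # cases 1.1.2.1 / 1.1.2.2
    return "keep in memory"           # case 1.1.2.3

print(close_decision(True, False, False))   # 'revert'
print(close_decision(False, True, True))    # 'write to disk'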
|
[
"def file_close(self):\n if self.on_disk:\n self._subarray.close()",
"def close_all(self):\n self.partition_map.close_all()",
"def close(self):\n self.drill = None",
"def close(self):\n self.dataset.close()",
"def exit(self):\n for acc in self.to_close:\n acc.close()",
"def _close(self):\n pass",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def close_all(self):\n for each_partition in self.file_map.items():\n each_partition[1].close()\n self.meta.close()",
"def close(self):\n if self.closed:\n return\n\n self.closed = True\n try:\n if self.mode in (\"a\", \"w\", \"x\"):\n self.fileobj.write(NUL * (BLOCKSIZE * 2))\n self.offset += (BLOCKSIZE * 2)\n # fill up the end with zero-blocks\n # (like option -b20 for tar does)\n blocks, remainder = divmod(self.offset, RECORDSIZE)\n if remainder > 0:\n self.fileobj.write(NUL * (RECORDSIZE - remainder))\n finally:\n if not self._extfileobj:\n self.fileobj.close()",
"def close(self):\n return _yarp.PolyDriver_close(self)",
"def close(self) -> None:\n self.uninitialize().close()",
"def Close(self):\n self._RaiseIfNotWritable()\n\n self._storage_file.Close()\n self._storage_file = None",
"async def end_array(self):",
"def close(self):\r\n\t\tself.pipeline.close()",
"def close(self):\r\n\r\n if not self.isClosed:\r\n self.__is_closed = True\r\n else:\r\n raise HDDOPermissionException('Tried to close a closed HealthDominoDataObject.')",
"def close(self):\r\n self._flush_adjusted_lines()",
"def close(self):\n self._simple_serial.close()",
"def close_orders(self):",
"def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if the subarray contains datetime objects.
|
def isdt(self):
return self.Units.isreftime and self._subarray.dtype == _dtype_object
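As a standalone illustration (assumed values, not the real Units machinery), reference times held as datetime objects have an object dtype, whereas numeric reference times do not:
import datetime
import numpy as np

dt_data = np.array([datetime.datetime(2000, 1, 1)], dtype=object)
num_data = np.array([0.5, 1.5])

print(dt_data.dtype == np.dtype(object))   # True  -> datetime objects
print(num_data.dtype == np.dtype(object))  # False -> numeric reference times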
|
[
"def _contains_cftime_datetimes(array) -> bool:\n # Copied / adapted from xarray.core.common\n from xarray.core.pycompat import is_duck_dask_array\n\n if cftime is None:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n if is_duck_dask_array(sample):\n sample = sample.compute()\n if isinstance(sample, np.ndarray):\n sample = sample.item()\n return isinstance(sample, cftime.datetime)\n else:\n return False",
"def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.datetime64",
"def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)",
"def isTimestampContained(modelTypes):\n\n for type in modelTypes:\n if isinstance(type, model.ComplexType):\n for property in type.properties:\n if (property.type is not None) and (isinstance(property.type, model.DateTimeType)):\n return True\n return False",
"def is_datetime(self) -> bool:\n return False",
"def is_valid_datetime64(arr, unit=None):\n try:\n as_datetime64(arr, unit=unit)\n return True\n except ValueError:\n return False",
"def isTimeSeries(self):\n return self._isDateTime",
"def has_date_points(self):\n return self.__contains_datefields",
"def are_all_datetimes(values: List[Union[str, int, float]]):\n for value in values:\n if not is_datetime(value):\n return False\n return True",
"def is_timedata(self):\n return isinstance(self.dtype, TimeDtype)",
"def __contains__(self, date):\n if not isinstance(date, datetime.date):\n return False\n return date.year == self.year and date.month == self.month",
"def isDateContained(modelTypes):\n\n for type in modelTypes:\n if isinstance(type, model.ComplexType):\n for property in type.properties:\n if (property.type is not None) and (isinstance(property.type, model.DateType)):\n return True\n return False",
"def _check_timeseries(self, timeseries, units):\n\n try:\n if timeseries.dtype != basic_types.datetime_value_2d:\n\n # Both 'is' or '==' work in this case. There is only one\n # instance of basic_types.datetime_value_2d.\n # Maybe in future we can consider working with a list,\n # but that's a bit more cumbersome for different dtypes\n\n raise ValueError('timeseries must be a numpy array containing basic_types.datetime_value_2d dtype'\n )\n except AttributeError, err:\n\n msg = 'timeseries is not a numpy array. {0}'\n raise AttributeError(msg.format(err.message))\n\n # check to make sure the time values are in ascending order\n\n if np.any(timeseries['time'][np.argsort(timeseries['time'])]\n != timeseries['time']):\n raise ValueError('timeseries are not in ascending order. The datetime values in the array must be in ascending order'\n )\n\n # check for duplicate entries\n\n unique = np.unique(timeseries['time'])\n if len(unique) != len(timeseries['time']):\n msg = \\\n 'timeseries must contain unique time entries. Number of duplicate entries {0}'\n raise ValueError(msg.format(len(timeseries) - len(unique)))",
"def _is_offset(self, arr_or_obj):\n if isinstance(arr_or_obj, pd.DateOffset):\n return True\n elif is_list_like(arr_or_obj):\n return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)\n else:\n return False",
"def has_time(self):\n return isinstance(self._start, datetime.datetime)",
"def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes",
"def has_time(dataset):\n try:\n return len(dataset.dimensions['time']) > 0\n except KeyError:\n return False",
"def isTimeContained(modelTypes):\n\n for type in modelTypes:\n if isinstance(type, model.ComplexType):\n for property in type.properties:\n if (property.type is not None) and (isinstance(property.type, model.TimeType)):\n return True\n return False",
"def test_julian2datetime_array():\n dt = julian2datetime(np.array([2457533.9306828701,\n 2457533.9306828701]))\n dts = datetime.datetime(2016, 5, 25, 10, 20, 10, 999976)\n dt_should = np.array([dts, dts])\n\n assert type(dt) == np.ndarray\n assert np.all(dt == dt_should)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Close the file containing the subarray, if there is one.
|
def file_close(self):
if self.on_disk:
self._subarray.close()
|
[
"def closeFile():\r\n global datafile\r\n if datafile is not None:\r\n datafile.close()",
"def close(self):\n if self.closed:\n return\n\n self.closed = True\n try:\n if self.mode in (\"a\", \"w\", \"x\"):\n self.fileobj.write(NUL * (BLOCKSIZE * 2))\n self.offset += (BLOCKSIZE * 2)\n # fill up the end with zero-blocks\n # (like option -b20 for tar does)\n blocks, remainder = divmod(self.offset, RECORDSIZE)\n if remainder > 0:\n self.fileobj.write(NUL * (RECORDSIZE - remainder))\n finally:\n if not self._extfileobj:\n self.fileobj.close()",
"def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if",
"def close(self):\n if self.mode == 0:\n try: \n files['read'][self.filename] -= 1\n if files['read'][self.filename] == 0:\n files['read'].pop(self.filename)\n except KeyError:\n raise core.LASException(\"File %s was not found in accounting dictionary!\" % self.filename)\n\n core.las.LASReader_Destroy(self.handle)\n else:\n try:\n files['append'].remove(self.filename)\n except:\n files['write'].remove(self.filename)\n core.las.LASWriter_Destroy(self.handle)\n \n if self.ownheader:\n core.las.LASHeader_Destroy(self._header)\n \n self._header = None\n self.handle = None",
"def close_raster_file(self):\n try:\n if self.dataset:\n del self.dataset\n self.dataset = None\n except AttributeError:\n pass",
"def close_file(self):\r\n self.file.close()",
"def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None",
"def write_array_info(subarray, output_filename):\n\n serialize_meta = True\n\n subarray.to_table().write(\n output_filename,\n path=\"/instrument/subarray/layout\",\n serialize_meta=serialize_meta,\n append=True\n )\n\n subarray.to_table(kind='optics').write(\n output_filename,\n path='/instrument/telescope/optics',\n append=True,\n serialize_meta=serialize_meta\n )\n for telescope_type in subarray.telescope_types:\n ids = set(subarray.get_tel_ids_for_type(telescope_type))\n if len(ids) > 0: # only write if there is a telescope with this camera\n tel_id = list(ids)[0]\n camera = subarray.tel[tel_id].camera\n camera_name = str(camera)\n\n with tables.open_file(output_filename, mode='a') as f:\n telescope_chidren = f.root['instrument/telescope']._v_children.keys()\n if 'camera' in telescope_chidren:\n cameras_name = f.root['instrument/telescope/camera']._v_children.keys()\n if camera_name in cameras_name:\n print(\n f'WARNING during lstchain.io.write_array_info():',\n f'camera {camera_name} seems to be already present in the h5 file.'\n )\n continue\n\n camera.geometry.to_table().write(\n output_filename,\n path=f'/instrument/telescope/camera/{camera_name}',\n append=True,\n serialize_meta=serialize_meta,\n )",
"def close_file(self):\n self.file.close()",
"def Close(self):\n super(CPIOArchiveFile, self).Close()\n self._file_entries = None",
"def close_csv_file(self):\n if self.file is not None:\n self.file.close()",
"def closeSpecFile(self):\n if self.write_binary_specs and self.data_spec_exec is None:\n self.file_handle_bin.close()\n self.file_handle_bin = None\n if self.write_text_specs:\n self.file_handle_txt.close()\n self.file_handle_txt = None\n return",
"def close(self):\n if self.mode == \"w\":\n # Write the content index\n self.cnt.write(self.file)\n\n self.file.close()",
"def closeJson(f):\n f.write(']')\n f.close()",
"def close_debug_file(self) -> None:\n if self.lib is None:\n raise PEMicroException(\"Library is not loaded\")\n\n self.lib.close_debug_file()\n\n self.opened_debug_file = False",
"def close(self):\n self.f.close()",
"def close(self):\n if self.isOpen():\n self.__file.close()\n del self.__file\n self.__file = None\n else:\n self.raiseAWarning('Tried to close',self.getFilename(),'but file not open!')",
"def close(self):\n if not self.data_file is None:\n self.data_file.close()\n self.reader.close()\n self.data_file = None\n self.reader = None",
"def close(self):\n\t\tif self.is_open:\n\t\t\tself.hdf5file.close()\n\t\t\tself.is_open = False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return an iterator over indices of the master array which are spanned by the data array.
|
def master_ndindex(self): # itermaster_indices(self):
return itertools_product(
*[range(*r) for r in self.location]
) # TODO check
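A worked example with a hypothetical `location` value: each axis contributes a range(start, stop), and the Cartesian product yields every master-array index spanned by the partition.
from itertools import product

location = [(0, 2), (3, 5)]  # assumed (start, stop) pairs, one per axis
print(list(product(*[range(*r) for r in location])))
# [(0, 3), (0, 4), (1, 3), (1, 4)]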
|
[
"def indicesIter(self):\n \n pass",
"def indices(self):",
"def getArrayIndices(self):\n \n pass",
"def __iter__(self):\n start = 0\n for i, dist in enumerate(self.dists):\n count = self.ndims[i]\n if count == 1:\n idx = start\n else:\n idx = slice(start,start+count)\n yield dist, idx\n start += count",
"def get_used_indices(self):\n\n if self.npass==1:\n return numpy.arange(self.data1.shape[0])\n else:\n if not hasattr(self, 'used_indices'):\n raise RuntimeError(\"run a 2 pass calculation first\")\n\n return self.used_indices",
"def iter_slices(table, other, mode: str, keep_empty: bool):\n for _c, bin_rows, src_rows in by_shared_chroms(other, table, keep_empty):\n if src_rows is None:\n # Emit empty indices since 'table' is missing this chromosome\n for _ in range(len(bin_rows)):\n yield pd.Index([], dtype=\"int64\")\n else:\n for slc, _s, _e in idx_ranges(src_rows, bin_rows.start, bin_rows.end, mode):\n indices = src_rows.index[slc].values\n if keep_empty or len(indices):\n yield indices",
"def get_subset_inds(self, adata_parent):\r\n subset_inds = np.ones(len(adata_parent), dtype=bool)\r\n for condition, values in self.subset_cond.items():\r\n subset_inds *= adata_parent.obs[condition].isin(values)\r\n return subset_inds",
"def list_indices(self):",
"def getIndexes(self):\n self.Indexes = []\n for x in range(MapSize):\n for y in range(MapSize):\n self.Indexes.append((x, y))\n return self.Indexes",
"def index_iterator( shp ):\n if len(shp) == 0:\n return\n elif len( shp ) == 1:\n for i in range( shp[0] ):\n yield [i]\n else:\n shp_foo = shp[1:]\n for i in range( shp[0] ):\n for foo in index_iterator( shp_foo ):\n yield [i] + foo",
"def indexing_list(self, daisy_units):\n for daisy_unit in daisy_units:\n for index in range(daisy_unit.bit_count):\n yield daisy_unit, index",
"def iterCoordsets(self):\n\n for i in range(self.numCoordsets()):\n yield self._ag._coords[i, self._index].copy()",
"def _natural_indices(self):\n if self.ndim == 2:\n nx, ny = self.shape\n for iy in range(ny):\n for ix in range(nx):\n yield (ix, iy)\n else:\n nx, ny, nz = self.shape\n for iz in range(nz):\n for iy in range(ny):\n for ix in range(nx):\n yield (ix, iy, iz)",
"def get_index_array(self):\n return self.region_pairs",
"def _iterCoordsets(self):\n\n for i in range(self.numCoordsets()):\n yield self._ag._coords[i, self._index]",
"def indices(self):\n return self.mask.true_indices",
"def __iter__(self):\n while True:\n if self.batches is None:\n for indexed_sentence in self.indexed_sentences:\n yield indexed_sentence\n else:\n for batch in self.batches:\n yield batch[:-1, :], batch[1:, :] # Return batch and target indices\n\n if not self.repeat:\n return",
"def get_data_idx(self)->list:\n return self.__data_idx",
"def indices(self):\n return self._indices"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the `!part` attribute in-place for new indices of the master array.
|
def new_part(self, indices, master_axis_to_position, master_flip):
shape = self.shape
if indices == [slice(0, stop, 1) for stop in shape]:
return
# ------------------------------------------------------------
# If a dimension runs in the wrong direction then change its
# index to account for this.
#
# For example, if a dimension with the wrong direction has
# size 10 and its index is slice(3,8,2) then after the
# direction is set correctly, the index needs to changed to
# slice(6,0,-2):
#
# >>> a = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
# >>> a[slice(3, 8, 2)]
# [6, 4, 2]
# >>> a.reverse()
# >>> print(a)
# >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# >>> a[slice(6, 0, -2)]
# [6, 4, 2]
# ------------------------------------------------------------
if self._subarray.size > 1:
indices = indices[:]
p_flip = self.flip
for axis, i in master_axis_to_position.items():
if (axis not in p_flip and axis not in master_flip) or (
axis in p_flip and axis in master_flip
):
# This axis runs in the correct direction
continue
# Still here? Then this axis runs in the wrong
# direction.
# Reset the direction
p_flip = p_flip[:]
if axis in self.flip:
p_flip.remove(axis)
else:
p_flip.append(axis)
# Modify the index to account for the changed
# direction
size = shape[i]
if isinstance(indices[i], slice):
start, stop, step = indices[i].indices(size)
# Note that step is assumed to be always +ve here
div, mod = divmod(stop - start - 1, step)
start = size - 1 - start
stop = start - div * step - 1
if stop < 0:
stop = None
indices[i] = slice(start, stop, -step)
else:
size -= 1
indices[i] = [size - j for j in indices[i]]
# --- End: for
self.flip = p_flip
# --- End: if
slice_None = slice(None)
# Reorder the new indices
indices = [
(
indices[master_axis_to_position[axis]]
if axis in master_axis_to_position
else slice_None
)
for axis in self.axes
]
part = self.part
if not part:
self.part = indices
return
# Still here? update an existing part
p_part = []
for part_index, index, size in zip(
part, indices, self._subarray.shape
):
if index == slice_None:
p_part.append(part_index)
continue
if isinstance(part_index, slice):
if isinstance(index, slice):
start, stop, step = part_index.indices(size)
size1, mod = divmod(stop - start - 1, step)
start1, stop1, step1 = index.indices(size1 + 1)
size2, mod = divmod(stop1 - start1, step1)
if mod != 0:
size2 += 1
start += start1 * step
step *= step1
stop = start + (size2 - 1) * step
if step > 0:
stop += 1
else:
stop -= 1
if stop < 0:
stop = None
p_part.append(slice(start, stop, step))
continue
else:
new_part = list(range(*part_index.indices(size)))
new_part = [new_part[i] for i in index]
else:
if isinstance(index, slice):
new_part = part_index[index]
else:
new_part = [part_index[i] for i in index]
# --- End: if
# Still here? Then the new element of p_part is a list of
# integers, so let's see if we can convert it to a slice
# before appending it.
new_part0 = new_part[0]
if len(new_part) == 1:
# Convert a single element list to a slice object
new_part = slice(new_part0, new_part0 + 1, 1)
else:
step = new_part[1] - new_part0
if step:
if step > 0:
start, stop = new_part0, new_part[-1] + 1
else:
start, stop = new_part0, new_part[-1] - 1
if new_part == list(range(start, stop, step)):
if stop < 0:
stop = None
new_part = slice(start, stop, step)
# --- End: if
p_part.append(new_part)
# --- End: for
self.part = p_part
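A standalone check (using the sizes from the comment above, nothing from a real partition) that the reversed-slice arithmetic selects the same elements once the axis direction is corrected:
size = 10
index = slice(3, 8, 2)

start, stop, step = index.indices(size)
div, mod = divmod(stop - start - 1, step)
start = size - 1 - start
stop = start - div * step - 1
reversed_index = slice(start, stop if stop >= 0 else None, -step)

a = list(range(size))
before = a[index]          # [3, 5, 7]
a.reverse()
after = a[reversed_index]  # [3, 5, 7] -- same elements after the flip
print(before, after)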
|
[
"def _update_assessment_parts_map(self, part_list):\n for part in part_list:\n # perhaps look for a \"level offset\"?\n level = part._level_in_section # plus or minus \"level offset\"?\n if str(part.get_id()) not in self._part_ids():\n self._insert_part_map(get_default_part_map(\n part.get_id(), level, part.are_items_sequential()),\n index=part_list.index(part))",
"def _reconstruct(self) -> None:\n\n self._dummy_cell_count = 0\n self._offset = 0\n\n old_storage = self._storage\n self._storage = [elm for elm in old_storage if elm is not _DUMMY_CELL]\n\n self._indexer.update(switch(enumerate(self._storage)))",
"def update_sub_assembly(self):\r\n self._part_count.clear()\r\n self.master_register = {self.ref: weakref.ref(self)}\r\n sub_list = []\r\n for sub in self._sub_assembly.values():\r\n sub_list.append(sub)\r\n self._sub_assembly = {}\r\n self.add_components(sub_list)",
"def worker(idx, arr):\n arr[idx] = idx",
"def set_pieces(self, pieces):\n self.pieces = np.copy(pieces)",
"def _loadpart(self, part):\n new_partidx = util.Partname(part.partname).idx\n for idx, seq_part in enumerate(self._values):\n partidx = util.Partname(seq_part.partname).idx\n if partidx > new_partidx:\n self._values.insert(idx, part)\n return\n self._values.append(part)",
"def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2",
"def _minor_slice(self, idx, copy=False):\n M, N = self._swap(*self.shape)\n start, stop, step = idx.indices(N)\n\n if start == 0 and stop == N and step == 1:\n return self.copy() if copy else self\n\n N = len(range(start, stop, step))\n new_shape = self._swap(M, N)\n\n if N == 0 or self.nnz == 0:\n return self.__class__(new_shape, dtype=self.dtype)\n if step == 1:\n return self.__class__(\n _index._get_csr_submatrix_minor_axis(\n self.data, self.indices, self.indptr, start, stop),\n shape=new_shape, copy=False)\n cols = cupy.arange(start, stop, step, dtype=self.indices.dtype)\n return self._minor_index_fancy(cols)",
"def updateSlices( self, slices ):\n self._slices = list( slices )",
"def on_part_change(self, selected):\r\n\r\n\t\tself.part.set(selected)",
"def part_ids(self, part_ids):\n\n self._part_ids = part_ids",
"def parts(self, parts):\n\n self._parts = parts",
"def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key",
"def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )",
"def setInternalIndex(self,ind):\n\t\tself.trMtrxNode_ind = ind",
"def sync_internal_representation(self, values: List[float], slots: Mapping[str, List[float]]):\n self.weights.update_from_values(values=values)\n if not self.disable_updating:\n for name, values in slots.items():\n np.copyto(self.sparse_slots[name].np_variable, values)",
"def _update(self, idx, value):\r\n pass",
"def _major_slice(self, idx, copy=False):\n M, N = self._swap(*self.shape)\n start, stop, step = idx.indices(M)\n\n if start == 0 and stop == M and step == 1:\n return self.copy() if copy else self\n\n M = len(range(start, stop, step))\n new_shape = self._swap(M, N)\n\n if step == 1:\n if M == 0 or self.nnz == 0:\n return self.__class__(new_shape, dtype=self.dtype)\n return self.__class__(\n _index._get_csr_submatrix_major_axis(\n self.data, self.indices, self.indptr, start, stop),\n shape=new_shape, copy=copy)\n rows = cupy.arange(start, stop, step, dtype=self.indptr.dtype)\n return self._major_index_fancy(rows)",
"def reindex_subcomponent_taxa(self):\n ti_mutable = self.taxon_set._is_mutable\n self.taxon_set._is_mutable = True\n new_map = CharacterDataMap()\n for taxon, seq in self.taxon_seq_map.items():\n taxon = self.taxon_set.require_taxon(label=taxon.label)\n new_map[taxon] = seq\n self.taxon_set._is_mutable = ti_mutable\n self.taxon_seq_map = new_map"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The extra memory required to access the array.
|
def extra_memory(self):
    if not self.in_memory:
        # --------------------------------------------------------
        # The subarray is on disk so getting the partition's data
        # array will require extra memory
        # --------------------------------------------------------
        extra_memory = True
    else:
        # --------------------------------------------------------
        # The subarray is already in memory
        # --------------------------------------------------------
        config = self.config
        p_part = self.part
        if p_part:
            extra_memory = True
        elif not config["unique_subarray"]:
            extra_memory = True
        else:
            p_data = self._subarray
            if not numpy_ma_isMA(p_data):
                # The p_data is not a masked array
                extra_memory = isinstance(p_data.base, numpy_ndarray)
            else:
                # The p_data is a masked array
                memory_overlap = isinstance(
                    p_data.data.base, numpy_ndarray
                )
                if not (
                    p_data.mask is numpy_ma_nomask
                    or not numpy_ma_is_masked(p_data)
                ):
                    # There is at least one missing data point
                    memory_overlap |= isinstance(
                        p_data.mask.base, numpy_ndarray
                    )
                extra_memory = memory_overlap
            # --- End: if
            p_dtype = p_data.dtype
            if not extra_memory:
                if config["func"] is not None:
                    extra_memory = True
                else:
                    p_units = self.Units
                    units = config["units"]
                    if (
                        not p_units.equals(units)
                        and bool(p_units) is bool(units)
                        and not (
                            p_data.flags["C_CONTIGUOUS"]
                            and p_dtype.kind == "f"
                        )
                    ):
                        extra_memory = True
            # ------------------------------------------------------------
            # Extra memory is required if the dtype needs changing
            # ------------------------------------------------------------
            if not extra_memory:
                dtype = config["dtype"]
                if dtype is not None and dtype != p_data.dtype:
                    extra_memory = True
    # --- End: if
    # ------------------------------------------------------------
    # Amount of extra memory (in bytes) required to access the
    # array
    # ------------------------------------------------------------
    return self.nbytes if extra_memory else 0
|
[
"def allocated_memory(self):\n return self._allocated_memory",
"def memory(self):\n return self._memory",
"def get_array_size(self):\r\n return conf.lib.clang_getArraySize(self)",
"def arraysize(self):\n return self._arraysize",
"def MAXMEM(self):",
"def memory_size(self):\n return self._memory_size",
"def getAdjustedMemorySize(self) -> long:\n ...",
"def get_array_size(self):\n return conf.lib.clang_getArraySize(self)",
"def size(self):\n return len(self.memory)",
"def _get_memory_low_free(self):\n return self.__memory_low_free",
"def _get_memory_total_free(self):\n return self.__memory_total_free",
"def __len__(self):\n mem_size = len(self.memory)\n return mem_size",
"def num_extra_bytes(self) -> int:\n return int(sum(dim.num_bits for dim in self.extra_dimensions) // 8)",
"def _read(self):\n return np.copy(self.memory[self.head_pos])",
"def kb(object):\n return sys.getsizeof(object) * 0.01",
"def memory_real_in_bytes(self):\n cdef INT64_t total_memory = 0\n\n # col\n total_memory += self.__nnz * sizeof(INT64_t)\n # ind\n total_memory += (self.__nrow + 1) * sizeof(INT64_t)\n # val\n total_memory += self.__nnz * sizeof(FLOAT32_t)\n\n return total_memory",
"def get_allocated_memory_units(self, runner) -> int:",
"def BytesOfStorage(self):\n return (self.NumBits() + 7) / 8",
"def _get_memory_high_free(self):\n return self.__memory_high_free"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Move the partition's subarray to a temporary file on disk.
|
def to_disk(self, reopen=True):
    # try:
    tfa = CachedArray(self.array)
    # except Exception:
    # return False
    fd, _lock_file = mkstemp(
        prefix=tfa._partition_file + "_", dir=tfa._partition_dir
    )
    close(fd)
    self.subarray = tfa
    _temporary_files[tfa._partition_file] = (
        tfa._partition_dir,
        _lock_file,
        set(),
    )
    if reopen:
        # Re-open the partition
        self.open(self.config)
    return True
|
[
"def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by 
any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass",
"def cleanup(job, tempOutputFileStoreID, outputFile, cores=1, memory=sortMemory, disk=\"3G\"):\n fileName = job.fileStore.readGlobalFile(tempOutputFileStoreID)\n shutil.copyfile(fileName, outputFile)\n job.fileStore.logToMaster(\"Finished copying sorted file to output: %s\" % outputFile)",
"def _register_temporary_file(self):\n _partition_file = self._subarray._partition_file\n _partition_dir = self._subarray._partition_dir\n if _partition_file not in _temporary_files:\n fd, _lock_file = mkstemp(\n prefix=_partition_file + \"_\", dir=_partition_dir\n )\n close(fd)\n _temporary_files[_partition_file] = (\n _partition_dir,\n _lock_file,\n set(),\n )\n else:\n _, _lock_file, _ = _temporary_files[_partition_file]\n\n return _lock_file",
"def _write_temp_files(self):\n\n # Write the serialsed edges.\n with open(self._input_addr, 'w') as f:\n for e1, e2 in self._serialized_edges:\n f.write(\"%d\\t%d\\n\" % (e1, e2))",
"def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)",
"def write_partition(self):\n \n# np_pv = np.array(self.part_vert,dtype=np.int32);\n# fn = 'parts.lbm';\n# np_pv.astype('int32').tofile(fn)\n parts = open('parts.lbm','w')\n for p in self.part_vert:\n parts.write('%d \\n'% p)\n\n parts.close()",
"def save_tmp_file(self, data):\n with open(self.tmp_file, 'wb') as f:\n f.write(data)",
"def move_to_temp_location(self):\n self.download()\n self.temp_file = S3FileTransfer.get_unique_temp_name(self.local_file)\n os.rename(self.local_file, self.temp_file)\n self.has_temp_file = True\n return self.temp_file",
"def test_deleting_local_file_using_file_io_output_file() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(file_location)\n\n # Instantiate the custom OutputFile\n output_file = PyArrowFileIO().new_output(location=f\"{file_location}\")\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(file_location)",
"def test_save_arr(self):\n\n self.C.savearr(self.arr1, 'test_composites/test_write.tif')\n self.assertTrue(os.path.exists('test_composites/test_write.tif'))",
"def temp_to_epub(self):\n with zipfile.ZipFile(self._new_fname, 'w') as zip_arc:\n for directory, _, files in os.walk(self._temp_directory):\n arc_directory = os.path.relpath(directory, self._temp_directory)\n zip_arc.write(directory, arc_directory)\n for file_ in files:\n path = os.path.join(directory, file_)\n zip_arc.write(path, os.path.relpath(path, self._temp_directory))\n\n shutil.rmtree(str(self._temp_directory))",
"def test_printarray(self):\n file_lst = os.listdir('./smallData')\n data_list = []\n for i in range(len(file_lst)):\n obj = data_import.ImportData('./smallData/' + file_lst[i])\n data_list.append(data_import.roundTimeArray(obj, 5))\n\n result = data_import.printArray(data_list, file_lst, 'test_printarray',\n 'meal_small.csv')\n self.assertNotEqual(result, -1)\n self.assertTrue(os.path.exists('test_printarray.csv'))\n os.remove('test_printarray.csv')",
"def test_deleting_local_file_using_file_io() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n output_file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(output_file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(output_file_location)\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file_location)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(output_file_location)",
"def write_prep_arr(self, arr, index=None):\n if index is None:\n prep_data_dir = os.path.join(self.experiment_dir, 'prep')\n else:\n prep_data_dir = os.path.join(self.experiment_dir, *('scan_' + str(index), 'prep'))\n data_file = os.path.join(prep_data_dir, 'prep_data.tif')\n if not os.path.exists(prep_data_dir):\n os.makedirs(prep_data_dir)\n arr = self.detector.clear_seam(arr, self.roi)\n ut.save_tif(arr, data_file)",
"def test_save_npy(temp_dir):\n data = np.array([[1, 2, 3], [4, 5, 6]])\n save_npy(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"npy\", \"1.npy\"))",
"def test_truncate_read_write(tmppath):\n fd = get_tsta_file(tmppath)\n fb = get_copy_file(fd)\n fp, fc = fd[\"full_path\"], fd[\"contents\"]\n fp2 = fb[\"full_path\"]\n\n sp = len(fc) // 2\n wstr = \"I am the string\"\n\n pfile = open(fp2, \"r+\")\n xfile = XRootDPyFile(mkurl(fp), \"r+\")\n\n xfile.truncate(sp), pfile.truncate(sp)\n assert xfile.tell() == pfile.tell()\n assert xfile.read() == pfile.read().encode()\n assert xfile.tell() == pfile.tell()\n\n xfile.write(wstr), pfile.write(wstr)\n assert xfile.tell() == pfile.tell()\n assert xfile.read() == pfile.read().encode()\n\n xfile.seek(0), pfile.seek(0)\n assert xfile.tell() == pfile.tell()\n assert xfile.read() == pfile.read().encode()",
"def test_truncate_read_write2(tmppath):\n fd = get_tsta_file(tmppath)\n fb = get_copy_file(fd)\n fp, fc = fd[\"full_path\"], fd[\"contents\"]\n fp2 = fb[\"full_path\"]\n\n sp = len(fc) // 2\n wstr = \"I am the string\"\n\n pfile = open(fp2, \"r+\")\n xfile = XRootDPyFile(mkurl(fp), \"r+\")\n\n xfile.truncate(sp), pfile.truncate(sp)\n assert xfile.tell() == pfile.tell()\n assert xfile.read() == pfile.read().encode()\n assert xfile.tell() == pfile.tell()\n\n xfile.seek(0), pfile.seek(0)\n assert xfile.tell() == pfile.tell()\n assert xfile.read() == pfile.read().encode()\n xfile.seek(0), pfile.seek(0)\n\n xfile.write(wstr), pfile.write(wstr)\n assert xfile.tell() == pfile.tell()\n assert xfile.read() == pfile.read().encode()\n xfile.seek(0), pfile.seek(0)\n assert xfile.read() == pfile.read().encode()",
"def fileInTemp(self, cleanup, *args, **kwargs):\n segments = self.createFileInTemp(*args, **kwargs)\n cleanup(self.deleteFile, segments)\n return segments",
"def write_part_to_file(self, uid, part):\n filename = part.get_filename()\n filename = os.path.join(self.tmp_dir, os.path.basename(filename))\n try:\n open(filename, 'wb').write(part.get_payload(decode=True))\n except Exception as e:\n raise Exception(\n \"Error writing to filename %s with exception %s\" %\n (filename, str(e)))\n else:\n self.helper.log_debug(\n 'write_part_to_file: saved file %s from uid %s' %\n (filename, uid))\n return filename"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register a temporary file on this rank that has been created on another rank.
|
def _register_temporary_file(self):
    _partition_file = self._subarray._partition_file
    _partition_dir = self._subarray._partition_dir
    if _partition_file not in _temporary_files:
        fd, _lock_file = mkstemp(
            prefix=_partition_file + "_", dir=_partition_dir
        )
        close(fd)
        _temporary_files[_partition_file] = (
            _partition_dir,
            _lock_file,
            set(),
        )
    else:
        _, _lock_file, _ = _temporary_files[_partition_file]
    return _lock_file
|
[
"def register_tmp_file(self, tmp_file: str):\n self.temp_files.add(pathlib.Path(tmp_file))",
"def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]",
"def _upload_temp(cls, path, token, rtype):\n uri = cls.ADD_TEMP_URI.format(token, rtype)\n\n resp = RequestUtil.upload(uri, {}, path)\n\n return resp",
"def _create_tmp_file(config):\n tmp_dir = tempfile.gettempdir()\n rand_fname = py23_compat.text_type(uuid.uuid4())\n filename = os.path.join(tmp_dir, rand_fname)\n\n logger.info('filename: {}'.format(filename))\n # logger.info('config: {}'.format(config))\n with open(filename, 'wt') as fobj:\n fobj.write(config)\n return filename",
"def _temporary_file():\r\n return tempfile.mkstemp()[1]",
"def _create_file(self, rel_path, text):\n # FIXME: There are better/more secure APIs for creating tmp file paths.\n file_path = self.filesystem.join(self._temp_dir, rel_path)\n self.filesystem.write_text_file(file_path, text)\n return file_path",
"def _mktemp(self):\r\n fd, name = mkstemp(dir=self.temp_dir)\r\n try:\r\n file_obj = os.fdopen(fd, 'wb')\r\n return file_obj, name\r\n except:\r\n _removeIfPresent(name)\r\n raise",
"def tempfile ( suffix = '' , prefix = 'tmp-' , dir = None , date = True ) :\n fname = CleanUp.get_temp_file ( suffix = suffix , prefix = prefix ,\n dir = dir , date = date )\n assert not os.path.exists ( fname )\n CleanUp._tmpfiles.add ( fname )\n return fname",
"def _temp_file(self, val):\n fd, fn = tempfile.mkstemp()\n fp = os.fdopen(fd, \"wb\")\n if val:\n if not isinstance(val, bytes):\n fp.write(val.encode(\"utf-8\", \"surrogateescape\"))\n else:\n fp.write(val)\n fp.close()\n return fn",
"def _open_temp_file():\n temp_dir=os.path.join(_storage_folder,\".tmp\",\"files\")\n file_utils.retry_ensure_dir(temp_dir)\n return file_utils.TempFile(folder=temp_dir)",
"def test_create_tmpfile(self):\n with rika.ScopedFile() as file:\n self.assertTrue(os.path.isfile(file.path))",
"def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)",
"def generate_temp_file():\n return tempfile.mktemp()",
"def temporaryFile(prefix= None, extension=None, access= \"w\", temporaryDir= None, justPath= False):\n\tif None == temporaryDir:\n\t\ttemporaryDir= tempfile.gettempdir()\n\tif None == prefix:\n\t\tprefix= \"tempfile\"\n\tif None == extension:\n\t\textension= \".temp\"\n\tfile_name= os.path.join(temporaryDir, prefix+extension)\n\twhile 1:\n\t\ttry:\n\t\t\tfd = os.open(file_name, os.O_CREAT | os.O_EXCL | os.O_WRONLY)\n\t\t\tif justPath:\n\t\t\t\tos.fdopen(fd, access).close()\n\t\t\t\treturn file_name\n\t\t\treturn (os.fdopen(fd, access), file_name)\n\t\texcept OSError:\n\t\t\tpass\n\t\tfile_name= os.path.join(temporaryDir, prefix+'_'+str(random.randint(0,1000000000))+extension)",
"def temporary_file(self, close=True):\n\n try:\n misc.makedirs(self.__tmpdir)\n except (apx.PermissionsException,\n apx.ReadOnlyFileSystemException):\n self.__tmpdir = tempfile.mkdtemp(prefix=\"pkg5tmp-\")\n atexit.register(shutil.rmtree,\n self.__tmpdir, ignore_errors=True)\n return self.temporary_file(close=close)\n\n try:\n fd, name = tempfile.mkstemp(dir=self.__tmpdir)\n if close:\n os.close(fd)\n except EnvironmentError as e:\n if e.errno == errno.EACCES or e.errno == errno.EROFS:\n self.__tmpdir = tempfile.mkdtemp(prefix=\"pkg5tmp-\")\n atexit.register(shutil.rmtree,\n self.__tmpdir, ignore_errors=True)\n return self.temporary_file(close=close)\n raise apx._convert_error(e)\n\n if close:\n return name\n else:\n return fd, name",
"def _tempfile(filename):\n return tempfile.NamedTemporaryFile(mode='w',\n dir=os.path.dirname(filename),\n prefix=os.path.basename(filename),\n suffix=os.fsencode('.tmp'),\n delete=False)",
"def test_reg_file(tmp_path):\n real_path = tmp_path / \"my_file.txt\"\n with open(real_path, \"w\") as fh: fh.write(\"dummy content\")\n r_file = RegistryFile(real_path)\n assert r_file.filename == \"my_file.txt\"",
"def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name",
"def create_temporary_file(self, directory, name, size):\n\n assert os.path.isabs(directory) == True\n directory = directory[1:]\n\n file_data = os.urandom(size)\n\n temporary_file_path = self.remote_directory_path / directory / name\n temporary_file = temporary_file_path.open('wb')\n temporary_file.write(file_data)\n temporary_file.close()\n\n return temporary_file_path, file_data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add the lock files listed in lock_files to the list of lock files managed by other ranks.
|
def _update_lock_files(self, lock_files):
    _, _lock_file, _other_lock_files = _temporary_files[
        self._subarray._partition_file
    ]
    _other_lock_files.update(set(lock_files))
    if _lock_file in _other_lock_files:
        # If the lock file managed by this rank is in the list of
        # lock files managed by other ranks, remove it from there
        _other_lock_files.remove(_lock_file)
|
[
"def LockFiles(self, entries):\n self._model.lock(entries)",
"def addFiles(self, file_list):\n \n # Add the files to the queue\n for file_name in file_list:\n self.file_queue.put(file_name)\n \n # Write the queue to disk\n self.saveQueue()\n \n # Upload the data\n self.uploadData()",
"def stageChanges(self, username):\n self.index.add_all()\n entriesToAdd = []\n entriesToNotAdd = []\n for changedFile in self.index:\n lockEntry = self.__locking.findLock(changedFile.path)\n if (((lockEntry is None or lockEntry[\"user\"] == username) and self.__permissions.can_write(username, changedFile.path)) or\n changedFile.path == Locking.LOCKFILE_PATH or changedFile.path == Permissions.PERMISSION_PATH):\n entriesToAdd.append(changedFile)\n else:\n entriesToNotAdd.append(changedFile)\n self.index.read(force=True)\n for entry in entriesToAdd:\n self.index.add(entry)\n self.index.write()\n return entriesToNotAdd",
"def add(self,file):\r\n self.sync_list.append(file)",
"def files_to_sync(self):\n return []",
"def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e",
"def resolveLockingPermissionsMerge(self):\n resolved_conflicts = []\n for conflict in self.index.conflicts:\n path = self.pathFromConflict(conflict)\n # For conflicting Locks or Permissions that have been changed on remote, it is safest to discard local version and accept the remote\n # Then reload the respective modules\n if path == Locking.LOCKFILE_PATH or path == Permissions.PERMISSION_PATH:\n self.writeConflictResolution(conflict[2], path)\n self.__locking.load()\n self.__permissions.load()\n # Remove all resolved conflicts\n for conflict in resolved_conflicts:\n del self.index.conflicts[conflict]",
"def _distribute_files(self, distribution='one'):\n for k, files in self.file_lists.items():\n self.idle[k] = False\n if distribution.lower() == 'single':\n self.distribution_comms[k] = None\n if self.comm.rank >= 1:\n self.local_file_lists[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = files\n elif distribution.lower() == 'even':\n if len(files) <= self.comm.size:\n if self.comm.rank >= len(files):\n self.local_file_lists[k] = None\n self.distribution_comms[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = [files[self.comm.rank],]\n self.distribution_comms[k] = self.comm.Create(self.comm.Get_group().Incl(np.arange(len(files))))\n else:\n files_per = int(np.floor(len(files) / self.comm.size))\n excess_files = int(len(files) % self.comm.size)\n if self.comm.rank >= excess_files:\n self.local_file_lists[k] = list(files[int(self.comm.rank*files_per+excess_files):int((self.comm.rank+1)*files_per+excess_files)])\n else:\n self.local_file_lists[k] = list(files[int(self.comm.rank*(files_per+1)):int((self.comm.rank+1)*(files_per+1))])\n self.distribution_comms[k] = self.comm",
"def _distribute_files(self, distribution='single'):\n for k, files in self.file_lists.items():\n self.idle[k] = False\n if distribution.lower() == 'single':\n self.distribution_comms[k] = None\n if self.comm.rank >= 1:\n self.local_file_lists[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = files\n elif distribution.lower() == 'even':\n if len(files) <= self.comm.size:\n if self.comm.rank >= len(files):\n self.local_file_lists[k] = None\n self.distribution_comms[k] = None\n self.idle[k] = True\n else:\n self.local_file_lists[k] = [files[self.comm.rank],]\n self.distribution_comms[k] = self.comm.Create(self.comm.Get_group().Incl(np.arange(len(files))))\n else:\n files_per = int(np.floor(len(files) / self.comm.size))\n excess_files = int(len(files) % self.comm.size)\n if self.comm.rank >= excess_files:\n self.local_file_lists[k] = list(files[int(self.comm.rank*files_per+excess_files):int((self.comm.rank+1)*files_per+excess_files)])\n else:\n self.local_file_lists[k] = list(files[int(self.comm.rank*(files_per+1)):int((self.comm.rank+1)*(files_per+1))])\n self.distribution_comms[k] = self.comm",
"def removeLocks():\n global lockFnames\n for lockFname in lockFnames:\n if isfile(lockFname):\n logging.debug('Removing lockfile %s' % lockFname)\n os.remove(lockFname)\n\n lockFnames = []",
"def cleanup_file_locks():\n\n hostname = socket.gethostname()\n sentinel_re = hostname + r'-.*\\.(\\d+$)'\n lockfile_re = r'order_check-.*\\.lock'\n files = os.listdir(options.lock_path)\n\n # cleanup sentinels\n for filename in files:\n match = re.match(sentinel_re, filename)\n if match is None:\n continue\n pid = match.group(1)\n LOG.debug('Found sentinel %(filename)s for pid %(pid)s' %\n {'filename': filename, 'pid': pid})\n try:\n os.kill(int(pid), 0)\n except OSError, e:\n # PID wasn't found\n delete_if_exists(os.path.join(FLAGS.lock_path, filename))\n LOG.debug('Cleaned sentinel %(filename)s for pid %(pid)s' %\n {'filename': filename, 'pid': pid})\n\n # cleanup lock files\n for filename in files:\n match = re.match(lockfile_re, filename)\n if match is None:\n continue\n try:\n stat_info = os.stat(os.path.join(FLAGS.lock_path, filename))\n except OSError as e:\n if e.errno == errno.ENOENT:\n continue\n else:\n raise\n msg = ('Found lockfile %(file)s with link count %(count)d' %\n {'file': filename, 'count': stat_info.st_nlink})\n LOG.debug(msg)\n if stat_info.st_nlink == 1:\n delete_if_exists(os.path.join(FLAGS.lock_path, filename))\n msg = ('Cleaned lockfile %(file)s with link count %(count)d' %\n {'file': filename, 'count': stat_info.st_nlink})\n LOG.debug(msg)",
"def add_files(self, files):\n pass",
"def UnlockFiles(list_of_files):#list_of_files is List of files to unlock\r\n \r\n USER_LOCK = GetUserLockName()\r\n userLockDict = UserLockGet()\r\n userLockNew = dict()\r\n \r\n for element in list_of_files:\r\n os.chmod(element, stat.S_IREAD)\r\n del userLockDict[element]\r\n \r\n os.chmod(USER_LOCK, stat.S_IWRITE) \r\n if not userLockDict:#if the dict is empty\r\n open(USER_LOCK, 'w')\r\n USER_LOCK.close()\r\n \r\n else:\r\n for element in userLockDict:#Cut off part of the way. Because it's differ from user to user\r\n newElem = element[len(getLocalPath())-len(os.path.dirname(getLocalPath())):len(element)]\r\n userLockNew[newElem] = userLockDict[element]\r\n with open(USER_LOCK, 'w') as f:\r\n pickle.dump(userLockNew,f)\r\n os.chmod(USER_LOCK, stat.S_IREAD)",
"def __add_to_list(self, filenames, email=None):\n\n\t\t## Defaults\n\t\tif email == None:\n\t\t\temail = self.get_sender()[1]\n\n\t\t## Make sure it's not one of our own emails...\n\t\tif self.is_from_ourselves(email):\n\t\t\tself.log.write(1, \" __add_to_list(): \\\"%s\\\" is listed as one of our emails. It will not be added to the list (%s)\" % (email, filenames[0]))\n\t\t\treturn -1\n\n\t\t## Do not add if it's already there... (Compare sender only)\n\t\tif self.__inlist(filenames,\n\t\t\t\t\t\t sender = email,\n\t\t\t\t\t\t recipients = [(\"*IGNORE*\",\"*IGNORE*\",\"*IGNORE*\")],\n\t\t\t\t\t\t subj = \"*IGNORE*\"):\n\t\t\tself.log.write(1, \" __add_to_list(): \\\"%s\\\" is already present in \\\"%s\\\". It will not be re-added\" % (email, filenames[0]))\n\t\t\treturn -1\n\n\t\tself.log.write(10, \" __add_to_list(): filename=%s, email=%s\" % (filenames[0], email))\n\n\t\tlck = asklock.AskLock()\n\n\t\tlockf = \"\"\n\t\tif self.config.rc_lockfile != \"\":\n\t\t\tlockf = self.config.rc_lockfile + \".\" + os.path.basename(filenames[0])\n\n\t\tlck.open(filenames[0], \"a\", lockf)\n\t\t\n\t\t## We suppose the file is unlocked when we get here...\n\t\tlck.write(\"from \" + self.__escape_regex(email) + \"\\n\")\n\t\tlck.close()\n\n\t\tself.log.write(1, \" __add_to_list(): \\\"%s\\\" added to %s\" % (email, filenames[0]))\n\n\t\treturn 0",
"def AddFiles(self, files, retries=1):\n JobPacketInfo(self.conn, self.id).AddFiles(files, retries)",
"def group_files_by_rwx(files):\n read_list = [] \n write_list = []\n exec_list = []\n for file in files:\n # TODO:\n # 1. if file has read permission add to read_list\n # 2. if file has write permission add to write_list\n # 3. if file has exec permission add to exec_list\n # hint: use os.access and os.R_OK, os.W_OK, os.X_OK\n\n return (read_list,write_list,exec_list)",
"def pipfile_lock_names(self):\n return ext_split(self.pipfile_locks, \"Pipfile.lock\")",
"def add_filelist_to_cache(self, file_list=None):\n if file_list is None:\n return False\n for fileinfo in file_list:\n fn_ = fileinfo.filename\n self.cache_file_list_dict[fn_] = fileinfo\n return True",
"def add_files(self, files, commit_msg):\n paths = []\n for rpath in files:\n path = os.path.join(self.repodir, rpath)\n paths.append(path)\n with open(path, 'w') as f:\n f.write(files[rpath])\n if paths:\n self.git_cmd(['add'] + paths)\n self.commit(commit_msg)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Samples a 2d function f over specified intervals and returns two arrays (X, Y) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot2d.py. f is a function of one variable, such as x2. x_args is an interval given in the form (var, min, max, n)
|
def sample2d(f, x_args):
    try:
        f = sympify(f)
    except SympifyError:
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
    except (TypeError, IndexError):
        raise ValueError("x_args must be a tuple of the form (var, min, max, n)")
    x_l = float(x_max - x_min)
    x_d = x_l/float(x_n)
    X = np.arange(float(x_min), float(x_max) + x_d, x_d)
    Y = np.empty(len(X))
    for i in range(len(X)):
        try:
            Y[i] = float(f.subs(x, X[i]))
        except TypeError:
            Y[i] = None
    return X, Y
|
[
"def PlotF(f,start,stop,x_label='x axis',y_label='y axis',lab='f(x)',\\\r\n my_title='Graph',arguments=()):\r\n \r\n #initiate figure object, plot object\r\n my_fig, my_plot = pyplot.subplots();\r\n\r\n #if no additional arguments, args is an empty tuple, so just plot\r\n\r\n if arguments==():\r\n \r\n # generate x array using linspace\r\n x_arr = np.linspace(start, stop, 1000)\r\n \r\n # generate y array by passing it the x array\r\n # this requires that the function be fully vectorized\r\n try:\r\n y_arr = f(x_arr)\r\n \r\n except: # the function is not vectorized\r\n # iter thru instead\r\n y_arr = np.zeros( len(x_arr) );\r\n for i in range( len(x_arr) ):\r\n y_arr[i] = f( x_arr[i])\r\n\r\n my_plot.plot(x_arr,y_arr,label=lab);\r\n\r\n #handle plotting if there are additional arguments \r\n else:\r\n\r\n # generate x array using linspace\r\n x_arr = np.linspace(start, stop, 1000)\r\n \r\n # generate y array by passing it the x array\r\n # this requires that the function be fully vectorized\r\n y_arr = f(x_arr, *arguments)\r\n\r\n my_plot.plot(x_arr,y_arr,label=lab);\r\n\r\n #format the graph\r\n my_plot.set( xlabel = x_label, ylabel = y_label, title = my_title );\r\n my_plot.grid()\r\n \r\n #show the graph\r\n pyplot.show();\r\n\r\n return my_fig;",
"def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n y, y_min, y_max, y_n = y_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args and y_args must be tuples of the form (var, min, max, intervals)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n y_l = float(y_max - y_min)\n y_d = y_l/float(y_n)\n y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)\n\n def meshgrid(x, y):\n \"\"\"\n Taken from matplotlib.mlab.meshgrid.\n \"\"\"\n x = np.array(x)\n y = np.array(y)\n numRows, numCols = len(y), len(x)\n x.shape = 1, numCols\n X = np.repeat(x, numRows, 0)\n\n y.shape = numRows, 1\n Y = np.repeat(y, numCols, 1)\n return X, Y\n\n X, Y = np.meshgrid(x_a, y_a)\n\n Z = np.ndarray((len(X), len(X[0])))\n for j in range(len(X)):\n for k in range(len(X[0])):\n try:\n Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))\n except (TypeError, NotImplementedError):\n Z[j][k] = 0\n return X, Y, Z",
"def _proxy_2d(args, fs=None, f_range=None, return_samples=None):\n\n sig, kwargs = args[0], args[1:]\n\n return compute_features(sig, fs=fs, f_range=f_range,\n return_samples=return_samples, **kwargs[0])",
"def fcontourf(f, x1range, x2range, yrange, **kwargs):\n x1s = np.linspace(x1range[0], x1range[1])\n x2s = np.linspace(x2range[0], x2range[1])\n ys = np.linspace(yrange[0], yrange[1], 20)\n fs = [[f(np.array([x1,x2])) for x1 in x1s] for x2 in x2s]\n plt.contourf(x1s, x2s, fs, ys, **kwargs)\n plt.axis('scaled')",
"def plot_function(f, min_x:float, max_x:float, steps:int=100):\n xs = [min_x + (max_x-min_x)*i/(steps-1) for i in range(steps)] #generate the x-values and store them in a list\n ys = [f(x) for x in xs] #generate the y-values and store them in a list\n plt.plot(xs, ys) #generagte the plot based of the lists\n plt.show() #show the plot",
"def show_trace_2d(f, results):\n plt.close()\n # draw input points\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n # get the field of figure\n x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))\n # draw the contour of function using x1,x2 as step\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()",
"def sample_function(function, limits, N):\n xmin = limits[0];\n xmax = limits[1];\n (X,h) = np.linspace(xmin,xmax,N,retstep=True);\n Y = function(X);\n return (Y,h);",
"def interpolate2(self, f: callable, a: float, b: float, n: int) -> callable:\n\n # create the array of points\n pointsArr = []\n deltaOfXBetweenPoints = (float)(b - a) / float(n)\n xCounter = a\n\n xs = []\n ys = []\n for i in range(n):\n if (xCounter > b):\n xCounter = b\n yy = float(f(xCounter))\n xx = float(xCounter)\n\n pointsArr.append((xx, yy))\n xs.append(xx)\n ys.append(yy)\n xCounter = xCounter + deltaOfXBetweenPoints\n\n return lagrange(xs,ys)",
"def plot_function(function: Callable, list_range: List[float], step: float = 0.01 ) -> None:\n\n x_values = range(list_range[0], list_range[1], step)\n\n y_values = list(function(x) for x in x_values)\n\n return y_values",
"def show_trace_2d(f, results): #@save\n set_figsize()\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1),torch.arange(-3.0, 1.0, 0.1))\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')",
"def mplot(xs,f,**kw):\n plot(xs,map(f,xs),**kw)",
"def map2d(xdata, ydata, func):\n xdata = np.squeeze(xdata)\n ydata = np.squeeze(ydata)\n assert(xdata.ndim == ydata.ndim == 1)\n nx = len(xdata)\n ny = len(ydata)\n indices = np.ndindex((nx, ny))\n # Appy function to data and reshape array\n grid = np.reshape([func((xdata[i], ydata[j])) for i, j in indices], (nx, ny))\n return grid",
"def draw_slice(f, x, y=None, scale=True, shift=False):\n import numpy\n\n if y is None:\n y = 0.0\n x, y = numpy.meshgrid(x, y)\n plotx = True if numpy.all(y == y[0,0]) else False\n\n z = 0*x\n s,t = x.shape\n for i in range(s):\n for j in range(t):\n xx,yy = x[i,j], y[i,j]\n z[i,j] = f([xx,yy])\n if shift:\n if shift is True: shift = max(-numpy.min(z), 0.0) + 0.5 # exact minimum\n z = z+shift\n if scale: z = numpy.log(4*z*scale+1)+2\n #XXX: need to 'correct' the z-axis (or provide easy conversion)\n\n fig = plt.figure()\n ax = fig.gca()\n ax.autoscale(tight=True)\n if plotx:\n ax.plot(x.reshape(-1), z.reshape(-1))\n else:\n ax.plot(y.reshape(-1), z.reshape(-1))\n return fig",
"def plot_integration(f, x_low, x_high, num=10):\n pass",
"def gen_sample(f, n):\n x = np.linspace(0,1,2**n+1)\n return x, f(x)",
"def plot(x, func):\n fig = plt.figure(figsize=(10, 5))\n y = [func(x_i) for x_i in x]\n\n plt.plot(x, y, label='Debug plot')\n plt.show()\n plt.close(fig)",
"def draw_points(m):\r\n X = np.random.multivariate_normal([0, 0], np.eye(2), m).T\r\n y = f(X)\r\n return X, y",
"def create_data(f, x_vals):\n y_vals = []\n for i in x_vals:\n y_vals.append(f(x_vals[i]))\n return np.array(y_vals)",
"def sample(f, *var_args):\n if len(var_args) == 1:\n return sample2d(f, var_args[0])\n elif len(var_args) == 2:\n return sample3d(f, var_args[0], var_args[1])\n else:\n raise ValueError(\"Only 2d and 3d sampling are supported at this time.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Samples a 3d function f over specified intervals and returns three 2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot3d.py. f is a function of two variables, such as x2 + y2. x_args and y_args are intervals given in the form (var, min, max, n)
|
def sample3d(f, x_args, y_args):
    x, x_min, x_max, x_n = None, None, None, None
    y, y_min, y_max, y_n = None, None, None, None
    try:
        f = sympify(f)
    except SympifyError:
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
        y, y_min, y_max, y_n = y_args
    except (TypeError, IndexError):
        raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")
    x_l = float(x_max - x_min)
    x_d = x_l/float(x_n)
    x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)
    y_l = float(y_max - y_min)
    y_d = y_l/float(y_n)
    y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)
    def meshgrid(x, y):
        """
        Taken from matplotlib.mlab.meshgrid.
        """
        x = np.array(x)
        y = np.array(y)
        numRows, numCols = len(y), len(x)
        x.shape = 1, numCols
        X = np.repeat(x, numRows, 0)
        y.shape = numRows, 1
        Y = np.repeat(y, numCols, 1)
        return X, Y
    X, Y = np.meshgrid(x_a, y_a)
    Z = np.ndarray((len(X), len(X[0])))
    for j in range(len(X)):
        for k in range(len(X[0])):
            try:
                Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))
            except (TypeError, NotImplementedError):
                Z[j][k] = 0
    return X, Y, Z
|
[
"def frontiere_3d(f, data, step=20):\n ax = plt.gca(projection='3d')\n xmin, xmax = data[:, 0].min() - 1., data[:, 0].max() + 1.\n ymin, ymax = data[:, 1].min() - 1., data[:, 1].max() + 1.\n xx, yy = np.meshgrid(np.arange(xmin, xmax, (xmax - xmin) * 1. / step),\n np.arange(ymin, ymax, (ymax - ymin) * 1. / step))\n z = np.array([f(vec) for vec in np.c_[xx.ravel(), yy.ravel()]])\n z = z.reshape(xx.shape)\n ax.plot_surface(xx, yy, z, rstride=1, cstride=1,\n linewidth=0., antialiased=False,\n cmap=plt.cm.coolwarm)",
"def frontiere_3d(f, data, step=20):\n ax = plt.gca(projection='3d')\n xmin, xmax = data[:, 0].min() - 1., data[:, 0].max() + 1.\n ymin, ymax = data[:, 1].min() - 1., data[:, 1].max() + 1.\n xx, yy = np.meshgrid(np.arange(xmin, xmax, (xmax - xmin) * 1. / step),\n np.arange(ymin, ymax, (ymax - ymin) * 1. / step))\n z = np.array([f(vec) for vec in np.c_[xx.ravel(), yy.ravel()]])\n z = z.reshape(xx.shape)\n ax.plot_surface(xx, yy, z, rstride=1, cstride=1,\n linewidth=0., antialiased=False, cmap=cm.jet)",
"def plot_3d(function, x_range, y_range=None, piecewise=False, z_limits=None, spacing=0.05):\n if y_range is None:\n y_range = x_range\n fig = plt.figure(figsize=(10, 8))\n ax = fig.gca(projection='3d')\n X = np.arange(x_range[0], x_range[1], spacing)\n Y = np.arange(y_range[0], y_range[1], spacing)\n if piecewise:\n Z = np.zeros((len(X), len(Y)))\n for i in range(len(X)):\n for j in range(len(Y)):\n Z[i][j] = function(X[i], Y[j])\n X, Y = np.meshgrid(X, Y)\n else:\n X, Y = np.meshgrid(X, Y)\n Z = function(X, Y)\n surf = ax.plot_surface(X, Y, Z, cmap=cm.winter, linewidth=0, antialiased=False)\n if z_limits:\n ax.set_zlim(z_limits[0], z_limits[1])\n ax.set_xlabel('Effective Distance')\n ax.set_ylabel('Edge Value')\n ax.set_zlabel('Info Score')\n # ax.set_xlabel('x')\n # ax.set_ylabel('y')\n # ax.set_zlabel('z')\n plt.show()",
"def _proxy_3d(args, fs=None, f_range=None, return_samples=None):\n\n sigs, kwargs = args[0], args[1]\n\n return compute_features_2d(sigs, fs, f_range, compute_features_kwargs=kwargs, axis=None,\n return_samples=return_samples)",
"def sample2d(f, x_args):\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args must be a tuple of the form (var, min, max, n)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n X = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n Y = np.empty(len(X))\n for i in range(len(X)):\n try:\n Y[i] = float(f.subs(x, X[i]))\n except TypeError:\n Y[i] = None\n return X, Y",
"def gradient_descent_plot3D(f, f_vals, X_vals):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n x = y = np.arange(-1, 1.0, 0.05)\n X, Y = np.meshgrid(x, y)\n zs = np.array([f([x, y]) for x, y in zip(np.ravel(X), np.ravel(Y))])\n Z = zs.reshape(X.shape)\n ax.plot_surface(X, Y, Z, alpha=0.2)\n ax.scatter(xs=np.array(X_vals)[:, 0], ys=np.array(X_vals)[:, 1], zs=f_vals, color='red', lw=5)\n plt.show()",
"def newplot3(*args, **kwargs):\n\n if 'linewidth' and 'lw' not in kwargs.keys():\n kwargs['linewidth'] = 2\n\n fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)\n ax = fig.add_subplot(111, projection='3d')\n\n x = np.asarray(args[0], dtype=float)\n y = np.asarray(args[1], dtype=float)\n z = np.asarray(args[2], dtype=float)\n\n if z.ndim == 2:\n if x.ndim < 2:\n x = np.tile(x, z.shape[1]).reshape(z.T.shape).T\n if y.ndim < 2:\n y = np.tile(y, z.shape[0]).reshape(z.shape)\n\n # Plot each array independently\n for n in range(len(z)):\n ax.plot(x[n], y[n], z[n], *args[3:], **kwargs)\n else:\n ax.plot(*args, **kwargs)",
"def sample(f, *var_args):\n if len(var_args) == 1:\n return sample2d(f, var_args[0])\n elif len(var_args) == 2:\n return sample3d(f, var_args[0], var_args[1])\n else:\n raise ValueError(\"Only 2d and 3d sampling are supported at this time.\")",
"def DrawSurface(fig, varxrange, varyrange, function):\n ax = fig.gca(projection='3d')\n # ax = fig.add_subplot(111, projection='3d', proj_type='ortho')\n xx, yy = np.meshgrid(varxrange, varyrange, sparse=False)\n z = function(xx, yy)\n # ax.plot_surface(xx, yy, z, cmap='RdBu') # color map can be adjusted, or removed!\n ax.contour(xx, yy, z, zdir='y', offset=8, cmap='rainbow')\n ax.contour(xx, yy, z, zdir='z', offset=-14, cmap='rainbow')\n fig.canvas.draw()\n return ax",
"def plot3d(*args, show=True, **kwargs):\n from spb.defaults import THREE_D_B\n\n args = _plot_sympify(args)\n kwargs.setdefault(\"backend\", THREE_D_B)\n kwargs = _set_discretization_points(kwargs, SurfaceOver2DRangeSeries)\n series = []\n plot_expr = _check_arguments(args, 1, 2)\n series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]\n xlabel = series[0].var_x.name\n ylabel = series[0].var_y.name\n kwargs.setdefault(\"xlabel\", xlabel)\n kwargs.setdefault(\"ylabel\", ylabel)\n kwargs.setdefault(\"zlabel\", \"f(%s, %s)\" % (xlabel, ylabel))\n plots = Plot(*series, **kwargs)\n if show:\n plots.show()\n return plots",
"def PlotF(f,start,stop,x_label='x axis',y_label='y axis',lab='f(x)',\\\r\n my_title='Graph',arguments=()):\r\n \r\n #initiate figure object, plot object\r\n my_fig, my_plot = pyplot.subplots();\r\n\r\n #if no additional arguments, args is an empty tuple, so just plot\r\n\r\n if arguments==():\r\n \r\n # generate x array using linspace\r\n x_arr = np.linspace(start, stop, 1000)\r\n \r\n # generate y array by passing it the x array\r\n # this requires that the function be fully vectorized\r\n try:\r\n y_arr = f(x_arr)\r\n \r\n except: # the function is not vectorized\r\n # iter thru instead\r\n y_arr = np.zeros( len(x_arr) );\r\n for i in range( len(x_arr) ):\r\n y_arr[i] = f( x_arr[i])\r\n\r\n my_plot.plot(x_arr,y_arr,label=lab);\r\n\r\n #handle plotting if there are additional arguments \r\n else:\r\n\r\n # generate x array using linspace\r\n x_arr = np.linspace(start, stop, 1000)\r\n \r\n # generate y array by passing it the x array\r\n # this requires that the function be fully vectorized\r\n y_arr = f(x_arr, *arguments)\r\n\r\n my_plot.plot(x_arr,y_arr,label=lab);\r\n\r\n #format the graph\r\n my_plot.set( xlabel = x_label, ylabel = y_label, title = my_title );\r\n my_plot.grid()\r\n \r\n #show the graph\r\n pyplot.show();\r\n\r\n return my_fig;",
"def plot_real_matplotlib(f, xbounds=(-1, 1), ybounds=(-1, 1), res=401):\n X, Y, vals = get_vals(f, xbounds, ybounds, res)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(X, Y, vals.real)\n plt.show()",
"def F_bilinear_interp_3d(input, coords):\n x = torch.clamp(coords[:,0], 0, input.size(0)-1.00001)\n x0 = x.floor().long()\n x1 = x0 + 1\n\n y = torch.clamp(coords[:,1], 0, input.size(1)-1.00001)\n y0 = y.floor().long()\n y1 = y0 + 1\n\n z = torch.clamp(coords[:,2], 0, input.size(2)-1.00001)\n z0 = z.floor().long()\n z1 = z0 + 1\n\n c_000 = torch.stack([x0,y0,z0])\n c_111 = torch.stack([x1,y1,z1])\n c_001 = torch.stack([x0,y0,z1])\n c_010 = torch.stack([x0,y1,z0])\n c_011 = torch.stack([x0,y1,z1])\n c_100 = torch.stack([x1,y0,z0])\n c_110 = torch.stack([x1,y1,z0])\n c_101 = torch.stack([x1,y0,z1])\n\n vals_000 = th_gather_nd(input, c_000.detach())\n vals_111 = th_gather_nd(input, c_111.detach())\n vals_001 = th_gather_nd(input, c_001.detach())\n vals_010 = th_gather_nd(input, c_010.detach())\n vals_011 = th_gather_nd(input, c_011.detach())\n vals_100 = th_gather_nd(input, c_100.detach())\n vals_110 = th_gather_nd(input, c_110.detach())\n vals_101 = th_gather_nd(input, c_101.detach())\n\n xd = ((x-x0)/(x1-x0))\n yd = (y-y0)/(y1-y0)\n zd = (z-z0)/(z1-z0)\n\n c00 = vals_000*(1-xd) + vals_100*xd\n c01 = vals_001*(1-xd) + vals_101*xd\n c10 = vals_010*(1-xd) + vals_110*xd\n c11 = vals_011*(1-xd) + vals_111*xd\n\n c0 = c00*(1-yd) + c10*yd\n c1 = c01*(1-yd) + c11*yd\n\n c = c0*(1-zd) + c1*zd\n\n return c.view_as(input)",
"def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))",
"def draw_slice(f, x, y=None, scale=True, shift=False):\n import numpy\n\n if y is None:\n y = 0.0\n x, y = numpy.meshgrid(x, y)\n plotx = True if numpy.all(y == y[0,0]) else False\n\n z = 0*x\n s,t = x.shape\n for i in range(s):\n for j in range(t):\n xx,yy = x[i,j], y[i,j]\n z[i,j] = f([xx,yy])\n if shift:\n if shift is True: shift = max(-numpy.min(z), 0.0) + 0.5 # exact minimum\n z = z+shift\n if scale: z = numpy.log(4*z*scale+1)+2\n #XXX: need to 'correct' the z-axis (or provide easy conversion)\n\n fig = plt.figure()\n ax = fig.gca()\n ax.autoscale(tight=True)\n if plotx:\n ax.plot(x.reshape(-1), z.reshape(-1))\n else:\n ax.plot(y.reshape(-1), z.reshape(-1))\n return fig",
"def _make_mesh(msh_name, kwargs):\r\n if 'z' in kwargs or 'xyfun' in kwargs:\r\n if 'x' not in kwargs:\r\n if 'z' in kwargs and len(np.shape(kwargs['z'])) == 2:\r\n x = np.arange(0, np.shape(kwargs['z'])[0])\r\n elif 'xyfun' in kwargs:\r\n x = np.arange(-2, 2, 0.1)\r\n else:\r\n x = kwargs['x']\r\n if 'y' not in kwargs:\r\n if 'z' in kwargs and len(np.shape(kwargs['z'])) == 2:\r\n y = np.arange(0, np.shape(kwargs['z'])[1])\r\n elif 'xyfun' in kwargs:\r\n y = np.arange(-2, 2, 0.1)\r\n else:\r\n y = kwargs['y']\r\n\r\n if 'xyfun' in kwargs:\r\n xyfun = kwargs['xyfun']\r\n assert isinstance(xyfun, types.FunctionType)\r\n assert xyfun.__code__.co_argcount == 2 # function has two input arguments\r\n kwargs['z'] = np.array([[xyfun(xv, yv) for yv in y] for xv in x])\r\n\r\n if 'z' in kwargs:\r\n z = np.array(kwargs['z'])\r\n if len(np.shape(z)) == 2: # 2D array, surface plot\r\n nX = len(x)\r\n nY = len(y)\r\n assert nX == np.shape(z)[0]\r\n assert nY == np.shape(z)[1]\r\n # matrix to vertices and faces\r\n kwargs['v'] = [(xv, yv, z[ix][iy]) for iy, yv in enumerate(y) for ix, xv in enumerate(x)]\r\n kwargs['f'] = [(iy*nX+ix, iy*nX+ix+1, (iy+1)*nX+(ix+1), (iy+1)*nX+ix) for iy in np.arange(0, nY-1) for ix in np.arange(0, nX-1)]\r\n if len(np.shape(z)) == 1: # 3D plot!\r\n kwargs['v'], kwargs['e'], _ = vef.xyz2vef(x, y, z)\r\n\r\n if 'v' in kwargs and ('f' in kwargs or 'e' in kwargs):\r\n if 'e' not in kwargs:\r\n kwargs['e'] = []\r\n if 'f' not in kwargs:\r\n kwargs['f'] = []\r\n msh = bpy.data.meshes.new(msh_name)\r\n msh.from_pydata(kwargs['v'], kwargs['e'], kwargs['f'])\r\n if not kwargs['e']:\r\n msh.update(calc_edges=True)\r\n else:\r\n msh.update()\r\n return msh # blender mesh\r",
"def plot_3d(*args, **kwargs):\n ax = plt.subplot(projection=\"hro 3d\")\n ax.plot(*args, **kwargs)",
"def plot3d(x,y,z,*args,**kwargs):\n if not kwargs.has_key(\"ax\"):\n if kwargs.has_key(\"fig\"):\n fig = kwargs.pop(\"fig\")\n Show = False\n else:\n fig = plt.figure()\n Show = True\n ax=fig.add_subplot(111,projection=\"3d\")\n else:\n ax = kwargs.pop(\"ax\")\n Show=False\n ax.plot(x,y,z,*args,**kwargs)\n if Show:\n plt.show()\n return ax",
"def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Samples a 2d or 3d function over specified intervals and returns a dataset suitable for plotting with matlab (matplotlib) syntax. Wrapper for sample2d and sample3d. f is a function of one or two variables, such as x**2. var_args are intervals for each variable given in the form (var, min, max, n)
|
def sample(f, *var_args):
    if len(var_args) == 1:
        return sample2d(f, var_args[0])
    elif len(var_args) == 2:
        return sample3d(f, var_args[0], var_args[1])
    else:
        raise ValueError("Only 2d and 3d sampling are supported at this time.")
|
[
"def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n y, y_min, y_max, y_n = y_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args and y_args must be tuples of the form (var, min, max, intervals)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n y_l = float(y_max - y_min)\n y_d = y_l/float(y_n)\n y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)\n\n def meshgrid(x, y):\n \"\"\"\n Taken from matplotlib.mlab.meshgrid.\n \"\"\"\n x = np.array(x)\n y = np.array(y)\n numRows, numCols = len(y), len(x)\n x.shape = 1, numCols\n X = np.repeat(x, numRows, 0)\n\n y.shape = numRows, 1\n Y = np.repeat(y, numCols, 1)\n return X, Y\n\n X, Y = np.meshgrid(x_a, y_a)\n\n Z = np.ndarray((len(X), len(X[0])))\n for j in range(len(X)):\n for k in range(len(X[0])):\n try:\n Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))\n except (TypeError, NotImplementedError):\n Z[j][k] = 0\n return X, Y, Z",
"def sample2d(f, x_args):\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = x_args\n except (TypeError, IndexError):\n raise ValueError(\"x_args must be a tuple of the form (var, min, max, n)\")\n\n x_l = float(x_max - x_min)\n x_d = x_l/float(x_n)\n X = np.arange(float(x_min), float(x_max) + x_d, x_d)\n\n Y = np.empty(len(X))\n for i in range(len(X)):\n try:\n Y[i] = float(f.subs(x, X[i]))\n except TypeError:\n Y[i] = None\n return X, Y",
"def sample_function(function, limits, N):\n xmin = limits[0];\n xmax = limits[1];\n (X,h) = np.linspace(xmin,xmax,N,retstep=True);\n Y = function(X);\n return (Y,h);",
"def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)",
"def gen_sample(f, n):\n x = np.linspace(0,1,2**n+1)\n return x, f(x)",
"def _proxy_3d(args, fs=None, f_range=None, return_samples=None):\n\n sigs, kwargs = args[0], args[1]\n\n return compute_features_2d(sigs, fs, f_range, compute_features_kwargs=kwargs, axis=None,\n return_samples=return_samples)",
"def produce_data(n, Fn):\n X = []\n for xmin, xmax in zip(Fn.xmin, Fn.xmax):\n x = np.random.uniform(xmin, xmax, n)\n X.append(x)\n X = np.array(X).transpose()\n y = []\n for x in X:\n y.append(Fn.func(*x))\n y = np.array(y)\n y = y.reshape(n, y.size//n)\n return (X, y)",
"def sampleFunction1D(function, range, number_of_samples=1000,\n units=None):\n\n # Construct the sampled function.\n axis = Axis(float, range=range)\n if units is not None:\n axis.units = units\n result = SampledFunction1D(axis)\n result.name = str(function)\n # Add samples.\n lo, hi = range\n for x in hep.num.range(lo, hi, (hi - lo) / (number_of_samples - 1)):\n result.addSample(x, function(x))\n\n return result",
"def generate_data(values, function=non_linear_fn, length=25, range_=[-1, 1]):\n\n # build x vector\n x = np.linspace(range_[0], range_[1], length)\n\n data = np.zeros((values.shape[0], length))\n\n for i in range(values.shape[0]):\n data[i, :] = function(x, values[i, 0], values[i, 1], values[i, 2])\n\n return data",
"def PlotF(f,start,stop,x_label='x axis',y_label='y axis',lab='f(x)',\\\r\n my_title='Graph',arguments=()):\r\n \r\n #initiate figure object, plot object\r\n my_fig, my_plot = pyplot.subplots();\r\n\r\n #if no additional arguments, args is an empty tuple, so just plot\r\n\r\n if arguments==():\r\n \r\n # generate x array using linspace\r\n x_arr = np.linspace(start, stop, 1000)\r\n \r\n # generate y array by passing it the x array\r\n # this requires that the function be fully vectorized\r\n try:\r\n y_arr = f(x_arr)\r\n \r\n except: # the function is not vectorized\r\n # iter thru instead\r\n y_arr = np.zeros( len(x_arr) );\r\n for i in range( len(x_arr) ):\r\n y_arr[i] = f( x_arr[i])\r\n\r\n my_plot.plot(x_arr,y_arr,label=lab);\r\n\r\n #handle plotting if there are additional arguments \r\n else:\r\n\r\n # generate x array using linspace\r\n x_arr = np.linspace(start, stop, 1000)\r\n \r\n # generate y array by passing it the x array\r\n # this requires that the function be fully vectorized\r\n y_arr = f(x_arr, *arguments)\r\n\r\n my_plot.plot(x_arr,y_arr,label=lab);\r\n\r\n #format the graph\r\n my_plot.set( xlabel = x_label, ylabel = y_label, title = my_title );\r\n my_plot.grid()\r\n \r\n #show the graph\r\n pyplot.show();\r\n\r\n return my_fig;",
"def gendata(params,xmin,xmax,npts=4000):\n F = lorentzian.ForwardFactory\n def gensample(F, xmin, xmax):\n from numpy import arange\n import random\n a = arange(xmin, xmax, (xmax-xmin)/200.)\n ymin = 0\n ymax = F(a).max()\n while 1:\n t1 = random.random() * (xmax-xmin) + xmin\n t2 = random.random() * (ymax-ymin) + ymin\n t3 = F(t1)\n if t2 < t3:\n return t1\n fwd = F(params)\n return array([gensample(fwd, xmin,xmax) for i in xrange(npts)])",
"def _proxy_2d(args, fs=None, f_range=None, return_samples=None):\n\n sig, kwargs = args[0], args[1:]\n\n return compute_features(sig, fs=fs, f_range=f_range,\n return_samples=return_samples, **kwargs[0])",
"def sample(self,f,N,p=100):\n return [f(x) for x in np.linspace(0,N,p)]",
"def plot_multidimensional_function_slices(\n func: Callable[[np.ndarray], NDAorTuple],\n slice_loc: np.ndarray,\n bounds: Union[np.ndarray, List[Tuple[float, float]]],\n input_names: Optional[List[str]] = None,\n obs_points: Optional[Union[np.ndarray, List[np.ndarray]]] = None,\n input_scales: Optional[List[PLOT_SCALE]] = None,\n output_scale: PLOT_SCALE = \"linear\",\n output_label: str = \"Objective Value\",\n size: float = 3,\n slice_2d_resolution: int = 50,\n # slide_1d_resolution: int = 100,\n func_returns_confidence_intervals: bool = False,\n) -> Tuple[plt.Figure, np.ndarray]:\n # Input validation checks\n assert output_scale in [\"linear\", \"log\", \"symlog\"]\n\n def func_return_just_mean(x):\n \"\"\"\n If the supplied function is a predictor returning lower and upper confidence bounds as well as mean,\n return just the mean prediction. If not, return the function value evaluated at x.\n \"\"\"\n return func(x)[0] if func_returns_confidence_intervals else func(x)\n\n n_dims: int = len(bounds)\n # If multiple batches of points supplied as a list in obs_points, make a colour palette\n n_batches = len(obs_points) if isinstance(obs_points, (list, tuple)) else 1\n scatter_colours = sns.color_palette(\"viridis\", n_colors=n_batches)\n # If input_scales not specified, default all to 'linear'\n input_scales = input_scales if input_scales else [\"linear\"] * n_dims # type: ignore # auto\n # Keep track of contour sets returned for each axis\n contour_sets = []\n\n # Construct axes\n fig = plt.figure(figsize=(size * n_dims, size * n_dims))\n axes, cbar_axes = make_lower_triangular_axis_grid_with_colorbar_axes(\n fig=fig, num_cols=n_dims, num_colorbars=2, share_y_on_diagonal=True\n )\n\n # Keep a running minimum and maximum of function values in 2D slices\n func_values_min: float = np.inf\n func_values_max: float = -np.inf\n\n with sns.axes_style(\"darkgrid\"):\n for i in range(n_dims): # i iterates over the rows of the plots\n for j in range(n_dims): # j iterates over the columns of the plots\n ax = axes[i, j]\n # 1D-slice plots along the diagonal\n if i == j:\n if func_returns_confidence_intervals:\n plot_1d_slice_through_function_with_confidence_intervals(\n func, # type: ignore\n dim=i,\n slice_loc=slice_loc,\n slice_bounds=bounds[i],\n ax=ax,\n x_scale=input_scales[i],\n )\n else:\n plot_1d_slice_through_function(\n func, # type: ignore\n dim=i,\n slice_loc=slice_loc,\n slice_bounds=bounds[i],\n ax=ax,\n x_scale=input_scales[i],\n )\n ax.set_yscale(output_scale)\n\n # lower triangle\n elif i > j:\n dim_x, dim_y = j, i\n # Compute the data for the 2D slice plots\n xx, yy, func_values_slice = calc_2d_slice(\n func=func_return_just_mean, # type: ignore # auto\n dim_x=dim_x,\n dim_y=dim_y,\n slice_loc=slice_loc,\n slice_bounds_x=bounds[dim_x],\n slice_bounds_y=bounds[dim_y],\n x_scale=input_scales[dim_x],\n y_scale=input_scales[dim_y],\n resolution=slice_2d_resolution,\n )\n # Plot the 2D slice\n _, im = plot_2d_slice_from_arrays(\n xx,\n yy,\n func_values_slice,\n ax=ax,\n x_scale=input_scales[dim_x],\n y_scale=input_scales[dim_y],\n output_scale=output_scale,\n )\n contour_sets.append(im)\n # Keep a running minimum and maximum of function values in slices\n func_values_min = min(func_values_min, func_values_slice.min()) # type: ignore\n func_values_max = max(func_values_max, func_values_slice.max()) # type: ignore\n # Scatter points on the slices if given\n if obs_points is not None: # pragma: no cover\n if isinstance(obs_points, np.ndarray):\n # If just one array given, scatter with the 
colour reflecting objective value\n ax.scatter(\n obs_points[:, dim_x], obs_points[:, dim_y], color=scatter_colours[0], s=20, zorder=15\n )\n else:\n assert isinstance(obs_points, (list, tuple))\n # If multiple arrays given, colour the points according to the batch number\n for batch_num, batch_arr in enumerate(obs_points):\n ax.scatter(\n batch_arr[:, dim_x],\n batch_arr[:, dim_y],\n color=scatter_colours[batch_num],\n s=25,\n lw=0.0,\n alpha=0.8,\n zorder=15,\n )\n # Add axis labels\n if input_names is not None: # pragma: no cover\n # If plot in the first column (but not first row), add a y_label\n if i != 0 and j == 0:\n axes[i, j].set_ylabel(input_names[i])\n # If plot is at the bottom, add an x_label\n if i == n_dims - 1:\n axes[i, j].set_xlabel(input_names[j])\n if i >= j:\n # Remove redundant ticks on inner plots\n if i != n_dims - 1:\n axes[i, j].xaxis.set_visible(False)\n if j != 0:\n axes[i, j].yaxis.set_visible(False)\n # # Prune the upper-most tick from plot, so that the ticks don't overlap each other between plots\n # ax.yaxis.set_major_locator(ticker.MaxNLocator(prune='upper'))\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=9)\n ax.tick_params(axis=\"both\", which=\"minor\", labelsize=6)\n # Update the colour limits of the slice plots\n for contour_set in contour_sets:\n contour_set.set_clim(vmin=func_values_min, vmax=func_values_max)\n # Add the colourbars\n if n_dims > 1:\n # make a colourbar for the contour plots\n cb1 = fig.colorbar(contour_sets[-1], cax=cbar_axes[0], aspect=50)\n cb1.set_label(output_label)\n cbar_axes[0].yaxis.set_ticks_position(\"left\")\n # make a colourbar for different batches\n if n_batches > 1: # pragma: no cover\n cb2 = matplotlib.colorbar.ColorbarBase( # type: ignore # auto\n cbar_axes[1],\n cmap=matplotlib.colors.ListedColormap(scatter_colours),\n boundaries=[x - 0.5 for x in range(n_batches + 1)],\n ticks=list(range(n_batches)),\n spacing=\"proportional\",\n )\n cb2.set_label(\"Batch Number\")\n else:\n cbar_axes[1].set_visible(False)\n return fig, axes",
"def create_data(f, x_vals):\n y_vals = []\n for i in x_vals:\n y_vals.append(f(x_vals[i]))\n return np.array(y_vals)",
"def get_normal_data(n=1000, plot=False, xy_features=(2,1)):\n x0 = np.concatenate([\n np.random.normal(20, 2, size=n),\n np.random.normal(20, 2, size=n),\n np.random.normal(40, 2, size=n),\n np.random.normal(40, 2, size=n),\n ])\n x1 = np.concatenate([\n np.random.normal(60, 2, size=n),\n np.random.normal(80, 2, size=n),\n np.random.normal(60, 2, size=n),\n np.random.normal(80, 2, size=n),\n ])\n y = np.concatenate([\n np.random.normal(100, 2, size=n),\n np.random.normal(120, 2, size=n),\n np.random.normal(140, 2, size=n),\n np.random.normal(160, 2, size=n),\n ])\n \n # Shuffle data before returning\n data = np.stack((x0, x1, y), axis=1).astype(np.float32)\n np.random.shuffle(data)\n \n # 3D plot of dataset\n if plot:\n plot_data(data[:,:2], data[:,2:])\n \n # Able to specify 2D-X and 1D-y or vice versa\n return (data[:,:2], data[:,2:]) \\\n if xy_features == (2,1) else \\\n (data[:,:1], data[:,1:])",
"def sample_function(self, f, N):\n x = self.interpolation_points(N+1)\n try:\n return f(x)\n except: # needed when trying to sample functions which can't take a vector argument\n return np.vectorize(f)(x)",
"def sample(f, z_dist, n):\r\n zs = list(z_dist(size=n))\r\n ps = f(zs)\r\n ys = list(np.random.binomial(1, p=ps))\r\n return (zs, ys)",
"def draw_slice(f, x, y=None, scale=True, shift=False):\n import numpy\n\n if y is None:\n y = 0.0\n x, y = numpy.meshgrid(x, y)\n plotx = True if numpy.all(y == y[0,0]) else False\n\n z = 0*x\n s,t = x.shape\n for i in range(s):\n for j in range(t):\n xx,yy = x[i,j], y[i,j]\n z[i,j] = f([xx,yy])\n if shift:\n if shift is True: shift = max(-numpy.min(z), 0.0) + 0.5 # exact minimum\n z = z+shift\n if scale: z = numpy.log(4*z*scale+1)+2\n #XXX: need to 'correct' the z-axis (or provide easy conversion)\n\n fig = plt.figure()\n ax = fig.gca()\n ax.autoscale(tight=True)\n if plotx:\n ax.plot(x.reshape(-1), z.reshape(-1))\n else:\n ax.plot(y.reshape(-1), z.reshape(-1))\n return fig"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Iterate through each restaurant name in the list of restaurant names and aggregate each result into the results list
|
def results_aggregator(self, names):
for name in names:
        result = self.main(name)  # result is the formatted (name, number, rating, review count)
        self.results.append(result)
        print("'%s' has been written to the file." % result[0])
|
[
"def resolveResult(self, restaurants):\n restaurant_list = []\n for restaurant in restaurants:\n restaurant_list.append({'Name': restaurant['restaurant']['name'], \"cuisines\": [x.strip() for x in restaurant['restaurant']['cuisines'].split(',')],\n \"lat\": restaurant['restaurant']['location']['latitude'], \"long\": restaurant['restaurant']['location']['longitude'], \"highlights\": restaurant['restaurant']['highlights'], \"Thumb\": restaurant['restaurant']['thumb'],\n \"user_Rating\": restaurant['restaurant']['user_rating']['aggregate_rating'],\"phone_Numbers\": restaurant['restaurant']['phone_numbers']})\n cuisineDict = { \"Chinese\":1, \"Korean\":2,\"Australia\":3,\"Japanese\":4,}\n WordDict = {1: \"cozy\",2: \"tasty\",3:'amazing',4:'flavorful',5:'yummy'}\n for i in range(len(restaurant_list)):\n icon = 5\n cuisines = restaurant_list[i][\"cuisines\"]\n adjective = WordDict[random.randint(1,5)]\n comment = \"This is a \"+ adjective\n if cuisines:\n if \"Chinese\" in cuisines:\n icon = 1\n elif \"Korean\" in cuisines:\n icon = 2\n elif \"Australia\" in cuisines:\n icon = 3\n elif \"Japanese\" in cuisines:\n icon = 4\n else:\n icon = 5\n comment = comment + \" \" + cuisines[0]\n restaurant_list[i]['icon'] = icon\n comment = comment + \" restaurant\"\n restaurant_list[i]['comment'] = comment\n res = {\"restaurants\":restaurant_list }\n return res",
"def restaurant_name(restaurant):\n # BEGIN Question 2\n return restaurant[0]\n # END Question 2",
"def calc_foods(meals):\n foods = {}\n for meal in meals:\n # Add up foods\n for food_name in meal.ingredients:\n if food_name in foods:\n u = foods[food_name].units\n foods[food_name] += meal.ingredients[food_name].to(u)\n else:\n foods[food_name] = meal.ingredients[food_name]\n \n return foods",
"def getRestaurantAddresses(restaurants):\n addresslist = []\n for rest in restaurants:\n if 'address' in rest:\n addressstring = str(rest['address']) + ' ' + str(rest['city'])\n addresslist.append(addressstring)\n\n # pprint.pprint(addresslist)\n return addresslist",
"def restaurant_ratings(restaurant):\n return [review_rating(x) for x in restaurant[4]]",
"def get_dishes_ar(self):\n restaurant_name = self.get_restaurant_name()\n dishes_ar = self.get_dishes()\n for i in xrange(len(dishes_ar)):\n dishes_ar[i] = dishes_ar[i].lower()\n dishes_ar[i] = \"-\".join(dishes_ar[i].split(\" \")) + \"_\" + restaurant_name\n #print dishes_ar\n return dishes_ar",
"def getRestaurantAddressDict(restaurants):\n addressdict = {}\n for rest in restaurants:\n if 'address' in rest:\n addressstring = str(rest['address']) + ' ' + str(rest['city'])\n addressdict[addressstring] = rest['name']\n\n return addressdict",
"def analyse(self):\n self.__gather_tagged_reviews(self._restaurants)",
"def parse_restaurant_name(text):\n stripped = text.lower()\n\n for name_list in RESTAURANT_NAMES:\n for name in name_list:\n if name.lower() in stripped:\n return name_list[0]\n\n return \"\"",
"def summarize_food_data(unprocessed_food_list: List[str]) -> List[Dict[str, str]]:\n summary: List[Dict[str, str]] = []\n item_count_data: Dict[str, int] = {}\n\n for item in unprocessed_food_list:\n if item not in item_count_data:\n item_count_data[item] = 1\n else:\n item_count_data[item] += 1\n \n for product in item_count_data:\n item_information: Dict[str, str] = {}\n item_information[\"name\"] = product\n item_information[\"quantity\"] = str(item_count_data[product])\n item_information[\"units\"] = \"-\"\n summary.append(item_information)\n \n return summary",
"def user_reviewed_restaurants(user, restaurants):\n names = user_reviews(user).keys()\n return {name: restaurants[name] for name in names if name in restaurants}",
"def Collection_search_name(C:list, name:str) -> list:\r\n restaurants = []\r\n for r in C:\r\n for dish in r.menu:\r\n if name in dish.name:\r\n restaurants.append(r)\r\n return restaurants",
"def restaurant_query(term, cities):\n bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n output_path = DEFAULT_OUTPUT_PATH\n # output csv: open a file for writing. Note: use utf-8 encoding to deal with special chars\n rest_data = open(output_path.format(term), 'w', 1, 'utf-8', None, newline='')\n csv_writer = csv.writer(rest_data)\n # write header\n header = ['id', 'name', 'display_phone', 'longitude', 'latitude', 'image_url', 'country', 'address1', 'state',\n 'display_address', 'address2', 'address3', 'city', 'zip_code', 'url', 'rating', 'review_count', 'price',\n 'tags']\n csv_writer.writerow(header)\n\n for city in cities:\n city_to_search = city + ', ' + DEFAULT_STATE\n response = search(bearer_token, term, city_to_search)\n businesses = response.get('businesses')\n\n if not businesses:\n print(u'No businesses for {0} in {1} found.'.format(term, city))\n continue\n\n print(u'{0} businesses found in {1}, now processing search result'.format(len(businesses), city_to_search))\n\n for bus in businesses:\n tags = ''\n cats = bus['categories']\n # make tags from yelp api categories alias' values\n for cat in cats:\n tags += cat['alias'] + ','\n\n tags = tags[:-1]\n # check for two conditions:\n # 1. the business is actually in the selected city.\n # 2. the tags include the given word.\n # if any of the two conditions is not meet, it will not be added to the list\n if bus['location']['city'].strip() != city:\n continue\n if term not in tags:\n continue\n\n price = ''\n if 'price' in bus:\n price = bus['price']\n\n row_data = [bus['id'], bus['name'], bus['display_phone'], bus['coordinates']['longitude'], bus['coordinates']['latitude'], bus['image_url'], bus['location']['country'], bus['location']['address1'], bus['location']['state'], bus['location']['display_address'], bus['location']['address2'], bus['location']['address3'], bus['location']['city'], bus['location']['zip_code'], bus['url'], bus['rating'], bus['review_count'], price, tags]\n csv_writer.writerow(row_data)\n\n # close the file\n rest_data.close()",
"def user_reviewed_restaurants(user, restaurants):\n names = list(user_reviews(user))\n return [r for r in restaurants if restaurant_name(r) in names]",
"def make_restaurant(name, location, categories, price, reviews):\n # BEGIN Question 2\n return [name, location, categories, price, reviews]\n # END Question 2",
"def parse_restaurant(response):\n\n soup = BeautifulSoup(response.text, 'html.parser')\n item = Restaurant()\n\n item['name'] = soup.find('h1', {'class': 'ui_header h1'}).text\n item['location'] = soup.find('span', {'class': 'detail'}).text\n\n yield item",
"def count_drugs(bag_of_drugs, drugs_result): \n for drug_name, values in bag_of_drugs.items():\n # get the list of last_name and fist_name of all prescribers\n list_prescriber = [ tuple([ i[1].lower(), i[2].lower() ]) for i in values ]\n # count the number of unique prescribers who prescribe the drug\n num_prescriber = len(set(list_prescriber))\n # get the total cost of the drug across all prescribers\n total_cost = sum([ int(float(i[-1])) for i in values ])\n # create a list of all drugs, the total number of UNIQUE prescribers, and the total drug cost\n drugs_result.append(tuple([drug_name, num_prescriber, total_cost]))\n \n #sort the drug name in ascending order\n drugs_result.sort(key=lambda x: x[0], reverse=False)\n \n #sort the total drug cost in descending order \n drugs_result.sort(key=lambda x: x[2], reverse=True)",
"def search(query, restaurants):\n # returns a list of Restaurants\n restless = []\n for x in restaurants:\n if query in restaurant_categories(x):\n restless.append(x)\n return restless",
"def get_restaurant_name_match_score(restaurant1, restaurant2):\n rest1_words = restaurant1.split()\n rest2_words = restaurant2.split()\n rest1_words_match_count = 0\n for word in rest1_words:\n if word in restaurant2 or close_match_in_word_list(word, rest2_words):\n rest1_words_match_count += 1\n rest1_words_match_score = (\n int((rest1_words_match_count / float(len(rest1_words))) * 100))\n return rest1_words_match_score"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the publication date of a CVE if it is available
|
def parser_cve_date_publi(self, cve):
    # The NVD link may be unreachable because of a certificate problem
    try:
        pageCVE = opener.open(cve.get('href'))
    except ssl.CertificateError:
        return None
    soupCVE = BeautifulSoup(pageCVE, 'html.parser')
    # Locate the "Last Modified" label and read the date that follows it
    res = soupCVE.find('strong', text=re.compile("Last Modified"))
    if res is not None:
        res = res.next_sibling.next_sibling.next_sibling
        res = res.getText()
        res = re.sub("[A-Za-z0-9/\s]*: ", "", res)
        # Reorder the date from MM/DD/YYYY to YYYY-MM-DD
        res = res.split("/")
        res = res[2] + '-' + res[0] + '-' + res[1]
        return res
    return None
|
[
"def receive_date(self):\n if 'v112' in self.data['article']:\n return tools.get_publication_date(self.data['article']['v112'][0]['_'])\n return None",
"def acceptance_date(self):\n if 'v114' in self.data['article']:\n return tools.get_publication_date(self.data['article']['v114'][0]['_'])\n return None",
"def publish_date(self):\n pass",
"def ahead_publication_date(self):\n if 'v223' in self.data['article']:\n return tools.get_publication_date(self.data['article']['v223'][0]['_'])\n return None",
"def get_pub_date():\n return datetime.datetime.now()",
"def publication_date(self):\n\n return tools.get_publication_date(self.data['article']['v65'][0]['_'])",
"def default_pub_date():\n return",
"def publication_date(self):\n return self.start_publication or self.creation_date",
"def review_date(self):\n if 'v116' in self.data['article']:\n return tools.get_publication_date(self.data['article']['v116'][0]['_'])\n return None",
"def pub_date(self):\n pub_date = getattr(self, 'PUBLISH_DATE', None)\n if not pub_date:\n pub_date = self.package.get('date')\n else:\n pub_date = datetime.fromisoformat(str(pub_date))\n\n return pub_date",
"def publication_date(self):\n return self._publication_date",
"def processing_date(self):\n\n return tools.get_publication_date(self.data['article']['v91'][0]['_'])",
"def publication_date(self):\n try:\n return PartialDate.loads(\n get_value(self.record, \"imprints.date[0]\")\n or LiteratureReader(self.record).publication_date\n )\n except ValueError:\n return None",
"def get_ext_date(self):\n return self.publication_date",
"def valid_date(self):\n if self.pubkey is None:\n raise ValueError(\"Public key should be loaded for fetch valid date.\")\n\n s = self.pub_cert[0][4][0].asOctets()\n valid_from = \"20%s-%s-%s %s:%s:%s\" % (s[0:2], s[2:4], s[4:6], s[6:8], s[8:10], s[10:12])\n\n s = self.pub_cert[0][4][1].asOctets()\n valid_until = \"20%s-%s-%s %s:%s:%s\" % (s[0:2], s[2:4], s[4:6], s[6:8], s[8:10], s[10:12])\n return (valid_from, valid_until)",
"def item_pubdate(self, item):\r\n return item.date",
"def publication_date(self) -> datetime:\n publication_date = self._search_in_properties(ATTR_PUB_DATE)\n if publication_date:\n # Parse the date. Example: 15/09/2018 9:31:00 AM\n date_struct = strptime(publication_date, \"%d/%m/%Y %I:%M:%S %p\")\n publication_date = datetime.fromtimestamp(calendar.timegm(\n date_struct), tz=pytz.utc)\n return publication_date",
"def publish_date_string(self,) -> str:\n return self.__data['PublishDate']",
"def publish_date(self):\n return self._group_data.get('publishDate')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the CVEs, their links to NVD, their creation dates, and the text associated with each vulnerability. Fills the lists initialized beforehand (see the __init__ function above)
|
def cve_parser(self):
    cves = self.soup.findAll('a', text=re.compile("CVE-"))
    for cve in cves:
        id = cve.getText()
        self.cve += [id]
        # Derive a creation date from the CVE id: keep only the year and default it to January 1st
        cve_date = re.sub("CVE-", "", id)
        cve_date = re.sub("-[0-9\s]*", "", cve_date)
        cve_date += "-01-01"
        self.cve_date += [cve_date]
        cve_link = cve.get('href')
        self.cve_link += [cve_link]
        cve_date_publi = self.parser_cve_date_publi(cve)
        self.cve_date_publi += [cve_date_publi]
        #text = cve.parent.previous_sibling.getText()
        #self.cve_text += [text]
|
[
"def _initCvsVersion(self):\n\n output = _exec('cvs -vf')\n m = re.match(\n r'Concurrent Versions System \\(CVS\\) '\n r'(?P<numericpart>(\\d+\\.)+\\d+)(?P<rest>\\S*)'\n r' \\(client\\/server\\)',\n output[1]\n )\n if m:\n v = [int(x) for x in m.group('numericpart').split('.')]\n if m.group('rest'):\n v.append(m.group('rest'))\n self._cvs_version = tuple(v)\n else:\n # If we can't parse the version output, we just assume it\n # was a very \"old\" version:\n self._cvs_version = (0,)",
"def readVLTUS(self): \n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/VALID.LTUS\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n Detectors=[] \n for i in lines:\n for j in i:\n if j == ' ': continue\n else: break\n if j=='#': continue\n items=i.split('=')\n detector={}\n detector['name']=items[0]\n Detectors.append(detector)\n #print Detectors \n #print '-----------------------------' \n return Detectors",
"def extract_cve():\n cve_data = ['CVE-Modified.json', 'CVE-Recent.json',\n 'CVE-2002.json', 'CVE-2003.json',\n 'CVE-2004.json', 'CVE-2005.json',\n 'CVE-2006.json', 'CVE-2007.json',\n 'CVE-2008.json', 'CVE-2009.json',\n 'CVE-2010.json', 'CVE-2011.json',\n 'CVE-2012.json', 'CVE-2013.json',\n 'CVE-2014.json', 'CVE-2015.json',\n 'CVE-2016.json', 'CVE-2017.json',\n 'CVE-2018.json']\n\n vulnerability_lst = []\n for cve_datum in cve_data:\n entries = get_cve_entries(\"./data/\"+cve_datum)\n \n for entry in entries:\n vulnerability_lst.append(AttackVector(db_id=entry[\"cve\"][\"CVE_data_meta\"][\"ID\"][4:],\n db_name=\"CVE\",\n name=entry[\"cve\"][\"CVE_data_meta\"][\"ID\"],\n related_weakness=parse_cve_relationships(entry[\"cve\"][\"problemtype\"][\"problemtype_data\"]),\n related_attack_pattern=[\"N/A\"],\n related_vulnerability=[\"N/A\"],\n contents=parse_desc_data(entry[\"cve\"][\"description\"][\"description_data\"])))\n\n return vulnerability_lst",
"def __init__(self, c, v):\r\n self.Couleur = c\r\n self.Valeur = v",
"def __init__(self, nome, versao):\r\n super().__init__()\r\n self.nome = nome\r\n self.versao = versao\r\n print(\"{} ligando na versão {}!\".format(self.nome, str(self.versao)))",
"def __init__(self, cuvant):\n self.__cuvant = cuvant\n self.__text = self.__get_definitie()",
"def __init__(self, *args, **kwargs):\n _core_.VersionInfo_swiginit(self,_core_.new_VersionInfo(*args, **kwargs))",
"def __init__(self):\n \n self.CNVData = []\n self.label_replace_dictionary = {\n 'SM1_centroids_nucleotides': 'Chelydra_serpentina',\n 'SM2_centroids_nucleotides': 'Anolis_sagrei',\n 'SM3_centroids_nucleotides': 'Elgaria_multicarinata',\n 'SM4_centroids_nucleotides': 'Lamprophis',\n 'SM5_centroids_nucleotides': 'Agkistrodon_piscivorus',\n 'SM6_centroids_nucleotides': 'Xenopeltis_unicolor',\n 'SM7_centroids_nucleotides': 'Alligator_mississippiensis',\n 'SM8_centroids_nucleotides': 'Sceloporus_undulatus',\n 'SM9_centroids_nucleotides': 'Pogona',\n 'SM10_centroids_nucleotides': 'Sternotherus_odoratus',\n 'SM11_centroids_nucleotides': 'Sternotherus_odoratus',\n 'SM12_centroids_nucleotides': 'Scincella_lateralis',\n 'SM13_centroids_nucleotides': 'Terrapene_carolina',\n 'SM14_centroids_nucleotides': 'Agkistrodon_piscivorus',\n 'SM15_centroids_nucleotides': 'Eublepharis_macularius',\n 'TC_centroids_nucleotides': 'Thamnophis_sirtalis',\n 'HS08_centroids_nucleotides': 'Thamnophis_elegans',\n 'HS11_centroids_nucleotides': 'Thamnophis_couchii'\n }",
"def list_ec_vit(self):\n dico_vit = {}\n for term in self.result: # itère les termes\n if term.predicate == \"enzymeV\": # ne retient que les terms enzymeV\n dico_vit[term.arguments[0]] = dico_vit.get(term.arguments[0], []) + [term.arguments[1]]\n # si la cle n'existe pas, le get initialise une liste vide\n print(dico_vit)\n return dico_vit",
"def init_VNFs_Services():\n test_VNFs_Services = []\n\n # add info to list in memory, one by one, following signature values\n vnf_serv_ID = 1\n vnf_serv_name = \"vCPE-1\"\n vnf_serv_info = \"virtual CPE in Arm pod\"\n vnf_serv_IPAddress = \"5.4.3.2\"\n vnf_serv_URL = \"http://5.4.3.2:8080\"\n vnf_serv_related_phys_rsrcIDs = [1,2]\n vnf_serv_related_cloudvirt_rsrcIDs = [1]\n\n test_VNFs_Services.append(VNFService(vnf_serv_ID, vnf_serv_name,\n vnf_serv_info,\n vnf_serv_IPAddress,\n vnf_serv_URL,\n vnf_serv_related_phys_rsrcIDs,\n vnf_serv_related_cloudvirt_rsrcIDs))\n\n\n vnf_serv_ID = 2\n vnf_serv_name = \"vFW-1\"\n vnf_serv_info = \"virtual Firewall in x86 pod\"\n vnf_serv_IPAddress = \"6.7.8.9\"\n vnf_serv_URL = \"http://6.7.8.9:8080\"\n vnf_serv_related_phys_rsrcIDs = [3]\n vnf_serv_related_cloudvirt_rsrcIDs = [2,3]\n\n test_VNFs_Services.append(VNFService(vnf_serv_ID, vnf_serv_name,\n vnf_serv_info,\n vnf_serv_IPAddress,\n vnf_serv_URL,\n vnf_serv_related_phys_rsrcIDs,\n vnf_serv_related_cloudvirt_rsrcIDs))\n\n # write list to binary file\n write_list_bin(test_VNFs_Services, FILE_VNFS_SERVICES)\n\n return test_VNFs_Services",
"def create_code_list_versions():\n # Création de l'élement racine <CodeListVersions>\n arbre_code_list_versions = et.Element(\"CodeListVersions\")\n\n # Je créé le sous-élément <ReplyCodeListVersion> enfant de la balise <CodeListVersions> et je lui donne comme valeur\n # textuelle \"ReplyCodeListVersion0\"\n reply_code_list_version = et.SubElement(arbre_code_list_versions, 'ReplyCodeListVersion')\n reply_code_list_version.text = 'ReplyCodeListVersion0'\n\n # Je créé le sous-élément <MessageDigestAlgorithmCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"MessageDigestAlgorithmCodeListVersion0\"\n message_digest_algorithm_code_list_version = et.SubElement(arbre_code_list_versions,\n 'MessageDigestAlgorithmCodeListVersion')\n message_digest_algorithm_code_list_version.text = 'MessageDigestAlgorithmCodeListVersion0'\n\n # Je créé le sous-élément <MimeTypeCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"MimeTypeCodeListVersion0\"\n mime_type_code_list_version = et.SubElement(arbre_code_list_versions, 'MimeTypeCodeListVersion')\n mime_type_code_list_version.text = 'MimeTypeCodeListVersion0'\n\n # Je créé le sous-élément <EncodingCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"EncodingCodeListVersion0\"\n encoding_code_list_version = et.SubElement(arbre_code_list_versions, 'EncodingCodeListVersion')\n encoding_code_list_version.text = 'EncodingCodeListVersion0'\n\n # Je créé le sous-élément <FileFormatCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"FileFormatCodeListVersion0\"\n file_format_code_list_version = et.SubElement(arbre_code_list_versions, 'FileFormatCodeListVersion')\n file_format_code_list_version.text = 'FileFormatCodeListVersion0'\n\n # Je créé le sous-élément <CompressionAlgorithmCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"CompressionAlgorithmCodeListVersion0\"\n compression_algorithm_code_list_version = et.SubElement(arbre_code_list_versions,\n 'CompressionAlgorithmCodeListVersion')\n compression_algorithm_code_list_version.text = 'CompressionAlgorithmCodeListVersion0'\n\n # Je créé le sous-élément <DataObjectVersionCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"DataObjectVersionCodeListVersion0\"\n data_object_version_code_list_version = et.SubElement(arbre_code_list_versions, 'DataObjectVersionCodeListVersion')\n data_object_version_code_list_version.text = 'DataObjectVersionCodeListVersion0'\n\n # Je créé le sous-élément <StorageRuleCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"StorageRuleCodeListVersion0\"\n storage_rule_code_list_version = et.SubElement(arbre_code_list_versions, 'StorageRuleCodeListVersion')\n storage_rule_code_list_version.text = 'StorageRuleCodeListVersion0'\n\n # Je créé le sous-élément <AppraisalRuleCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"AppraisalRuleCodeListVersion0\"\n appraisal_rule_code_list_version = et.SubElement(arbre_code_list_versions, 'AppraisalRuleCodeListVersion')\n appraisal_rule_code_list_version.text = 'AppraisalRuleCodeListVersion0'\n\n # Je créé le sous-élément <AccessRuleCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"AccessRuleCodeListVersion0\"\n 
access_rule_code_list_version = et.SubElement(arbre_code_list_versions, 'AccessRuleCodeListVersion')\n access_rule_code_list_version.text = 'AccessRuleCodeListVersion0'\n\n # Je créé le sous-élément <DisseminationRuleCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"DisseminationRuleCodeListVersion0\"\n dissemination_rule_code_list_version = et.SubElement(arbre_code_list_versions, 'DisseminationRuleCodeListVersion')\n dissemination_rule_code_list_version.text = 'DisseminationRuleCodeListVersion0'\n\n # Je créé le sous-élément <ReuseRuleCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"ReuseRuleCodeListVersion0\"\n reuse_rule_code_list_version = et.SubElement(arbre_code_list_versions, 'ReuseRuleCodeListVersion')\n reuse_rule_code_list_version.text = 'ReuseRuleCodeListVersion0'\n\n # Je créé le sous-élément <ClassificationRuleCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"ClassificationRuleCodeListVersion0\"\n classification_rule_code_list_version = et.SubElement(arbre_code_list_versions, 'ClassificationRuleCodeListVersion')\n classification_rule_code_list_version.text = 'ClassificationRuleCodeListVersion0'\n\n # Je créé le sous-élément <AuthorizationReasonCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"AuthorizationReasonCodeListVersion0\"\n authorization_reason_code_list_version = et.SubElement(arbre_code_list_versions,\n 'AuthorizationReasonCodeListVersion')\n authorization_reason_code_list_version.text = 'AuthorizationReasonCodeListVersion0'\n\n # Je créé le sous-élément <RelationshipCodeListVersion> enfant de la balise <CodeListVersions> et je\n # lui donne comme valeur textuelle \"RelationshipCodeListVersion0\"\n relationship_code_list_version = et.SubElement(arbre_code_list_versions, 'RelationshipCodeListVersion')\n relationship_code_list_version.text = 'RelationshipCodeListVersion0'\n\n # Une fois cet arbre créé la fonction le renvoie vers la partie de code qui a appelée la fonction.\n return arbre_code_list_versions",
"def __init__(self, vcs):\n super(Histedit, self).__init__(vcs)",
"def initialiser(self):\n\n # Vider le dictionnaire (pratique si on veut recommencer le jeu).\n self.cases.clear()\n # Parcourir le dictionnaire et mettre des objets de la classe Case.\n # dont l'attribut \"contenu\" serait un espace (\" \").\n for i in range(0, 3):\n for j in range(0, 3):\n self.cases[i, j] = Case(\" \")",
"def __init__(self):\n\n # Dictionnaire de cases.\n # La clé est une position (ligne, colonne), et la valeur est une instance de la classe Case.\n self.cases = {}\n\n # Appel d'une méthode qui initialise un plateau contenant des cases vides.\n self.initialiser()",
"def addVitals(self):\n\n def getBP(vt):\n \"\"\" Format a bloodPressure for templating into Vitals documents. \"\"\"\n vt['indivo_prefix'] = 'bp_' + vt['name']\n return getVital(vt)\n \n def getVital(vt):\n \"\"\" Format a vitalSign for templating into Vitals documents. \"\"\"\n\n if hasattr(v, vt['name']):\n val = getattr(v, vt['name'])\n sys, title, ident = self.coded_value(vt['uri'])\n return VITAL_SIGN.sub(\n {'unit': vt['unit'],\n 'val': val,\n 'name_title': title,\n 'name_id': ident,\n 'name_system': sys\n }\n ).sub(\n {'prefix': vt['indivo_prefix'] if 'indivo_prefix' in vt else vt['name']}, \n escape=False\n ).done()\n\n def cleanVitalsDate(date_str):\n \"\"\" Convert dates coming from raw Vitals data into UTC ISO8601 Timestamps.\"\"\"\n if date_str[-1] != 'Z':\n date_str += 'Z'\n return date_str.replace(' ', 'T')\n \n if self.pid in VitalSigns.vitals:\n for v in VitalSigns.vitals[self.pid]:\n measurements = []\n for vt in VitalSigns.vitalTypes:\n measurements.append(getVital(vt))\n\n if v.systolic:\n measurements.append(getBP(VitalSigns.systolic))\n measurements.append(getBP(VitalSigns.diastolic))\n\n encounter_str = ENCOUNTER.sub(\n {'start':cleanVitalsDate(v.start_date),\n 'end':cleanVitalsDate(v.end_date)\n }\n ).sub(\n {'encounterType':ENCOUNTER_TYPE.done() if v.encounter_type == 'ambulatory' else ''}, \n escape=False\n ).done()\n\n vitals_str = VITAL_SIGNS.sub(\n {'date': cleanVitalsDate(v.timestamp),\n }\n ).sub(\n {'encounter': encounter_str,\n 'vitals_str': ''.join(measurements)}, \n escape=False\n ).done()\n self.data.append(SDMX.sub({'models':vitals_str}, escape=False).done())",
"def __init__(self, *args):\n _snap.TStrFltPrV_swiginit(self, _snap.new_TStrFltPrV(*args))",
"def __init__(self): \r\n self.datalist = []\r\n self.indx = []\r\n self.insearchflag = 0\r\n self.indx_save = []\r\n self.indx_compare = []\r\n self.dictyear1 = dict()\r\n self.dicttypeof = dict()\r\n self.dictyear2 = dict()\r\n self.dictseason = dict()\r\n self.dictseries = dict()\r\n self.dictsuspended = {None : set(), 1 : set()}\r\n self.p = re.compile('''\"?([^\"]+)\"?\\s+ # Название\r\n \\(([\\d?IVX/-]{4,})\\)\\s+ # Год\r\n (?:\\(([VGT]{1,2})\\)\\s+)? # Тип\r\n (?:(?:\\{(?:((?!\\(\\#)[^{}]+?))?\\s*)? # Название серии\r\n (?:\\(\\#(\\d+).(\\d+)\\))? \\}\\s+)? # Сезон, серия\r\n (?:\\{\\{(SUSPENDED)\\}\\}\\s+)? # Отменен?\r\n ([\\d?-]+) # Год\r\n $''', re.VERBOSE)",
"def __init__(self):\n #MdvData. __init__(self,model.target_fragments)\n #self.mdv = {}\n self.mdvtc ={}\n self.mode = \"timecourse\"",
"def __init__(self):\n self.comparer_title = ComparerTitle()\n self.comparer_desciption = ComparerDescription()\n self.comparer_text = ComparerText()\n self.comparer_author = ComparerAuthor()\n self.comparer_date = ComparerDate()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the CVSS vectors and splits them into lists; also retrieves the CVSS scores. Fills the lists initialized beforehand (see the __init__ function above)
|
def cvss_parser(self):
    cvsss = self.soup.findAll('a', text=re.compile("AV:"))
    for cvss in cvsss:
        # Strip the parentheses around the CVSS vector and split it into its individual metrics
        id = cvss.getText()
        id = re.sub("[()]*", "", id)
        id = id.split('/')
        self.cvss += [id]
        # The score is embedded in the surrounding sentence ("... score of X.Y has ...")
        score = cvss.parent.getText()
        score = re.sub("[A-Za-z0-9\-.,;\s]* score of ", "", score)
        score = re.sub(" has[A-Za-z0-9\-.,;\s\(\):/]*", "", score)
        self.score += [score]
|
[
"def __init__(self, n):\r\n self.lcs = [LearningCurve(name=f\"cv_{i}\") for i in range(n)]",
"def __init__(self):\n self.svclassifier = SVC(kernel='linear')",
"def test_split_data_cv():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset.tsv\")\n cvs, train, val, test = pid.split_data_cv(data_file, datatype='sequence',\n problem_type='classification', num_classes=3)\n\n assert (len(train) == 210) and (len(val) == 45) and (len(test) == 45) and (\n len(train[0]) == 3) and (len(cvs) == 5) and (len(cvs[0]) == 2)",
"def __init__(self, proportion, splits):\n\t\tself.proportion = proportion\n\t\tself.splits = splits",
"def extract_sub_scores(self, idxs):\n\n\t\tnew_cv_res = CV_Results()\n\n\t\tfor j, (model_s, cur_res) in enumerate(self.res.items()):\n\t\t\tfor i,(k, lmbdas) in enumerate(cur_res.items()):\n\t\t\t\tfor lmbda, res_list in lmbdas.items():\n\t\t\t\t\tfor res in res_list:\n\t\t\t\t\t\tif res.ranks != None:\n\t\t\t\t\t\t\tres = Result(res.preds[idxs], res.true_vals[idxs], res.ranks[idxs], res.raw_ranks[idxs])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tres = Result(res.preds[idxs], res.true_vals[idxs], None, None)\n\t\t\t\t\t\t\n\t\t\t\t\t\tnew_cv_res.add_res(res, model_s, k, lmbda, self.nb_params_used[model_s][k])\n\n\t\treturn new_cv_res",
"def populate_score_matrices(self):\n\n ### FILL IN ###",
"def __init__(self, scores, genotypes):\n\t\tcalled_scores = sorted([score for (score, genotype) in zip(scores, genotypes) if genotype != 0])\n\t\tself.gc_10 = ScoreStatistics.percentile(called_scores, 10)\n\t\tself.gc_50 = ScoreStatistics.percentile(called_scores, 50)",
"def cv_score_table(res_sprm_cv):\n \n n_settings = len(res_sprm_cv.cv_results_['params'])\n etas = [res_sprm_cv.cv_results_['params'][i]['eta'] for i in range(0,n_settings)]\n components = [res_sprm_cv.cv_results_['params'][i]['n_components'] for i in range(0,n_settings)]\n cv_score_table_ = ps.DataFrame({'etas':etas, 'n_components':components, 'score':res_sprm_cv.cv_results_['mean_test_score']})\n return(cv_score_table_)",
"def __init__(self):\n this = _libsbml.new_ListWrapperCVTerm()\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, b_num, side, ss_size, sulci_list, saving_dir,\n data_dir=_DEFAULT_DATA_DIR, bbox_dir=_DEFAULT_BBOX_DIR):\n self.b_num = b_num\n self.side = side\n self.ss_size = ss_size\n self.sulci_list = sulci_list\n self.sulci_list = complete_sulci_name(self.sulci_list, self.side)\n self.data_dir = data_dir\n self.saving_dir = saving_dir\n self.abnormality_test = []\n self.bbmin, self.bbmax = compute_max_box(self.sulci_list, side,\n talairach_box=True, src_dir=bbox_dir)\n print(self.bbmin, self.bbmax)\n self.cpt_skel_1 = 't1mri/default_acquisition/default_analysis/segmentation'\n self.cpt_skel_2 = 'skeleton_'\n self.cpt_skel_3 = '.nii.gz'",
"def GetXYListAndPolyListFromCVLS(cVLS, allValsByFrame, orderOfSCsByValueByFrame):\n numFrames = len(cVLS)\n xyList = [\n sorted(list(set(tuple(pt) for c in cvlsByVal for pt in c[2])))\n for cvlsByVal in cVLS\n ]\n polyList = []\n\n for t in range(numFrames):\n polyList.append({})\n for v in allValsByFrame[t]:\n subContours = [\n (cVLS[t][-index][2][::-1] if index < 0 else cVLS[t][index][2])\n for index in orderOfSCsByValueByFrame[t][v]\n ] # reconstruct the sc's, flipping if index is negative\n polyList[-1][v] = [\n xyList[t].index(totuple(pt)) for sc in subContours for pt in sc[:-1]\n ]\n polyList[-1][v] = polyList[-1][v] + [\n polyList[-1][v][0]\n ] # Tack on the first point at the end to close the loop\n # VFMin doesn't like this format; make sure to remove this last point before saving to a file or passing to VFM...\n # polyList[-1][v] = removeDuplicates(polyList[-1][v])+[polyList[-1][v][0]] # Remove interior duplication...\n return xyList, polyList",
"def __init__(self, index_scores):\n self.index_scores = index_scores",
"def __init__(self, xss):\n self._mat = xss",
"def construirMatriz_votacion():",
"def get_score_list(self):\n return self.scorelist",
"def __init__(self, clusters, array_in):\n self.data = []\n self.array = array_in\n self.clusters = []\n self.vv_matrix = []\n self.res = 0\n self.prcp_samples = {}\n self.dim = len(clusters[0][\"cluster\"])\n for cl in clusters:\n self.clusters.append(cl[\"cluster\"])\n self.b_tensor = np.ones((self.array.shape[0], self.array.shape[1], self.array.shape[1]))",
"def __init__(self, data_points):\n\n self.data_points = data_points\n self.ssrc_frequencies = misc.NormalizeCounter(\n collections.Counter([pt.ssrc for pt in self.data_points]))\n self.ssrc_size_table = misc.SsrcNormalizedSizeTable(self.data_points)\n self.bandwidth_kbps = None\n self.smooth_bw_kbps = None",
"def buildSplits(self, args):\n trainData = []\n testData = []\n splits = []\n trainDir = args[0]\n if len(args) == 1:\n print '[INFO]\\tPerforming %d-fold cross-validation on data set:\\t%s' % (self.numFolds, trainDir)\n\n posTrainFileNames = os.listdir('%s/pos/' % trainDir)\n negTrainFileNames = os.listdir('%s/neg/' % trainDir)\n for fold in range(0, self.numFolds):\n split = self.TrainSplit()\n for fileName in posTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/pos/%s' % (trainDir, fileName))\n example.klass = 'pos'\n example.fileName = fileName\n if fileName[2] == str(fold):\n split.test.append(example)\n else:\n split.train.append(example)\n for fileName in negTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/neg/%s' % (trainDir, fileName))\n example.klass = 'neg'\n example.fileName = fileName\n if fileName[2] == str(fold):\n split.test.append(example)\n else:\n split.train.append(example)\n splits.append(split)\n elif len(args) == 2:\n split = self.TrainSplit()\n testDir = args[1]\n print '[INFO]\\tTraining on data set:\\t%s testing on data set:\\t%s' % (trainDir, testDir)\n posTrainFileNames = os.listdir('%s/pos/' % trainDir)\n negTrainFileNames = os.listdir('%s/neg/' % trainDir)\n for fileName in posTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/pos/%s' % (trainDir, fileName))\n example.klass = 'pos'\n example.fileName = fileName\n split.train.append(example)\n for fileName in negTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/neg/%s' % (trainDir, fileName))\n example.klass = 'neg'\n example.fileName = fileName\n split.train.append(example)\n\n posTestFileNames = os.listdir('%s/pos/' % testDir)\n negTestFileNames = os.listdir('%s/neg/' % testDir)\n for fileName in posTestFileNames:\n example = self.Example()\n example.words = self.readFile('%s/pos/%s' % (testDir, fileName))\n example.klass = 'pos'\n split.test.append(example)\n for fileName in negTestFileNames:\n example = self.Example()\n example.words = self.readFile('%s/neg/%s' % (testDir, fileName))\n example.klass = 'neg'\n split.test.append(example)\n splits.append(split)\n return splits",
"def crossValidationSplits(self, trainDir):\n splits = []\n posTrainFileNames = os.listdir('%s/pos/' % trainDir)\n negTrainFileNames = os.listdir('%s/neg/' % trainDir)\n #for fileName in trainFileNames:\n for fold in range(0, self.numFolds):\n split = self.TrainSplit()\n for fileName in posTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/pos/%s' % (trainDir, fileName))\n example.klass = 'pos'\n if fileName[2] == str(fold):\n split.test.append(example)\n else:\n split.train.append(example)\n for fileName in negTrainFileNames:\n example = self.Example()\n example.words = self.readFile('%s/neg/%s' % (trainDir, fileName))\n example.klass = 'neg'\n if fileName[2] == str(fold):\n split.test.append(example)\n else:\n split.train.append(example)\n splits.append(split)\n return splits"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the CWE references, their links, their listed abstraction and structure, and their SFP1 and SFP2 hierarchies. Fills the lists initialized beforehand (see the __init__ function above)
|
def cwe_parser(self):
cwes = self.soup.findAll('a', text=re.compile("CWE-"))
for cwe in cwes:
id = cwe.getText()
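        # Strip any leading text so that only the bare CWE-XXXX identifier remains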
id = re.sub("[A-Za-z0-9;,:\s\-\(\)\"\']* CWE-","CWE-",id)
        # The link may be broken
try:
pageCWE = opener.open(cwe.get('href'))
except(urllib.error.URLError) as e:
continue
soupCWE = BeautifulSoup(pageCWE, 'html.parser')
self.cwe += [id]
        # If the id refers to a CWE class (the page has no "CWE-" heading):
        if soupCWE.find('h2', text=re.compile('CWE-')) is None:
cwe_link = cwe.get('href')
self.cwe_link += [cwe_link]
self.abstraction += ["None"]
self.structure += ["class"]
self.sfp2 += ["None"]
self.sfp1 += ["None"]
        # If the id refers to a regular CWE entry:
else:
cwe_link = cwe.get('href')
self.cwe_link += [cwe_link]
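            # The block after the "Weakness" header holds "Abstraction: ... Structure: ..."; split it into both fields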
abstruct=soupCWE.find("div",text=re.compile('Weakness')).next_sibling.getText()
abs = re.sub("Abstraction: ","",abstruct)
abs = re.sub("Structure: [A-Za-z:\s]*","",abs)
abs = re.sub(" ", "", abs)
struct = re.sub("[A-Za-z:\s]*: ","",abstruct)
self.abstraction += [abs]
self.structure += [struct]
sfp2 = soupCWE.find("a",text=re.compile('SFP Secondary'))
            # SFP2 may be absent:
            if sfp2 is not None:
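                # Rebuild an absolute URL for the SFP2 category page, then fetch it to read the SFP Primary (SFP1) parent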
addr = re.sub("/data[A-Za-z0-9/\-:.\s]*",sfp2.get('href'),cwe.get('href'))
pageSFP1 = opener.open(addr)
soupSFP1 = BeautifulSoup(pageSFP1, 'html.parser')
sfp2 = sfp2.getText()
sfp2 = re.sub("[A-Z-a-z\s]*: ","",sfp2)
self.sfp2 += [sfp2]
sfp1 = soupSFP1.find('a', text=re.compile('SFP Primary'))
sfp1 = sfp1.getText()
sfp1 = re.sub('[A-Za-z\s]*: ','',sfp1)
self.sfp1 += [sfp1]
else:
self.sfp2 += ["None"]
self.sfp1 += ["None"]
|
[
"def __init__(self):\n self._declarations = self.get_declarations()",
"def __init__(self):\n # just the list of class/construct types\n self.lut = {}\n self.lut[\"struct\"] = structure\n self.lut[\"typedef\"] = typedef\n self.lut[\"define\"] = define\n self.lut[\"enum\"] = enum\n self.lut[\"enumEntry\"] = enumEntry\n self.lut[\"ifdef\"] = ifdef\n self.lut[\"ifndef\"] = ifndef\n self.lut[\"hashIf\"] = hashIf\n self.lut[\"hashElse\"] = hashElse\n self.lut[\"hashElif\"] = hashElif\n self.lut[\"endif\"] = endif\n self.lut[\"banner\"] = banner\n self.lut[\"general\"] = general\n self.lut[\"listDefine\"] = listDefine\n self.lut[\"listEntry\"] = listEntry\n self.lut[\"listNumEls\"] = listNumEls\n self.lut[\"union\"] = union\n\n # and the dictionary of all symbols we declare\n self.symbols = {}",
"def __init__(self, vcffiles):\n self.vcffilenames = vcffiles\n self.snpsites = {}\n self.snp_positions = {}",
"def __init__(self):\n self.data = []\n self.headers = []\n self.lamps_data = []\n self.lamps_headers = []",
"def __init__(self):\n self.conceptsFromText = [] #list of concepts extracted from text\n self.conceptsFromUrl = [] #list of concepts extracted from Url",
"def __init__(self):\n self.target = [[\"ADP\", \"case\"],\n [\"ADP\",\"mark\"],\n [\"ADP\", \"dep\"],\n [\"SCONJ\",\"mark\"],\n [\"ADV\",\"mark\"],\n [\"PART\",\"case\"],\n [\"PART\",\"mark\"]]",
"def __init__(self):\n FRegulatoryInfoUpgrade.__init__(self)",
"def _read_init(self):",
"def __init__(self, lfp_data):\n self.lfp_data = lfp_data",
"def __init__(self, seq, glycan_compo, glycan_sites, mod_list):\n # Filter the glycan composition. Get the max number of HexNAc\n self.seq = Sequence(seq) # Sequence object\n self.glycan_composition = glycan_compo\n self.candidate_sites = glycan_sites\n self.modifications = mod_list",
"def __init__(self):\n self.data = None\n self.decisionTree = {}\n self.enClass = 0\n self.nlClass = 0\n self.listAttributes = [\"Contains-het\", \"Contains-de\", \"Contains-een\", \"Contains-en/aan\", \"Contains-ij\", \"wordLength14\",\n \"Contains-a/an\", \"Contains-are/were\", \"Contains-and\", \"Contains-on/to\", \"Contains-the\"]\n self.infoGain = []\n self.entropy = 0",
"def __init__(self):\n self.wav_list = [] # 1维列表, 存放音频文件路径\n self.pny_list = [] # 2维列表, 存放拼音\n self.han_list = [] # 1维列表, 存放一条条短语\n self.am_vocab = [] # 1维列表, 所有数据构成的不重复的拼音\n self.pny_vocab = [] # 1维列表, 所有数据构成的不重复的拼音\n self.han_vocab = [] # 1维列表, 所有数据构成的不重复的汉字\n self.source_init() # 初始化所有实例变量",
"def __init__(self, *args):\n this = _libsbml.new_ListOfInitialAssignments(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\r\n Lexicon.__init__(self)\r\n\r\n self.id = \"SgE\"\r\n\r\n # Put all the data-heavy bits further down for readability\r\n self.rules = self.get_rules()\r\n self.lexicon = self.get_lexicon()",
"def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None):\n\n if hierarchyFile==None:\n if useICD10UKBB:\n hierarchyFile=ICD_PATH+'icd10_ukbb.txt'\n else:\n hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt'\n if chapterFile==None:\n chapterFile = ICD_PATH+'ICD10_Chapters.txt'\n\n #quick reference, to avoid having to search full tree for codes.\n\n\n #Full list of linked codes\n self.UsableICDCodes=[]\n self.usableCodeToIndexMap={}\n self.setOfUsableCodes=set()\n\n self.UnusableICDCodes=[]\n self.unusableCodeToIndexMap={}\n self.setOfUnusableCodes=set()\n #first load the chapters\n chapter_breakpoints=[]\n chapter_list=[]\n currentUsableCodeCount = 0\n currentUnusableCodeCount = 0\n with open(chapterFile,'rb') as f:\n f.readline()\n for line in f:\n line=self._convertToUnicode(line)\n line=line.strip('\\n').split('\\t')\n self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)]\n start,stop = line[1].split('-')\n chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])]\n chapter_list+=['Chapter_'+line[0]]\n self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount\n self.setOfUnusableCodes.add('Chapter_'+line[0])\n currentUnusableCodeCount+=1\n #now load hierarchy file\n\n with open(hierarchyFile,'rb') as f:\n currentParentList = []\n for line in f:\n line=self._convertToUnicode(line)\n parsedLine=[]\n parsedLine+=[line[0:6].strip()]\n parsedLine+=[line[6:14].strip()]\n parsedLine+=[line[14:16].strip()]\n parsedLine+=[line[77:].strip()]\n\n\n currentParentList = self._findParentInList(parsedLine[1],currentParentList)\n\n if len(currentParentList) == 0:\n intVal = self._convertCodeToIntVal(parsedLine[1][0:3])\n try:\n icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <= x[1])]\n except StopIteration:\n raise ValueError('{}'.format(parsedLine[1]))\n currentParentList +=[icd_chapter]\n\n if int(parsedLine[2])==1:\n self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))]\n self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount\n self.setOfUsableCodes.add(parsedLine[1])\n currentUsableCodeCount+=1\n else:\n self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))]\n self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount\n self.setOfUnusableCodes.add(parsedLine[1])\n currentUnusableCodeCount+=1\n currentParentList+=[parsedLine[1]]",
"def __init__(self, *args):\n this = _libsbml.new_ListOfLocalParameters(*args)\n try: self.this.append(this)\n except: self.this = this",
"def _parseSFSR(self, files):\n\t\tallAttr = []\n\t\tr = Register()\n\t\tfor f in files:\n\t\t\tsoup = Util.loadSoup(f)\n\t\t\tr.rubrik = Util.elementText(soup.body('table')[2]('tr')[1]('td')[0])\n\t\t\tchanges = soup.body('table')[3:-2]\n\n\t\t\tfor table in changes:\n\t\t\t\tkwargs = {'id': 'undefined',\n\t\t\t\t\t\t 'uri': u'http://rinfo.lagrummet.se/publ/sfs/undefined'}\n\t\t\t\trp = Registerpost(**kwargs) #TODO: Is this needed?\n\t\t\t\t\n\t\t\t\tfor row in table('tr'):\n\t\t\t\t\tkey = Util.elementText(row('td')[0])\n\t\t\t\t\tif key.endswith(':'):\n\t\t\t\t\t\tkey = key [:-1]\n\t\t\t\t\tif key == '': \n\t\t\t\t\t\tcontinue\n\t\t\t\t\tval = Util.elementText(row('td')[1]).replace(u'\\xa0',' ')\n\t\t\t\t\tif val != '':\n\t\t\t\t\t\tif key == u'SFS-nummer':\n\t\t\t\t\t\t\tif val.startswith('N'):\n\t\t\t\t\t\t\t\traise NotSFS()\n\t\t\t\t\t\t\tif len(r) == 0:\n\t\t\t\t\t\t\t\tstartNode = self.lagrumParser.parse(val)[0]\n\t\t\t\t\t\t\t\tif hasattr(startNode, 'uri'):\n\t\t\t\t\t\t\t\t\tdocUri = startNode.uri\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t\t#TODO: Log warning, can't read the SFS nr\n\t\t\t\t\t\t\trp[key] = UnicodeSubject(val, predicate=self.labels[key])\n\t\t\t\t\t\t\trp.id = u'L' + val #Starts with L cause NCNames has to start with a letter\n\t\t\t\t\t\t\tstartNode = self.lagrumParser.parse(val)[0]\n\n\t\t\t\t\t\t\tif hasattr(startNode, 'uri'):\n\t\t\t\t\t\t\t\trp.uri = startNode.uri\n\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#TODO: Log warning, can't read the SFS nr\n\n\t\t\t\t\t\telif key == u'Ansvarig myndighet':\n\t\t\t\t\t\t\ttry: \n\t\t\t\t\t\t\t\tauthRec = self.findAuthRec(val)\n\t\t\t\t\t\t\t\trp[key] = LinkSubject(val, uri=unicode(authRec),\n\t\t\t\t\t\t\t\t\t\t\t\t\t predicate=self.labels[key])\n\t\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\t\trp[key] = val\n\t\t\t\t\t\telif key == u'Rubrik':\n\t\t\t\t\t\t\trp[key] = UnicodeSubject(val, predicate=self.labels[key])\n\t\t\t\t\t\telif key == u'Observera':\n\t\t\t\t\t\t\tif u'Författningen är upphävd/skall upphävas: ' in val:\n\t\t\t\t\t\t\t\tif datetime.strptime(val[41:51], '%Y-%m-%d') < datetime.today():\n\t\t\t\t\t\t\t\t\traise RevokedDoc()\n\t\t\t\t\t\t\trp[key] = UnicodeSubject(val, predicate=self.labels[key])\n\t\t\t\t\t\telif key == u'Ikraft':\n\t\t\t\t\t\t\trp[key] = DateSubject(datetime.strptime(val[:10], '%Y-%m-%d'), predicate=self.labels[key])\n\t\t\t\t\t\telif key == u'Omfattning':\n\t\t\t\t\t\t\trp[key] = []\n\t\t\t\t\t\t\tfor changecat in val.split(u'; '):\n\t\t\t\t\t\t\t\tif (changecat.startswith(u'ändr.') or \n\t\t\t\t\t\t\t\t\tchangecat.startswith(u'ändr ') or \n\t\t\t\t\t\t\t\t\tchangecat.startswith(u'ändring ')):\n\t\t\t\t\t\t\t\t\tpred = RINFO['ersatter']\n\t\t\t\t\t\t\t\telif (changecat.startswith(u'upph.') or \n\t\t\t\t\t\t\t\t\t changecat.startswith(u'utgår')):\n\t\t\t\t\t\t\t\t\tpred = RINFO['upphaver']\n\t\t\t\t\t\t\t\telif (changecat.startswith(u'ny') or \n\t\t\t\t\t\t\t\t\t changecat.startswith(u'ikrafttr.') or \n\t\t\t\t\t\t\t\t\t changecat.startswith(u'ikrafftr.') or \n\t\t\t\t\t\t\t\t\t changecat.startswith(u'ikraftr.') or \n\t\t\t\t\t\t\t\t\t changecat.startswith(u'ikraftträd.') or \n\t\t\t\t\t\t\t\t\t changecat.startswith(u'tillägg')):\n\t\t\t\t\t\t\t\t\tpred = RINFO['inforsI']\n\t\t\t\t\t\t\t\telif (changecat.startswith(u'nuvarande') or \n\t\t\t\t\t\t\t\t\t changecat == 'begr. giltighet' or \n\t\t\t\t\t\t\t\t\t changecat == 'Omtryck' or \n\t\t\t\t\t\t\t\t\t changecat == 'omtryck' or \n\t\t\t\t\t\t\t\t\t changecat == 'forts.giltighet' or \n\t\t\t\t\t\t\t\t\t changecat == 'forts. 
giltighet' or \n\t\t\t\t\t\t\t\t\t changecat == 'forts. giltighet av vissa best.'):\n\t\t\t\t\t\t\t\t\tpred = None\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tpred = None\n\n\t\t\t\t\t\t\t\trp[key].extend(self.lagrumParser.parse(changecat, docUri, pred))\n\t\t\t\t\t\t\t\trp[key].append(u';')\n\t\t\t\t\t\t\trp[key] = rp[key][:-1]\n\t\t\t\t\t\telif key == u'F\\xf6rarbeten':\n\t\t\t\t\t\t\trp[key] = self.forarbeteParser.parse(val, docUri, RINFO['forarbete'])\t\t\t\t\t\t\t\n\t\t\t\t\t\telif key == u'CELEX-nr':\n\t\t\t\t\t\t\trp[key] = self.forarbeteParser.parse(val, docUri, RINFO['forarbete'])\t\t\t\t\t\t\t\n\t\t\t\t\t\telif key == u'Tidsbegränsad':\n\t\t\t\t\t\t\trp[key] = DateSubject(datetime.strptime(val[:10], '%Y-%m-%d'), predicate=self.labels[key])\n\t\t\t\t\t\t\tif rp[key] < datetime.today():\n\t\t\t\t\t\t\t\traise RevokedDoc()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#TODO: Log Warning unknown key\n\t\t\t\t\t\t\tpass\n\t\t\t\tif rp:\n\t\t\t\t\tif config.debug:\n\t\t\t\t\t\tprint \"Registerpost: \"\n\t\t\t\t\t\tprint rp\n\t\t\t\t\tr.append(rp)\n\t\t\t\t\t\n\n\t\treturn r",
"def _fp_setup2(self):\n # TODO: right now it's hard to implement this required stage",
"def __init__(self):\n self.hmm = \"\"\n self.seqfile = \"\"\n self.flat_hsps = []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Default reducer for distinctions. Expects all distinctions to follow
|
def __reduce__(self):
return instanceReducer(self)
|
[
"def evaluation_reducer(self) -> Union[Reducer, Dict[str, Reducer]]:\n return Reducer.AVG",
"def _reduce(self, action):\n assert len(self.stack) >= 2, \"ERROR: Cannot reduce with stack length less than 2\"\n \n # STUDENT\n # hint: use list.pop()\n # END STUDENT\n rightarc = self.stack.pop()\n leftarc = self.stack.pop()\n head = rightarc if action == Actions.REDUCE_L else leftarc\n mod = leftarc if action == Actions.REDUCE_L else rightarc\n self.stack.append( StackEntry(head.headword, head.headword_pos, self.combiner(head.embedding,mod.embedding)) )\n return DepGraphEdge((head.headword, head.headword_pos),(mod.headword, mod.headword_pos))",
"def show(self, reducer: Reducer):",
"def __reduce__(self):\n raise NotImplementedError('implement __reduce__')",
"def get_aligner_reducer(metric_kind, metric_val_type):\n if metric_kind == config.GAUGE:\n if metric_val_type == config.BOOL:\n crossSeriesReducer = config.REDUCE_MEAN\n perSeriesAligner = config.ALIGN_FRACTION_TRUE\n elif metric_val_type in [config.INT64, config.DOUBLE, config.DISTRIBUTION]:\n crossSeriesReducer = config.REDUCE_SUM\n perSeriesAligner = config.ALIGN_SUM\n elif metric_val_type == config.STRING:\n crossSeriesReducer = config.REDUCE_COUNT\n perSeriesAligner = config.ALIGN_NONE\n else:\n logging.debug(\n \"No match for GAUGE {},{}\".format(metric_kind, metric_val_type)\n )\n elif metric_kind == config.DELTA:\n if metric_val_type in [config.INT64, config.DOUBLE, config.DISTRIBUTION]:\n crossSeriesReducer = config.REDUCE_SUM\n perSeriesAligner = config.ALIGN_SUM\n else:\n logging.debug(\n \"No match for DELTA {},{}\".format(metric_kind, metric_val_type)\n )\n elif metric_kind == config.CUMULATIVE:\n if metric_val_type in [config.INT64, config.DOUBLE, config.DISTRIBUTION]:\n crossSeriesReducer = config.REDUCE_SUM\n perSeriesAligner = config.ALIGN_DELTA\n else:\n logging.debug(\n \"No match for CUMULATIVE {},{}\".format(metric_kind, metric_val_type)\n )\n else:\n logging.debug(\"No match for {},{}\".format(metric_kind, metric_val_type))\n\n return crossSeriesReducer, perSeriesAligner",
"def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation\n return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)",
"def cross_series_reducer(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cross_series_reducer\")",
"def cross_series_reducer(self) -> Optional[pulumi.Input['AggregationCrossSeriesReducer']]:\n return pulumi.get(self, \"cross_series_reducer\")",
"def _reducer(self, input, output, input_keys, worker_number, chunksize):\n print(\"Thread {0} is working\".format(worker_number))\n id_start = worker_number * chunksize\n id_end = min((worker_number + 1) * chunksize, len(input_keys))\n print(\"It processes keys: \", input_keys[id_start: id_end])\n for key in input_keys[id_start:id_end]:\n if key in output:\n self.the_reducer(key, input[key], output)\n else:\n output[key] = 0\n self.the_reducer(key, input[key], output)",
"def all_reduce_worker(self, input, output):\n pass",
"def set_gate_reducer(self, reducer: ReductionRule):\n self.gate_reducer_ = reducer",
"def toposorted_actions(self) -> Iterable[Action]:\n # Here we execute two \"nanopasses\" (a term borrowed from compiler implementation)\n #\n # 1. Traverse a values-and-actions graph, reducing it to a dependency graph containing actions\n #\n # 2. Perform a toposort over actions (using Kahn's algorithm https://en.wikipedia.org/wiki/Topological_sorting)\n #\n # TODO: switch to graphlib from standard library\n #\n # TODO: Consider using Tarjan's strongly connected components algorithm\n # Rationale: Tarjan's SCC would find loops and produce a helpful diagnostic\n\n # 1. Dependency graph representation optimized for toposort\n o: dict[Action, set[Action]] = {} # for actions: action -> set of outgoing dependency edges\n i: dict[Action, set[Action]] = {} # for actions: action -> set of incoming dependency edges\n\n # set of nodes without incoming edges\n s: Set[Action] = set()\n\n # 1. Transform execution plan into dependency graph\n for action in self.actions:\n # if action does not depend on any other action, add it to set s\n if all(inp.producer() is None for inp in action.inputs()):\n s.add(action)\n # add outgoing edges to graph, if any\n for output in action.outputs():\n for depending_action in output.consumers():\n # add an edge action -> depending_action to the graph\n if action not in o:\n o[action] = set()\n if depending_action not in i:\n i[depending_action] = set()\n o[action].add(depending_action)\n i[depending_action].add(action)\n\n # 2. Now run Kahn's algorithm (could be separated from previous to improve abstraction)\n # resulting list\n l: list[Action] = []\n\n while len(s) > 0:\n n = s.pop()\n l.append(n)\n if n in o:\n o_n = o[n]\n del o[n]\n else:\n o_n = set()\n while len(o_n) > 0:\n # remove edge from the graph\n m = o_n.pop()\n i[m].remove(n)\n if len(i[m]) == 0:\n del i[m]\n s.add(m)\n\n if len(o) != 0 or len(i) != 0:\n for (node, edges) in o.items():\n print(\"Source: \" + str(node))\n for e in edges:\n print(\" Edge: \" + str(e))\n raise Exception(\"Dependency graph has at least one cycle\")\n else:\n return l",
"def _call_reduce_action(self, context, subresults):\n debug = self.debug\n result = None\n bt_result = None\n production = context.production\n\n if self.build_tree:\n # call action for building tree node if enabled.\n if debug:\n h_print(\"Building non-terminal node\",\n \"'{}'.\".format(production.symbol.name), level=2)\n\n bt_result = NodeNonTerm(context, children=subresults,\n production=production)\n context.node = bt_result\n if not self.call_actions_during_tree_build:\n return bt_result\n\n sem_action = production.symbol.action\n if sem_action:\n assignments = production.assignments\n if assignments:\n assgn_results = {}\n for a in assignments.values():\n if a.op == '=':\n assgn_results[a.name] = subresults[a.index]\n else:\n assgn_results[a.name] = bool(subresults[a.index])\n\n if type(sem_action) is list:\n if assignments:\n result = sem_action[production.prod_symbol_id](\n context, subresults, **assgn_results)\n else:\n result = sem_action[production.prod_symbol_id](context,\n subresults)\n else:\n if assignments:\n result = sem_action(context, subresults, **assgn_results)\n else:\n result = sem_action(context, subresults)\n\n else:\n if debug:\n h_print(\"No action defined\",\n \" for '{}'.\".format(production.symbol.name), level=1)\n if len(subresults) == 1:\n if debug:\n h_print(\"Unpacking a single subresult.\", level=1)\n result = subresults[0]\n else:\n if debug:\n h_print(\"Result is a list of subresults.\", level=1)\n result = subresults\n\n if debug:\n h_print(\"Action result =\",\n \"type:{} value:{}\"\n .format(type(result), repr(result)), level=1)\n\n # If build_tree is set to True, discard the result of the semantic\n # action, and return the result of treebuild_reduce_action.\n return bt_result if bt_result is not None else result",
"def reducer_override(self, obj):\n if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch\n return (\n _create_parametrized_type_hint,\n parametrized_type_hint_getinitargs(obj)\n )\n t = type(obj)\n try:\n is_anyclass = issubclass(t, type)\n except TypeError: # t is not a class (old Boost; see SF #502085)\n is_anyclass = False\n\n if is_anyclass:\n return _class_reduce(obj)\n elif isinstance(obj, types.FunctionType):\n return self._function_reduce(obj)\n else:\n # fallback to save_global, including the Pickler's\n # dispatch_table\n return NotImplemented",
"def _reduce_distances(self, threshold):\n reduced = self.orig_dists.copy()\n reduced[reduced <= threshold] = 0\n # Remove ignored from all consideration\n ignrd_indices = [self.index[name] for name in self.ignored]\n if ignrd_indices:\n reduced[:,ignrd_indices] = np.inf\n reduced[ignrd_indices,:] = np.inf\n # Check if the given parameters are feasible\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n ca_indices = chsn_indices | avail_indices\n unassigned_indices = np.array(list(self._not_ignored_inds - ca_indices))\n if len(unassigned_indices) == 0:\n unassigned_orphans = unassigned_indices\n else:\n ca_indices = list(ca_indices)\n avail_in_range = np.count_nonzero(reduced[np.ix_(unassigned_indices,ca_indices)] == 0, axis=1)\n unassigned_orphans = unassigned_indices[avail_in_range == 0]\n return reduced, unassigned_orphans",
"def cross_series_reducer(self) -> str:\n return pulumi.get(self, \"cross_series_reducer\")",
"def allreduce_hook(state: AllReduceState, grad: torch.Tensor):\n if state.gradient_predivide_factor > 1:\n grad.div_(state.gradient_predivide_factor)\n dist.all_reduce(grad, group=state.process_group)\n if state.gradient_postdivide_factor > 1:\n grad.div_(state.gradient_postdivide_factor)",
"def _get_reduction(self):\n if (not self._allow_sum_over_batch_size and\n distribute_lib.has_strategy() and\n (self.reduction == losses_utils.ReductionV2.AUTO or\n self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):\n raise ValueError(\n 'Please use `tf.keras.losses.Reduction.SUM` or '\n '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '\n 'used with `tf.distribute.Strategy` outside of the built-in training '\n 'loops. You can implement '\n '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '\n 'size like:\\n```\\nwith strategy.scope():\\n'\n ' loss_obj = tf.keras.losses.CategoricalCrossentropy('\n 'reduction=tf.keras.losses.Reduction.NONE)\\n....\\n'\n ' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '\n '(1. / global_batch_size)\\n```\\nPlease see '\n 'https://www.tensorflow.org/tutorials/distribute/custom_training'\n ' for more details.')\n\n if self.reduction == losses_utils.ReductionV2.AUTO:\n return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n return self.reduction",
"def _reduce_operators(operators: OperatorDict, dtype: DType) -> OperatorDict:\n\n red_ops = zero_defaultdict(dtype)\n terms = list(operators.keys())\n weights = list(operators.values())\n for term, weight in zip(*_normal_ordering(terms, weights)):\n red_ops[term] += weight\n red_ops = _remove_dict_zeros(dict(red_ops))\n return red_ops"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
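The instanceReducer helper referenced in the row above is not shown anywhere in the data; the following is a minimal sketch of what such a generic pickle reducer could look like, assuming distinctions keep all of their state in __dict__. Both helper names are hypothetical, not taken from the dataset.

def instanceReducer(obj):
    # pickle's reduce protocol: return a (callable, args) pair that rebuilds obj
    return (_rebuildInstance, (type(obj), dict(obj.__dict__)))

def _rebuildInstance(cls, state):
    inst = cls.__new__(cls)        # skip __init__; the captured state is restored directly
    inst.__dict__.update(state)
    return inst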
For conjugate distinctions this should be overridden to return the base distinctions used. For non-conjugate distinctions it automatically returns an empty list.
|
def getBaseDistinctions(self):
return []
|
[
"def get_conjugate_bases_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is conjugate base of\")]\n else:\n return []",
"def conjugate(self):\n pass",
"def conjugate(self):\r\n return self.__class__(self._real, -self._imag)",
"def conjugate(self) -> 'GaussInt':\n return self.ring(self.x, -self.y)",
"def conjugate(self,g):\r\n return g*self*g**-1",
"def conjugate(self):\n # NB unary '+' makes a new object with the same uncertainty and value\n return UncertainComplex(+self.real,-self.imag)",
"def conjugate(self):\n return Complex(self.real, self.imag*-1)",
"def base(self):\n if self._base == []:\n self.schreier_sims()\n return self._base",
"def conjugate(self, x):\n\n a = self.array_form\n b = x.array_form\n n = len(a)\n if len(b) != n:\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n invb = [None]*n\n for i in xrange(n):\n invb[b[i]] = i\n return _new_from_array_form([invb[a[i]] for i in b])",
"def conjugate(self):\n return self.__class__(scalar=self.scalar, vector= -self.vector)",
"def conjugate(self) -> 'MultiVector':\n\n return (~self).gradeInvol()",
"def conjugate(self):\n parts = list(self)\n # Destroy the diagram column by column, adding each column\n # to the new partition\n eat_diagram = [\n [x - k for x in parts if x - k > 0] for k in range(parts[0])]\n conj_part = [len(y) for y in eat_diagram]\n B = BosonicPartitions()\n return B(conj_part)",
"def get_conjugated_nodes(self):\n sets = []\n self.get_backbone()\n m = self.mbb\n for bi in m.GetBonds():\n #print ' -- idx = ', bi.GetIdx()\n n = len(sets)\n iconj = bi.GetIsConjugated()\n ins = ( bt2bo[ bi.GetBondType() ] > 1 ) # is non-single bond?\n if iconj or ins:\n ia1, ia2 = bi.GetBeginAtomIdx(), bi.GetEndAtomIdx()\n set_i = set([ia1, ia2])\n if n == 0:\n sets.append( set_i )\n else:\n for j, set_j in enumerate(sets):\n if set_i.intersection( set_j ) > set([]):\n sets[j].update( set_i )\n else:\n if set_i not in sets: sets.append( set_i )\n #print '-- sets = ', sets\n sets_u = cim.merge_sets(sets)\n return sets_u",
"def conjugate(self):\n return ComplexNumber(self.real, - self.imag)",
"def conjugate(self):\n return ComplexNumber(self.real, -1*self.imag)",
"def concentration(self):\n return [node.concentration for node in self]",
"def conjugacy_class(self):\r\n return Set([g*self*g**-1 for g in self.group])",
"def omega(self): \n return [a.omega for a in self]",
"def _gates(self):\n gates = []\n if isinstance(self.circuit, Mul):\n for g in reversed(self.circuit.args):\n if isinstance(g, Gate):\n gates.append(g)\n elif isinstance(self.circuit, Gate):\n gates.append(self.circuit)\n return gates"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
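A hedged sketch of how a conjugate distinction might override getBaseDistinctions; the AndDistinction class and its two-part constructor are assumptions for illustration, not taken from the dataset.

class AndDistinction:   # hypothetical conjugate distinction; would subclass Distinction in the real code
    def __init__(self, left, right):
        self.left = left            # the two base distinctions being combined
        self.right = right

    def getBaseDistinctions(self):
        # conjugate distinctions report the distinctions they are built from
        return [self.left, self.right]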
Generates a random distinction of this type that is valid for the schema config.schema and for the given graphs. This function must take graphs as its first argument, and if it is a conjugate distinction it must then take the base distinctions as separate args, not a tuple.
|
def getRandomDistinction(config, graphs, *base_distinctions):
raise AbstractMethodException(Distinction)
|
[
"def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(axis=-1)\n\n return graph_from_edges(variable_names, dist_func, edges)",
"def generate_full(variable_names, dist_func, **kwargs):\n return generate_random_graph(variable_names, dist_func, edge_prob=1.0)",
"def simulate_random_dag(\n d: int, degree: float, graph_type: str, w_range: tuple = (0.5, 2.0)\n) -> nx.DiGraph:\n if graph_type == \"erdos-renyi\":\n prob = float(degree) / (d - 1)\n B = np.tril((np.random.rand(d, d) < prob).astype(float), k=-1)\n elif graph_type == \"barabasi-albert\":\n m = int(round(degree / 2))\n B = np.zeros([d, d])\n bag = [0]\n for ii in range(1, d):\n dest = np.random.choice(bag, size=m)\n for jj in dest:\n B[ii, jj] = 1\n bag.append(ii)\n bag.extend(dest)\n elif graph_type == \"full\": # ignore degree, only for experimental use\n B = np.tril(np.ones([d, d]), k=-1)\n else:\n raise ValueError(\"unknown graph type\")\n # random permutation\n P = np.random.permutation(np.eye(d, d)) # permutes first axis only\n B_perm = P.T.dot(B).dot(P)\n U = np.random.uniform(low=w_range[0], high=w_range[1], size=[d, d])\n U[np.random.rand(d, d) < 0.5] *= -1\n W = (B_perm != 0).astype(float) * U\n G = nx.DiGraph(W)\n return G",
"def random_contraction(graph_to_contract):\n gtc = graph_to_contract.copy()\n while len(gtc) > 2:\n rk1, rk2 = choose_random_edge(gtc)\n # change all of the instance of node 2 to node 1\n for l in gtc:\n gtc[l] = [rk1 if x == rk2 else x for x in gtc[l]]\n # all edges from node 2 are now edges from node 1\n gtc[rk1].extend(gtc[rk2])\n # remove self loops\n while rk1 in gtc[rk1]:\n gtc[rk1].remove(rk1)\n # remove node 2\n del gtc[rk2]\n # both should be the same length, so either will work.\n return len(gtc[random.choice(list(gtc.keys()))])",
"def simulate_random_dag(d: int,\n degree: float,\n graph_type: str,\n w_range: tuple = (0.5, 2.0)) -> nx.DiGraph:\n if graph_type == 'erdos-renyi':\n prob = float(degree) / (d - 1)\n B = np.tril((np.random.rand(d, d) < prob).astype(float), k=-1)\n elif graph_type == 'barabasi-albert':\n m = int(round(degree / 2))\n B = np.zeros([d, d])\n bag = [0]\n for ii in range(1, d):\n dest = np.random.choice(bag, size=m)\n for jj in dest:\n B[ii, jj] = 1\n bag.append(ii)\n bag.extend(dest)\n elif graph_type == 'full': # ignore degree, only for experimental use\n B = np.tril(np.ones([d, d]), k=-1)\n else:\n raise ValueError('unknown graph type')\n # random permutation\n P = np.random.permutation(np.eye(d, d)) # permutes first axis only\n B_perm = P.T.dot(B).dot(P)\n U = 1*np.random.uniform(low=w_range[0], high=w_range[1], size=[d, d])\n U[np.random.rand(d, d) < 0.5] *= -1\n W = (B_perm != 0).astype(float) * U\n G = nx.DiGraph(W)\n return G",
"def test_unique_graph(self):\n g0_graph = tf.Graph()\n with g0_graph.as_default():\n tf.constant(1, name=\"a\")\n tf.constant(2, name=\"b\")\n g1_graph = tf.Graph()\n with g1_graph.as_default():\n tf.constant(1, name=\"a\")\n tf.constant(2, name=\"b\")\n\n g0 = gde.Graph(g0_graph.as_graph_def())\n g1 = gde.Graph(g1_graph.as_graph_def())\n a0, b0, a1, b1 = (g0[\"a\"], g0[\"b\"], g1[\"a\"], g1[\"b\"])\n\n print(\"g0['a'] returns {} (type {})\".format(g0['a'], type(g0['a'])))\n\n # Same graph, should be fine.\n self.assertIsNone(gde.util.check_graphs(a0, b0))\n # Two different graphs, should assert.\n with self.assertRaises(ValueError):\n gde.util.check_graphs(a0, b0, a1, b1)\n # a0 and b0 belongs to the same graph, should be fine.\n self.assertEqual(gde.util.get_unique_graph([a0, b0]), g0)\n # Different graph, should raise an error.\n with self.assertRaises(ValueError):\n gde.util.get_unique_graph([a0, b0, a1, b1])",
"def gnp_random_graph(self, seed=None, directed=True):\r\n if directed:\r\n edges = itertools.permutations(range(self.size), 2)\r\n G = nx.DiGraph()\r\n else:\r\n edges = itertools.combinations(range(self.size), 2)\r\n G = nx.Graph()\r\n nodes = list(range(self.size)) # nodes are labeled 0 to n-1\r\n for i in nodes:\r\n G.add_node(i, belief_strength=random.randint(-100, 100), uncertainty=random.uniform(0, 1), probability=random.uniform(0, 1))\r\n if self.probability <= 0:\r\n return G\r\n if self.probability >= 1:\r\n return complete_graph(self.size, create_using=G)\r\n\r\n if seed is not None:\r\n random.seed(seed)\r\n\r\n for e in edges:\r\n if random.random() < self.probability:\r\n G.add_edge(*e, weight= random.uniform(-1,1))\r\n return G",
"def create_synthetic_ER_graphs(N1,N2,p1,p2,c1,c2,mu):\n\n #### Generate two ER random graphs\n G = nx.gnp_random_graph(n=N1,p=p1)\n F = nx.gnp_random_graph(n=N2,p=p1)\n \n #### Nodes of F and G are both named 1... N initally, so rename nodes of F in order to \n #### compose the graphs to a single large graph H\n mapping = {n:n+N1 for n in F.nodes()}\n F = nx.relabel_nodes(F,mapping=mapping)\n H = nx.compose(F,G)\n \n #### Now randomly choose int(c*N) nodes from both graphs that will connect to the other graph\n connectors_G = list(G.nodes())[:int(N1*c1)]\n connectors_F = list(F.nodes())[:int(N2*c2)]\n maximum_number_of_connections = len(connectors_G)*len(connectors_F)\n \n #### save connectors in node attributes \n nc = {n:0.0 for n in H.nodes()}\n for n in connectors_G:\n nc[n] = 1.0\n for n in connectors_F:\n nc[n] = 1.0\n nx.set_node_attributes(H,nc,'connectors')\n \n #### Save information about nodes belonging to subgraph G\n nodes_G = {n:0.0 for n in H.nodes()}\n for n in G.nodes():\n nodes_G[n] = 1.0\n nx.set_node_attributes(H,nodes_G,'subgraph_nodes')\n \n #### Set up variables to count the share of connections, in order to be able to compare it to mu\n current_number_of_connections = 0\n fraction_of_connections = 0\n \n ### now randomly add edges until required value of mu is reached\n while fraction_of_connections<mu:\n node_G = connectors_G[np.random.randint(low=0,high=len(connectors_G))]\n node_F = connectors_F[np.random.randint(low=0,high=len(connectors_F))]\n if not H.has_edge(node_G,node_F):\n H.add_edge(node_G,node_F)\n current_number_of_connections +=1\n fraction_of_connections = current_number_of_connections/maximum_number_of_connections\n return H",
"def simulate_random_dag(d: int,\n degree: float,\n graph_type: str,\n w_range: tuple = (0.5, 2.0)) -> nx.DiGraph:\n if graph_type == 'erdos-renyi':\n prob = float(degree) / (d - 1)\n B = np.tril((np.random.rand(d, d) < prob).astype(float), k=-1)\n elif graph_type == 'barabasi-albert':\n m = int(round(degree / 2))\n B = np.zeros([d, d])\n bag = [0]\n for ii in range(1, d):\n dest = np.random.choice(bag, size=m)\n for jj in dest:\n B[ii, jj] = 1\n bag.append(ii)\n bag.extend(dest)\n elif graph_type == 'full': # ignore degree, only for experimental use\n B = np.tril(np.ones([d, d]), k=-1)\n else:\n raise ValueError('unknown graph type')\n # random permutation\n P = np.random.permutation(np.eye(d, d)) # permutes first axis only\n B_perm = P.T.dot(B).dot(P)\n U = np.random.uniform(low=w_range[0], high=w_range[1], size=[d, d])\n U[np.random.rand(d, d) < 0.5] *= -1\n W = (B_perm != 0).astype(float) * U\n G = nx.DiGraph(W)\n return G",
"def test_random_triad():\n G = nx.karate_club_graph()\n G = G.to_directed()\n for i in range(100):\n assert is_triad(nx.random_triad(G))",
"def random_triad(G):\n nodes = sample(G.nodes(), 3)\n G2 = G.subgraph(nodes)\n return G2",
"def rand_graph(num_nodes, num_edges, idtype=F.int64, device=F.cpu()):\n # TODO(minjie): support RNG as one of the arguments.\n eids = random.choice(num_nodes * num_nodes, num_edges, replace=False)\n eids = F.zerocopy_to_numpy(eids)\n rows = F.zerocopy_from_numpy(eids // num_nodes)\n cols = F.zerocopy_from_numpy(eids % num_nodes)\n rows = F.copy_to(F.astype(rows, idtype), device)\n cols = F.copy_to(F.astype(cols, idtype), device)\n return convert.graph(\n (rows, cols), num_nodes=num_nodes, idtype=idtype, device=device\n )",
"def generate_test_graph(sameDomain = False):\n num = 100\n\n urls = []\n emails = []\n nodes={}\n if sameDomain:\n domain = generate_domainname()\n else:\n domain = None\n for i in range(num):\n urls.append(generate_url(domain))\n emails.append(generate_email())\n \n used_urls = set()\n used_emails = set()\n for u in urls:\n l = random.choices(urls, k = floor(num/4))\n #l = [u for u in urls]\n e = random.choices(emails, k = floor(num/10))\n #e = [e for e in emails]\n used_urls.update(l)\n used_emails.update(e)\n nodes[u] = testNode(u, l, e)\n nodes[u].generate_page()\n \n return nodes, urls, emails",
"def random_graph_generator(vertices_number, directed=False):\n graph = Graph(directed)\n for i in range(0, vertices_number):\n graph.insert_vertex(i)\n\n for i in graph.vertices():\n for j in graph.vertices():\n if random.getrandbits(1):\n try:\n graph.insert_edge(i, j, None)\n except:\n pass\n \n return graph",
"def test_gp_composer_random_graph_generation_looping():\n task = Task(TaskTypesEnum.regression)\n\n params = GraphGenerationParams(\n adapter=PipelineAdapter(),\n rules_for_constraint=None,\n advisor=PipelineChangeAdvisor(task=task)\n )\n\n requirements = GPComposerRequirements(\n primary=['simple_imputation'],\n secondary=['ridge', 'dtreg'],\n timeout=datetime.timedelta(seconds=300),\n max_pipeline_fit_time=None,\n max_depth=2,\n max_arity=2,\n cv_folds=None,\n advisor=PipelineChangeAdvisor(task=task),\n pop_size=10,\n num_of_generations=5,\n crossover_prob=0.8,\n mutation_prob=0.8,\n mutation_strength=MutationStrengthEnum.mean\n )\n\n graph = random_graph(params=params, requirements=requirements, max_depth=None)\n nodes_name = list(map(str, graph.nodes))\n\n for primary_node in requirements.primary:\n assert primary_node in nodes_name\n assert nodes_name.count(primary_node) == 1\n assert constraint_function(graph, params) is True",
"def create_random_heterogeneous_crosslinking(graph, data_output, b, c, shape, dclm, k, f):#!!!\n d = densities(Vt=b[2][0]*b[2][1]*b[2][2], dclm=dclm, c=k, f=f)\n if shape == 'croix': z = croix(graph, c, b, f)\n elif shape == 'sphere': z = sphere(graph, c, b, f=f)\n else: print \"heu\", shape\n m = list(set(graph.nodes()).difference(set(z)))\n modified_graph = reticuler(graph, ['miniboucle'], d['dclh'], zone = z, visuel = False, blabla=True)\n twice_modified_graph = reticuler(modified_graph, ['alea'], d['dclm'], zone = m, visuel = False, sauvdata = data_output)",
"def generate_categorical_graph(num_vars,\n min_categs,\n max_categs,\n inputs_independent=False,\n use_nn=True,\n deterministic=False,\n graph_func=generate_random_graph,\n seed=-1,\n **kwargs):\n if seed >= 0:\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n\n if num_vars <= 26: # For less than 26 variables, we call the variables alphabetically, otherwise numerically\n variable_names = [n for i, n in zip(range(1, num_vars+1), string.ascii_uppercase)]\n else:\n variable_names = [r\"$X_{%s}$\" % i for i in range(1, num_vars+1)]\n var_num_categs = np.random.randint(min_categs, max_categs+1, size=(num_vars,))\n\n def dist_func(input_names, name):\n if min_categs != max_categs:\n input_num_categs = [var_num_categs[variable_names.index(v_name)] for v_name in input_names]\n num_categs = var_num_categs[variable_names.index(name)]\n else:\n input_num_categs, num_categs = [min_categs]*len(input_names), min_categs\n dist = get_random_categorical(input_names=input_names,\n input_num_categs=input_num_categs,\n num_categs=num_categs,\n inputs_independent=inputs_independent,\n use_nn=use_nn,\n deterministic=deterministic)\n return dist\n\n return graph_func(variable_names, dist_func, **kwargs)",
"def generate_random_graphs(n):\n def is_valid(G):\n if nx.number_connected_components(G) == 1:\n if nx.radius(G) >= 3:\n return True\n return False\n\n graphs = []\n for i in range(n):\n G = nx.gnm_random_graph(15, 30)\n while not is_valid(G):\n G = nx.gnm_random_graph(15, 30)\n graphs.append(G)\n return graphs",
"def test_dist_gates(self):\n generate = g.TEST_TYPE_TO_GENERATOR_BY_DEPTH[g.TEST_TYPES.RANDOM]\n L = 100\n D = 10\n W = 10\n num_trials = 50\n # a dictionary mapping gate type to a list of the number of times\n # that gate type appears in each circuit generated:\n gate_type_dist = dict(\n (gate_type, []) for gate_type in g.GATE_TYPES.values_generator())\n # a dictionary mapping gate type to the number of times that gate type\n # appears as the output gate:\n output_gate_type_dist = dict(\n (gate_type, 0) for gate_type in g.GATE_TYPES.values_generator())\n for trial_num in xrange(num_trials):\n # create a new circuit:\n circ = generate(L, D, W)\n # get a dictionary mapping each gate type to the number of times it\n # appears in the new circuit: \n dist = dict(\n (gate_type, 0) for gate_type in g.GATE_TYPES.values_generator())\n levels = circ.get_levels()\n for level_num in xrange(1, len(levels)):\n level = levels[level_num]\n for gate in level:\n dist[gate.get_func_name()] += 1\n # add these numbers to gate_type_dist:\n for key in dist.keys():\n gate_type_dist[key].append(dist[key])\n # keep track of the number of output gates of each type:\n output_gate_type = circ.get_levels()[-1][0].get_func_name()\n output_gate_type_dist[output_gate_type] += 1\n # the average number of gates of each type per circuit:\n averages = [float(sum(gate_type_dist[key])) / float(num_trials)\n for key in gate_type_dist.keys()]\n # make sure that the average number of gates of each type per circuit\n # is between 1/24 and 1/3 of the total of the averages:\n for num in averages:\n self.assertTrue(float(num) / float(sum(averages)) > 1.0 / 24.0)\n self.assertTrue(float(num) / float(sum(averages)) < 1.0 / 3.0)\n # make sure that the number of output gates of each type is between\n # 1/24 and 1/3 of the total number of circuits created\n for key in output_gate_type_dist.keys():\n self.assertTrue(\n float(output_gate_type_dist[key]) / float(num_trials)\n > 1.0 / 24.0)\n self.assertTrue(\n float(output_gate_type_dist[key]) / float(num_trials)\n < 1.0 / 3.0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
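A sketch of how a conjugate distinction might implement getRandomDistinction by combining two already-sampled base distinctions; it reuses the hypothetical AndDistinction from the sketch above, and the choice of random.sample is illustrative only.

import random

def getRandomDistinction(config, graphs, *base_distinctions):
    # Hypothetical conjugate implementation: combine two of the supplied base
    # distinctions at random. A non-conjugate distinction would instead sample
    # directly from the schema in config.schema and from the given graphs.
    left, right = random.sample(base_distinctions, 2)
    return AndDistinction(left, right)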
Get an estimate of the number of different subtypes for this distinction. This is used to estimate a PDF for randomly sampling the distinction space. Examine the code of other distinctions to get a feel for how things are estimated.
|
def getNumberOfSubtypes(config, low_estimate=True):
raise AbstractMethodException(Distinction)
|
[
"def test_type_distribution(self):\n np.random.seed(SEED)\n total = 100000\n tolerance = 0.02\n astro_generator = Generator(1e9, source='astrophysical')\n counts = Counter(astro_generator.get_particle_type()\n for _ in range(total))\n assert (counts[Particle.Type.electron_neutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n assert (counts[Particle.Type.electron_antineutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n assert (counts[Particle.Type.muon_neutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n assert (counts[Particle.Type.muon_antineutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n assert (counts[Particle.Type.tau_neutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n assert (counts[Particle.Type.tau_antineutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n astro_generator2 = Generator(1e9, source='astrophysical',\n flavor_ratio=(1, 2, 0))\n counts = Counter(astro_generator2.get_particle_type()\n for _ in range(total))\n assert (counts[Particle.Type.electron_neutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n assert (counts[Particle.Type.electron_antineutrino]/total ==\n pytest.approx(1/6, rel=tolerance))\n assert (counts[Particle.Type.muon_neutrino]/total ==\n pytest.approx(2/6, rel=tolerance))\n assert (counts[Particle.Type.muon_antineutrino]/total ==\n pytest.approx(2/6, rel=tolerance))\n assert counts[Particle.Type.tau_neutrino]/total == 0\n assert counts[Particle.Type.tau_antineutrino]/total == 0\n cosmo_generator = Generator(1e9, source='cosmogenic')\n counts = Counter(cosmo_generator.get_particle_type()\n for _ in range(total))\n assert (counts[Particle.Type.electron_neutrino]/total ==\n pytest.approx(0.78/3, rel=tolerance))\n assert (counts[Particle.Type.electron_antineutrino]/total ==\n pytest.approx(0.22/3, rel=tolerance))\n assert (counts[Particle.Type.muon_neutrino]/total ==\n pytest.approx(0.61/3, rel=tolerance))\n assert (counts[Particle.Type.muon_antineutrino]/total ==\n pytest.approx(0.39/3, rel=tolerance))\n assert (counts[Particle.Type.tau_neutrino]/total ==\n pytest.approx(0.61/3, rel=tolerance))\n assert (counts[Particle.Type.tau_antineutrino]/total ==\n pytest.approx(0.39/3, rel=tolerance))\n cosmo_generator2 = Generator(1e9, source='cosmogenic',\n flavor_ratio=(1, 2, 0))\n counts = Counter(cosmo_generator2.get_particle_type()\n for _ in range(total))\n assert (counts[Particle.Type.electron_neutrino]/total ==\n pytest.approx(0.78/3, rel=tolerance))\n assert (counts[Particle.Type.electron_antineutrino]/total ==\n pytest.approx(0.22/3, rel=tolerance))\n assert (counts[Particle.Type.muon_neutrino]/total ==\n pytest.approx(2*0.61/3, rel=tolerance))\n assert (counts[Particle.Type.muon_antineutrino]/total ==\n pytest.approx(2*0.39/3, rel=tolerance))\n assert counts[Particle.Type.tau_neutrino]/total == 0\n assert counts[Particle.Type.tau_antineutrino]/total == 0",
"def get_tree_size_distribution( self ):\n stat = {}\n cursor = connection.cursor()\n #cur = cursor.execute( \" select tree_id, count(taxon_id) from djangophylocore_reltreecoltaxa%s GROUP BY tree_id;\" % (self.id))\n #VR\n #cur = cursor.execute( \" select tree_id, count(rel.user_taxon_name) from djangophylocore_reltreecoltaxa%s as rel, djangophylocore_taxonomy as taxonomy where taxonomy.id = rel.taxon_id GROUP BY tree_id;\" % (self.id))\n cur = cursor.execute( \" select tree_id, count(rel.user_taxon_name) from djangophylocore_reltreecoltaxa%s as rel GROUP BY tree_id;\" % (self.id))\n \n if settings.DATABASE_ENGINE == 'sqlite3':\n result = cur.fetchall()\n cur.close()\n else:\n result = cursor.fetchall()\n cursor.close()\n for (tree_id, nbtaxa) in result:\n if not stat.has_key( nbtaxa ):\n stat[nbtaxa] = 0\n stat[nbtaxa] += 1\n if not stat.keys():\n raise ValueError, \"your collection must have trees\"\n nbmax = max( stat.keys() )\n ratio = int( nbmax*10.0/100) or 1 # 1 to prevent crash in xrange\n result_stat = {}\n for i in xrange( 0, nbmax+1, ratio):\n result_stat[i] = 0\n for i,j in stat.iteritems():\n for key in result_stat.keys():\n if key <= i < key+ratio:\n result_stat[key] += j\n return result_stat",
"def computeDensity(self,typeOfMultiStream=\"normal\"):\n durationLinks=0\n durationNodes=0\n for j in self.em.giveListOfLinks():\n durationLinks=durationLinks+j.giveIntervals2().duration()\n for i in self.layers.giveLayers():\n for j in self.layers.giveLayers():\n for k in i.giveNodesT().giveListOfNodes():\n for l in j.giveNodesT().giveListOfNodes():\n if i!=j or k!=l:\n durationNodes=durationNodes+k.giveIntervals2().intersection(l.giveIntervals2()).duration()\n return(2*durationLinks/durationNodes)",
"def GetSubdivisionCount(self) -> int:\n ...",
"def num_specifications(self):",
"def test_get_tax_return_frequencies(self):\n pass",
"def get_type_stats(self):\n if not self.fitted:\n raise ValueError(\"Vocabulary hasn't been computed yet\")\n\n total_types = len(self.freqs)\n known_types = len(self) - len(self.reserved)\n return known_types, total_types, known_types / total_types",
"def _genotype(team, other_team):\r\n num_programs_intersection = len(set(team.active_programs_).intersection(other_team.active_programs_))\r\n num_programs_union = len(set(team.active_programs_).union(other_team.active_programs_))\r\n if num_programs_union > 0:\r\n distance = 1.0 - (float(num_programs_intersection)/float(num_programs_union))\r\n else:\r\n print \"Error: No union between teams' active programs! Look for bugs.\"\r\n raise SystemExit\r\n return distance",
"def get_taxon_frequency_distribution( self ):\n stat = {}\n cursor = connection.cursor()\n #VR cur = cursor.execute( \"select rel.tree_id, taxonomy.name from djangophylocore_reltreecoltaxa%s as rel, djangophylocore_taxonomy as taxonomy where taxonomy.id = rel.taxon_id\" % self.id )\n cur = cursor.execute( \" select rel.tree_id, rel.user_taxon_name from djangophylocore_reltreecoltaxa%s as rel\" % (self.id))\n if settings.DATABASE_ENGINE == 'sqlite3':\n results = cur.fetchall()\n cur.close()\n else:\n results = cursor.fetchall()\n cursor.close()\n d_results = {}\n for (tree, taxa_name) in results:\n if tree not in d_results:\n d_results[tree] = []\n d_results[tree].append( taxa_name )\n for tree in d_results:\n already_done = set()\n for taxon in d_results[tree]:\n if taxon not in already_done:\n if not stat.has_key( taxon ):\n stat[taxon] = 0\n stat[taxon] += 1 \n already_done.add( taxon )\n if not stat.values():\n raise ValueError, \"your collection must have trees\"\n nbmax = max( stat.values() )\n ratio = int( nbmax*10.0/100) or 1\n result_stat = {}\n for i in xrange( 0, nbmax+1, ratio):\n result_stat[i] = 0\n for i in stat.values():\n for key in result_stat.keys():\n if key <= i < key+ratio:\n result_stat[key] += 1\n return result_stat",
"def calc_ratio(df):\n fc = df[\"Functional_Class\"].value_counts()\n\n mis = 0\n syn = 0\n ns = 0\n sp = 0\n other = 0\n \n for typ in fc.index:\n if (\"NONSENSE\" in typ):\n ns = ns + fc[typ]\n elif (\"NONSENSE\" not in typ) and (\"MISSENSE\" in typ):\n mis = mis+ fc[typ]\n elif (\"NONSENSE\" not in typ) and (\"MISSENSE\" not in typ) and (\"SILENT\" in typ) :\n syn = syn+ fc[typ]\n else:\n typlist = df.loc[df[\"Functional_Class\"] == typ,\"Effect\"].tolist()\n for i in typlist:\n if \"synonymous\" in i:\n syn = syn+1\n elif (\"synonymous\" not in i) and (\"splice\" in i):\n sp = sp+1\n elif (\"synonymous\" not in i) and (\"splice\" not in i):\n other = other + 1\n #print(\"variant type included in other: \", i)\n \n if mis+syn+ns+sp+other > len(df):\n print(\"duplicate detected\")\n \n return mis, syn, ns, sp, other",
"def prob_t_N(genotype, base):\n cnter = Counter(genotype)\n return cnter.get(base, 0) * 1/len(genotype)",
"def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)",
"def get_usage_ratio() :\n return (CountryResults._country_results_counter # Ratio of how often CountryResults subclass was used to process results \n / CountryResults._processing_counter)",
"def get_proportions(self):\n data = []\n for member in self.members:\n proportion = 1\n for m, b in zip(member, self.bases):\n proportion *= b.composition[m]\n data.append(proportion)\n return data",
"def test_init_expected_sizes(self):\n self.assertEquals(self.legis_size,self.legislature.size())\n self.assertEquals(self.legis_size,len(self.legislature.get_utilities()))\n self.assertEquals(self.legis_size,len(self.legislature.get_ideologies()))\n self.assertEquals(self.legis_size,len(self.legislature.legislators()))\n party_sum=self.legislature.get_num_liberals()+self.legislature.get_num_conservatives()\n self.assertEquals(self.legis_size,party_sum)",
"def subtype_counts(node_set, G, log=False):\n subtypes = Counter()\n for n in node_set:\n subtype = G.node[n]['subtype']\n subtypes[subtype] += 1\n\n if log:\n for k, v in subtypes.items():\n subtypes[k] = np.log10(v)\n \n return subtypes",
"def _calcTypeDist(self, uSignType, uPassiveShape,\n dbSignType, dbPassiveShape):\n if dbSignType != uSignType:\n # different type\n typeDist = 1\n else:\n # the same type\n if dbSignType == 'passive hand':\n if dbPassiveShape == uPassiveShape:\n # the same shape\n typeDist = 0\n else:\n # different shape\n typeDist = 0.5\n else:\n # the same type other than 'passive hand'\n typeDist = 0\n return typeDist",
"def ggml_type_size(type: int) -> int:\n ...",
"def get_makowski_diversity(self):\n d = decimal.Decimal(0)\n P = self.size_proteins\n\n degeneracy = self.degeneracy_table['Degeneracy']\n protein_counts = self.degeneracy_table['Proteins']\n\n for x, y in zip(protein_counts, degeneracy):\n p = y / self.size_oligonucleotides\n d += x * (p ** 2)\n\n self.makowski_diversity = 1 / (P * d)\n return self.makowski_diversity"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
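A rough sketch of the kind of estimate getNumberOfSubtypes asks for; the schema attribute used here (config.schema.node_attributes) is invented for illustration and is not part of the dataset.

def getNumberOfSubtypes(config, low_estimate=True):
    # Hypothetical: assume one subtype per node attribute declared in the schema,
    # and double that when an optimistic (non-low) estimate is requested.
    n = len(config.schema.node_attributes)   # invented schema API
    return n if low_estimate else 2 * n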
Given a schema, return True if this type of distinction is valid for that schema. The default is True. Should be overridden if there are any schemas for which a distinction is not valid.
|
def isValidForSchema(schema):
return True
|
[
"def is_a_dde_schema(self, schema):\n return schema in self.registered_dde_schemas",
"def compatibleSchema(self,\n schema: schemaconverter.TDXSchema,\n raise_error: bool = True\n ) -> bool:\n db_tdx_schema = self.tdx_schema\n # see https://stackoverflow.com/a/41579450/10149169\n is_subset = db_tdx_schema.items() <= schema.items()\n if not is_subset and raise_error:\n raise ValueError((\n \"The given database schema is not compatible with the\"\n \" existing database schema. The given schema was {}\"\n \" but the existing schema was {}\").format(\n schema, db_tdx_schema))\n return is_subset",
"def is_a_dde_schema(schema):\n return schema in registered_dde_schemas()",
"def _validate_schema(self):\n # TODO (ryandeivert): check the value of types defined in the schema to ensure\n # they are valid before erroring out at the _convert_type stage\n values = [\n (self._schema, self._optional_top_level_keys),\n (self._envelope_schema, self._optional_envelope_keys)\n ]\n\n return all(optionals.issubset(schema) for schema, optionals in values)",
"def has_schema_url(self):\n return self.get_schema_url() is not None",
"def schema_validator(self, schema: list):\n \n if len(self.data) != len(schema):\n return False\n \n def helper(data, keys):\n for key in data:\n if DataGenerator.is_dict(data[key]):\n keys.append([key, []]) # the empty dict is for the inner keys\n helper(data[key], keys[-1][1] if type(keys[-1]) is list else keys)\n else:\n keys.append(key)\n return keys\n\n return schema == helper(self.data, [])",
"def _has_schema(self):\n return self.get_attr('_has_schema', False)",
"def can_access_schema(self, datasource: \"BaseDatasource\") -> bool:\n\n return (\n self.can_access_all_datasources()\n or self.can_access_database(datasource.database)\n or self.can_access(\"schema_access\", datasource.schema_perm or \"\")\n )",
"def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True",
"def check_schema_exists(self, schema):\n sql_query = \"\"\"\n select schema_name\n from information_schema.schemata\n where schema_name = '%s'\n \"\"\" % schema\n try:\n table_schema = self.execute(sql_query)[0][0]\n to_return = table_schema == schema\n except:\n to_return = False\n return to_return",
"def ensure_schema(schema):\n return SCHEMA.check(schema)",
"def validate_data(self):\n schemafile = self.name.replace(\".xml\", \".xsd\")\n if os.path.exists(schemafile):\n try:\n schema = lxml.etree.XMLSchema(file=schemafile)\n except:\n logger.error(\"Failed to process schema for %s\" % self.name)\n return False\n else:\n # no schema exists\n return True\n\n if not schema.validate(self.xdata):\n logger.error(\"Data for %s fails to validate; run bcfg2-lint for \"\n \"more details\" % self.name)\n return False\n else:\n return True",
"def _validate_bool(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'value'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'bool':\n raise ValidationError('expected _type \"bool\"', path)\n if not isinstance(instance['value'], bool):\n raise ValidationError('value must be bool', path)",
"def validate(self, schema=os.path.join(os.path.dirname(__file__), 'am.xsd')):\n return validate_xml(schema, self.path, from_path=True)",
"def is_nested_schema(value: marshmallow.fields.Field) -> bool:\n return isinstance(get_nested_schema(value=value), marshmallow.Schema)",
"def has_schema(self, schema_name: str, **kw: Any) -> bool:\n with self._operation_context() as conn:\n return self.dialect.has_schema(\n conn, schema_name, info_cache=self.info_cache, **kw\n )",
"def _validate_bool(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'value'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - OPT_IMPORT_KEYS\n if invalid_keys:\n raise ValidationError(f'unexpected keys in schema: {invalid_keys}', path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError(f'missing keys in schema: {missing_keys}', path)\n if instance['_type'] != 'bool':\n raise ValidationError('expected _type \"bool\"', path)\n if not isinstance(instance['value'], bool):\n raise ValidationError('value must be bool', path)",
"def isValid(dp: frictionless.package.Package, new_dp: frictionless.package.Package):\n val = frictionless.validate(new_dp)\n if (\n val[\"valid\"]\n and dp[\"resources\"][0][\"schema\"] == new_dp[\"resources\"][0][\"schema\"]\n ):\n logging.info(\"Returning valid and schema-compliant data\")\n return True\n else:\n logging.error(\"Data is not valid or the schema has changed\")\n print(val)\n return False",
"def _validate(cls, schema, data, path):\n # String, Number, Boolean, None\n if schema == str:\n schema = unicode\n if schema in (int, long, unicode, float, bool, None):\n return cls._validate_type(schema, data, path)\n # Function\n if callable(schema):\n return cls._validate_function(schema, data, path)\n # Object\n if isinstance(schema, dict):\n return cls._validate_dict(schema, data, path)\n # Array\n if isinstance(schema, list):\n return cls._validate_list(schema, data, path)\n raise SchemaError(\"Unsupported schema data type: {0}\".format(schema))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
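A minimal sketch of an override for a distinction that only makes sense when the schema declares edge attributes; the edge_attributes field is an assumption about the schema object, not something shown in the data.

def isValidForSchema(schema):
    # Hypothetical override: this distinction needs at least one edge attribute to act on.
    return len(getattr(schema, "edge_attributes", [])) > 0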
Matrix multiplication of chains of square matrices
|
def chain_matmul_square(As):
As_matmul = As
while As_matmul.shape[0] > 1:
if As_matmul.shape[0] % 2:
A_last = As_matmul[-1:]
else:
A_last = None
As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2])
if A_last is not None:
As_matmul = torch.cat([As_matmul, A_last], dim=0)
return As_matmul.squeeze(0)
|
[
"def matrix_chain_multiply(A: List[np.ndarray], s: List[List[int]], i: int, j: int) -> np.ndarray:\n if i == j:\n return A[i]\n if i + 1 == j:\n return np.dot(A[i], A[j])\n Ak = matrix_chain_multiply(A, s, i, s[i][j])\n Ak1 = matrix_chain_multiply(A, s, s[i][j]+1, j)\n prod = np.dot(Ak, Ak1)\n return prod",
"def matmul_tup(matrices):\n for i in range(len(matrices) - 1):\n if i == 0:\n product = np.matmul(matrices[0], matrices[1])\n else:\n product = np.matmul(product, matrices[i + 1])\n return product",
"def multiply_matrix(mat_a, mat_b, total_threads):",
"def matrix_mult(m1, m2):\n pass",
"def _multi_matmul_chain_order(arrays):\n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n # Using -2 to generalize for shapes that are more than 2 dimmensions\n p = [a.shape[-2] for a in arrays] + [arrays[-1].shape[-1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = np.zeros((n, n), dtype=np.double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = np.empty((n, n), dtype=np.intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = np.inf\n for k in range(i, j):\n q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n return s",
"def squareMatrixMultiplyRecursive(A, B):\r\n # Base case when size of matrices is 1x1 \r\n if len(A) == 1: \r\n return A * B \r\n \r\n # Splitting the matrices into quadrants recursively.\r\n A11, A12, A21, A22 = divide(A) \r\n B11, B12, B21, B22 = divide(B) \r\n \r\n # Computing the values of the 4 quadrants of the final matrix c \r\n C11 = squareMatrixMultiplyRecursive(A11, B11) + squareMatrixMultiplyRecursive (A12, B21)\r\n C12 = squareMatrixMultiplyRecursive(A11, B12) + squareMatrixMultiplyRecursive (A12, B22)\r\n C21 = squareMatrixMultiplyRecursive(A21, B11) + squareMatrixMultiplyRecursive (A22, B21)\r\n C22 = squareMatrixMultiplyRecursive(A21, B22) + squareMatrixMultiplyRecursive (A22, B22)\r\n \r\n # Combining the 4 quadrants into a single matrix by stacking horizontally and vertically. \r\n C = np.vstack((np.hstack((C11, C12)), np.hstack((C21, C22)))) \r\n \r\n return C",
"def multiply_matrices(a, b):\n try:\n x = len(b[0])\n except:\n b = make_2D(b)\n try:\n x = len(a[0])\n except:\n a = make_2D(a)\n if len(a[0]) != len(b):\n print 'error: matrices cannot be multiplied'\n return\n out = np.zeros((len(a), len(b[0])))\n for i in range(len(out)):\n for j in range(len(out[0])):\n sum = 0\n for k in range(len(a[i])):\n sum += a[i][k] * b[k][j]\n out[i][j] = sum\n return out",
"def multiply_matrices(list):\n # Section 1: Start matrix product using 1st matrix in list\n matrix_product = list[0]\n\n # Section 2: Loop thru list to create product\n for matrix in list[1:]:\n matrix_product = matrix_multiply(matrix_product, matrix)\n\n return matrix_product",
"def matrixes_multiply(a, b):\n result = [[0,0,0], \n [0,0,0], \n [0,0,0]] \n \n \n for i in range(len(a)): \n for j in range(len(b[0])): \n for k in range(len(b)): \n result[i][j] += a[i][k] * b[k][j]\n return result",
"def matrix_chain_order(p: List[int]) -> List:\n # n is the number of matrices to multiply\n n = len(p) - 1\n # Initialize m, s with None\n m = []\n s = []\n for i in range(n):\n m += [[None] * n]\n s += [[None] * n]\n # For chains consisting of one matrix, m[i,i] = 0\n for i in range(n):\n m[i][i] = 0\n # l is the chain lengths\n for l in range(2, n+1):\n # i is the start index of chain \n for i in range(n-l+1):\n # j is the end index of the chain\n j = i + l - 1 \n # Initialize # scalar mult to large value\n m[i][j] = INF\n # Find value of k that minimizes scalar mults\n for k in range(i, j):\n # Compute # scalar mults\n q = m[i][k] + m[k+1][j] + p[i]*p[k+1]*p[j+1]\n # Update best solution\n if q < m[i][j]:\n m[i][j] = q\n s[i][j] = k\n return [m, s]",
"def matrix_mult( m1, m2 ):\n temp = new_matrix(len(m1), len(m2[0]))\n for row in range(len(temp)):\n for col in range(len(temp[0])):\n for i in range(len(m1[0])):\n temp[row][col] += m1[row][i] * m2[i][col]\n i = 0\n while i < len(m2):\n if m2[i]:\n m2[i] = temp[i]\n elif temp[i]:\n m2.append(temp[i])\n i += 1\n return m2",
"def _multiplyMatrix(self, other):\n return Matrix(self.getA()*other.getA()+self.getB()*other.getC(),\n self.getA()*other.getB()+self.getB()*other.getD(),\n self.getC()*other.getA()+self.getD()*other.getC(),\n self.getC()*other.getB()+self.getD()*other.getD())",
"def matmult(*x):\n return reduce(np.dot, x)",
"def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]",
"def __mul__(self, other):\n # \n # TODO - your code here\n #\n # Function call for transpose of other matrix\n transpose_mul=Matrix.T(other)\n \n # Code for multiplication of two rows or list\n def dot_product(vector1, vector2):\n mul_sum = 0\n for i in range(len(vector1)):\n mul_sum += vector1[i] * vector2[i]\n return mul_sum\n \n # Variables for matrix \n multiplication = []\n multiplication_row = []\n \n # For Loop for calculation of multiplication of two matrix\n for i in range(len(self.g)):\n multiplication_row = []\n for j in range(len(transpose_mul.g)):\n multiplication_row . append(dot_product(self.g[i], transpose_mul.g[j]))\n multiplication . append(multiplication_row) \n return Matrix(multiplication)",
"def matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]",
"def matrix_mul(self, other):\n if len(self.matrix[0]) != len(other.matrix):\n raise Exception(\"Invalid matrix operation - size mismatch\")\n m = []\n o_x, o_y = other.shape()\n for row in self.matrix:\n m.append([reduce((lambda x,y : x + y), row * other.get_col(i)) for i in range(o_y)]) \n return matrix(m)",
"def __matmul__(self, other):\n\n if self.multipliable(other):\n\n result = Matrix(rowcount=self.rowcount, columncount=other.columncount)\n\n for row in range(0, result.rowcount):\n for column in range(result.columncount):\n result.entries[row][column] = self.__dot_product(other, row, column)\n\n return result\n\n else:\n\n raise ValueError(\"Matrices not of the right shapes to be multiplied\")",
"def combine_one_matrices(mul):\n factor, args = mul.as_coeff_matrices()\n new_args = [args[0]]\n\n for B in args[1:]:\n A = new_args[-1]\n if not isinstance(A, OneMatrix) or not isinstance(B, OneMatrix):\n new_args.append(B)\n continue\n new_args.pop()\n new_args.append(OneMatrix(A.shape[0], B.shape[1]))\n factor *= A.shape[1]\n\n return newmul(factor, *new_args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
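A quick usage check for chain_matmul_square above, assuming that function is in scope and torch is installed: the pairwise tree reduction should match the naive left-to-right product of the same matrices.

import torch

As = torch.randn(5, 3, 3)                      # a stack of five 3x3 matrices A0..A4
prod = chain_matmul_square(As)                 # tree-style pairwise reduction
ref = As[0] @ As[1] @ As[2] @ As[3] @ As[4]    # naive ordered product
assert torch.allclose(prod, ref, atol=1e-5)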
Print Bento details by providing the bento_tag. \b
|
def get(bento_tag: str, output: str) -> None: # type: ignore (not accessed)
bento = bento_store.get(bento_tag)
if output == "path":
console.print(bento.path)
elif output == "json":
info = json.dumps(bento.info.to_dict(), indent=2, default=str)
console.print_json(info)
else:
info = yaml.dump(bento.info, indent=2, sort_keys=False)
console.print(Syntax(info, "yaml"))
|
[
"def print_entity(entity):\n print 'entity.original_text:', entity.original_text\n print 'entity.display_text:', entity.display_text\n print 'entity.display_html:', entity.display_html\n print 'entity.start_index:', entity.start_index\n print 'entity.end_index:', entity.end_index",
"def print_tags():\n for tag in Tag.query.all():\n print tag.__repr__()",
"def print_tags(self, oid):\n tags = self.oxide.get_tags(oid)\n if not tags:\n return\n print\n tags_dict = {\"tags\":tags}\n self.print_item(tags_dict)",
"def printROB(self): \n pass\n # for entry in self.buffer:\n # print entry\n\n # print",
"def show_target(self, target):\n print \" \" + repr(target.subject) \\\n + \" \" + target.meaning \\\n + \" \" + target.verb \\\n + \" \" + repr(target.object)",
"def test_info_with_votes(self):\n agnt=TwoPartyAgent()\n self.bill.add_record(agnt,True)\n import StringIO\n buff=StringIO.StringIO()\n sys.stdout=buff\n self.bill.info()\n sys.stdout=sys.__stdout__",
"def tag_line(self):\r\n print(\"Taste the rainbow!\")",
"def printme(self, line):\n self.otag.printme(line)",
"def tag_line(self):\n print(\"Taste the rainbow!\")",
"def print_cwb(document, tag='<s>'):\n\n doc = NLP(document)\n for sentence in doc.sents:\n print(tag)\n\n sent = NLP(sentence.text)\n for token in sent:\n print('{word}\\t{pos}\\t{lemma}'.format(\n word=token.text,\n pos=token.pos_,\n lemma=token.lemma_))\n\n print(tag.replace('<', '</'))",
"def print_animal_info(self):",
"def book_info(self):\n print(\"ID : \", self.ID,\n \"\\nName : \", self.name,\n \"\\nAuthor : \", self.author,\n \"\\nGenre : \", self.genre,\n \"\\nPrice : \", self.price,\n \"\\nQuantity of this book : \", self.quantity)",
"async def info(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if not tag.alias:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` tag information\")\n user = ctx.guild.get_member(tag.author)\n embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n embed.add_field(name=\"Tag name\", value=tag.title)\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)\n else:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` alias information\")\n user = ctx.guild.get_member(tag.author)\n embed.add_field(name=\"Author\", value=user or \"Unknown\")\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)",
"def pretty_print_entity(entity: tg.tl.TLObject) -> str:\n\n return bprint.bprint(entity, stream=str, skip_predicate=_bprint_skip_predicate)",
"def print_please(obj):\n\n print(obj.title) \n print(obj.torrent_size) \n print(obj.torrent_category) \n print(obj.torrent_date) \n print(obj.torrent_page_link)\n print(obj.magnet_link)",
"def list_bentos(bento_name: str, output: str) -> None: # type: ignore (not accessed)\n bentos = bento_store.list(bento_name)\n res = [\n {\n \"tag\": str(bento.tag),\n \"path\": display_path_under_home(bento.path),\n \"size\": human_readable_size(calc_dir_size(bento.path)),\n \"creation_time\": bento.info.creation_time.astimezone().strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n }\n for bento in sorted(\n bentos, key=lambda x: x.info.creation_time, reverse=True\n )\n ]\n\n if output == \"json\":\n info = json.dumps(res, indent=2)\n console.print(info)\n elif output == \"yaml\":\n info = yaml.safe_dump(res, indent=2)\n console.print(Syntax(info, \"yaml\"))\n else:\n table = Table(box=None)\n table.add_column(\"Tag\")\n table.add_column(\"Size\")\n table.add_column(\"Creation Time\")\n table.add_column(\"Path\")\n for bento in res:\n table.add_row(\n bento[\"tag\"],\n bento[\"size\"],\n bento[\"creation_time\"],\n bento[\"path\"],\n )\n console.print(table)",
"def print_obs(self,obs):\n print(obs)",
"def dump_headlines(self, root: Position = None, tag: str = None) -> None: # pragma: no cover\n print('')\n if tag:\n print(tag)\n _iter = root.self_and_subtree if root else self.c.all_positions\n for p in _iter():\n print('level:', p.level(), p.h)",
"def test_info_no_votes(self):\n import StringIO\n buff=StringIO.StringIO()\n sys.stdout=buff\n self.bill.info()\n sys.stdout=sys.__stdout__"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
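A hedged Python-side usage sketch for the get command above, touching only the fields the command itself reads (tag, path, info.to_dict); the tag string is made up, and bentoml.bentos.get is assumed to be the store accessor the CLI wraps.

import bentoml

bento = bentoml.bentos.get("fraud_detector:latest")   # hypothetical tag
print(bento.tag)
print(bento.path)
print(bento.info.to_dict())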
List Bentos in local store \b show all bentos saved $ bentoml list \b show all versions of bento with the name FraudDetector $ bentoml list FraudDetector
|
def list_bentos(bento_name: str, output: str) -> None: # type: ignore (not accessed)
bentos = bento_store.list(bento_name)
res = [
{
"tag": str(bento.tag),
"path": display_path_under_home(bento.path),
"size": human_readable_size(calc_dir_size(bento.path)),
"creation_time": bento.info.creation_time.astimezone().strftime(
"%Y-%m-%d %H:%M:%S"
),
}
for bento in sorted(
bentos, key=lambda x: x.info.creation_time, reverse=True
)
]
if output == "json":
info = json.dumps(res, indent=2)
console.print(info)
elif output == "yaml":
info = yaml.safe_dump(res, indent=2)
console.print(Syntax(info, "yaml"))
else:
table = Table(box=None)
table.add_column("Tag")
table.add_column("Size")
table.add_column("Creation Time")
table.add_column("Path")
for bento in res:
table.add_row(
bento["tag"],
bento["size"],
bento["creation_time"],
bento["path"],
)
console.print(table)
|
[
"def view_command():\n list1.delete(0,END)\n for row in AppbookstoredbBACKEND.view_data():\n list1.insert(END,row)",
"async def list(self, ctx: commands.Context, name: str = None):\n if not name:\n try:\n data = self.memory[ctx.guild.id]\n except KeyError:\n await ctx.send(\"There is no word trigger set for this server\")\n else:\n temp = []\n mes = \"\"\n for i in data:\n if i.label not in data:\n temp.append(i.label)\n a = \"Delete Word List\" if i.delete else \"Trigger Word List\"\n if i.active:\n mes += f\"✅ **{i.label}** - {a}\\n\"\n else:\n mes += f\"❌ **{i.label}** - {a}\\n\"\n await ctx.send(embed=discord.Embed(\n title=\"Server Word Lists\",\n description=mes,\n colour=0x81ecec\n ))\n else:\n data = self.findin(ctx.guild.id, name)\n\n if not data:\n await ctx.send(f\"word list `{name}` don't exist\")\n return\n\n if len(data.words) <= 0:\n await ctx.send(f\"word list `{name}` is empty\")\n return\n\n temp = \"\"\n data.words.sort()\n for i in data.words:\n temp += f\"**-** {i} \\n\"\n\n embed = discord.Embed(\n title=f\"Words in **{name}**\",\n timestamp=ctx.message.created_at,\n colour=0xffdd59\n )\n ret = CustomTools.split_string(temp, 1000)\n for i in range(len(ret)):\n embed.add_field(name=f\"Word list Page{i+1}\", value=ret[i])\n\n await ctx.send(embed=embed)",
"def list():\n return Town.query.all(), HTTPStatus.OK",
"def view_all(entities, table, db):\n print \n print \"TABLE:\",table\n for ii in entities:\n print ii\n print",
"def list(self) -> None:\n words = self.db.all()\n if not words:\n print(\"[-] No words found in the database\")\n return\n\n entries = [self.dict2entry(word) for word in words]\n for entry in entries:\n print(entry.get_str())",
"def listar():\n db = conectar()\n if db:\n if db.info()['doc_count'] > 0:\n print('Listando produtos...')\n print('-' * 30)\n for doc in db:\n print(f\"ID: {db[doc]['_id']}\")\n print(f\"Rev: {db[doc]['_rev']}\")\n print(f\"Produto: {db[doc]['nome']}\")\n print(f\"Preço {db[doc]['preco']}\")\n print(f\"Estoque: {db[doc]['estoque']}\")\n print('-' * 30)\n else:\n print('Não existem produtos cadastrados!')\n else:\n print('Não foi possível conectar com o servidor!')",
"def list(self, subcmd):\n\n self.__connect_db()\n tariffs = []\n\n for tariff in self.db.get_tariffs():\n tariffs.append(tariff.name)\n print(tariff.name)\n\n #print(\"\\n\".join(sorted(tariffs)))",
"def list(self):\n\t\treturn http.db_list()",
"def list():\n rino.remote.list()",
"async def liststoareact(self, ctx):\n reactions = await self.conf.guild(ctx.guild).reactions()\n msg = f\"Smart Reactions for {ctx.guild.name}:\\n\"\n for response in reactions:\n for command in reactions[response]:\n msg += f\"{response}: {command}\\n\"\n for page in pagify(msg, delims=[\"\\n\"]):\n await ctx.send(page)",
"async def __list(self, ctx):\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n self.save_db()\n else:\n db = self.db[server.id]\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n self.save_db()\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n return\n else:\n bookkeeper = db[\"bookkeeper\"][:]\n msg = \"\"\n for x in bookkeeper:\n bookkeeper[bookkeeper.index(x)] = discord.utils.find(lambda N: N.id == x, server.members).display_name\n bookkeeper = sorted(bookkeeper, key=lambda item: (int(item.partition(' ')[0])\n if item[0].isdigit() else float('inf'), item))\n msg = \", \".join(bookkeeper[:-2] + [\" and \".join(bookkeeper[-2:])])\n await self.bot.say(\"Current bookkeepers assigned are: {}\".format(msg))",
"def list(self):\r\n return self.vmrun('list')",
"def do_bay_list(cs, args):\n bays = cs.bays.list(marker=args.marker, limit=args.limit,\n sort_key=args.sort_key,\n sort_dir=args.sort_dir)\n columns = ['uuid', 'name', 'node_count', 'master_count', 'status']\n columns += utils._get_list_table_columns_and_formatters(\n args.fields, bays,\n exclude_fields=(c.lower() for c in columns))[0]\n utils.print_list(bays, columns,\n {'versions': magnum_utils.print_list_field('versions')},\n sortby_index=None)",
"def ls(filter=None):",
"def list(self):\n\n gts_pokemons = meta.Session.query(gts_model.GTSPokemon).all()\n\n c.game_language = db.get_by_identifier_query(t.Language, u'en').one()\n c.savefiles = []\n for gts_pokemon in gts_pokemons:\n savefile = SaveFilePokemon(gts_pokemon.pokemon_blob)\n savefile.use_database_session(db.pokedex_session)\n c.savefiles.append(savefile)\n\n return render('/gts/list.mako')",
"def list():\n persons = Person.query.all()\n return render_template('person.list.html',persons=persons)",
"async def ls():\n confirm_logged_in()\n\n # The /webhooks API lives inside the /accounts/{id}/workspaces/{id} routing tree\n async with get_cloud_client(host=PREFECT_API_URL.value()) as client:\n retrieved_webhooks = await client.request(\"POST\", \"/webhooks/filter\")\n display_table = _render_webhooks_into_table(retrieved_webhooks)\n app.console.print(display_table)",
"async def list(self, ctx):\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if len(self.db[server.id]) < 1:\n await self.bot.say(\"No boxes have been created for this server yet, please create some using [p]box create\"\n \" first, thanks\")\n return\n boxes = self.db[server.id].keys()\n await self.bot.say(\"Here are this server's boxes:\\n{}\".format(\"\\n\".join(boxes)))",
"def list_litnacionals_cmd():\n return ListLitnacionalCommand()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Export a Bento to an external file archive \b
|
def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed)
bento = bento_store.get(bento_tag)
out_path = bento.export(out_path)
logger.info("%s exported to %s.", bento, out_path)
|
[
"def export_obo(path_to_file, connection=None):\n db = DbManager(connection)\n db.export_obo(path_to_export_file=path_to_file)\n db.session.close()",
"def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)",
"def export_python(self, filename):",
"def export_object_to_file(ob, filename):\n t1 = time.clock()\n current_object = bpy.context.active_object\n # duplicate the object\n bpy.ops.object.duplicate()\n # apply all modifiers\n bpy.ops.object.convert(target='MESH', keep_original=False)\n\n try:\n # convert all faces (of duplicated object) to triangles\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.quads_convert_to_tris()\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.context.scene.update()\n\n # ob je kopia originalu\n ob = bpy.types.Object\n ob = bpy.context.active_object\n me = ob.data\n\n matrix = ob.matrix_world\n\n if not has_triangles_only(me):\n raise Exception(\"The mesh contains non triangles\")\n\n export_status = [{'INFO'}, 'Exported ']\n status = 'OK'\n\n # aplikuj transformacie a prepocitaj normaly\n me.transform(matrix)\n me.calc_normals()\n\n mesh = export_mesh(ob, me, has_bones(ob) and ob.atom.export_bones)\n\n with open(filename, \"w+\") as output:\n # compact format\n #data = json.dumps({ 'mesh' : mesh }, separators=(',', ':'))\n data = json.dumps({ 'mesh' : mesh }, indent=2)\n output.write(data)\n\n t2 = time.clock()\n print(\"Total time {0}s\".format(t2 - t1))\n export_status = [{'INFO'}, status]\n\n except Exception as e:\n export_status = [{'ERROR'}, e.args[0]]\n\n finally:\n # delete duplicate object and set previous object\n bpy.ops.object.delete()\n bpy.ops.object.select_all(action='DESELECT')\n current_object.select = True\n bpy.context.scene.objects.active = current_object\n\n return export_status",
"def export_bn_headless():\n options = GetOptions(False)\n bv = BinaryViewType.get_view_of_file(options.bn_database)\n bv.update_analysis_and_wait()\n (success, error_message) = export_bn(options.json_file, bv)\n if not success:\n print \"Error: {}\".format(error_message)",
"def export(self, bobber_version: str) -> NoReturn:\n tag = self.get_tag(bobber_version)\n self._build_if_not_built(tag, bobber_version)\n filename = tag.replace('/', '_').replace(':', '_')\n print(f'Exporting {tag} to \"{filename}.tar\". This may take a while...')\n image = self.cli.get_image(tag)\n with open(f'{filename}.tar', 'wb') as image_file:\n for chunk in image:\n image_file.write(chunk)\n print(f'{tag} saved to {filename}.tar')",
"def test_export(self):\n structure = {\n \"README.rst\": \"Hi this is 1.0.0.\",\n \"twisted\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted\", 1, 0, 0),\n \"web\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted.web\", 1, 0, 0),\n },\n },\n }\n reposDir = self.makeRepository(self.tmpDir)\n self.createStructure(reposDir, structure)\n self.commitRepository(reposDir)\n\n exportDir = FilePath(self.mktemp()).child(\"export\")\n self.createCommand.exportTo(reposDir, exportDir)\n self.assertStructure(exportDir, structure)",
"def BT_export(self):\n src = os.path.join(self.resMan.base_path, Config.instance().weld_BT_root_folder)\n srcs=self.BTMan.get_subdirs(src)\n dst = os.path.join(self.project.rootdir, Config.instance().weld_BT_root_folder)\n #this operation has lots of exceptions to output...\n try:\n for src in srcs:\n self.BTMan.export(src, dst)\n except Exception, e:\n print >> sys.__stderr, 'ERROR in Weld.BT_export():'\n print >> sys.__stderr, e.args[0]\n print >> sys.__stderr, 'export cancelled (some cleanup might be needed in %s)' % dst",
"def s3_export(dataset_path, target, treeish):\n subprocess.check_call(\n ['git-annex', 'export', treeish, '--to', target], cwd=dataset_path)",
"def wrapped_tarball(export_context, context):\n result = export_result_dict(export_context)\n RESPONSE = context.REQUEST.RESPONSE\n RESPONSE.setHeader('Content-type', 'application/x-gzip')\n RESPONSE.setHeader('Content-disposition',\n 'attachment; filename=%s' % result['filename'])\n return result['tarball']",
"def exportBulletFile(*argv):",
"def dump(self, archive):",
"def export_to_file(self, dest, txn_list):\n raise NotImplementedError",
"def export(tagname,export_dir=None,local=False):\n if not export_dir:\n export_dir = tagname\n if tagname == 'trunk':\n dirname = os.path.join(fabric.api.env.conf['REPO'],tagname)\n else:\n dirname = os.path.join(fabric.api.env.conf['REPO'],fabric.api.env.conf['VCS_TAGS'],tagname)\n if local:\n command = 'svn export %s %s' % (dirname, export_dir)\n fabric.api.local(command)\n else:\n command = 'svn export %s %s' % (dirname, os.path.join(fabric.api.env.conf['SRC_DIR'],export_dir))\n fabric.api.run(command)",
"def download_export():\n html = urllib2.urlopen(WCA_EXPORT_URL + '/export.html')\n soup = BeautifulSoup(html, 'html.parser')\n latest = soup.find('dl').find_all('a', href=re.compile('tsv'))[0].get('href')\n zippath = SCRIPT_DIR + WCA_EXPORT_DIR + '/' + latest\n if not os.path.isfile(zippath):\n with open(zippath, 'wb') as file:\n file.write(urllib2.urlopen(WCA_EXPORT_URL + '/' + latest).read())",
"def export(self, stream):\n pass",
"def saveto(file, tmpfile):\n args = {\"file\": file, \"tmpfile\": tmpfile}\n send_command(\"saveto\", args)",
"def export_data_for_download(data, file_name, file_extension, content_type=\"application/octet-stream\"):\n import pickle\n global result\n data_dumped = pickle.dumps(data)\n result = data_dumped\n resultMetadata.put(\"file.name\", file_name)\n resultMetadata.put(\"file.extension\", file_extension)\n resultMetadata.put(\"content.type\", content_type)",
"def extract_to_disk(self):\n archive_name, extension = os.path.splitext(os.path.basename(self.file.name))\n if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):\n os.mkdir(archive_name)\n os.chdir(archive_name)\n for filename, data in self.extract().items():\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Import a previously exported Bento archive file \b
|
def import_bento_(bento_path: str) -> None: # type: ignore (not accessed)
bento = import_bento(bento_path)
logger.info("%s imported.", bento)
|
[
"def import_archive(self):\n if self.archive:\n archive = IrkruTildaArchive(self.archive, material=self)\n archive.process()",
"def import_into_beets(self):\n # TODO: Rework this and properly call the beets API.\n os.system(f'beet import {self.downloader.temp_path.name}')",
"def import_idb(self, idb_file):\n self.__run_import_script(file=idb_file, is_bin=False)",
"def import_fusion_archive(filename, name=\"import\"):\n import_options = app().importManager.createFusionArchiveImportOptions(filename)\n\n document = app().importManager.importToNewDocument(import_options)\n imported_root = document.products[0].rootComponent\n\n bodies = []\n\n for body in imported_root.bRepBodies:\n bodies.append(brep().copy(body))\n for occurrence in imported_root.allOccurrences:\n for body in occurrence.bRepBodies:\n bodies.append(brep().copy(body))\n\n document.close(saveChanges=False)\n\n return BRepComponent(*bodies, name=name)",
"def _import(self, fp):\n pass",
"def importar2(self):\n self.set_session()\n fileinfo = self.request.files['archivo'][0]\n fname = fileinfo['filename']\n extn = os.path.splitext(fname)[1]\n cname = str(uuid.uuid4()) + extn\n fh = open(\"server/common/resources/uploads/\" + cname, 'wb')\n fh.write(fileinfo['body'])\n fh.close()\n if extn == '.xlsx':\n mee = self.manager2(self.db).import_excel(cname)\n self.respond(message=mee['message'], success=mee['success'])\n else:\n self.respond(message='Formato de Archivo no aceptado¡¡', success=False)\n self.db.close()",
"def do_import(args):\n base64str = b''\n for infile_name in args.infile_names:\n if args.png:\n chunk = subprocess.check_output(['zbarimg', '--raw', infile_name])\n base64str += chunk\n elif args.base64:\n with open(infile_name, 'rb') as infile:\n chunk = infile.read()\n base64str += chunk\n\n raw = base64.b64decode(base64str)\n paperkey = subprocess.Popen(['paperkey', '--pubring', args.pubkey],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n (paperkey_stdout, _) = paperkey.communicate(raw)\n gpg = subprocess.Popen(['gpg', '--import'], stdin=subprocess.PIPE)\n gpg.communicate(paperkey_stdout)",
"def import_project_dump(self, key):",
"def do_import(self, file):\n UglyBibtex(file).do_import()",
"def importAbcAsset ():\n\n help(importAbcAsset)\n\n import hou\n import os\n \n \n #set path\n hipPath = hou.expandString('$HIP')\n path = hipPath + \"/abc/\"\n print (path)\n \n listPath = os.listdir(path)\n \n obj = hou.node(\"/obj\")\n alembicImport= obj.createNode (\"geo\",\"alembicImport\")\n \n file1 = hou.node(\"/obj/alembicImport/file1\")\n file1.destroy()\n \n for n in listPath:\n print (n)\n currentFile=alembicImport.createNode(\"alembic\",n)\n #set fileName\n currentFile.setParms({\"fileName\":\"$\"+\"HIP/abc/\"+n})\n\n #reload geo callback\n #prepa param\n parm_group = alembicImport.parmTemplateGroup()\n parm_folder = hou.FolderParmTemplate(\"folder\",\"reload\")\n #button run code\n button=hou.ButtonParmTemplate(\"reload\",\"Reload\")\n button.setTags({\"script_callback_language\":\"python\",\"script_callback\":\"import y \\ny.reloadAlembic()\"})\n parm_folder.addParmTemplate(button)\n #append param\n parm_group.append(parm_folder)\n alembicImport.setParmTemplateGroup(parm_group)",
"def import_infile(in_path):\n # Try general import, for zlib or plaintext files\n try:\n inv = Inventory(in_path)\n except AttributeError:\n pass # Punt to JSON attempt\n else:\n return inv\n\n # Maybe it's JSON\n try:\n inv = Inventory(readjson(in_path))\n except JSONDecodeError:\n return None\n else:\n return inv",
"def importExternal(*args):\n goTo = pi.currentProject\n impFile = cmds.fileDialog2(fm=1, dir = goTo)[0]\n if impFile:\n cmds.file(impFile, i=True)",
"def importfiles(self, irc, msg, args, e):\n self.db.importFiles()",
"def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed)\n bento = bento_store.get(bento_tag)\n out_path = bento.export(out_path)\n logger.info(\"%s exported to %s.\", bento, out_path)",
"def importGeoAsset ():\n\n help(importGeoAsset)\n\n import hou\n import os\n \n \n #set path\n hipPath = hou.expandString('$HIP')\n path = hipPath + \"/geo/\"\n print (path)\n \n listPath = os.listdir(path)\n \n obj = hou.node(\"/obj\")\n geoImport= obj.createNode (\"geo\",\"geoImport\")\n\n file1 = hou.node(\"/obj/geoImport/file1\")\n file1.destroy()\n \n for n in listPath :\n print (n)\n currentFile=geoImport.createNode(\"file\",n)\n #set fileNames\n currentFile.setParms({\"file\":\"$\"+\"HIP/geo/\"+n})\n \n #reload geo callback\n #prepa param\n parm_group = geoImport.parmTemplateGroup()\n parm_folder = hou.FolderParmTemplate(\"folder\",\"reload\")\n #button run code\n button=hou.ButtonParmTemplate(\"reload\",\"Reload\")\n button.setTags({\"script_callback_language\":\"python\",\"script_callback\":\"import y \\ny.reloadGeo()\"})\n parm_folder.addParmTemplate(button)\n #append param\n parm_group.append(parm_folder)\n geoImport.setParmTemplateGroup(parm_group)",
"def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def importButtonAction(self):\r\n animLayer = self.UI.targetAnimLayer_lineEdit.text() # get this val from UI\r\n delKeys = self.UI.deleteAnim_checkBox.isChecked()\r\n if self.loadedData:\r\n self.ExImFuncs.importAnim(animLayer, delKeys, [self.loadedInit, self.loadedData])\r\n self.loadedData = None\r\n self.dataFile = None\r\n self.loadedInit = None\r\n self.UI.loadedMAF_listWidget.clear()\r\n self.UI.saveMAFData_pushButton.setEnabled(False)\r\n\r\n else:\r\n self.ExImFuncs.importAnim(animLayer, delKeys)",
"def importer(self) -> ImporterType:",
"def unpack(self, tarball=None, location=None, use_vhost=True):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pull Bento from a yatai server.
|
def pull(bento_tag: str, force: bool) -> None: # type: ignore (not accessed)
yatai_client.pull_bento(bento_tag, force=force)
|
[
"def pull():",
"def pull_from_postmaster(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/PullFromPostmaster/\"))",
"def repo_pull(self):\n\n if self.clowder_repo is None:\n exit_clowder_not_found()\n\n if is_offline():\n print(fmt.offline_error())\n sys.exit(1)\n\n self.clowder_repo.print_status(fetch=True)\n self.clowder_repo.pull()",
"def pull(self):\n origin = self.git_repo.remotes.origin\n origin.pull()",
"def fetch_pull(ref):\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".format(ref))",
"def pull_master():\n _pull()",
"def pull():\n _with_deploy_env(['git pull'])",
"def _Fetch(self, version):\n repo = maven_repo.MavenRepository(\n remotes=[\n maven_repo.FIJI_PUBLIC_REPO,\n maven_repo.FIJI_SNAPSHOT_REPO,\n ],\n )\n local_path = repo.Get(\n group='com.moz.fiji.bento',\n artifact='bento-cluster',\n version=version,\n classifier='release',\n type='tar.gz',\n )\n ExtractArchive(archive=local_path, work_dir=self.path, strip_components=1)\n assert os.path.exists(os.path.join(self.path, 'bin', 'bento'))",
"def test_pull_from_origin(tmpdir):\n gitwrapper.clone_from('git://github.com/Tinche/bower-cache', tmpdir)\n gitwrapper.pull_from_origin(tmpdir)",
"def bzpull(request, target):\n if target.startswith(\"sha1:\"):\n target = target[5:]\n url = \"http://bitzi.com/lookup/%s?v=tventtxt\" % target\n tventtxt = urllib.urlopen(url)\n tventdict = {}\n targets_to_update = set()\n count = 0\n text = \"\"\n try:\n for line in tventtxt:\n text += '\\n'\n line = line.strip()\n text += line\n if not line:\n if 'user' in tventdict and 'target_id' in tventdict and 'when' in tventdict:\n tvent = Tvent()\n tvent.when = tventdict['when']\n tvent.user = tventdict['user']\n target, created = Target.objects.get_or_create(id=tventdict['target_id'])\n tvent.target = target\n targets_to_update.add(target.id)\n tvent.tagtext = tventdict['tagtext']\n tvent.save()\n count += 1\n else:\n # error; required field not present\n text += '\\nERROR: incomplete tvent ' + str(tventdict)\n tventdict = {}\n continue\n if line.startswith(\"=\"):\n tventdict['when'] = line[1:]\n continue\n if line.startswith(\"~\"):\n tventdict['user'] = line[1:]\n continue\n if line.startswith(\"@\"):\n tventdict['target_id'] = line[1:]\n continue\n tventdict['tagtext'] = tventdict.get('tagtext','') + line + '\\n'\n if 'user' in tventdict and 'target_id' in tventdict and 'when' in tventdict:\n # TODO: reduce duplication with above\n tvent = Tvent()\n tvent.when = tventdict['when']\n tvent.user = tventdict['user']\n target = Target.objects.get_or_create(id=tventdict['target_id'])\n tvent.target = target\n targets_to_update.add(target.id)\n # TODO: cleanup tags here? \n tvent.save()\n count += 1\n else:\n # error; required field not present\n text += '\\nERROR: incomplete tvent ' + str(tventdict)\n finally:\n tventtxt.close()\n # trigger update of any possibly-changed Target summaries\n for id in targets_to_update:\n Target.objects.get(id=id).updateFromTvents()\n return HttpResponse('Pulled %d tvents from: %s\\n %s' % (count, url, text), mimetype='text/plain')",
"def pull(self):\n for tag_ttype in self.note_store.listTags(self.auth_token):\n self.app.log(\n 'Pulling tag \"%s\" from remote server.' % tag_ttype.name)\n try:\n tag = self._update_tag(tag_ttype)\n except NoResultFound:\n tag = self._create_tag(tag_ttype)\n self._exists.append(tag.id)\n\n self.session.commit()\n self._remove_tags()",
"def pull(self):\n try:\n self.run_command('pull')\n\n except CommandError:\n # Fail if no remotes is OK.\n\n if self.get_remotes():\n raise",
"def post(self):\n my_data = json.loads(self.request.body.decode('utf-8'))\n origin = my_data[\"origin\"]\n master = my_data[\"master\"]\n curr_fb_path = my_data[\"curr_fb_path\"]\n my_output = self.git.pull(origin, master, curr_fb_path)\n self.finish(my_output)\n print(\"You Pulled\")",
"def pull(args):\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if not os.path.exists(os.path.join(args.base, path)) and remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('pull: {}'.format(path))\n ensure_local(os.path.dirname(os.path.join(args.base, path)))\n args.sftp.get(\n os.path.join(args.remote_base, path),\n os.path.join(args.base, path)\n )\n args.cache.append(path)\n args.update = True\n return",
"def execute_pull():\n mint = mint_login(cursor)\n logger.info(\"Login successful\")\n if refresh:\n logger.info(\"Started refreshing\")\n refresh_accounts(mint)\n logger.info(\"Refreshed accounts\")\n\n net_worth, account_details = get_account_details(mint)\n\n if GENERATE_TEST:\n generate_test_data(account_details, 'account_details')\n\n logger.info(\"Successfully retrieved {} accounts\".format(len(account_details)))\n write_accounts(account_details=account_details,\n included_investments=included_investments, cursor=cursor)\n logger.info(\"Finished pulling data from mint\")",
"def local_bonds_prices():\n url1 = \"https://api.invertironline.com/token\"\n\n data = {\n \"username\": usuario,\n \"password\": password,\n \"grant_type\": \"password\" \n }\n response = requests.post(url1, data=data)\n if response.status_code == 200:\n content = response.text\n access_key = token_key(content)\n\n url2 = f'https://api.invertironline.com/api/v2/Cotizaciones/Bonos/Merval/argentina'\n datos = requests.get(url2, headers={\n 'Authorization': 'Bearer '+access_key\n })\n datos = json.loads(datos.text)\n datos = datos['titulos']\n datos = clean_assets(datos)\n return datos",
"def pull1(repo, **kwargs):\n ret = do_pull(repo, \"topology.virl\")\n if not ret:\n exit(1)",
"def git_pull(project='testplan'):\n _setup_host()\n run('cd %s && git pull' % project)",
"def _pullbundle2(pullop):\n kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}\n\n # make ui easier to access\n ui = pullop.repo.ui\n\n # At the moment we don't do stream clones over bundle2. If that is\n # implemented then here's where the check for that will go.\n streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]\n\n # declare pull perimeters\n kwargs[b'common'] = pullop.common\n kwargs[b'heads'] = pullop.heads or pullop.rheads\n\n # check server supports narrow and then adding includepats and excludepats\n servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)\n if servernarrow and pullop.includepats:\n kwargs[b'includepats'] = pullop.includepats\n if servernarrow and pullop.excludepats:\n kwargs[b'excludepats'] = pullop.excludepats\n\n if streaming:\n kwargs[b'cg'] = False\n kwargs[b'stream'] = True\n pullop.stepsdone.add(b'changegroup')\n pullop.stepsdone.add(b'phases')\n\n else:\n # pulling changegroup\n pullop.stepsdone.add(b'changegroup')\n\n kwargs[b'cg'] = pullop.fetch\n\n legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')\n hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())\n if not legacyphase and hasbinaryphase:\n kwargs[b'phases'] = True\n pullop.stepsdone.add(b'phases')\n\n if b'listkeys' in pullop.remotebundle2caps:\n if b'phases' not in pullop.stepsdone:\n kwargs[b'listkeys'] = [b'phases']\n\n bookmarksrequested = False\n legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')\n hasbinarybook = b'bookmarks' in pullop.remotebundle2caps\n\n if pullop.remotebookmarks is not None:\n pullop.stepsdone.add(b'request-bookmarks')\n\n if (\n b'request-bookmarks' not in pullop.stepsdone\n and pullop.remotebookmarks is None\n and not legacybookmark\n and hasbinarybook\n ):\n kwargs[b'bookmarks'] = True\n bookmarksrequested = True\n\n if b'listkeys' in pullop.remotebundle2caps:\n if b'request-bookmarks' not in pullop.stepsdone:\n # make sure to always includes bookmark data when migrating\n # `hg incoming --bundle` to using this function.\n pullop.stepsdone.add(b'request-bookmarks')\n kwargs.setdefault(b'listkeys', []).append(b'bookmarks')\n\n # If this is a full pull / clone and the server supports the clone bundles\n # feature, tell the server whether we attempted a clone bundle. The\n # presence of this flag indicates the client supports clone bundles. 
This\n # will enable the server to treat clients that support clone bundles\n # differently from those that don't.\n if (\n pullop.remote.capable(b'clonebundles')\n and pullop.heads is None\n and list(pullop.common) == [pullop.repo.nullid]\n ):\n kwargs[b'cbattempted'] = pullop.clonebundleattempted\n\n if streaming:\n pullop.repo.ui.status(_(b'streaming all changes\\n'))\n elif not pullop.fetch:\n pullop.repo.ui.status(_(b\"no changes found\\n\"))\n pullop.cgresult = 0\n else:\n if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:\n pullop.repo.ui.status(_(b\"requesting all changes\\n\"))\n if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):\n remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)\n if obsolete.commonversion(remoteversions) is not None:\n kwargs[b'obsmarkers'] = True\n pullop.stepsdone.add(b'obsmarkers')\n _pullbundle2extraprepare(pullop, kwargs)\n\n remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)\n if remote_sidedata:\n kwargs[b'remote_sidedata'] = remote_sidedata\n\n with pullop.remote.commandexecutor() as e:\n args = dict(kwargs)\n args[b'source'] = b'pull'\n bundle = e.callcommand(b'getbundle', args).result()\n\n try:\n op = bundle2.bundleoperation(\n pullop.repo, pullop.gettransaction, source=b'pull'\n )\n op.modes[b'bookmarks'] = b'records'\n bundle2.processbundle(pullop.repo, bundle, op=op)\n except bundle2.AbortFromPart as exc:\n pullop.repo.ui.error(_(b'remote: abort: %s\\n') % exc)\n raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)\n except error.BundleValueError as exc:\n raise error.RemoteError(_(b'missing support for %s') % exc)\n\n if pullop.fetch:\n pullop.cgresult = bundle2.combinechangegroupresults(op)\n\n # processing phases change\n for namespace, value in op.records[b'listkeys']:\n if namespace == b'phases':\n _pullapplyphases(pullop, value)\n\n # processing bookmark update\n if bookmarksrequested:\n books = {}\n for record in op.records[b'bookmarks']:\n books[record[b'bookmark']] = record[b\"node\"]\n pullop.remotebookmarks = books\n else:\n for namespace, value in op.records[b'listkeys']:\n if namespace == b'bookmarks':\n pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)\n\n # bookmark data were either already there or pulled in the bundle\n if pullop.remotebookmarks is not None:\n _pullbookmarks(pullop)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Push Bento to a yatai server.
|
def push(bento_tag: str, force: bool, threads: int) -> None: # type: ignore (not accessed)
bento_obj = bento_store.get(bento_tag)
if not bento_obj:
raise click.ClickException(f"Bento {bento_tag} not found in local store")
yatai_client.push_bento(bento_obj, force=force, threads=threads)
|
[
"def push(context_service: ContextService):\n cli_output: CliOutput = context_service.get_cli_output()\n cli_output.info(\"Pushing changes to remote...\")",
"def push(self, obj):\n pass",
"def push(self):\n logger.debug('PUSHING...')\n self._rest()\n self._trigger(self.STATE.PUSHING)",
"def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()",
"def post(self):\n my_data = json.loads(self.request.body.decode('utf-8'))\n origin = my_data[\"origin\"]\n master = my_data[\"master\"]\n curr_fb_path = my_data[\"curr_fb_path\"]\n my_output = self.git.push(origin, master, curr_fb_path)\n self.finish(my_output)\n print(\"You Pushed\")",
"def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))",
"def _push_buffer(self):\r\n \r\n self._push_writer()",
"def push(self, e):\n self._data.append(e) # novo item armazenado no topo da pilha",
"def push(self, data):\n self.data.append(data)",
"def push(self, data=None): # real signature unknown; restored from __doc__\n return False",
"def push(self, url):\n self.pool.push(url)",
"def push ():\n\n tagname = get_tag (comp_versions, 'ACE')\n\n if opts.push:\n if opts.take_action:\n vprint (\"Pushing ACE_TAO\", opts.ace_tao_branch, \"to origin\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin \" + opts.ace_tao_branch)\n\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin tag \" + tagname)\n\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git push origin tag \" + tagname)\n\n # Push release branches\n latest_branch_helper (push_latest_branch, opts.release_type)\n else:\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n print (\"Pushing tags:\\n\")\n print (\"Pushing tag \" + tagname + \"\\n\")",
"def push(self, value):\r\n self._write_back_buffer(value)",
"def push(host, delete):\n src = get_music_location()\n dst = get_music_location(host)\n rsync(src, dst, delete=delete)",
"def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)",
"def push():\n # test()\n # TODO - need to improve image detection algorithm. maybe need variable threshold or configurable\n local('git add .;git commit; git push;')\n with settings(user=\"pi\", host_string=\"raspberry.pi\"):\n with cd('/home/pi/src/rpi'):\n run('git pull')",
"def repo_push(self):\n\n if self.clowder_repo is None:\n exit_clowder_not_found()\n\n if is_offline():\n print(fmt.offline_error())\n sys.exit(1)\n\n self.clowder_repo.print_status(fetch=True)\n self.clowder_repo.push()",
"def push(args):\n if args.type == 'ssh':\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if os.path.exists(os.path.join(args.base, path)) and not remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('push: {}'.format(path))\n ensure_remote(args.sftp, os.path.dirname(os.path.join(args.remote_base, path)))\n args.sftp.put(\n os.path.join(args.base, path),\n os.path.join(args.remote_base, path)\n )\n args.remote_cache.append(path)\n args.remote_update = True\n elif args.type == 's3':\n raise NotImplementedError('s3:// remote type not yet supported!')\n elif args.type == 'gs':\n raise NotImplementedError('gs:// remote type not yet supported!')\n return",
"def push(self, token):\r\n self._pushed.append(token)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build a new Bento from current directory.
|
def build(build_ctx: str, bentofile: str, version: str) -> None: # type: ignore (not accessed)
if sys.path[0] != build_ctx:
sys.path.insert(0, build_ctx)
build_bentofile(bentofile, build_ctx=build_ctx, version=version)
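# Editor's note (assumption about intent): prepending build_ctx to sys.path presumably
# lets build_bentofile import the user's service module from the build directory; the
# snippet also assumes `sys`, `build_bentofile`, and the click-decorated parameters are
# provided by the surrounding BentoML CLI module it was taken from.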
|
[
"def newproject(self):\n \n self.path = os.path.join(self.base, self.name)\n subpath = os.path.join(self.path, self.lowname)\n check_build_path(subpath)\n \n for filename, content in self.files.items():\n self.buildfile(filename, content, self.path)\n\n script = open(SCRIPT, 'r').read().format(self.lowname)\n self.buildfile('{0}.py'.format(self.lowname), script, subpath) \n self.buildfile('__init__.py', '', subpath)\n \n #optionals\n if self.git:\n self.buildfile('.gitignore', '*.pyc', self.path)\n if self.db:\n datapath = os.path.join(self.path, 'data')\n os.makedirs(datapath)\n copydb = os.path.join(datapath, '{0}.db'.format(self.lowname))\n copy = subprocess.call(['cp', DATA, \"%s\" % copydb])\n if self.test:\n testpath = os.path.join(self.path, 'tests')\n os.makedirs(testpath)\n self.buildfile('__init__.py', '', testpath)",
"def makeProject(self, version, baseDirectory=None):\n if baseDirectory is None:\n baseDirectory = FilePath(self.mktemp())\n baseDirectory.createDirectory()\n segments = version.package.split('.')\n directory = baseDirectory\n for segment in segments:\n directory = directory.child(segment)\n if not directory.exists():\n directory.createDirectory()\n directory.child('__init__.py').setContent('')\n directory.child('topfiles').createDirectory()\n directory.child('topfiles').child('README').setContent(version.base())\n replaceProjectVersion(\n directory.child('_version.py').path, version)\n return Project(directory)",
"def _create_checkout(self):\n parent_git_dir = os.path.join(self._parent_repo, self._run_git_command(\n ['rev-parse', '--git-dir']).strip())\n self._workdir = tempfile.mkdtemp(prefix='drover_%s_' % self._branch)\n logging.debug('Creating checkout in %s', self._workdir)\n git_dir = os.path.join(self._workdir, '.git')\n git_common.make_workdir_common(parent_git_dir, git_dir, self.FILES_TO_LINK,\n self.FILES_TO_COPY, mk_symlink)\n self._run_git_command(['config', 'core.sparsecheckout', 'true'])\n with open(os.path.join(git_dir, 'info', 'sparse-checkout'), 'w') as f:\n f.write('/codereview.settings')\n\n branch_name = os.path.split(self._workdir)[-1]\n self._run_git_command(['checkout', '-b', branch_name, self._branch_ref])\n self._branch_name = branch_name",
"def build(self, workingDir):\n master = BuildMaster(workingDir)\n master(self)\n return",
"def _build(self):\n\n repo_dirs = [ \n self.config['TMP_FOLDER'],\n self.config['TRASH_FOLDER'],\n self.config['DRAFTS_FOLDER'],\n self.config['DRAFTS_METADATA_FOLDER'],\n self.config['DRAFTS_DATA_FOLDER'],\n self.config['DATASETS_FOLDER'],\n ]\n\n for rd in repo_dirs:\n if not os.path.exists(rd):\n os.makedirs(rd)",
"def build():\n if 'build_path' in env:\n with lcd(env.build_path):\n yield\n return\n\n env.author = scm_get_repo_author()\n env.dev_path = scm_get_repo_root()\n env.build_path = mkdtemp() + '/build'\n scm_clone_repo(env.dev_path, env.build_path)\n try:\n with lcd(env.build_path):\n local(\"python bootstrap.py\")\n local(\"./bin/buildout\")\n local(\"./bin/python setup.py sdist\")\n _check_release()\n yield\n finally:\n rmtree(env.build_path)",
"def from_cwd(cls) -> Content:\n repo = cls.git()\n if repo:\n content = Content(repo.working_tree_dir) # type: ignore\n else:\n content = Content(Path.cwd())\n\n return content",
"def bootstrap(self, skeleton=None):\n\t\t\n\t\tskeletonArchive = skeletonFile = None\n\t\tif skeleton is None:\n\t\t\tfrom .skeleton import data\n\t\t\tlogging.info(\"Building from data\")\n\t\t\ttemp = tempfile.NamedTemporaryFile(delete=False, suffix='.tar.gz')\n\t\t\ttemp.write(base64.b64decode(data))\n\t\t\ttemp.close()\n\t\t\tskeletonArchive = tarfile.open(name=temp.name, mode='r')\n\t\telif os.path.isfile(skeleton):\n\t\t\tskeletonFile = skeleton\n\t\telse: \n\t\t\t# Assume it's a URL\n\t\t\tskeletonFile, headers = urllib.urlretrieve(skeleton)\n\n\t\tif skeletonFile:\n\t\t\tif tarfile.is_tarfile(skeletonFile):\n\t\t\t\tskeletonArchive = tarfile.open(name=skeletonFile, mode='r')\n\t\t\telif zipfile.is_zipfile(skeletonFile):\n\t\t\t\tskeletonArchive = zipfile.ZipFile(skeletonFile)\n\t\t\telse:\n\t\t\t\tlogging.error(\"File %s is an unknown file archive type. At this time, skeleton argument must be a directory, a zipfile, or a tarball.\" % skeletonFile)\n\t\t\t\tsys.exit()\n\n\t\tif skeletonArchive:\n\t\t\tos.mkdir(self.path)\n\t\t\tskeletonArchive.extractall(path=self.path)\n\t\t\tskeletonArchive.close()\n\t\t\tlogging.info('New project generated at %s', self.path)\n\t\telif os.path.isdir(skeleton):\n\t\t\tshutil.copytree(skeleton, self.path)\n\t\t\tlogging.info('New project generated at %s', self.path)\n\t\telse:\n\t\t\tlogging.error(\"Cannot process skeleton '%s'. At this time, skeleton argument must be a directory, a zipfile, or a tarball.\" % skeleton)",
"def temp(cls, basedir=None):\r\n context = cls(BuildFile(get_buildroot(), basedir or 'BUILD.temp', must_exist=False))\r\n with cls.activate(context):\r\n yield",
"def build(self):\n self.cuisine.development.golang.get(\"github.com/gowncloud/gowncloud\")\n self._cuisine.core.file_copy(\"$goDir/bin/gowncloud\", \"$binDir\")",
"def source_build(args, l, st, rc):\n\n from ambry.identity import Identity\n from ..source.repository import new_repository\n\n repo = new_repository(rc.sourcerepo(args.name))\n\n dir_ = None\n name = None\n\n if args.dir:\n if os.path.exists(args.dir):\n dir_ = args.dir\n name = None\n else:\n name = args.dir\n try:\n Identity.parse_name(name)\n except:\n fatal(\"Argument '{}' must be either a bundle name or a directory\".format(name))\n return\n\n if not dir_:\n dir_ = rc.sourcerepo.dir\n\n def build(bundle_dir):\n from ambry.library import new_library\n\n # Import the bundle file from the directory\n\n bundle_class = load_bundle(bundle_dir)\n bundle = bundle_class(bundle_dir)\n\n l = new_library(rc.library(args.library_name))\n\n if l.get(bundle.identity.vid) and not args.force:\n prt(\"{} Bundle is already in library\", bundle.identity.name)\n return\n elif bundle.is_built and not args.force and not args.clean:\n prt(\"{} Bundle is already built\", bundle.identity.name)\n return\n else:\n\n if args.dryrun:\n prt(\"{} Would build but in dry run \", bundle.identity.name)\n return\n\n repo.bundle = bundle\n\n if args.clean:\n bundle.clean()\n\n # Re-create after cleaning is important for something ...\n\n bundle = bundle_class(bundle_dir)\n\n prt(\"{} Building \", bundle.identity.name)\n\n if not bundle.run_prepare():\n fatal(\"{} Prepare failed\", bundle.identity.name)\n\n if not bundle.run_build():\n fatal(\"{} Build failed\", bundle.identity.name)\n\n if args.install and not args.dryrun:\n if not bundle.run_install(force=True):\n fatal('{} Install failed', bundle.identity.name)\n\n build_dirs = {}\n\n # Find all of the dependencies for the named bundle, and make those first.\n for root, _, files in os.walk(rc.sourcerepo.dir):\n if 'bundle.yaml' in files:\n bundle_class = load_bundle(root)\n bundle = bundle_class(root)\n build_dirs[bundle.identity.name] = root\n\n if name:\n deps = repo.bundle_deps(name)\n deps.append(name)\n\n else:\n\n deps = []\n\n # Walk the subdirectory for the files to build, and\n # add all of their dependencies\n for root, _, files in os.walk(dir_):\n if 'bundle.yaml' in files:\n\n bundle_class = load_bundle(root)\n bundle = bundle_class(root)\n\n for dep in repo.bundle_deps(bundle.identity.name):\n if dep not in deps:\n deps.append(dep)\n\n deps.append(bundle.identity.name)\n\n for n in deps:\n try:\n dir_ = build_dirs[n]\n except KeyError:\n fatal(\"Failed to find directory for bundle {}\".format(n))\n\n prt('')\n prt(\"{} Building in {}\".format(n, dir_))\n build(dir_)",
"def copy_skeleton(self):\n fabric_op.local('cp -R project %s' % self.path, capture=self.capture)",
"def createDirectories(self):\n make_directory(self.config.top_working_directory)\n make_directory(self.config.todo)\n make_directory(self.config.done)\n make_directory(self.config.failed)\n make_directory(self.config.running)\n make_directory(self.config.archive.top_working_directory)\n make_directory(self.config.archive.todo)\n make_directory(self.config.archive.failed)\n make_directory(self.config.archive.done)\n os.chdir(self.config.todo)\n log(INFO,'Working directory:'+os.getcwd())\n return self",
"def copy_skeleton(self):\n super(GAEProjectCreator, self).copy_skeleton()\n fabric_op.local(\n 'cp -f project_gae/* %s' % self.path, \n capture=self.capture)",
"def build(working_directory=None, args=None):\n from .buildme import main\n if args is None:\n args = []\n return main(working_directory, args)",
"def Build(self, out_file):\n raise NotImplementedError",
"def build(self, conanfile):\n app = ConanApp(self._conan_api.cache_folder)\n conanfile.folders.set_base_package(conanfile.folders.base_build)\n conanfile.folders.set_base_pkg_metadata(os.path.join(conanfile.build_folder, \"metadata\"))\n run_build_method(conanfile, app.hook_manager)",
"def _create_builder(self, tmp_dir):\n return cifuzz.InternalGithubBuilder(self.PROJECT_NAME,\n self.PROJECT_REPO_NAME, tmp_dir,\n self.SANITIZER, self.COMMIT_SHA,\n self.PR_REF)",
"def build_it(swagger_file: str):\n load_stable(swagger_file)\n prep_package(model_package)\n write_modules(model_package)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the value. (And calls the base class) This will also check for Options to set the bools. FAULTS_ACTIVE FAULTS_CURRENT >>> BIT_FAULT_PROBE = 0 >>> BIT_FAULT_OVERTEMP = 1 >>> BIT_FAULT_PANEL_OPEN = 2 >>> BIT_FAULT_HIGH_VOLTAGE = 3 >>> BIT_FAULT_RAM_CRC = 4 >>> BIT_FAULT_EEPROM_CRC = 5 >>> BIT_FAULT_GPIO_ERROR = 6 >>> BIT_FAULT_LTFAULT_ERROR = 7 >>> BIT_FAULT_TRIGGER_ERROR = 8 >>> BIT_FAULT_HARDWARE_EXC = 9 >>> BIT_FAULT_TRIGGER_GLITCH = 10 >>> BIT_FAULT_OVERVOLTAGE = 11 >>> BIT_FAULT_TEMP_SENSOR = 12
|
def set_value(self, item, value):
super(t_16_Bit_Options, self).set_value(item, value)
if(item == t_16_Bit_Options.FAULT_ACTIVE):
self.set_bools(value, self.faults_current, t_16_Bit_Options.BIT_FAULT_MAX )
if(item == t_16_Bit_Options.FAULT_LATCHED):
self.set_bools(value, self.faults_latched, t_16_Bit_Options.BIT_FAULT_MAX )
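# Editor's sketch (assumption): set_bools is not shown in this snippet; a minimal
# implementation consistent with the call sites above would unpack the raw register
# value into one boolean per bit position, covering BIT_FAULT_PROBE (0) through
# BIT_FAULT_TEMP_SENSOR (12) from the docstring. Name and signature are guesses,
# not the original library's API.
def set_bools_sketch(value, bools, bit_max):
    for bit in range(bit_max):
        flag = bool((value >> bit) & 1)  # test bit `bit` of the raw fault register
        if bit < len(bools):
            bools[bit] = flag            # update in place if the slot already exists
        else:
            bools.append(flag)           # otherwise grow the list up to bit_max entries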
|
[
"def set(self, value): # interface for BlueSky plans\n if str(value).lower() not in (\"fly\", \"taxi\", \"return\"):\n msg = \"value should be either Taxi, Fly, or Return.\"\n msg + \" received \" + str(value)\n raise ValueError(msg)\n\n if self.busy.value:\n raise RuntimeError(\"spin is operating\")\n\n status = DeviceStatus(self)\n \n def action():\n \"\"\"the real action of ``set()`` is here\"\"\"\n if str(value).lower() == \"taxi\":\n self.taxi()\n elif str(value).lower() == \"fly\":\n self.pre_fly()\n self.fly()\n self.post_fly()\n elif str(value).lower() == \"return\":\n self.motor.move(self.return_position)\n\n def run_and_wait():\n \"\"\"handle the ``action()`` in a thread\"\"\"\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)\n \n threading.Thread(target=run_and_wait, daemon=True).start()\n return status",
"def set(self, value): # interface for BlueSky plans\n if str(value).lower() not in (\"fly\", \"taxi\"):\n msg = \"value should be either Taxi or Fly.\"\n msg + \" received \" + str(value)\n raise ValueError(msg)\n\n if self.busy.value:\n raise RuntimeError(\"shutter is operating\")\n\n status = DeviceStatus(self)\n \n def action():\n \"\"\"the real action of ``set()`` is here\"\"\"\n if str(value).lower() == \"taxi\":\n self.taxi()\n elif str(value).lower() == \"fly\":\n self.fly()\n\n def run_and_wait():\n \"\"\"handle the ``action()`` in a thread\"\"\"\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)\n \n threading.Thread(target=run_and_delay, daemon=True).start()\n return status",
"def set_value(self, item, value):\n super(t_8_Bit_Options, self).set_value(item, value)\n\n if(item == t_8_Bit_Options.BOOLEAN_CONFIG_1):\n self.set_bools(value, self.bools, t_8_Bit_Options.BIT_MAX)",
"def set_overflow_status(self, value):\n TikCheckUtil.check_equality(\n get_soc_name(), ASCEND_910,\n \"this api doesn't support version: %s\" % get_soc_name())\n TikCheckUtil.check_type_match(\n value, int, \"value should be Int, \"\n \"invalid type: {}\".format(type(value)))\n TikCheckUtil.check_in_range(\n value, range(_MAX_OVERFLOW_STATUS),\n \"value should be 0 or 1, invalid value: {}\".format(value))\n with self.new_scope():\n self.emit(\n tvm.call_extern(\"uint64_t\", \"set_overflow\",\n type_convert(Expr(value, dtype=\"uint64\"))),\n ONE_IR)",
"def fault_debug(value: bool = False) -> None:",
"def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)",
"def set_value(self, value):\n try:\n self.type_is_correct(value)\n if self.type == INT:\n self.value = (int(value))\n if self.type == FLOAT:\n self.value = (float(value))\n elif self.type == BOOL:\n self.value = strtobool(value)\n else:\n self.value = value\n except ValueError:\n print('*** Error while setting the value for parameter \"' +\n self.name + '\"\" : ' + self.type + ' expected, ' +\n 'but value was \"' + value + '\"')\n raise ValueError",
"def _setEnumFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureEnumSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def _setIntFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureIntSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def setFlag(self, flag, value) -> None:\n ...",
"def _setBoolFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureBoolSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def fset(self, value):\n message = \"Overriding a constant value is an illegal operation: {0} = {1}.\".format(\n name.__name__,\n value)\n raise TypeError(message)",
"def flags(self, value):\r\n dll.kvaDbSetMsgFlags(self._handle, value)",
"def set_security_state(self, value):\n _LOGGER.debug(\"%s: Set security state to %d\", self.entity_id, value)\n hass_value = HOMEKIT_TO_HASS[value]\n service = STATE_TO_SERVICE[hass_value]\n\n params = {ATTR_ENTITY_ID: self.entity_id}\n if self._alarm_code:\n params[ATTR_CODE] = self._alarm_code\n self.async_call_service(DOMAIN, service, params)",
"def set_bombpln(self, value: bool):\r\n self.__data[1] = value",
"def flags(self, value):\r\n dll.kvaDbSetFlags(self._handle, value)",
"def test_set_light_intensity_with_rejected_answer(self):\n comm = Mock()\n comm.blocking_call.return_value=call_result.ChangeConfigurationPayload(status='Rejected')\n socket = MockSocket()\n cpp = ChargePointProxy(\"CP1\", socket, comm)\n with self.assertRaises(err.ChangeConfigurationException) as e:\n cpp.light_intensity = 90",
"def setValue(self, value):\n validValue = self.getValidValue(value, self.currentAmount, self.empireDict['CR'])\n self.amountBar.setMyValues(validValue, self.empireDict['CR'])\n self.currentAmount = validValue\n color = globals.resourceColors['CR']\n self.amountBar.setColor(globals.colors[color])\n if validValue > 0:\n self.enableButton('S')\n else:\n self.disableButton('S')",
"def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the value. (And calls the base class) This will also check for Options to set the bools. BOOLEAN_CONFIG_1 >>> BIT_PROBE_TERMINATION = 0 >>> BIT_TMODE = 1 >>> BIT_EMODE = 2 >>> BIT_MUTE = 3 >>> BIT_PATTERN_TRIGGER = 4 >>> BIT_DEBUG_REALTIME = 5 >>> BIT_DEBUGPRINT = 6 >>> BIT_DEBUG_HW_OVERRIDE = 7
|
def set_value(self, item, value):
super(t_8_Bit_Options, self).set_value(item, value)
if(item == t_8_Bit_Options.BOOLEAN_CONFIG_1):
self.set_bools(value, self.bools, t_8_Bit_Options.BIT_MAX)
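# Worked example (editor's addition, plain bit arithmetic only): decoding one raw
# BOOLEAN_CONFIG_1 byte using the bit positions listed in the docstring above.
BIT_MUTE = 3
BIT_DEBUGPRINT = 6
raw = 0b01001000                                      # hypothetical register value (72)
mute_enabled = bool((raw >> BIT_MUTE) & 1)            # True  -> bit 3 is set
debug_print_on = bool((raw >> BIT_DEBUGPRINT) & 1)    # True  -> bit 6 is set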
|
[
"def setbool(self, strcommand, value):\n command = ct.c_wchar_p(strcommand)\n value = ct.c_bool(value)\n self.lib.AT_SetBool(self.AT_H, command, value)",
"def set_bool_node_value(node_name,value):\n\n\timport Mgmt\n code, msg = Mgmt.set((node_name,'bool', value))\n return code,msg",
"def set(self, value=True):\n print(\"GPIO {} set to {}\".format(self.name, '1' if value else '0'))",
"def setValue(self, trueOrFalse):\r\n self.value = trueOrFalse",
"def set_value(self, value):\n try:\n self.type_is_correct(value)\n if self.type == INT:\n self.value = (int(value))\n if self.type == FLOAT:\n self.value = (float(value))\n elif self.type == BOOL:\n self.value = strtobool(value)\n else:\n self.value = value\n except ValueError:\n print('*** Error while setting the value for parameter \"' +\n self.name + '\"\" : ' + self.type + ' expected, ' +\n 'but value was \"' + value + '\"')\n raise ValueError",
"def setBoolValue(self, *args):\n return _libsbml.ConversionOption_setBoolValue(self, *args)",
"def setBooleanOption(self, option, value):\n result = self.__lib.voikkoSetBooleanOption(self.__handle, option, _boolToInt(value))\n if result == 0:\n raise VoikkoException(\"Could not set boolean option %s to value %s\" % (option, value))",
"def _setBoolFeature(self, valueToSet):\n\n errorCode = VimbaDLL.featureBoolSet(self._handle,\n self._name,\n valueToSet)\n if errorCode != 0:\n raise VimbaException(errorCode)",
"def SetValue(self, truthValue):\n callResult = self._Call(\"SetValue\", truthValue)",
"def set_value(self, item, value):\n super(t_16_Bit_Options, self).set_value(item, value)\n\n if(item == t_16_Bit_Options.FAULT_ACTIVE):\n self.set_bools(value, self.faults_current, t_16_Bit_Options.BIT_FAULT_MAX )\n\n if(item == t_16_Bit_Options.FAULT_LATCHED):\n self.set_bools(value, self.faults_latched, t_16_Bit_Options.BIT_FAULT_MAX )",
"def set_gateway(self, bool_value):\n self.chkbtn_gateway.set(bool_value)",
"def set(self, attr, value=True):\n if type(value) == bool:\n self.__dict__['_'+attr] = value\n print attr, \"set to\", value\n else:\n print 'Value must be a bool, either \"True\" or \"False\" (no quotes)!'",
"def setBit(self,i,boolval):\n self.boolVals[i]=boolval",
"def setBooleanType(self, booleanType: cern.japc.value.BooleanType) -> None:\n ...",
"def set_flag(self, flag, value):\n self.engine.set_flag(flag, value)",
"def gguf_set_val_bool(ctx: ffi.CData, key: ffi.CData, val: bool) -> None:\n ...",
"def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)",
"def writeBoolean(self, value: bool):\n self.writeByte(1 if value else 0)",
"def set_bombpln(self, value: bool):\r\n self.__data[1] = value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Builds a command packet
|
def build_command_packet(self, command):
packet = bytearray()
# All option fields are 0
packet.append(0)
packet.append(0)
packet.append(0)
packet.append(command)
return packet
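# Usage sketch (editor's addition; 0x01 is a made-up command code): the method always
# returns a 4-byte packet -- three zeroed option bytes followed by the command byte.
#   pkt = controller.build_command_packet(0x01)   # `controller` is hypothetical
#   assert pkt == bytearray(b'\x00\x00\x00\x01')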
|
[
"def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardware_address}</HardwareAddress></DeviceDetails>\".format(hardware_address=hardware_address)\n\n if comp_var_dict is not None:\n comp_keys = comp_var_dict.keys()\n if len(comp_keys) > 0:\n for comp_key in comp_keys:\n # Build requested variable list\n command += \"<Components><Component><Name>{comp_key}</Name><Variables>\".format(comp_key=comp_key)\n variables = comp_var_dict[comp_key]\n for var in variables:\n command += \"<Variable><Name>{var}</Name></Variable>\".format(var=var)\n command += \"</Variables></Component></Components>\"\n else:\n # Request all variables from all components\n command += \"<Components><All>Y</All></Components>\"\n\n # Close command\n command += \"</Command>\"\n \n return command",
"def build_message(cmd, data):\n\n if len(data) > MAX_DATA_LENGTH or len(cmd) > 16:\n return ERROR_RETURN\n command_part = cmd + \" \" * (16 - len(cmd))\n length_of_data = str(len(data))\n # length_part = \"0\" * (4 - len(length_of_data)) + length_of_data\n length_part = length_of_data.zfill(4)\n\n return command_part + \"|\" + length_part + \"|\" + data",
"def build_command(self, dct):\n pass",
"def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE",
"def _build_send_optode_command(self, cmd, command):\n return \"%s=%s%s\" % (cmd, command, self._newline)",
"def build(self, command, payload=None):\r\n p = [ord(x) for x in prefix]\r\n p.append(splitter[0])\r\n if isinstance(command, tuple):\r\n p += AAE_COMMAND[command[0]][0]\r\n payload = command[1]\r\n else:\r\n p += AAE_COMMAND[command][0]\r\n p.append(splitter[1])\r\n if payload is None:\r\n p += [0, splitter[3]]\r\n elif not isinstance(payload, list):\r\n p += [1, splitter[2], payload, splitter[3]]\r\n else:\r\n p += [len(payload), splitter[2]]\r\n p += payload\r\n p += [splitter[3]]\r\n p.append(self.checksum(p))\r\n return p",
"def buildCmd( tcmpCmd, cmd, target, sequence, fieldList):\n cmdList = [tcmpCmd, cmd, target, sequence, fieldList]\n\n return \"<{cmd}>\".format(cmd=\":\".join(cmdList))",
"def __create_cmd(self, msg_type, content):\n cmd = bytearray()\n content_len = 0\n\n cmd.append(0x02) #STX\n # Message Header\n cmd.append(0x00) #DID, Destination ID\n cmd.append(0x00) #SID, SourceID\n cmd.append(0x00) #MTI, Message Type Indicator\n if content is not None:\n content_len = len(content) + 6 # add ETX & LRC\n else:\n content_len = 6 # add ETX & LRC\n msg_len = int(content_len//256)\n cmd.extend(msg_len.to_bytes(1, byteorder='big')) #MSB Len\n msg_len = int(content_len % 256)\n cmd.extend(msg_len.to_bytes(1, byteorder='big')) #LSB Len\n\n # Message Info\n cmd.append(0x01) # Type, 01-command, 02-response\n cmd.append(msg_type) # ID\n cmd.append(0x00) # P1\n cmd.append(0x00) # P2\n if content is not None:\n content_len = len(content)\n else:\n content_len = 0 # add ETX & LRC\n msg_len = int(content_len // 256)\n cmd.extend(msg_len.to_bytes(1, byteorder='big')) # MSB Len\n msg_len = int(content_len % 256)\n cmd.extend(msg_len.to_bytes(1, byteorder='big')) # LSB Len\n if content is not None:\n cmd.extend(content)\n cmd.append(0x03) # ETX\n msg_len = int(self.__calc_lrc(cmd[1:]))\n cmd.extend(msg_len.to_bytes(1, byteorder='big'))\n print(cmd)\n return cmd",
"def init_cmd( cmd_num=0):\n if cmd_num in [12,16,2,4,9,10,13,17,18,24]:\n log.warning(\"Command %d is not supported on SDIO, sending anyway but what are you doing?!\" %cmd_num)\n\n cmd = BinaryValue(bits=48,bigEndian=False)\n cmd[47] = 0 # Start value\n cmd[46] = 1 # Direction , 1 = towards device, 0 = towards host\n cmd[45:40] = BinaryValue(value=cmd_num, bits=6, bigEndian=False).integer\n cmd[0] = 1 # Stop bit\n return cmd",
"def build_message(cmd, data):\r\n\tif len(cmd) > CMD_FIELD_LENGTH or len(data) > MAX_DATA_LENGTH:\r\n\t\treturn None\r\n\tfull_cmd = cmd + \" \"*(CMD_FIELD_LENGTH-len(cmd))\r\n\tdata_len = str(len(data))\r\n\tfull_data_len = \"0\"*(LENGTH_FIELD_LENGTH-len(data_len))+data_len\r\n\tfull_msg = DELIMITER.join([full_cmd, full_data_len, data])\r\n\treturn full_msg",
"def generateCommand(self, mode, cmdType, sessionTime, payload, pinCode):\n self.encodedSessionTime = self.getFourByteInt(0, self.encodeFourByteInt(sessionTime))\n commandPayloadLength = 16 + len(payload)\n commandPayload = bytearray()\n commandPayload.append(mode & 0xFF) # Start of payload is 36\n commandPayload.append(cmdType & 0xFF) # The Command bytes\n commandPayload = commandPayload + self.encodeTwoByteInt(commandPayloadLength)\n commandPayload = commandPayload + self.encodeFourByteInt(sessionTime)\n commandPayload = commandPayload + self.encodeTwoByteInt(pinCode)\n commandPayload.append(0) # Nothing\n commandPayload.append(0) # Nothing\n if len(payload) > 0:\n commandPayload = commandPayload + payload\n # Generating the Checksum & End of payload\n checkSumIndex = 0\n checkSum = 0\n while checkSumIndex < (commandPayloadLength - 4):\n checkSum += commandPayload[checkSumIndex] & 0xFF\n checkSumIndex += 1\n commandPayload.append(((checkSum ^ -1) >> 8) & 0xFF)\n commandPayload.append(((checkSum ^ -1) >> 0) & 0xFF)\n commandPayload.append(13)\n commandPayload.append(10)\n return commandPayload",
"def _build_packet(self, address, write):\n data = b''\n\n # destination_id - must be 0\n data += b'\\x00\\x00'\n\n # tl - transaction label\n data += b'\\xf8'\n\n # tcode/pri - must be 0\n data += b'\\x00'\n\n # source_id\n data += b'\\x00\\x00'\n\n # destination offset\n d_off = bytearray.fromhex(address)\n if len(d_off) != 6:\n raise PACException(\n \"Destination offset data did not have length 6 : {0}\"\n .format(d_off))\n data += d_off\n\n # quadlet data\n quad_data = bytearray.fromhex(write)\n if len(quad_data) != 4:\n raise PACException(\n \"Quadlet data did not have length 4 : {0}\".format(quad_data))\n data += quad_data\n\n return data",
"def __binaryCmdRecord(self, cmd_obj):\n\n def __time_tag(cmd_obj):\n \"\"\"\n TODO: support a timebase in the cmd obj? This is mission specific, so it is tough to handle. For now\n I am hardcoding this to 2 which is TB_NONE\n \"\"\"\n # return TimeType(timeBase=2, seconds=cmd_obj.getSeconds(), useconds=cmd_obj.getUseconds()).serialize()\n # TKC - new command time format\n return (\n U32Type(cmd_obj.getSeconds()).serialize()\n + U32Type(cmd_obj.getUseconds()).serialize()\n )\n\n def __descriptor(cmd_obj):\n # subtract 1 from the value because enum34 enums start at 1, and this can't be changed\n return U8Type(cmd_obj.getDescriptor().value - 1).serialize()\n\n def __command(cmd_obj):\n self.desc_obj.val = DataDescType[\"FW_PACKET_COMMAND\"].value\n self.opcode_obj.val = cmd_obj.getOpCode()\n command = self.desc_obj.serialize() # serialize combuffer type enum: FW_PACKET_COMMAND\n command += self.opcode_obj.serialize() # serialize opcode\n # Command arguments\n for arg in cmd_obj.getArgs():\n command += arg[2].serialize()\n return command\n\n def __length(command):\n self.len_obj.val = len(command)\n return self.len_obj.serialize()\n\n def __print(byteBuffer):\n print(\"Byte buffer size: %d\" % len(byteBuffer))\n for entry in range(0, len(byteBuffer)):\n print(\n \"Byte %d: 0x%02X (%c)\"\n % (\n entry,\n struct.unpack(\"B\", byteBuffer[entry])[0],\n struct.unpack(\"B\", byteBuffer[entry])[0],\n )\n )\n\n # This is no longer in the sequence file format.\n # def __checksum(data):\n # csum = 0\n # for entry in range(0,len(data)):\n # byte = struct.unpack(\"B\",data[entry])[0]\n # csum += byte\n # return U64Type(long(csum)).serialize()\n\n # Form header:\n descriptor = __descriptor(cmd_obj)\n time = __time_tag(cmd_obj)\n header = descriptor + time\n\n # Command opcode:\n command = __command(cmd_obj)\n\n # Command length:\n length = __length(command)\n\n # Checksum:\n # This is no longer in the sequence file format.\n # checksum = __checksum(header + length + command)\n\n # Debug printing (comment out when not debugging):\n # print \"descriptor:\"\n # __print(descriptor)\n # print \"time:\"\n # __print(time)\n # print \"length:\"\n # __print(length)\n # print \"command:\"\n # __print(command)\n # print \"total record:\"\n # __print(header + checksum + length + command)\n\n # Construct the record:\n return header + length + command",
"def _generate_message(self, cmd, addr, port, version = None):\r\n\r\n # Set default version of needed. Sometime need version 0 so check None.\r\n if version is None:\r\n version = self.version\r\n\r\n # Create message.\r\n msg = pack(\"!BBH\", version, cmd, port)\r\n msg += socket.inet_aton(addr)\r\n\r\n return msg",
"def build_command_depricated(device_dict, command_tuple):\n command = \" \" # The final command which should be send in the end\n return_list = [] # Is list of commands which can be returned if need be\n only_command = False # Flag if only a command was passed, important if such a command doesnt need syntax!\n\n if (\n type(command_tuple) == type(u\"Unicode\")\n or type(command_tuple) == str\n or type(command_tuple) == float\n or type(command_tuple) == int\n ):\n command_tuple = (str(command_tuple), \"\") # so only tuple are now prevelent\n only_command = True\n elif type(command_tuple[1]) == list:\n command_tuple = (\n command_tuple[0],\n [str(x) for x in command_tuple[1]],\n ) # so no unicode is present\n\n # Preparations\n # look for a syntax (paranteses and so on)\n if \"syntax\" in device_dict:\n syntax = str(device_dict[\"syntax\"])\n syntax = syntax.split(\"###\")\n if not syntax[0]:\n syntax = [\"\", \"\"] # Most devices have no paranteses or whatsoever\n else:\n syntax = [\"\", \"\"] # Most devices have no paranteses or whatsoever\n\n # Looks if a separator is needed to sepatare mulitple orders\n if \"separator\" in device_dict:\n sepa = str(device_dict[\"separator\"])\n else:\n sepa = \" \" # This should be the standard for most devices\n\n if command_tuple[0] in device_dict:\n # here all the magic happens\n # First look if the order is swichted or not (command value, or value command)\n\n # Check if multiple commands so list or so\n if type(device_dict[command_tuple[0]]) == str or type(\n device_dict[command_tuple[0]]\n ) == type(u\"Unicode\"):\n command_list = [device_dict[command_tuple[0]]]\n else:\n command_list = device_dict[command_tuple[0]]\n\n for command_item in command_list:\n command_item = str(command_item)\n command = \"\"\n\n # Value -> Command\n if int(device_dict.get(\"command_order\", 1)) == -1:\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\n \"_\"\n ) # finds the index of the command, to search for\n if (\n \"CSV\" + command_tuple[0][start_ind:] in device_dict\n ): # looks if an actual csv-command is there\n # Todo: test CSV command\n csv_commands = device_dict[\n \"CSV\" + str(command_tuple[0])[start_ind:]\n ]\n csv_commands = (\n csv_commands.strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n ) # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\n \",\"\n ) # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n value_list = command_tuple[1]\n elif type(command_tuple[1]) == str or type(command_tuple) == type(\n u\"Unicode\"\n ):\n value_list = (\n command_tuple[1]\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n .replace(\" \", \"\")\n )\n value_list = value_list.split(\",\")\n\n csv_list = (\n \",\".join(map(str, value_list))\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n )\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa\n\n if i + 1 < len(csv_commands) and len(csv_commands) > 1:\n for j in range(\n i + 1, len(csv_commands)\n ): # Fill the rest of the missing paramters\n l.error(\n \"Warning: Not enough 
parameters passed for function: \"\n + str(command_item)\n + \" the command must consist of \"\n + str(csv_commands)\n + \" '\"\n + str(csv_commands[j])\n + \"' is missing! Inserted 0 instead.\"\n )\n command += \"0\" + sepa\n\n command = command.strip(\" \").strip(\",\") # to get rid of last comma\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n string = \"\"\n for item in command_tuple[1]:\n command = syntax[1] + str(item) + \" \" + command_item\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If only a command was passed\n string = str(command_tuple[1])\n command += syntax[1] + str(string).strip()\n\n if (\n only_command\n and device_dict.get(\"no_syntax_with_single_commmand\", False)\n and syntax[1] != \" \"\n and syntax[0] != \" \"\n ):\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n # command += \" \" + str(device_dict[str(command_item)]).strip() + syntax[0] # adds the order to the command\n command += (\n \" \" + str(command_item).strip() + syntax[0]\n ) # adds the order to the command\n # Add a command terminator if one is needed and the last part of the syntax\n command = command.strip()\n command += device_dict.get(\"execution_terminator\", \"\")\n # command += syntax[0] # adds the order to the command\n return_list.append(command)\n\n # Command -> Value\n else:\n command += (\n str(command_item).strip() + \" \" + syntax[0]\n ) # adds the order to the command\n\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\n \"_\"\n ) # finds the index of the command, to search for\n if (\n \"CSV\" + command_tuple[0][start_ind:] in device_dict\n ): # looks if an actual csv-command is there\n # Todo: test CSV command\n csv_commands = device_dict[\n \"CSV\" + str(command_tuple[0])[start_ind:]\n ]\n csv_commands = (\n csv_commands.strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n ) # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\n \",\"\n ) # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n value_list = command_tuple[1]\n elif type(command_tuple[1]) == str or type(command_tuple) == type(\n u\"Unicode\"\n ):\n value_list = (\n command_tuple[1]\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n .replace(\" \", \"\")\n )\n value_list = value_list.split(\",\")\n\n csv_list = (\n \",\".join(map(str, value_list))\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n )\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa + \" \"\n\n if i + 1 < len(csv_commands) and len(csv_commands) > 1:\n for j in range(\n i + 1, len(csv_commands)\n ): # Fill the rest of the missing paramters\n l.warning(\n \"Not enough parameters passed for function: \"\n + 
str(command_tuple[0])\n + \" the command must consist of \"\n + str(csv_commands)\n + \" '\"\n + str(csv_commands[j])\n + \"' is missing! Inserted 0 instead.\"\n )\n command += \" \" + \"0\" + sepa\n\n command = command.strip(\" \").strip(\n \",\"\n ) # to get rid of last comma and space at the end if csv\n command += syntax[1]\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n string = \"\"\n for item in command_tuple[1]:\n command = str(item) + \" \" + command_item + syntax[1]\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If its just one value or no value\n string = str(command_tuple[1])\n command += string.strip() + syntax[1]\n command = command.strip()\n\n if (\n only_command\n and device_dict.get(\"no_syntax_with_single_commmand\", False)\n and syntax[1] != \" \"\n and syntax[0] != \" \"\n ):\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command.strip())\n else:\n # If the command is not found in the device only command tuple will be send\n l.error(\n \"Command \"\n + str(command_tuple[0])\n + \" was not found in device! Unpredictable behavior may happen. No commad build!\"\n )\n return \"\"\n\n # Add a command terminator if one is needed and the last part of the syntax\n # command += device_dict.get(\"execution_terminator\",\"\")\n\n # Todo: multiple commands return\n if len(return_list) > 1:\n return return_list\n else:\n return str(return_list[0])",
"def _gen_cmd(cmd, address):\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args",
"def _create_packet(self, request):\n\n data_len = struct.pack('<Q', len(request))\n packet = b'ZBXD\\x01' + data_len + request\n\n def ord23(x):\n if not isinstance(x, int):\n return ord(x)\n else:\n return x\n\n logger.debug('Packet [str]: %s', packet)\n logger.debug('Packet [hex]: %s', ':'.join(hex(ord23(x))[2:] for x in packet))\n return packet",
"def makeCommandMsg(cmd, source, dest=None, args=[]):\n return Message({\"message\":COMMAND_MSG_TYPE,\n \"source\":source,\n \"dest\":dest,\n \"command\":cmd,\n \"args\":args})",
"def command_to_packet(command: str) -> bytes:\n return eISCPPacket(ISCPMessage(command)).get_raw()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the current faults on the system.
|
def get_faults_current(self):
request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16)
return self.__get_faults_list(self.config_16.faults_current)
|
[
"def get_faults(self):\n status = self.get_status()\n return [k for k in status if k.endswith('_FAULT') and status[k]]",
"def get_faults(self):\n try:\n faults_parent = self.rootelement.findall(\"{\"+self.xmlns+\"}Faults\")[0]\n self.faults = faults_parent.findall(\"{\"+self.xmlns+\"}Fault\")\n except IndexError:\n print(\"No faults found in model\")",
"def faults(self):\n debug(\"Getting faults...\")\n code = int(\"01001000\",2)\n command = pack('B',code)\n reply = self.query(command,count=2)\n faults = \" \"\n # The reply is 0xC8 followed by a faults status byte.\n if len(reply) != 2:\n if len(reply)>0:\n warn(\"%r: expecting 2-byte reply, got %r\" % (command,reply))\n elif self.connected:\n warn(\"%r: expecting 2-byte reply, got no reply\" % command)\n else:\n reply_code,bits = unpack('<BB',reply)\n if reply_code != code:\n warn(\"reply %r: expecting 0x%X(%s), got 0x%X(%s)\" %\n (reply,code,bin(code),reply_code,bin(reply_code)))\n else:\n fault_names = {0:\"Tank Level Low\",2:\"Temperature above alarm range\",\n 4:\"RTD Fault\",5:\"Pump Fault\",7:\"Temperature below alarm range\"}\n faults = \"\"\n for i in range(0,8):\n if (bits >> i) & 1:\n if i in fault_names: faults += fault_names[i]+\", \"\n else: faults += str(i)+\", \"\n faults = faults.strip(\", \")\n if faults == \"\": faults = \"none\"\n debug(\"Faults %s\" % faults)\n return faults",
"def get_faults_latched(self):\n request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_LATCHED], BP_TOOL.REQUEST_16)\n return self.__get_faults_list(self.config_16.faults_latched)",
"def appfirewallviolxmlsoapfaultviolationsperprofilerate(self) :\n\t\ttry :\n\t\t\treturn self._appfirewallviolxmlsoapfaultviolationsperprofilerate\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_fault_parameters(self):\n pass",
"def ListFaults(ctx,\n exceptions = None,\n best_practices = None,\n update = None,\n fault_types = None):\n \"\"\"With this method, both current and resolved faults can be retrieved. The system caches faults every 30 seconds.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\"\"exceptions = \"\"\"+str(exceptions)+\"\"\";\"\"\"+\"\"\"best_practices = \"\"\"+str(best_practices)+\"\"\";\"\"\"+\"\"\"update = \"\"\"+str(update)+\"\"\";\"\"\"+\"\"\"fault_types = \"\"\"+str(fault_types)+\"\"\";\"\"\"+\"\")\n try:\n ListClusterFaultsResult = ctx.element.list_cluster_faults(exceptions=exceptions, best_practices=best_practices, update=update, fault_types=fault_types)\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListClusterFaultsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)",
"def fault_counters(self):\n done, data = self._request('GF')\n if done:\n return {\n 'GFI self test': int(data[0], 16),\n 'Ground': int(data[1], 16),\n 'Stuck relay': int(data[2], 16)\n }\n\n raise EvseError",
"def crashes(self):\n\n return self._get_crashing_inputs([signal.SIGSEGV, signal.SIGILL])",
"def getFault(self):\n if self.fault:\n return self.fault, faultCodes[self.fault].format(self.cs)\n else:\n return None",
"def reset_faults(self):\n if not self.isremote():\n raise GenixError('Not in remote mode')\n logger.debug('Resetting faults.')\n self._write_coil(249, 1)\n self._update_instrumentproperties()",
"def check_fault_status():\n\treturn dsslib.SolutionI(ctypes.c_int32(27), ctypes.c_int32(0))",
"def _extend_fault_map(self):\n faults.FAULT_MAP.update({nsx_lib_exc.ManagerError:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.ServiceClusterUnavailable:\n webob.exc.HTTPServiceUnavailable,\n nsx_lib_exc.ClientCertificateNotTrusted:\n webob.exc.HTTPBadRequest,\n nsx_exc.SecurityGroupMaximumCapacityReached:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.NsxLibInvalidInput:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxENSPortSecurity:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxPluginTemporaryError:\n webob.exc.HTTPServiceUnavailable\n })",
"def fault_detection_init(self):\r\n from time import time\r\n self.fault = 0 # no faults\r\n self.warning = 0 # no warnings\r\n self.warn_value = {}\r\n self.warn_value[b'pressure_difference'] = 0\r\n self.warn_value[b'pump_stroke_counter'] = 0\r\n self.warn_value[b'pressure_drop'] = 0\r\n self.warn_value[b'slow_leak_counter'] = 0\r\n\r\n\r\n self.warn_index = {}\r\n self.warn_index[b'pressure_difference'] = 0\r\n self.warn_index[b'pump_stroke_counter'] = 0\r\n self.warn_index[b'pressure_drop'] = 0\r\n self.warn_index[b'slow_leak_counter'] = 0\r\n\r\n\r\n self.fault_description = {}\r\n self.fault_description['None'] = 'None'\r\n self.fault_description['pressure_difference'] = 'pressure_difference'\r\n self.fault_description['pump_stroke_counter'] = 'pump_stroke_counter'\r\n self.fault_description['pressure_drop'] = 'pressure_drop'\r\n self.fault_description['slow_leak_counter'] = 'slow_leak_counter'\r\n\r\n\r\n self.warning_description = {}\r\n self.warning_description['None'] = 'None'\r\n\r\n self.warning_status = {}\r\n self.fault_status = {}",
"def fault():\n return FaultCohesiveKin()",
"def get_faults_history(self, epg_dn):\n class_query = ClassQuery('faultRecord')\n class_query.propFilter = 'eq(faultRecord.affected, \"' + epg_dn + '\")'\n return self.moDir.query(class_query)",
"def fault_status(self):\n string = self.fault_status.get()\n return string",
"def _isfault(self):\n return self.dp.state()==PyTango.DevState.FAULT",
"def fill_faults(self):\n uuids = [inst.uuid for inst in self]\n faults = objects.InstanceFaultList.get_by_instance_uuids(\n self._context, uuids)\n faults_by_uuid = {}\n for fault in faults:\n if fault.instance_uuid not in faults_by_uuid:\n faults_by_uuid[fault.instance_uuid] = fault\n\n for instance in self:\n if instance.uuid in faults_by_uuid:\n instance.fault = faults_by_uuid[instance.uuid]\n else:\n # NOTE(danms): Otherwise the caller will cause a lazy-load\n # when checking it, and we know there are none\n instance.fault = None\n instance.obj_reset_changes(['fault'])\n\n return faults_by_uuid.keys()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the latched faults on the system.
|
def get_faults_latched(self):
request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_LATCHED], BP_TOOL.REQUEST_16)
return self.__get_faults_list(self.config_16.faults_latched)
|
[
"def get_faults(self):\n status = self.get_status()\n return [k for k in status if k.endswith('_FAULT') and status[k]]",
"def get_faults_current(self):\n request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16)\n return self.__get_faults_list(self.config_16.faults_current)",
"def get_faults(self):\n try:\n faults_parent = self.rootelement.findall(\"{\"+self.xmlns+\"}Faults\")[0]\n self.faults = faults_parent.findall(\"{\"+self.xmlns+\"}Fault\")\n except IndexError:\n print(\"No faults found in model\")",
"def reset_faults(self):\n if not self.isremote():\n raise GenixError('Not in remote mode')\n logger.debug('Resetting faults.')\n self._write_coil(249, 1)\n self._update_instrumentproperties()",
"def get_fault_parameters(self):\n pass",
"def faults(self):\n debug(\"Getting faults...\")\n code = int(\"01001000\",2)\n command = pack('B',code)\n reply = self.query(command,count=2)\n faults = \" \"\n # The reply is 0xC8 followed by a faults status byte.\n if len(reply) != 2:\n if len(reply)>0:\n warn(\"%r: expecting 2-byte reply, got %r\" % (command,reply))\n elif self.connected:\n warn(\"%r: expecting 2-byte reply, got no reply\" % command)\n else:\n reply_code,bits = unpack('<BB',reply)\n if reply_code != code:\n warn(\"reply %r: expecting 0x%X(%s), got 0x%X(%s)\" %\n (reply,code,bin(code),reply_code,bin(reply_code)))\n else:\n fault_names = {0:\"Tank Level Low\",2:\"Temperature above alarm range\",\n 4:\"RTD Fault\",5:\"Pump Fault\",7:\"Temperature below alarm range\"}\n faults = \"\"\n for i in range(0,8):\n if (bits >> i) & 1:\n if i in fault_names: faults += fault_names[i]+\", \"\n else: faults += str(i)+\", \"\n faults = faults.strip(\", \")\n if faults == \"\": faults = \"none\"\n debug(\"Faults %s\" % faults)\n return faults",
"def check_fault_status():\n\treturn dsslib.SolutionI(ctypes.c_int32(27), ctypes.c_int32(0))",
"def ListFaults(ctx,\n exceptions = None,\n best_practices = None,\n update = None,\n fault_types = None):\n \"\"\"With this method, both current and resolved faults can be retrieved. The system caches faults every 30 seconds.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\"\"exceptions = \"\"\"+str(exceptions)+\"\"\";\"\"\"+\"\"\"best_practices = \"\"\"+str(best_practices)+\"\"\";\"\"\"+\"\"\"update = \"\"\"+str(update)+\"\"\";\"\"\"+\"\"\"fault_types = \"\"\"+str(fault_types)+\"\"\";\"\"\"+\"\")\n try:\n ListClusterFaultsResult = ctx.element.list_cluster_faults(exceptions=exceptions, best_practices=best_practices, update=update, fault_types=fault_types)\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListClusterFaultsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)",
"def appfirewallviolxmlsoapfaultviolationsperprofilerate(self) :\n\t\ttry :\n\t\t\treturn self._appfirewallviolxmlsoapfaultviolationsperprofilerate\n\t\texcept Exception as e:\n\t\t\traise e",
"def _extend_fault_map(self):\n faults.FAULT_MAP.update({nsx_lib_exc.ManagerError:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.ServiceClusterUnavailable:\n webob.exc.HTTPServiceUnavailable,\n nsx_lib_exc.ClientCertificateNotTrusted:\n webob.exc.HTTPBadRequest,\n nsx_exc.SecurityGroupMaximumCapacityReached:\n webob.exc.HTTPBadRequest,\n nsx_lib_exc.NsxLibInvalidInput:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxENSPortSecurity:\n webob.exc.HTTPBadRequest,\n nsx_exc.NsxPluginTemporaryError:\n webob.exc.HTTPServiceUnavailable\n })",
"def fault_detection_init(self):\r\n from time import time\r\n self.fault = 0 # no faults\r\n self.warning = 0 # no warnings\r\n self.warn_value = {}\r\n self.warn_value[b'pressure_difference'] = 0\r\n self.warn_value[b'pump_stroke_counter'] = 0\r\n self.warn_value[b'pressure_drop'] = 0\r\n self.warn_value[b'slow_leak_counter'] = 0\r\n\r\n\r\n self.warn_index = {}\r\n self.warn_index[b'pressure_difference'] = 0\r\n self.warn_index[b'pump_stroke_counter'] = 0\r\n self.warn_index[b'pressure_drop'] = 0\r\n self.warn_index[b'slow_leak_counter'] = 0\r\n\r\n\r\n self.fault_description = {}\r\n self.fault_description['None'] = 'None'\r\n self.fault_description['pressure_difference'] = 'pressure_difference'\r\n self.fault_description['pump_stroke_counter'] = 'pump_stroke_counter'\r\n self.fault_description['pressure_drop'] = 'pressure_drop'\r\n self.fault_description['slow_leak_counter'] = 'slow_leak_counter'\r\n\r\n\r\n self.warning_description = {}\r\n self.warning_description['None'] = 'None'\r\n\r\n self.warning_status = {}\r\n self.fault_status = {}",
"def crashes(self):\n\n return self._get_crashing_inputs([signal.SIGSEGV, signal.SIGILL])",
"def fault():\n return FaultCohesiveKin()",
"def fill_faults(self):\n uuids = [inst.uuid for inst in self]\n faults = objects.InstanceFaultList.get_by_instance_uuids(\n self._context, uuids)\n faults_by_uuid = {}\n for fault in faults:\n if fault.instance_uuid not in faults_by_uuid:\n faults_by_uuid[fault.instance_uuid] = fault\n\n for instance in self:\n if instance.uuid in faults_by_uuid:\n instance.fault = faults_by_uuid[instance.uuid]\n else:\n # NOTE(danms): Otherwise the caller will cause a lazy-load\n # when checking it, and we know there are none\n instance.fault = None\n instance.obj_reset_changes(['fault'])\n\n return faults_by_uuid.keys()",
"def errors_fatal(self) -> List[Error]:",
"def getErrorsFromErrorQueue(self):\n \n entries = []\n while True:\n errorCheck = self._receive(':SYST:ERR?')\n errorCheckText = errorCheck.split(',')[1]\n if errorCheckText == '\"No error\"':\n break;\n entries.append(errorCheck)\n return entries",
"def check_page_faults(con, host, warning, critical,perf_data):\n warning = warning or 10\n critical = critical or 30\n data=get_server_status(con)\n\n try:\n page_faults=float(data['extra_info']['page_faults']) \n except:\n # page_faults unsupported on the underlaying system\n return exit_with_general_critical(\"page_faults unsupported on the underlaying system\")\n \n err,delta=maintain_delta([page_faults],host,\"page_faults\")\n if err==0:\n page_faults_ps=delta[1]/delta[0]\n message = \"Page faults : %.2f ps\" % page_faults_ps\n message+=performance_data(perf_data,[(\"%.2f\" %page_faults_ps,\"page_faults_ps\",warning,critical)])\n return check_levels(page_faults_ps,warning,critical,message)\n else:\n return exit_with_general_warning(\"problem reading data from temp file\")",
"def getFault(self):\n if self.fault:\n return self.fault, faultCodes[self.fault].format(self.cs)\n else:\n return None",
"def get_bad_requests(self):\r\n\t\treturn [i.request for i in self.logs if str(i.status).startswith('4')]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the pattern wave (pat_wave, e.g. 101011110011...). The wave is transferred block by block: each request returns a Pattern Wave response marked [More to follow], and the caller keeps requesting the next block until a response marked [No More to follow] is received.
|
def __request_pat_wave(self, r_number):
packet = bytearray()
packet.append(0) # 16 bit options
packet.append(0) # 8 bit options
packet.append(1) # Request the 1 option
# ---------------------------------------------------------------------
# Request the variable length options. pattern wave.
packet.append(0x01 << t_var_size_Options.PATTERN_WAVE)
# ---------------------------------------------------------------------
# Packets to follow
packet.append(r_number)
# ---------------------------------------------------------------------
# Length of the bytes to follow
packet.append(0)
rval = self.interact_with_shouter(packet)
if rval != False:
return rval
return []
|
[
"def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")",
"def _wave(self):\n try:\n return wave.open(StringIO(self.contents))\n except wave.Error, err:\n err.message += \"\\nInvalid wave file: %s\" % self\n err.args = (err.message,)\n raise",
"def getWave(self):\n return self._wave",
"def wave_chain(self, data):\n # I p1 0\n # I p2 0\n # I p3 len\n ## extension ##\n # s len data bytes\n\n return _u2i(_pigpio_command_ext(\n self.sl, _PI_CMD_WVCHA, 0, 0, len(data), [data]))",
"def read_wave(path):\n rate, signal = wav.read(path)\n return signal, rate",
"def read_wav(self, wave_name):\n file_name = self.inpath+'/'+wave_name+'.wav'\n # TODO should resample if necessary\n channels,sample_rate = self.pcm_channels(file_name)\n #print 'Read', wave_name,' at', sample_rate\n return channels[0];",
"def extract_charge(self, waveforms):",
"def _make_waves(self):\n wf = []\n wf.append(pigpio.pulse(1<<self.gpio, 0, self.t0))\n wf.append(pigpio.pulse(0, 1<<self.gpio, self.gap))\n self.pi.wave_add_generic(wf)\n self._amble = self.pi.wave_create()\n\n wf = []\n wf.append(pigpio.pulse(1<<self.gpio, 0, self.t0))\n wf.append(pigpio.pulse(0, 1<<self.gpio, self.t1))\n self.pi.wave_add_generic(wf)\n self._wid0 = self.pi.wave_create()\n\n wf = []\n wf.append(pigpio.pulse(1<<self.gpio, 0, self.t1))\n wf.append(pigpio.pulse(0, 1<<self.gpio, self.t0))\n self.pi.wave_add_generic(wf)\n self._wid1 = self.pi.wave_create()",
"def waveband(self):\n return self._band",
"def get_bitwave(wave):\n return (wave * 32767).astype(np.int16)",
"def get_waveforms(self, network, station, location, channel, starttime,\n endtime):\n # padding channel with spaces does not make sense\n if len(channel) < 3 and channel != \".*\":\n msg = \"channel expression matches less than 3 characters \" + \\\n \"(use e.g. 'BHZ', 'BH?', 'BH[Z12]', 'B??')\"\n raise Exception(msg)\n seedname = '%-2s%-5s%s%-2s' % (network, station, channel, location)\n # allow UNIX style \"?\" wildcard\n seedname = seedname.replace(\"?\", \".\")\n return self.get_waveforms_nscl(seedname, starttime,\n endtime - starttime)",
"def getRicker(f,t):\n # assert len(f) == 1, 'Ricker wavelet needs 1 frequency as input'\n # f = f[0]\n pift = pi*f*t\n wav = (1 - 2*pift**2)*np.exp(-pift**2)\n return wav",
"def get_interval_sound(root,interval,category):\n generator = Interval()\n interval_url = generator.get_interval_audio_url(root,interval,category)\n return send_file(interval_url)",
"def wave_samples(self):\n return self._quantized_subsamples",
"def get_pattern(self) -> \"Pattern\":\n return self.__pattern",
"def getWaveNumber(self):\n return self._wave_number",
"def wave_send_repeat(wave_id):\n return _u2i(_pigpio_command(_control, _PI_CMD_WVTXR, wave_id, 0))",
"def read_wav(fileName):\r\n samplingRate, data = wf.read(fileName)\r\n if data.dtype == 'int16':\r\n data = data/(2**15)\r\n if data.dtype == 'int32':\r\n data = data/(2**31)\r\n signal = SignalObj(data, 'time', samplingRate=samplingRate)\r\n return signal",
"def wave_send_repeat(self, wave_id):\n return _u2i(_pigpio_command(self.sl, _PI_CMD_WVTXR, wave_id, 0))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The resource ID of the Network Fabric l3IsolationDomain.
|
def l3_isolation_domain_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "l3_isolation_domain_id")
|
[
"def l3_isolation_domain_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")",
"def l3_id(self):\n return self._l3_id",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def cluster_resource_id(self) -> str:\n return pulumi.get(self, \"cluster_resource_id\")",
"def resource_guid(self) -> str:\n return pulumi.get(self, \"resource_guid\")",
"def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")",
"def get_domain_id(self):\r\n return self.__domain_id",
"def domain_id(self):\n return self._domain_id",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def dns_zone_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_zone_resource_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_id(self):\n if self.dutinformation:\n return self.dutinformation.resource_id\n return None",
"def resource_pool_id(self) -> str:\n return pulumi.get(self, \"resource_pool_id\")",
"def failover_group_id(self) -> str:\n return pulumi.get(self, \"failover_group_id\")",
"def custom_compliance_domain_id(self):\n return self._custom_compliance_domain_id",
"def vulnerability_resilience_id():\n\n if S3VulnerabilityModel.resilience_pid is None:\n # Get the parameter_id of the aggregated_indicator\n db = current.db\n table = db.vulnerability_aggregated_indicator\n row = db(table.uuid == \"Resilience\").select(table.parameter_id,\n limitby=(0, 1)).first()\n try:\n S3VulnerabilityModel.resilience_pid = row.parameter_id\n except:\n # DB not initialised\n pass\n\n return S3VulnerabilityModel.resilience_pid",
"def get_resource_id(self) -> str:\n c_expected = tankerlib.tanker_encryption_session_get_resource_id(self.c_session)\n c_id = ffihelpers.unwrap_expected(c_expected, \"char*\")\n return ffihelpers.c_string_to_str(c_id)",
"def chain_id(self) -> str:\n return pulumi.get(self, \"chain_id\")",
"def managed_rule_identifier(self) -> str:\n return pulumi.get(self, \"managed_rule_identifier\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
|
def interface_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "interface_name")
|
[
"def get_default_iface_name():\n return netifaces.gateways()['default'][netifaces.AF_INET][1]",
"def interface_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"interface_name\")",
"def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3_network_name\")",
"def network_attachment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_attachment_name\")",
"def get_default_interface():\n gws = netifaces.gateways()\n if 'default' not in gws:\n return ''\n\n route = gws['default'].get(netifaces.AF_INET)\n if route:\n return route[1]",
"def default_cni_network_id(self) -> str:\n return pulumi.get(self, \"default_cni_network_id\")",
"def interface_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"interface_name\")",
"def network_interface_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_interface_id\")",
"def _get_interface_tunnel_name(self):\n return self.__interface_tunnel_name",
"def get_interface_name(self, interface_element):\n return interface_element.find(\"{\"+self.xmlns+\"}Data\").get(\"Name\")",
"def network_instance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_instance_name\")",
"def get_interface_name():\n interface_name = ''\n interfaces = psutil.net_if_addrs()\n for name, details in interfaces.items():\n for detail in details:\n if detail.family == socket.AF_INET:\n ip_address = ipaddress.ip_address(detail.address)\n if not (ip_address.is_link_local or ip_address.is_loopback):\n interface_name = name\n break\n return interface_name",
"def name(self) -> str:\n return self.__configuration['network']['name']",
"def get_name(self) -> str:\n return decode_cstr(self.__info.interfaceName)",
"def getDefaultLayerName(self):\n\t\treturn self._fileSystem.getDefaultLayerName()",
"def default_name(self):\n raise NotImplementedError",
"def GetInterface(self):\n if self.parent.GetIPInterface(self.interface) is None:\n return ''\n return self.interface",
"def _find_local_interface_name(self, network_type):\n host_id = self.get_my_host_id()\n interface_list = self.dbapi.iinterface_get_all(host_id, expunge=True)\n ifaces = dict((i['ifname'], i) for i in interface_list)\n port_list = self.dbapi.port_get_all(host_id)\n ports = dict((p['interface_id'], p) for p in port_list)\n for interface in interface_list:\n if network_type in interface.networktypelist:\n return cutils.get_interface_os_ifname(interface, ifaces, ports)",
"def get_logical_ifname(self, interface_name, proto='provision'): # pragma: no cover\n output = check_output(['uci', 'show', 'network'])\n network_list = output.strip().split('\\n')\n for config in network_list:\n cfg, option = config.split('=')\n net_prex = cfg.split(\".\")\n if net_prex[-1] == \"proto\" and str(option) != proto:\n ifname = '.'.join(net_prex[:-1]) + '.ifname'\n interface = check_output(['uci', 'get', ifname]).split('\\n')[0]\n if interface == interface_name:\n return net_prex[1]\n return ''"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The name of the L3 network.
|
def l3_network_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "l3_network_name")
|
[
"def name(self) -> str:\n return self.__configuration['network']['name']",
"def computer_network_name(self) -> str:\n return self._computer_network_name",
"def network_instance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_instance_name\")",
"def name(self) -> str:\n return f\"{self._inst} NAT {self._data['name']}\"",
"def managed_network_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"managed_network_name\")",
"def _get_layer_name(self):\n return self.__layer_name",
"def layer_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"layer_name\")",
"def cloud_services_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cloud_services_network_name\")",
"def get_name(self):\n name_str = \"Brain\"\n name_str += \"_\" + self._memory.get_name() \n name_str += \"_ImgSize\" + str(self._img_size[0])\n name_str += \"_Nov\" + self._novelty_loss_type.upper()\n name_str += \"_Train\" + str(self._train_epochs_per_iter)\n name_str += \"_Lrate\" + str(self._learning_rate)\n return name_str",
"def __str__(self):\n\n return \"Network: {0}\".format(self.topology)",
"def get_layer_name(self, layer):\n return self.name + str(layer)",
"def get_network_name(f_in):\n lines = read_lines(f_in)\n first_row = lines[0].split()\n net_type = first_row[0].replace(\"'\", \"\").replace('\"', '')\n value = first_row[1].replace(\"'\", \"\").replace('\"', '')\n\n if net_type == \"name\":\n return value\n else:\n warn('Name of network not found. Exit(\"\")')\n return \"\"",
"def layer_protocol_name(self) -> str:\n return self._layer_protocol_name",
"def get_network_name(self): # type: () -> str\n networks = self.get_network_names()\n\n if not networks:\n raise ApplicationError('No network found for Docker container: %s.' % self.id)\n\n if len(networks) > 1:\n raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))\n\n return networks[0]",
"def network_names(self):\n # FIXME\n \"\"\"networks = []\n link = '<a href=\\\"%s\\\"> %s </a>'\n for net in self.networks.all().distinct():\n networks.append(link % (net.get_admin_url(),net.desc))\n return \", \".join(networks)\"\"\"\n return \"\"",
"def mobile_network_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"mobile_network_name\")",
"def network_id(self) -> str:\n return pulumi.get(self, \"network_id\")",
"def layer_name(self):\n return self.__class__.__name__",
"def network_type(self) -> str:\n return pulumi.get(self, \"network_type\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get an existing L3Network resource's state with the given name, id, and optional extra properties used to qualify the lookup.
|
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'L3Network':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = L3NetworkArgs.__new__(L3NetworkArgs)
__props__.__dict__["associated_resource_ids"] = None
__props__.__dict__["cluster_id"] = None
__props__.__dict__["detailed_status"] = None
__props__.__dict__["detailed_status_message"] = None
__props__.__dict__["extended_location"] = None
__props__.__dict__["hybrid_aks_clusters_associated_ids"] = None
__props__.__dict__["hybrid_aks_ipam_enabled"] = None
__props__.__dict__["hybrid_aks_plugin_type"] = None
__props__.__dict__["interface_name"] = None
__props__.__dict__["ip_allocation_type"] = None
__props__.__dict__["ipv4_connected_prefix"] = None
__props__.__dict__["ipv6_connected_prefix"] = None
__props__.__dict__["l3_isolation_domain_id"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_machines_associated_ids"] = None
__props__.__dict__["vlan"] = None
return L3Network(resource_name, opts=opts, __props__=__props__)
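For orientation, a minimal usage sketch of the lookup above, assuming this class is the L3Network resource from the pulumi_azure_native networkcloud module; the resource name and the ARM ID below are illustrative placeholders, not values taken from this record:
import pulumi
import pulumi_azure_native as azure_native

# Illustrative placeholder ID; substitute a real Microsoft.NetworkCloud/l3Networks resource ID.
existing = azure_native.networkcloud.L3Network.get(
    "existing-l3-network",
    id="/subscriptions/<subscription>/resourceGroups/<rg>/providers/Microsoft.NetworkCloud/l3Networks/<name>",
)

# The looked-up state is exposed as outputs, e.g. the interface name and VLAN shown above.
pulumi.export("interfaceName", existing.interface_name)
pulumi.export("vlan", existing.vlan)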
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Layer':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = LayerArgs.__new__(LayerArgs)\n\n __props__.__dict__[\"attributes\"] = None\n __props__.__dict__[\"auto_assign_elastic_ips\"] = None\n __props__.__dict__[\"auto_assign_public_ips\"] = None\n __props__.__dict__[\"custom_instance_profile_arn\"] = None\n __props__.__dict__[\"custom_json\"] = None\n __props__.__dict__[\"custom_recipes\"] = None\n __props__.__dict__[\"custom_security_group_ids\"] = None\n __props__.__dict__[\"enable_auto_healing\"] = None\n __props__.__dict__[\"install_updates_on_boot\"] = None\n __props__.__dict__[\"lifecycle_event_configuration\"] = None\n __props__.__dict__[\"load_based_auto_scaling\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"packages\"] = None\n __props__.__dict__[\"shortname\"] = None\n __props__.__dict__[\"stack_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"use_ebs_optimized_instances\"] = None\n __props__.__dict__[\"volume_configurations\"] = None\n return Layer(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get_network(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_network(\n name_or_id=name_or_id, ignore_missing=True, **filters\n )",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n address_space_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n allow_forwarded_traffic: Optional[pulumi.Input[bool]] = None,\n allow_gateway_transit: Optional[pulumi.Input[bool]] = None,\n allow_virtual_network_access: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n remote_address_space_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n remote_virtual_network_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n use_remote_gateways: Optional[pulumi.Input[bool]] = None,\n virtual_network_id: Optional[pulumi.Input[str]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'VirtualNetworkPeering':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VirtualNetworkPeeringState.__new__(_VirtualNetworkPeeringState)\n\n __props__.__dict__[\"address_space_prefixes\"] = address_space_prefixes\n __props__.__dict__[\"allow_forwarded_traffic\"] = allow_forwarded_traffic\n __props__.__dict__[\"allow_gateway_transit\"] = allow_gateway_transit\n __props__.__dict__[\"allow_virtual_network_access\"] = allow_virtual_network_access\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"remote_address_space_prefixes\"] = remote_address_space_prefixes\n __props__.__dict__[\"remote_virtual_network_id\"] = remote_virtual_network_id\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"use_remote_gateways\"] = use_remote_gateways\n __props__.__dict__[\"virtual_network_id\"] = virtual_network_id\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return VirtualNetworkPeering(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'CloudServicesNetwork':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = CloudServicesNetworkArgs.__new__(CloudServicesNetworkArgs)\n\n __props__.__dict__[\"additional_egress_endpoints\"] = None\n __props__.__dict__[\"associated_resource_ids\"] = None\n __props__.__dict__[\"cluster_id\"] = None\n __props__.__dict__[\"detailed_status\"] = None\n __props__.__dict__[\"detailed_status_message\"] = None\n __props__.__dict__[\"enable_default_egress_endpoints\"] = None\n __props__.__dict__[\"enabled_egress_endpoints\"] = None\n __props__.__dict__[\"extended_location\"] = None\n __props__.__dict__[\"hybrid_aks_clusters_associated_ids\"] = None\n __props__.__dict__[\"interface_name\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"virtual_machines_associated_ids\"] = None\n return CloudServicesNetwork(resource_name, opts=opts, __props__=__props__)",
"def getstate(self,name):\n state = self.states[name]\n debug('kfnode.getstate ',(name,state))\n return state",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def find_state(self, name):\n return self.state_index.get(name, None)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"conditional_membership\"] = None\n __props__[\"description\"] = None\n __props__[\"display_name\"] = None\n __props__[\"etag\"] = None\n __props__[\"group_members\"] = None\n __props__[\"member_type\"] = None\n __props__[\"name\"] = None\n __props__[\"provisioning_state\"] = None\n __props__[\"system_data\"] = None\n __props__[\"type\"] = None\n return NetworkGroup(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(states: [State], state_id: str, id_type: str) -> State:\n if id_type == 'new':\n for state in states:\n if state.new_id == state_id:\n return state\n if id_type == 'old':\n for state in states:\n if state.id == state_id:\n return state\n return states[0]",
"def get_state_from_id(self, id_s):\n if self._disc_state:\n return id_s\n else:\n vec = self.get_coordinates_from_id(id_s)\n return self.state_points[range(self.obs_dims), vec]",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n address_spaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n bgp_settings: Optional[pulumi.Input[pulumi.InputType['LocalNetworkGatewayBgpSettingsArgs']]] = None,\n gateway_address: Optional[pulumi.Input[str]] = None,\n gateway_fqdn: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'LocalNetworkGateway':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _LocalNetworkGatewayState.__new__(_LocalNetworkGatewayState)\n\n __props__.__dict__[\"address_spaces\"] = address_spaces\n __props__.__dict__[\"bgp_settings\"] = bgp_settings\n __props__.__dict__[\"gateway_address\"] = gateway_address\n __props__.__dict__[\"gateway_fqdn\"] = gateway_fqdn\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"tags\"] = tags\n return LocalNetworkGateway(resource_name, opts=opts, __props__=__props__)",
"def get_network_by_id(self, id):\n return self.network.get_network(id)",
"def get_by_id(id: int) -> \"Network\":\n return Network.query.filter_by(id=id).first()",
"def get(self, name):\n return self.network.get(name)",
"def get_layer(self, id: str) -> Layer:\n for layer in self.layers:\n if layer.ID == id:\n return layer\n raise Exception(\"Could not find the layer {}\".format(id))",
"def get_state(api, entity_id):\n try:\n req = api(METH_GET, URL_API_STATES_ENTITY.format(entity_id))\n\n # req.status_code == 422 if entity does not exist\n\n return ha.State.from_dict(req.json()) \\\n if req.status_code == 200 else None\n\n except (HomeAssistantError, ValueError):\n # ValueError if req.json() can't parse the json\n _LOGGER.exception(\"Error fetching state\")\n\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The list of resource IDs for the other Microsoft.NetworkCloud resources that have attached this network.
|
def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "associated_resource_ids")
|
[
"def resource_ids(self):\n return self._resource_ids",
"def network_ids(self):\n return self._network_ids",
"def get_resource_identifiers(self):\n return self.__resourceIdentifiers",
"def resource_share_ids(self):\n return self._resource_share_ids",
"def resource_list(self):\r\n return self.D",
"def resource_names(self):\n return self._resource_names",
"def get_resource_ids():\n worker = global_worker\n worker.check_connected()\n\n if _mode() == LOCAL_MODE:\n raise RuntimeError(\n \"ray._private.worker.get_resource_ids() does not work in local_mode.\"\n )\n\n return global_worker.core_worker.resource_ids()",
"def refer_resources(self):\n return self._refer_resources",
"def global_ids(self):\n return [resource.global_id for resource in self.resources]",
"def workload_resource_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"workload_resource_ids\")",
"def network_names_list(self):\n return [network_object[\"networkID\"] for network_object in self.__networks_objects_list]",
"def resources(self):\n return self._resources",
"def resource_share_invitation_ids(self):\n return self._resource_share_invitation_ids",
"def _get_networks():\n\n database = Database()\n networks = database.query(\"SELECT Identifier FROM Networks;\")\n database.close()\n\n return [identifier[0] for identifier in networks]",
"def GetResourceNames(self):\r\n return [x.name for x in self.resources]",
"def get_asset_ids(self):\n return # osid.id.IdList",
"def network_interface_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"network_interface_ids\")",
"def resource_names(self) -> List[str]:\n return [resource.name for resource in self.resources if resource.name is not None] # type: ignore",
"def resources(self):\n seen = set()\n # seen.add always returns None, so 'not seen.add(x)' is always True,\n # but will only be called if the value is not already in seen (because\n # 'and' short-circuits)\n return [x for x in self._resources if x not in seen and not seen.add(x)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The more detailed status of the L3 network.
|
def detailed_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "detailed_status")
|
[
"def status(ctx):\n return show_network_status()",
"def status(self):\n res = \"\"\n for tlight in self.trafficLights:\n res += \"Traffic light {} status: {}\\n\".format(self.trafficLights[tlight].id,self.trafficLights[tlight].getState())\n return res",
"def status(self):\n # print(self.target.config, self.target.server_link)\n if not self.target.network:\n print('no server connected to')\n return\n connected_servers = self.target.network.get_servers()\n print_server_info(connected_servers)",
"def status():\n try:\n log.info('network config: %s', net.ifconfig())\n except AttributeError:\n log.warning('network not connected')",
"def get_status(self):\n if self.status:\n print(f\"Server '{self.server_name}' is online\")\n else:\n print(f\"Server '{self.server_name}' is offline\")",
"def display_status(self):\n avail = 0\n if self._status_last & 0b11 > 0:\n avail = 2 * (self._status_last & 0b11) + 2\n print(\"STATUS register = 0x{0:02X}\".format(self._status_last))\n print(\"BURST Mode :\", (self._status_last & (1 << 7)) > 0)\n print(\"WOC Mode :\", (self._status_last & (1 << 6)) > 0)\n print(\"SM Mode :\", (self._status_last & (1 << 5)) > 0)\n print(\"Error :\", (self._status_last & (1 << 4)) > 0)\n print(\"Single error detection :\", (self._status_last & (1 << 3)) > 0)\n print(\"Reset status :\", (self._status_last & (1 << 2)) > 0)\n print(\"Response bytes available :\", avail)",
"def status(self):\n uri = common.genuri('transport-node', self.uuid, 'status')\n return super(TransportNode, self)._action('GET', uri)",
"def printstatus(self):\n data = self.statuslist()\n if not data:\n print(\n \"Unable to communicate to the OpenSprinkler \"\n \"at %s\" % self.hostname\n )\n return None\n print('Station\\t%-15.15s\\tStatus' % 'Name')\n for item in data:\n print('%d\\t%-15.15s\\t%s' % (item[0], item[1], item[2]))\n return",
"def status(self):\n uri = common.genuri('lswitch', self.uuid, 'status')\n return super(LSwitch, self)._action('GET', uri)",
"def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")",
"def status(self):\n uri = common.genuri('lswitch', self.lswitch_uuid, 'lport', self.uuid,\n 'status')\n return super(LSwitchPort, self)._action(\"GET\", uri)",
"def print_status(self):\n wlan = ''\n ip = ''\n mac = ''\n homepage = ''\n pairing_code = ''\n\n ssid = helpers.get_ssid()\n wlan = '\\nWireless network:\\n%s\\n\\n' % ssid\n\n interfaces = ni.interfaces()\n ips = []\n for iface_id in interfaces:\n iface_obj = ni.ifaddresses(iface_id)\n ifconfigs = iface_obj.get(ni.AF_INET, [])\n for conf in ifconfigs:\n if conf.get('addr') and conf.get('addr'):\n ips.append(conf.get('addr'))\n if len(ips) == 0:\n ip = '\\nERROR: Could not connect to LAN\\n\\nPlease check that the IoTBox is correc-\\ntly connected with a network cable,\\n that the LAN is setup with DHCP, and\\nthat network addresses are available'\n elif len(ips) == 1:\n ip = '\\nIP Address:\\n%s\\n' % ips[0]\n else:\n ip = '\\nIP Addresses:\\n%s\\n' % '\\n'.join(ips)\n\n if len(ips) >= 1:\n ips_filtered = [i for i in ips if i != '127.0.0.1']\n main_ips = ips_filtered and ips_filtered[0] or '127.0.0.1'\n mac = '\\nMAC Address:\\n%s\\n' % helpers.get_mac_address()\n homepage = '\\nHomepage:\\nhttp://%s:8069\\n\\n' % main_ips\n\n code = connection_manager.pairing_code\n if code:\n pairing_code = '\\nPairing Code:\\n%s\\n' % code\n\n commands = RECEIPT_PRINTER_COMMANDS[self.receipt_protocol]\n title = commands['title'] % b'IoTBox Status'\n self.print_raw(commands['center'] + title + b'\\n' + wlan.encode() + mac.encode() + ip.encode() + homepage.encode() + pairing_code.encode() + commands['cut'])",
"def status_detail(self):\n return self._status_detail",
"def print_status(self):\n status_code = self._get_wifi_status_code()\n prop = DBUS_GENERAL_PROPS[self.wifi_prop]\n status = self.translate_status_code(prop, status_code)\n click.echo(f'WiFi is {status}')",
"def detailed_status_message(self) -> str:\n return pulumi.get(self, \"detailed_status_message\")",
"def print_network_state(self):\n print(\"\\nNetwork state:\")\n for index_layer, layer in enumerate(self.layer):\n for node in layer:\n output = str(format(node.output, '.2f'))\n print(output, end='\\t')\n print(\"\")\n print(\"\")",
"def status(self, station=1):\n return self.statuslist()[station][2]",
"def get_lacp_status(protocol=True):\r\n # TODO\r\n pass",
"def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The extended location of the cluster associated with the resource.
|
def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:
return pulumi.get(self, "extended_location")
|
[
"def cluster_extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"cluster_extended_location\")",
"def cluster_location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_location\")",
"def cluster_location(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_location\")",
"def set_up_extended_location(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n edge_zone = self.context.get_edge_zone()\n if edge_zone:\n mc.extended_location = self.models.ExtendedLocation(\n name=edge_zone,\n type=self.models.ExtendedLocationTypes.EDGE_ZONE\n )\n return mc",
"def cluster_ns(self):\n return self._cluster_ns",
"def hybrid_aks_extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"hybrid_aks_extended_location\")",
"def cluster(self):\n return self._cluster",
"def cluster_resource_id(self) -> str:\n return pulumi.get(self, \"cluster_resource_id\")",
"def primary_fabric_location(self) -> Optional[str]:\n return pulumi.get(self, \"primary_fabric_location\")",
"def _course_location(self):\r\n return \"location:{org}+{number}+{run}+course+{run}\".format(**self._course_dict)",
"def getClusterInfo(self):\n pass",
"def cluster(self) -> Optional['outputs.DefaultClusterPropertiesResponse']:\n return pulumi.get(self, \"cluster\")",
"def mount_location(self):\n return self._mount_location",
"def cluster_name(self):\n return self._cluster_name",
"def compute_location(self) -> Optional[str]:\n return pulumi.get(self, \"compute_location\")",
"def recovery_fabric_location(self) -> Optional[str]:\n return pulumi.get(self, \"recovery_fabric_location\")",
"def local_replication_cluster(self):\n return self._local_replication_cluster",
"def get_default_alt_loc(self):\n return self.default_alt_loc",
"def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
|
def interface_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "interface_name")
|
[
"def get_default_iface_name():\n return netifaces.gateways()['default'][netifaces.AF_INET][1]",
"def interface_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interface_name\")",
"def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3_network_name\")",
"def network_attachment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_attachment_name\")",
"def get_default_interface():\n gws = netifaces.gateways()\n if 'default' not in gws:\n return ''\n\n route = gws['default'].get(netifaces.AF_INET)\n if route:\n return route[1]",
"def default_cni_network_id(self) -> str:\n return pulumi.get(self, \"default_cni_network_id\")",
"def interface_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"interface_name\")",
"def network_interface_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_interface_id\")",
"def _get_interface_tunnel_name(self):\n return self.__interface_tunnel_name",
"def get_interface_name(self, interface_element):\n return interface_element.find(\"{\"+self.xmlns+\"}Data\").get(\"Name\")",
"def network_instance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_instance_name\")",
"def get_interface_name():\n interface_name = ''\n interfaces = psutil.net_if_addrs()\n for name, details in interfaces.items():\n for detail in details:\n if detail.family == socket.AF_INET:\n ip_address = ipaddress.ip_address(detail.address)\n if not (ip_address.is_link_local or ip_address.is_loopback):\n interface_name = name\n break\n return interface_name",
"def name(self) -> str:\n return self.__configuration['network']['name']",
"def get_name(self) -> str:\n return decode_cstr(self.__info.interfaceName)",
"def getDefaultLayerName(self):\n\t\treturn self._fileSystem.getDefaultLayerName()",
"def default_name(self):\n raise NotImplementedError",
"def GetInterface(self):\n if self.parent.GetIPInterface(self.interface) is None:\n return ''\n return self.interface",
"def _find_local_interface_name(self, network_type):\n host_id = self.get_my_host_id()\n interface_list = self.dbapi.iinterface_get_all(host_id, expunge=True)\n ifaces = dict((i['ifname'], i) for i in interface_list)\n port_list = self.dbapi.port_get_all(host_id)\n ports = dict((p['interface_id'], p) for p in port_list)\n for interface in interface_list:\n if network_type in interface.networktypelist:\n return cutils.get_interface_os_ifname(interface, ifaces, ports)",
"def get_logical_ifname(self, interface_name, proto='provision'): # pragma: no cover\n output = check_output(['uci', 'show', 'network'])\n network_list = output.strip().split('\\n')\n for config in network_list:\n cfg, option = config.split('=')\n net_prex = cfg.split(\".\")\n if net_prex[-1] == \"proto\" and str(option) != proto:\n ifname = '.'.join(net_prex[:-1]) + '.ifname'\n interface = check_output(['uci', 'get', ifname]).split('\\n')[0]\n if interface == interface_name:\n return net_prex[1]\n return ''"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The type of the IP address allocation, defaulted to "DualStack".
|
def ip_allocation_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "ip_allocation_type")
|
[
"def ip_address_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address_type\")",
"def _get_address_type(self):\n return self.__address_type",
"def IpType(self):\n\t\treturn self._get_attribute('ipType')",
"def address_type(self):\n return self._address_type",
"def get_ip_type1(self) -> str:\n hex_ip = hexlify(self.message)[152:160]\n ip_addr = int(hex_ip[6:8] + hex_ip[4:6] + hex_ip[2:4] + hex_ip[0:2], 16)\n return inet_ntoa(pack(\"<L\", ip_addr))",
"def ip_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_type\")",
"def type (self):\n return self.eth_type",
"def ip_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_type\")",
"def ip_tag_type(self) -> str:\n return pulumi.get(self, \"ip_tag_type\")",
"def network_type(self) -> str:\n return pulumi.get(self, \"network_type\")",
"def get_network_type(self):\n net_type = self._data['type']\n if net_type == 'Shared':\n return 'guest'\n elif net_type == 'Isolated':\n return 'isolated'",
"def get_ip_addressing_mode(self):\n return IPAddressingMode.get(utils.bytes_to_int(self.get_parameter(ATStringCommand.MA.command)))",
"def allocate_subnet(self):\n if len(self.subnet_list) == 0:\n subnet = '192.168.1.0/24'\n self.subnet_list.append(subnet)\n return subnet\n else:\n subnet = self.subnet_list[::-1][0]\n ip = ipaddress.IPv4Network(subnet)[0]\n s = ipaddress.IPv4Address(ip) + 256\n return '{}{}'.format(s, '/24')",
"def test_external_ip_get_kind(self):\n assert_equal(self.test_external_ip.get_kind(), 'mpexternalip')",
"def get_network_type(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkType', self.handle)",
"def SocketType(self) -> SocketType:",
"def ip_protocol(self) -> str:\n protocol = f\"ipv{self.ip_address.version}\"\n\n log.debug(\"Host %s: IP protocol for paramiko is %s.\", self.host)\n return protocol",
"def packet_type(self):\r\n return self._packet_type",
"def address_net_type(address):\n if address[0] in (MAINNET_SCRIPT_ADDRESS_PREFIX,\n MAINNET_ADDRESS_PREFIX):\n return \"mainnet\"\n elif address[:2] == MAINNET_SEGWIT_ADDRESS_PREFIX:\n return \"mainnet\"\n elif address[0] in (TESTNET_SCRIPT_ADDRESS_PREFIX,\n TESTNET_ADDRESS_PREFIX,\n TESTNET_ADDRESS_PREFIX_2):\n return \"testnet\"\n elif address[:2] == TESTNET_SEGWIT_ADDRESS_PREFIX:\n return \"testnet\"\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The resource ID of the Network Fabric l3IsolationDomain.
|
def l3_isolation_domain_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "l3_isolation_domain_id")
|
[
"def l3_isolation_domain_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")",
"def l3_id(self):\n return self._l3_id",
"def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")",
"def cluster_resource_id(self) -> str:\n return pulumi.get(self, \"cluster_resource_id\")",
"def resource_guid(self) -> str:\n return pulumi.get(self, \"resource_guid\")",
"def external_network_id(self) -> str:\n return pulumi.get(self, \"external_network_id\")",
"def get_domain_id(self):\r\n return self.__domain_id",
"def domain_id(self):\n return self._domain_id",
"def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")",
"def dns_zone_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_zone_resource_id\")",
"def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")",
"def resource_id(self):\n if self.dutinformation:\n return self.dutinformation.resource_id\n return None",
"def resource_pool_id(self) -> str:\n return pulumi.get(self, \"resource_pool_id\")",
"def failover_group_id(self) -> str:\n return pulumi.get(self, \"failover_group_id\")",
"def custom_compliance_domain_id(self):\n return self._custom_compliance_domain_id",
"def vulnerability_resilience_id():\n\n if S3VulnerabilityModel.resilience_pid is None:\n # Get the parameter_id of the aggregated_indicator\n db = current.db\n table = db.vulnerability_aggregated_indicator\n row = db(table.uuid == \"Resilience\").select(table.parameter_id,\n limitby=(0, 1)).first()\n try:\n S3VulnerabilityModel.resilience_pid = row.parameter_id\n except:\n # DB not initialised\n pass\n\n return S3VulnerabilityModel.resilience_pid",
"def get_resource_id(self) -> str:\n c_expected = tankerlib.tanker_encryption_session_get_resource_id(self.c_session)\n c_id = ffihelpers.unwrap_expected(c_expected, \"char*\")\n return ffihelpers.c_string_to_str(c_id)",
"def chain_id(self) -> str:\n return pulumi.get(self, \"chain_id\")",
"def managed_rule_identifier(self) -> str:\n return pulumi.get(self, \"managed_rule_identifier\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
join the input string
|
def my_join(iters, string):
out = ""
for i in range(iters):
out += "," + string
return out
|
[
"def join(self, iterable): # real signature unknown; restored from __doc__\n return \"\"",
"def join(self, iterable) -> String:\n pass",
"def join_strings(words):\n joined_string = ''\n for word in words:\n joined_string += word\n\n return joined_string",
"def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += string.join(\", \")\n return out",
"def strjoin(s):\n\n return lambda l: s.join(map(str, l))",
"def my_join(iters, string):\n out=''\n for i in range(iters):\n out += string.join(\", \")\n #add string together with , as seperator\n #repeat iters numbers of times\n return out",
"def join(chars=()):\n return ''.join(chars)",
"def join_text(*args):\n return \" \".join(filter(None, args))",
"def join(self, tokens):\n if self._alphanum_only:\n return \" \".join(tokens)\n else:\n # Fully invertible\n return \"\".join(tokens)",
"def __join(ids):\n return \";\".join((str(id) for id in ids))",
"def join(self, tokens):\n if self.chars:\n joiner = ''\n else:\n joiner = ' '\n return joiner.join(tokens)",
"def primitive_string_join(\n sep: strings.String, elements: tuples.Tuple\n) -> strings.String:\n chunks: t.List[str] = []\n for element in elements.components:\n if not isinstance(element, strings.String):\n raise InvalidOperationError(f\"expected string but found {type(element)}\")\n chunks.append(element.value)\n return strings.create(sep.value.join(chunks))",
"def _urljoin(self, *args):\r\n\t\treturn \"/\".join(map(lambda x: str(x).rstrip('/'), args))",
"def _joinSequence(seq, lastSeparator=''):\n count = len(seq)\n return ', '.join(_formatElement(element, count, i, lastSeparator)\n for i, element in enumerate(seq))",
"def join_string(part1, part2, concatenation_string = 'AND', seperator=' '):\n\n if part1 == '':\n return part2\n\n elif part2 == '':\n return part1\n\n\n if part1[-1] == seperator:\n sep1 = ''\n else:\n sep1 = seperator\n\n\n if part2[0] == seperator:\n sep2 = ''\n else:\n sep2 = ' '\n\n\n return part1 + sep1 + concatenation_string + sep2 + part2",
"def _join(self, items, separator=' '):\n \n return separator.join(map(lambda s: self._encode(s), items));",
"def word_join(self, words):\n return \" \".join(words)",
"def list_join(the_list):\n return ' '.join(the_list)",
"def rejoin(textList):\n return ','.join(textList)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds the keys 'logits' and 'probs' to the end points dictionary of ResNet50v2.
|
def _get_updated_endpoints(original_end_points, name):
end_points = dict(original_end_points)
end_points['logits'] = tf.squeeze(end_points[name], [1, 2])
end_points['probs'] = tf.nn.softmax(end_points['logits'])
return end_points
|
[
"def extend_network_dict(self, session, base_model, result):\n self._call_on_dict_driver(\"extend_network_dict\", session, base_model,\n result)",
"def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2",
"def init_output_dict(self):\n return {\n \"outputs\": torch.FloatTensor(),\n \"pred_probs\": torch.FloatTensor(),\n \"labels\": torch.LongTensor(),\n }",
"def feed_dict(self):\n return {self.lr_tensor: self.lr()}",
"def _get_point_feed_dict(self, data, mode='training', lr=None):\n data = list(zip(*data))\n labels = data[-1]\n soft_labels = np.array([[1 if t == i else 0\n for i in range(self.args.num_class)] \\\n for t in labels])\n sig_labels = labels\n if(lr is None):\n lr = self.args.learn_rate\n feed_dict = {\n self.q1_inputs:data[self.imap['q1_inputs']],\n self.q2_inputs:data[self.imap['q2_inputs']],\n self.q1_len:data[self.imap['q1_len']],\n self.q2_len:data[self.imap['q2_len']],\n self.c1_inputs:data[self.imap['c1_inputs']],\n self.c2_inputs:data[self.imap['c2_inputs']],\n self.c1_len:data[self.imap['c1_len']],\n self.c2_len:data[self.imap['c2_len']],\n self.learn_rate:lr,\n self.batch_size:len(data[self.imap['q2_len']]),\n self.dropout:self.args.dropout,\n self.rnn_dropout:self.args.rnn_dropout,\n self.emb_dropout:self.args.emb_dropout,\n self.soft_labels:soft_labels,\n self.sig_labels:sig_labels\n }\n\n if self.args.implicit == 1:\n feed_dict[self.user_id] = data[self.imap['user_id']]\n feed_dict[self.item_id] = data[self.imap['item_id']]\n if (mode!='infer'):\n #feed_dict[self.gen_outputs] = self.imap['gen_outputs']\n feed_dict[self.gen_len] = data[self.imap['gen_len']]\n\n max_len = 0\n for i in range(len(feed_dict[self.gen_len])):\n if max_len < feed_dict[self.gen_len][i]:\n max_len = feed_dict[self.gen_len][i]\n\n padding_outputs = []\n for i in range(len(data[self.imap['gen_outputs']])):\n padding_outputs.append(pad_to_max(data[self.imap['gen_outputs']][i], max_len))\n feed_dict[self.gen_outputs] = padding_outputs\n #print (len(feed_dict[self.gen_outputs]), len(feed_dict[self.gen_outputs][0]))\n \n if(mode!='training'):\n feed_dict[self.dropout] = 1.0\n feed_dict[self.rnn_dropout] = 1.0\n feed_dict[self.emb_dropout] = 1.0\n return feed_dict",
"def extend_l2_policy_dict(self, session, result):\n pass",
"def extend_policy_classifier_dict(self, session, result):\n pass",
"def extend_port_dict(self, session, base_model, result):\n self._call_on_dict_driver(\"extend_port_dict\", session, base_model,\n result)",
"def _set_resnet_arg_scope(self):\n vs_initializer = tf.keras.initializers.VarianceScaling(2.0)\n l2_regularizer = tf.keras.regularizers.l2(self.config.GENERATOR_WEIGHT_DECAY)\n for layer in self.resnet50V2.layers:\n if isinstance(layer, layers.Conv2D):\n # original implementations slim `resnet_arg_scope` additionally sets\n # `normalizer_fn` and `normalizer_params` which in TF 2.0 need to be implemented\n # as own layers. This is not possible using keras ResNet50V2 application.\n # Nevertheless this is not needed as training seems to be likely stable.\n # See https://www.tensorflow.org/guide/migrate#a_note_on_slim_contriblayers for more\n # migration insights\n setattr(layer, 'padding', 'same')\n setattr(layer, 'kernel_initializer', vs_initializer)\n setattr(layer, 'kernel_regularizer', l2_regularizer)\n if isinstance(layer, layers.BatchNormalization):\n setattr(layer, 'momentum', 0.997)\n setattr(layer, 'epsilon', 1e-5)\n if isinstance(layer, layers.MaxPooling2D):\n setattr(layer, 'padding', 'same')",
"def build_feed_dict(self, input_frames, gt_output_frames, generator):\n feed_dict = {}\n batch_size = np.shape(gt_output_frames)[0]\n\n ##\n # Get generated frames from GeneratorModel\n ##\n\n g_feed_dict = {generator.input_frames_train: input_frames,\n generator.gt_frames_train: gt_output_frames}\n g_scale_preds = self.sess.run(generator.scale_preds_train, feed_dict=g_feed_dict)\n\n ##\n # Create discriminator feed dict\n ##\n for scale_num in xrange(self.num_scale_nets):\n scale_net = self.scale_nets[scale_num]\n\n # resize gt_output_frames\n scaled_gt_output_frames = np.empty([batch_size, scale_net.height, scale_net.width, 3])\n for i, img in enumerate(gt_output_frames):\n\t\t# for skimage.transform.resize, images need to be in range [0, 1], so normalize to\n # [0, 1] before resize and back to [-1, 1] after\n sknorm_img = (img / 2) + 0.5\n\n\n # https://github.com/dyelax/Adversarial_Video_Generation/issues/18\n sknorm_img = np.minimum(sknorm_img, 1)\n sknorm_img = np.maximum(sknorm_img, 0)\n\n\n\n resized_frame = resize(sknorm_img, [scale_net.height, scale_net.width, 3])\n scaled_gt_output_frames[i] = (resized_frame - 0.5) * 2\n\n # combine with resized gt_output_frames to get inputs for prediction\n scaled_input_frames = np.concatenate([g_scale_preds[scale_num],\n scaled_gt_output_frames])\n\n # convert to np array and add to feed_dict\n feed_dict[scale_net.input_frames] = scaled_input_frames\n\n # add labels for each image to feed_dict\n batch_size = np.shape(input_frames)[0]\n feed_dict[self.labels] = np.concatenate([np.zeros([batch_size, 1]),\n np.ones([batch_size, 1])])\n\n return feed_dict",
"def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train",
"def extend_policy_target_dict(self, session, result):\n pass",
"def _extend_network_dict_profile(self, context, network):\n binding = n1kv_db_v2.get_network_binding(context.session,\n network['id'])\n network[n1kv.PROFILE_ID] = binding.profile_id",
"def _extend_network_dict_profile(self, context, network):\n binding = n1kv_db_v2.get_network_binding(context.session,\n network['id'])\n network[n1kv_profile.PROFILE_ID] = binding.profile_id",
"def on_predict_end(self, logs=None):",
"def extend_policy_rule_dict(self, session, result):\n pass",
"def extend_nat_pool_dict(self, session, result):\n pass",
"def _add_loss(self, return_dict, key, value):\n\t\treturn_dict['losses'][key] = value",
"def add_metrics_to_db(self) -> None:\n\n model = {\n 'id': 'model1',\n 'name': 'Housing Price Prediction',\n 'metrics': {\n 'mean_squared_error': mean_squared_error(self._y_test, self._predictions),\n 'mean_absolute_error': mean_absolute_error(self._y_test, self._predictions),\n 'r2_score': r2_score(self._y_test, self._predictions)\n }\n }\n\n self._db.add_model(model)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load weights from a checkpoint file into the tensorflow graph.
|
def load_weights(self, checkpoint_path, sess=None):
if sess is None:
sess = tf.get_default_session()
assert sess is not None
saver = tf.train.Saver(self.variables_to_restore)
saver.restore(sess, checkpoint_path)
|
[
"def load(self):\n latest = tf.train.latest_checkpoint(self.checkpoint_dir)\n self.model.load_weights(latest)",
"def load_weights(model, checkpoint_path):\n # Your code here\n \n model.load_state_dict(torch.load(checkpoint_path))\n model.eval()",
"def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')",
"def load_weights(self, model_name: str, checkpoint: int, path: str = './models/'):\n path_to_model = path + model_name + '/checkpoint_' + str(checkpoint) + '/model_weights'\n self.model.load_weights(path_to_model)",
"def load_weights(self, filename):",
"def loadweights(self, filename):",
"def load_weights(self, filepath):\n self.model.load_weights(filepath)",
"def load_weights(self, weight_file):\r\n self.model.load_weights(weight_file)",
"def load_trained_weights(self, weights):\n\t\tself.model.load_weights(weights)\n\t\t#self.model = tf.keras.models.load_model(weights)\n\t\tprint('Weights from {} loaded successfully'.format(weights))",
"def load_network_weights(self, path_to_network):\n ckpt = tf.train.get_checkpoint_state(path_to_network)\n if ckpt and ckpt.model_checkpoint_path:\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\n self.weights_loaded = True\n else:\n self.weights_loaded = False",
"def load_model_weights(model, checkpoint, epoch='latest'):\n checkpoint_dir = checkpoint if os.path.basename(checkpoint) == 'weights' \\\n else os.path.join(checkpoint, 'weights') # checkpts in 'weights' dir\n checkpoint_path = CheckpointHandler.get_resume_ckpt_file(epoch, checkpoint_dir)\n CheckpointHandler.load_weights(checkpoint_path, model=model)",
"def load_weights(weight_path: str) -> Dict[str, Any]:\n if not gfile.exists(weight_path):\n raise ValueError('Matching checkpoint not found: {}'.format(weight_path))\n else:\n logging.info('Loading weights from %s', weight_path)\n with gfile.GFile(weight_path, 'rb') as fp:\n params = serialization.from_bytes(None, fp.read())\n return jax.tree_map(jnp.asarray, params)",
"def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")",
"def load_trained_weights(self, weights):\n self.model.load_weights(weights)\n print('Weights from {} loaded successfully'.format(weights))",
"def load_model_weights(self, filename):\n self.model.load_weights(filename)",
"def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)",
"def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state",
"def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')",
"def _load_checkpoint(self):\n\t\tself.saver = tf.train.Saver(max_to_keep=5)\n\t\tcheckpoint = tf.train.get_checkpoint_state(self.checkpoint_dir)\n\n\t\t# If checkpoint exists and is reachable, load checkpoint state into 'sess'\n\t\tif checkpoint and checkpoint.model_checkpoint_path:\n\t\t\t\tself.saver.restore(self.sess, checkpoint.model_checkpoint_path)\n\t\t\t\tprint('loaded checkpoint: {}'.format(\n\t\t\t\t\tcheckpoint.model_checkpoint_path))\n\t\telse:\n\t\t\t\tprint(\n\t\t\t\t\t\t'Could not find old checkpoint. '\n\t\t\t\t\t\t'Creating new checkpoint directory.'\n\t\t\t\t)\n\t\t\t\tif not os.path.exists(self.checkpoint_dir):\n\t\t\t\t\t\tos.mkdir(self.checkpoint_dir)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Warn about unused static variables.
|
def _find_unused_static_warnings(filename, lines, ast_list):
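    # gather every top-level declaration that carries the 'static' modifier, keyed by name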
static_declarations = {
node.name: node
for node in ast_list
if (isinstance(node, ast.VariableDeclaration) and
'static' in node.type.modifiers)
}
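    # nested helper: bump the use count for any node in a body whose name matches a static declaration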
def find_variables_use(body):
for child in body:
if child.name in static_declarations:
static_use_counts[child.name] += 1
static_use_counts = collections.Counter()
for node in ast_list:
if isinstance(node, ast.Function) and node.body:
find_variables_use(node.body)
elif isinstance(node, ast.Class) and node.body:
for child in node.body:
if isinstance(child, ast.Function) and child.body:
find_variables_use(child.body)
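    # report every static declaration that was never referenced in a function or method body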
for name in sorted(static_declarations):
if not static_use_counts[name]:
print("{}:{}: unused variable '{}'".format(
filename,
lines.get_line_number(static_declarations[name].start),
name))
|
[
"def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = nones.union(set(filter(lambda x: str(self.__dict__[x]) == \"\", clsvars)))\n unused = clsvars - variables - exceptions - nones\n return unused",
"def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))",
"def test_unusedVariable(self):\r\n self.flakes('''\r\n def a():\r\n b = 1\r\n ''', m.UnusedVariable)",
"def checkUnusedAssignments():\r\n for name, binding in self.scope.unusedAssignments():\r\n self.report(messages.UnusedVariable, binding.source, name)",
"def test_warningSuppressed(self):\r\n self.flakes('''\r\n import foo\r\n __all__ = [\"foo\"]\r\n ''')",
"def test_ignoredInFunction(self):\n self.flakes('''\n def foo():\n import bar\n __all__ = [\"bar\"]\n ''', m.UnusedImport, m.UnusedVariable)",
"def init_warnings():\n warnings.simplefilter(\"ignore\", category=AstropyWarning)",
"def test_fail_priv_var_used(self): # suppress(no-self-use)\n script = \"message (${_VALUE})\\n\"\n with ExpectedException(LinterFailure):\n run_linter_throw(script, whitelist=[\"access/private_var\"])",
"def _suppress_warnings():\n import warnings\n import sys\n import os\n if os.path.basename(sys.argv[0]) != \"trial\":\n warnings.simplefilter(\"ignore\")",
"def _check_unused_parameters(self):\n all_params = set(self.parameters.keys())\n processed_params = set(self.processed_parameters)\n unused_params = all_params - processed_params - RESERVED_ARGS\n\n if unused_params:\n self.log.warning(\"The following parameters were ignored: %s\",\n ', '.join(sorted(unused_params)))",
"def test_ignoredInClass(self):\n self.flakes('''\n import bar\n class foo:\n __all__ = [\"bar\"]\n ''', m.UnusedImport)",
"def test_instances(self):\n\n @deprecate(bar=\"use baz instead\")\n def foo(bar=None, baz=None):\n pass\n\n @deprecate(baz=\"use bar instead\")\n def food(bar=None, baz=None):\n pass\n\n with warnings.catch_warnings(record=True) as w:\n foo(bar=True)\n food(baz=True)\n self.assertEqual(len(w), 2, \"Not all warnings preserved.\")",
"def test_unusedVariableAsLocals(self):\r\n self.flakes('''\r\n def a():\r\n b = 1\r\n return locals()\r\n ''')",
"def filter_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n warnings.simplefilter(\"ignore\", category=LightningDeprecationWarning)",
"def log_unused(self, error=True):\n have_unused = False\n log = get_logger().error if error else get_logger().info\n for name in self._all_names:\n current_set = getattr(self, name, None)\n if current_set:\n log('Unused from %s: %s', name.upper(), current_set)\n have_unused = True\n return have_unused",
"def test_unusedImport_underscore(self):\n self.flakes('import fu as _', m.UnusedImport)",
"def test_pyflakesWarning(self):\r\n sourcePath = self.makeTempFile(\"import foo\")\r\n count, errors = self.getErrors(sourcePath)\r\n self.assertEqual(count, 1)\r\n self.assertEqual(\r\n errors, [('flake', str(UnusedImport(sourcePath, Node(1), 'foo')))])",
"def test_magicGlobalsName(self):\r\n self.flakes('__name__')",
"def get_vars_noload(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the parsed contents of the config file.
|
def get_config():
return json.loads(CONFIG_FILE.read_text())
|
[
"def read(self):\n if self.default_file:\n self.read_default_config()\n return self.read_config_files(self.all_config_files())",
"def read_config():\n\n\tfilename = \"config.json\"\n\n\tfile_object = open(filename, \"r\")\n\n\treturn json.loads(file_object.read())",
"def _get_config(self, unit, filename):\n file_contents = unit.file_contents(filename)\n config = ConfigParser.ConfigParser()\n config.readfp(io.StringIO(file_contents))\n return config",
"def parse_config(self):\n # TODO: parse config file\n pass",
"def read_config():\n config = cp.ConfigParser()\n config.read(\"config.ini\")\n return config",
"def get_config_data(self, config_file):\n config_file = Path(config_file)\n return YamlManger(config_file).get()",
"def read_config(self):\n now = timestamp()\n with open(self.path) as config:\n data = config.read()\n return data, now",
"def read_config():\n\n content = None\n\n baseDir = Util.get_base_directory()\n configDir = baseDir + '/resources/config.json'\n\n try:\n with open(configDir) as json_file:\n content = json.load(json_file)\n\n except IOError:\n Util.write('Config nicht gefunden unter dem Dateipfad {configDir}\\n')\n\n return content",
"def _read_config_file(self):\r\n\r\n try:\r\n with open(self.config, 'r') as f:\r\n config_data = json.load(f)\r\n except FileNotFoundError:\r\n config_data = {}\r\n\r\n return config_data",
"def read_config(file_name):\n return json.loads(open(file_name).read())",
"def get_config():\n handle = open(\"config.json\", \"r\")\n raw_json = handle.read()\n handle.close()\n return json.loads(raw_json)",
"def _config_read():\n bmcConfig = ConfigParser.RawConfigParser()\n configFile = os.path.join(os.path.dirname(__file__), 'op_ci_tools.cfg')\n bmcConfig.read(configFile)\n return dict(bmcConfig.items('bmc')), dict(bmcConfig.items('test')), dict(bmcConfig.items('host'))",
"def readConfig():\n hosts = []\n domains = []\n with open(\"./host.conf\", \"r\") as fd:\n for line in fd.readlines():\n line = line.strip().split()\n if line != []:\n # Parse config for zone files and hosts\n if line[0] == \"ZONE_FILE:\":\n zoneFile = line[1]\n if line[0] == \"REVERSE_ZONE_FILE:\":\n reverseZoneFile = line[1]\n if line[0] == \"HOST:\":\n hosts.append((line[1], line[2], line[3]))\n if line[0] == \"DOMAIN:\":\n domains.append((line[1], line[2], line[3]))\n\n return zoneFile, reverseZoneFile, hosts, domains",
"def read_settings():\n settings_path = join(dirname(dirname(__file__)), '.settings')\n filename = settings_path\n settings = configparser.ConfigParser()\n settings.read(filename)\n return settings",
"def load_config():\n config = ConfigParser()\n config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))\n return config",
"def read_config():\n with open(\"config.yml\", \"r\") as ymlfile:\n config = yaml.safe_load(ymlfile)\n \n return config",
"def get_config_file_content(self):\n\n config_content: List[str] = [\n 'server {',\n\t ' listen {};'.format(self.port),\n '',\n ' ##',\n ' # PHP-FPM',\n ' ##',\n ' #location ~ \\.php$ {',\n \t ' #include /etc/nginx/fastcgi_params;',\n\t\t ' #root /var/www/src;',\n ' #fastcgi_split_path_info ^(.+?\\.php)(/.*)$;',\n ' #fastcgi_pass\tphpfpm:3002;',\n\t\t ' #fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;',\n ' #}',\n '',\n ' location / {',\n\t\t ' root /var/www/src;',\n ' index index.html;'\n\t\t ' #index index.php;',\n\t\t ' #rewrite ^ /index.php?$args last; break;',\n\t ' }',\n '}'\n ]\n return config_content",
"def get_config(self, file_path=None):\n if file_path == None:\n json_config = sys.stdin.read()\n else:\n config_file = open(file_path)\n json_config = config_file.readlines()\n config_file.close()\n\n config = json.loads(json_config)\n\n return config",
"def read_config_file(filename):\n\n # Read the config file\n toml_data = open(filename).read()\n\n # Load the definitions in the config file\n data = toml.loads(toml_data)\n\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Seed users. By default, set to 5 users.
|
def seed_User(number=5, overwrite=False):
if overwrite:
print('Overwriting all users')
User.objects.all().delete()
count = 0
for i in range(number):
username = fake.first_name()
User.objects.create_user(
email=username + "@blogmail.com",
password="vns12345",
name=username,
date_joined=datetime.datetime.now(),
is_active=1,
is_superadmin=0,
avatar='',
is_staff=1
)
count += 1
percent_complete = count / number * 100
print(
"Adding {} new Users: {:.2f}%".format(
number, percent_complete),
end='\r',
flush=True
)
print()
|
[
"def populate(self, nbUsers):\n users = []\n f = faker.Faker()\n\n for i in range(nbUsers):\n user, addr = self.create_user(f.name(), f.address())\n users.append(user)\n\n self.session.add_all(users)\n self.session.commit()",
"def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()",
"def _create_users(self, number_users: int) -> None:\n for i in range(1, number_users + 1):\n self.register_user(\n \"user%d\" % i,\n \"pass%d\" % i,\n admin=False,\n displayname=\"Name %d\" % i,\n )",
"def create_users(self):\n self.test_runner.run_users_create()",
"def create_users(self, count):\n\n users = []\n\n for i in range(1, count+1):\n user = User(f'user{i}', '', 'password')\n user.save()\n users.append(user)\n\n return users",
"def populate(N=5):\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]",
"def generate_users(self, num_users=200):\n if num_users < 2:\n raise Exception('num_users should be more than 2')\n\n self.user_list = [] # initialize the self.user_list\n for x in range(num_users):\n user, created = User.objects.get_or_create( username='user%s'%str(x))\n self.user_list.append( user )\n user.set_password('test')\n # add 10000 to their cash account\n ct = CashTransaction( user )\n ct.deposit(10000.00) # add funds\n\n return self.user_list # return it",
"def insert_default_users():\n user1 = User(email=current_app.config['ADMIN_EMAIL'],\n password=current_app.config['ADMIN_PW'],\n first_name=current_app.config['ADMIN_FIRST_NAME'],\n last_name=current_app.config['ADMIN_LAST_NAME'],\n confirmed=True)\n user1.role = Role.query.filter_by(name='Administrator').first()\n db.session.add(user1)\n\n user2 = User(email=current_app.config['USERMANAGER_EMAIL'],\n password=current_app.config['USERMANAGER_PW'],\n first_name=current_app.config['USERMANAGER_FIRST_NAME'],\n last_name=current_app.config['USERMANAGER_LAST_NAME'],\n confirmed=True)\n user2.role = Role.query.filter_by(name='Usermanager').first()\n db.session.add(user2)\n\n user3 = User(email=current_app.config['USER_EMAIL'],\n password=current_app.config['USER_PW'],\n first_name=current_app.config['USER_FIRST_NAME'],\n last_name=current_app.config['USER_LAST_NAME'],\n confirmed=True)\n user3.role = Role.query.filter_by(name='User').first()\n db.session.add(user3)\n\n db.session.commit()",
"def add_users():\n tokens_initialize.create_all()",
"def _create_users(self):\r\n users = []\r\n for i in range(8):\r\n username = \"user{}\".format(i)\r\n email = \"test+user{}@edx.org\".format(i)\r\n user = User.objects.create_user(username, email, 'foo')\r\n user.is_active = True\r\n user.save()\r\n users.append(user)\r\n return users",
"def user_batch():\n return [\n UserFactory(roles=RoleFactory.create_batch(randint(0, 3)))\n for _ in range(randint(3, 5))\n ]",
"def load_users():\n\n \n\n User.query.delete()\n\n with open(\"seed_data/seed_users.psv\") as users:\n for row in users:\n username, fname, lname, email, password, user_role = row.strip().split(\"|\")\n\n user = User(username=username,\n fname=fname,\n lname=lname,\n email=email,\n password=generate_password_hash(password),\n user_role=user_role)\n\n db.session.add(user)\n\n db.session.commit()",
"def simulate(self, num_users,seed = random.randint(0,1000000)):\n ## Sets the random seed (deterministic if specified as arg)\n random.seed(seed)\n\n ## Calculates the set of unused ids\n possible_ids = set(range(9000000))\n pre_existing_ids = set([user.user_id for user in self.users])\n unused_ids = list(possible_ids - pre_existing_ids)\n\n ## Draws a set of non-overlapping unique ids for\n ## the simulated users\n user_ids = random.sample(unused_ids,num_users)\n\n ## Simulates a user for each user_id, and adds\n ## to the set of users\n for user_id in user_ids:\n self.users.add(self.__simulate_one(user_id))",
"def load_users():\n\n print \"Users\"\n\n User.query.delete()\n\n for row in open(\"seed_data/u.user\"):\n row = row.rstrip()\n ID, password, name, first_entry_at = row.split(\"|\")\n first_entry_at = datetime.strptime(first_entry_at, \"%m-%d-%y\")\n\n user = User(ID=ID, password=password, name=name, first_entry_at=first_entry_at)\n\n db.session.add(user)\n\n db.session.commit()",
"def _users_create(self, tenant, users_per_tenant, name_length=10):\n for i in range(users_per_tenant):\n name = self._generate_random_name(length=name_length)\n password = name\n email = (name + \"@rally.me\")\n self.admin_clients(\"keystone\").users.create(name, password, email,\n tenant_id=tenant.id)",
"def seed_users(project_env, runlevel):\n\n db_client_maker = core_db.get_nest_users_sqla_maker()\n md = nest_db.get_global_sqlalchemy_metadata()\n engine = nest_db.get_global_sqlalchemy_engine()\n #note this is a tablelike client, not a NestUser client\n db_client = db_client_maker.get_db_client(engine, md)\n\n #needs a unique *instance* of system_user to act as 'owner' \n #as we will alter the instance that we add to the table\n db_client.set_requesting_user(core_db.get_system_user())\n\n user_configs = nest_config.generate_seed_users(project_env, runlevel)\n \n success = _add_users_from_configs(db_client, user_configs)\n return success",
"def createUsers():\n\n # create superuser\n superuser = User.objects.create_superuser(username='admin', email='instantmusicapp+admin@gmail.com', password='admin')\n print(f'Created superuser {superuser}')\n\n # create normal users\n for user_params in ['user', 'user2', 'user3']:\n friends = list(User.objects.all()) # to avoid being friends with yourself\n user = User.objects.create_user(username=user_params, email=f'instantmusicapp+{user_params}@gmail.com', password=user_params)\n user.friends.set(friends)\n user.pause_song = getRandomObject(Song)\n user.pause_second = randint(1, user.pause_song.duration - 1)\n user.albums.set(getRandomObject(Album, randint(1, 5)))\n print('Created normal user:', user)",
"def create_users(num):\n users = []\n for person in range(1, num):\n user = User(\n \tusername=fake.first_name(),\n \temail=fake.safe_email(),\n \tpassword='password'\n \t)\n users.append(user)\n try:\n User.objects.bulk_create(users)\n except IntegrityError:\n \tprint 'One of the usernames already exists'\n\n return User.objects.all().order_by('-id')[:num-1]",
"def _create_and_enroll_users(self, count):\n users = []\n for _ in range(count):\n user = UserFactory()\n CourseEnrollmentFactory.create(user=user, course_id=self.course.id)\n users.append(user)\n return users"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
set_score increments the score by change; change can be negative.
|
def set_score(self, change):
self._score = self._score + change
|
[
"def set_score(self, score: float):\n self.score = score",
"def set_score(self, score):\n self._score = score",
"def update_score():\n pass",
"def increase_score(self, score):\r\n self.score += score",
"def set_score(self, score):\n self.score_function = score",
"def score(self, score):\n\n self._score = score",
"def increase_score(self):\n self.score += 10",
"def increaseScore(self):\n self.score = self.score+1",
"def updateScore(score):\n return score + 1",
"def increase_score(self):\n self.score += 1",
"def increment_score(self):\n self.score += 1",
"def change_score(self, change: float=1):\n self._score += change",
"def change_score(self, change: float = 1):\n self._score += change",
"def _change_score(self, score):\n self.score = score\n\n level = (self.score // 10) + 1\n if level != self.level:\n self.level = level\n print('Level: {}'.format(level))\n\n for level_change_callback in self._on_level_change:\n level_change_callback(level)\n\n for score_change_callback in self.on_score_change:\n score_change_callback(score)",
"def _set_score(self, new_score):\n if not isinstance(new_score, int):\n raise ValueError('Score can only be an integer')\n if self._score > new_score:\n raise ValueError(f'New score ({new_score}) must be higher than previous score ({self._score})')\n self._score = new_score\n belt = self._get_belt(new_score)\n if self._last_earned_belt is None or self._last_earned_belt_number < belt:\n self._last_earned_belt_number = belt\n print(f'Congrats, you earned {new_score} points obtaining the PyBites Ninja '\n f'{self._last_earned_belt.title()} Belt')\n else:\n print(f'Set new score to {new_score}')",
"def increase_score(self, increase):\n if increase > 0:\n self.__score += increase",
"def update_score(self, amount=1):\n self.score = (self.score + amount)",
"def adjust_score(self):\n self.score += game.temporary_score",
"def decrement_score(self):\n if self.score > 0:\n self.score -= 1\n self.parent_post.decrement_total_score()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
move_ray is the primary function responsible for recursively moving a ray. Although it primarily looks after the action of the Ray.Ray class, it lives in the Game instance itself. This is how we determine the exit point of all rays: horizontal, vertical, or with detours.
|
def move_ray(self, ray):
# look to the next spot in the ray's trajectory
next_coordinates = ray.get_next_location()
next_location = self._board.get_board_square(next_coordinates)
    # check for a collision - return if it occurs
if ray.check_for_collision(next_location):
return
# if we didn't collide as we moved we need to look to check our
# diagonals for atoms
ccw_diag_coordinates, cw_diag_coordinates = ray.get_diagonals()
ccw_diagonal = self._board.get_board_square(ccw_diag_coordinates)
cw_diagonal = self._board.get_board_square(cw_diag_coordinates)
if ccw_diagonal.is_atom() or cw_diagonal.is_atom():
        # If we're on our first move and the immediate diagonals contain an atom, we have a reflection
if ray.get_current_location() == ray.get_origin_location():
terminal_square = self._board.get_board_square(
ray.get_current_location())
            # lets the ray know it's finished and which square is its endpoint
# self.end_ray(ray, terminal_square)
return ray.record_edge_collision(terminal_square)
# otherwise they cause a bend in the path
else:
# we have to calculate our trajectory based on the pull
# of the atoms in our path
ray.recalculate_trajectory(ccw_diagonal, cw_diagonal)
# get the coordinates of the next location in our new trajectory
next_coordinates = ray.get_next_location()
# determine the next coordinate will result in a collision - return if it would
if ray.check_for_collision(
self._board.get_board_square(next_coordinates)):
return
# move the ray to the next step forward in its current trajectory
ray.set_current_location(next_coordinates)
# finally, recursively call our current function from the next step in its path.
self.move_ray(ray)
|
[
"def shoot_ray(self, row, column):\n # check if row/column is an allowed entry point\n if (row, column) not in self._allowed_entry_points:\n return False\n\n # add entry to entry/exit point list and deduct point if entry hasn't already been used\n if (row, column) not in self._entry_and_exit_points:\n self._entry_and_exit_points.append((row, column))\n self._score -= 1\n\n # determine which direction ray will initially be traveling\n direction = self.check_direction(row, column)\n\n # Check the reflection edge case where atom is along a border and ray is shot\n # right next to said atom\n if self.check_edge_case_reflection(row, column, direction):\n return (row, column)\n\n # If we haven't hit a return statement then follow the rest of the ray's path\n # Call the correct method based on the initial direction that the ray is traveling\n # Exit point will hold tuple of exit point or None if a hit occurred\n if direction == 'right':\n exit_point = self.travel_right(row, column)\n elif direction == 'left':\n exit_point = self.travel_left(row, column)\n elif direction == 'up':\n exit_point = self.travel_up(row, column)\n elif direction == 'down':\n exit_point = self.travel_down(row, column)\n\n # If exit point was not a hit and isn't already an entry/exit point, subtract 1\n # from the score and add exit_point to entry/exit point list\n if exit_point is not None and exit_point not in self._entry_and_exit_points:\n self._entry_and_exit_points.append(exit_point)\n self._score -= 1\n return exit_point",
"def _trace_ray(self, start, piece, ray):\n res_moves = []\n\n for end in ray:\n\n sym = piece.lower()\n del_x = abs(end - start) % 8\n move = [Game.i2xy(start) + Game.i2xy(end)]\n tgt_owner = self.board.get_owner(end)\n\n # Abort if the current player owns the piece at the end point\n if tgt_owner == self.state.player:\n break\n\n # Test castling exception for king\n if sym == 'k' and del_x == 2:\n gap_owner = self.board.get_owner((start + end) // 2)\n out_owner = self.board.get_owner(end - 1)\n rights = {62: 'K', 58: 'Q', 6: 'k', 2: 'q'}.get(end, ' ')\n if (tgt_owner or gap_owner or rights not in self.state.rights\n or (rights.lower() == 'q' and out_owner)):\n # Abort castling because missing castling rights\n # or piece in the way\n break\n\n if sym == 'p':\n # Pawns cannot move forward to a non-empty square\n if del_x == 0 and tgt_owner:\n break\n\n # Test en passant exception for pawn\n elif del_x != 0 and not tgt_owner:\n ep_coords = self.state.en_passant\n if ep_coords == '-' or end != Game.xy2i(ep_coords):\n break\n\n # Pawn promotions should list all possible promotions\n if (end < 8 or end > 55):\n move = [move[0] + s for s in ['b', 'n', 'r', 'q']]\n\n res_moves.extend(move)\n\n # break after capturing an enemy piece\n if tgt_owner:\n break\n\n return res_moves",
"def propagate_ray(self, ray):\n \n kmag = ray._k / np.linalg.norm(ray._k)\n self.intercept(ray)\n \n if self._intersection == None:\n raise Exception(\"Ray Terminated; does not intersect with lens.\")\n \n else:\n if self._curvature == 0:\n normal = np.array([0,0,1])\n \n elif self._curvature > 0:\n normal = self._intersection - self._centreofcurv\n \n else:\n normal = self._centreofcurv - self._intersection\n \n nhat = normal / np.linalg.norm(normal)\n \n newdirection = self.refraction(kmag, nhat, self._n1, self._n2)\n \n if newdirection == None:\n raise Exception(\"Invalid refracted ray direction\")\n \n else:\n ray.append(self._intersection, newdirection)",
"def shoot_ray(self, row, column):\n\n # check if ray is being shot from corner square\n if (row == 0 or row == 9) and (column == 0 or column == 9):\n return False\n\n # check if ray is being shot from non-border square\n if row in range(1, 9) and column in range(1, 9):\n return False\n\n if self._ray_color is None:\n self._ray_color = 0\n else:\n self._ray_color += 1\n\n self.adjust_score(row, column, self._ray_color) # adjust score for entry ray position\n self._ray_status = 'Play' # set flag variable for ray status\n\n if column == 0 or column == 9: # if shooting from horizontal position\n if column == 0: # if ray is moving to the right\n self.horiz_move_right(row, column)\n elif column == 9: # if ray is moving to the left\n self.horiz_move_left(row, column)\n\n if row == 0 or row == 9: # if shooting from vertical position\n if row == 0: # if ray is moving down\n self.vert_move_down(row, column)\n elif row == 9: # if ray is moving up\n self.vert_move_up(row, column)\n\n if self._ray_status == \"Hit\": # if ray hits an atom\n return None\n\n elif self._ray_status == \"Exit\": # if ray is a miss\n self.adjust_score(self._ray_row, self._ray_column, self._ray_color) # adjust score with exit ray position\n return self._ray_row, self._ray_column",
"def follow(ray: Ray, scene: Scene, max_iters=1000, renderer=None) -> [Tuple[Ray, Decision]]:\n path = [(ray, Decision.EMIT)]\n idx = 0\n last_ray = ray\n while ray.is_alive:\n intersections = scene.intersections(ray.position, ray.direction)\n points, nodes = zip(*[(x.point, x.hit) for x in intersections])\n for ray, decision in step(ray, points, nodes, renderer=renderer):\n path.append((ray, decision))\n if points_equal(ray.position, last_ray.position) and np.allclose(ray.direction, last_ray.direction):\n raise TraceError(\"Ray did not move.\")\n last_ray = ray\n if idx > max_iters:\n raise TraceError(\"Ray got stuck.\")\n return path",
"def propagate_ray(self, ray):\n \t\traise NotImplementedError()",
"def horiz_move_left(self, ray_path_r, ray_path_c):\n\n self._ray_row = ray_path_r\n self._ray_column = ray_path_c\n\n while self._ray_status == \"Play\": # continue ray path determination while still in \"play\"\n\n # check for edge case reflection\n if ray_path_c == 9 and self._ray_column == 9:\n if (self._board[ray_path_r+1][ray_path_c-1]) or (self._board[ray_path_r-1][ray_path_c-1]) == 'A':\n self._ray_column +=1\n self._ray_status = \"Exit\"\n\n self._ray_column -= 1 # move ray path one square to the left\n\n # check for exit\n if self._ray_column == 0:\n self._ray_status = \"Exit\"\n\n # check for hit\n elif self._board[self._ray_row][self._ray_column] == 'A': # if atom is in next ray path square\n self._ray_status = \"Hit\"\n\n # check for reflection\n elif (self._board[self._ray_row+1][self._ray_column]) and (self._board[self._ray_row-1][self._ray_column]) == 'A':\n self._ray_column += 1\n self.horiz_move_right(self._ray_row, self._ray_column)\n\n # check for detour (change to 'up' direction)\n elif self._board[self._ray_row + 1][self._ray_column] == 'A':\n self._ray_column +=1\n self.vert_move_up(self._ray_row, self._ray_column)\n\n # check for detour (change to 'down' direction)\n elif self._board[self._ray_row-1][self._ray_column] == 'A':\n self._ray_column +=1\n self.vert_move_down(self._ray_row, self._ray_column)",
"def shoot_ray(self, origin_row, origin_column):\n\n # get the the square object at row x column\n origin = self._board.get_board_square((origin_row, origin_column))\n\n # check that it is a valid \"edge\" to send a ray from\n origin_check = origin.is_edge()\n\n # if it's not then return false\n if origin_check == False:\n return False\n\n # if we pass the origin check create shoot a new Ray.Ray object from row x column\n new_ray = Ray.Ray(origin_row, origin_column)\n\n # let the square we shot from know its an orign square\n origin.set_originating_ray(new_ray)\n # Deduct 1 from the score since we now have on exit point\n self.set_score(-1)\n\n # while the ray object has a direction (will be set to none when it reaches an endpoint)\n # send it to the helper function that will move it\n while new_ray.get_direction() != None:\n self.move_ray(new_ray)\n\n # if we hit an exit point (other than through reflection) deduct the point for that\n terminus = new_ray.get_terminal_location()\n # check the the terminal point is an edge (hitting an atom returns none as terminus)\n\n if terminus != None:\n # check that the terminus is not a reflection, which shouldn't be counted twice\n terminal_square = self._board.get_board_square(terminus)\n terminal_square.set_terminating_ray(new_ray)\n if terminus != (origin_row, origin_column):\n self.set_score(-1)\n\n return terminus",
"def move_to_exit(self, time_move=0.25):\n \n #While the agent is not on the exit, we keep going through the labyrinth\n while self.agent_node.labyrinth_position != self.exit_point.labyrinth_position:\n\n #We use breadth first search to create the tree with the distance of every node from the agent position\n self.breadth_first_search()\n node_to_move_on = self.find_node_to_move_on(self.exit_point)\n self.update_statistics_after_move(node_to_move_on)\n self.set_datas_after_move(node_to_move_on)\n\n #We clear the terminal to print the labyrinth with the new position of the agent\n clear = \"cls\" if platform.system() == \"Windows\" else \"clear\"\n os.system(clear)\n self.print_labyrinth()\n time.sleep(time_move)",
"def moveBasedOnRetreatAction(self, time_passed):\n cpos = self.toScreenCoordinate()\n mpos = pygame.mouse.get_pos()\n toMouse = Vector2.from_points(cpos,mpos)\n toMouse.normalize()\n rheading = -toMouse\n \n heading = self.heading\n angle_between = heading.angle_between(rheading)\n if angle_between>=-30 and angle_between<=30:\n return\n \n distance = time_passed * self.speed\n movement = rheading * distance\n x = movement.get_x()\n y = movement.get_y()\n if not self.checkCollision(x, y) and self.checkValidCoord(x, y):\n self.move(x, y)",
"def rayShooting():\r\n \r\n \r\n if nbRay==1:\r\n maxi=1\r\n mini=1\r\n peaceofAngle=angleMax\r\n #to trace one ray at angleMax\r\n else:\r\n maxi=(nbRay-1)/2\r\n mini=-maxi\r\n peaceofAngle=2*angleMax/(nbRay-1)\r\n #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n tot=0 #to count the number of peace of ray\r\n indice=0 #to browse raysIndex\r\n\r\n raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n \r\n for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n \r\n rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n #the ray we will trace\r\n ray,compt=traceRay(rayon)\r\n tot+=(compt+1)\r\n\r\n \r\n raysIndex[indice]=tot #the rays index contains the indice just above\r\n #of the end of the i th ray\r\n\r\n raysMatrix=np.vstack((raysMatrix,ray))\r\n #the form of the ray matrix is a stack of peace of rays describe by\r\n #a,b,c,x1,reflexion. the polynome of the peace of ray being ax^2+bx+c and the\r\n #abscisses of the limiting point being x1, reflexion indicating if a reflexion happened\r\n #when we meet a 5-uple with a coefficient b or c infinite it means\r\n #a new ray begin\r\n \r\n indice+=1\r\n print(\"ray at indice\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n \r\n print(\"the total number of peaces of ray is :\", tot)\r\n\r\n return(raysMatrix,raysIndex)",
"def propagate_ray(self, ray):\n \n self.intercept(ray)\n \n if self._intersection2 == None:\n return \"Ray does not intersect with output plane.\"\n \n else:\n ray.append(self._intersection2, ray._k)",
"def rollout(leaf, depth):\n if depth <= 0:\n return 0\n\n total_reward = 0\n prev_state = leaf\n\n for i in range(depth):\n cur_state = prev_state.copy()\n agent_actions = rollout_policy(cur_state)\n agents_obs, _, done, _ = cur_state.game_env.step(agent_actions)\n\n # After making a move, update the memory kept on this node\n cur_state.agent_memory = utility.update_agent_memory(cur_state.agent_memory,\n agents_obs[cur_state.agent_id])\n\n reward = decide_reward(prev_state, cur_state)\n total_reward += reward\n\n prev_state = cur_state\n\n if done:\n break\n\n return total_reward",
"def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed",
"def _move_agent(self, action):\n assert len(action) == self.dimension, \"Dimension of action does not align, expected\" \\\n \" ({0},) but got {1}\".format(self.dimension, action.shape)\n new_pos = self.agent_position + action\n new_x, new_y = new_pos[0], new_pos[1]\n old_x, old_y = self.agent_position[0], self.agent_position[1]\n act_x, act_y = action[0], action[1]\n\n bound_x, bound_y = self.bound, self.bound\n\n if act_y == 0: # Moving left/right\n new_x = min(new_x, bound_x) if new_x > bound_x else max(-bound_x, new_x)\n elif act_x == 0: # Moving up/down\n new_y = min(new_y, bound_y) if new_y > bound_y else max(-bound_y, new_y)\n elif np.abs(new_x) > bound_x or np.abs(new_y) > bound_y:\n check_x, check_y = None, None\n if act_x > 0 and act_y > 0: # NE direction\n check_x, check_y = bound_x, bound_y\n elif act_x > 0 and act_y < 0: # SE direction\n check_x, check_y = bound_x, -bound_y\n elif act_x < 0 and act_y < 0: # SW direction\n check_x, check_y = -bound_x, -bound_y\n elif act_x < 0 and act_y > 0: # NW direction\n check_x, check_y = -bound_x, bound_y\n\n slope = act_y / act_x\n intersect_x = (slope * old_x + check_y - old_y) / slope\n intersect_y = slope * (check_x - old_x) + old_y\n if np.abs(intersect_x) <= bound_x:\n new_x, new_y = intersect_x, check_y\n elif np.abs(intersect_y) <= bound_y:\n new_x, new_y = check_x, intersect_y\n\n self.agent_position = np.array([new_x, new_y])\n return self.agent_position",
"def move_draught_end(event):\n global red_draughts, white_draughts\n global board_array\n global old_point\n global die_1_num, die_2_num, doubles\n draught = board.find_withtag(CURRENT)[0]\n #Figure out which point they want to put it on\n bottom = (event.y-click_offset[1] >= board_height//2)\n point_left_edges = [board_divisions*i for i in xrange(0,15) if i != 7]\n is_red = draught in red_draughts\n if bottom == False:\n new_point = 12+point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))\n else:\n new_point = 13-point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0]))) \n #Check legality\n if(board_array[new_point][1] > 1 and is_red) or (board_array[new_point][0] > 1 and not is_red): #if too many opposite color on square\n draw_draughts()\n return\n if(board_array[0][0] > 0 and is_red and old_point != 0)or(board_array[25][1] > 0 and not is_red and old_point != 25):#Obligated to move off bar first\n draw_draughts()\n return\n if(new_point == 0 and not is_red): #if white trying to bear off\n for i in xrange(7,26):\n if(board_array[i][1] > 0): #If white has a piece outside home, can't bear off\n draw_draughts()\n return\n if(new_point == 25 and is_red): #if red trying to bear off\n for i in xrange(0,18):\n if(board_array[i][0] > 0): #If red has a piece outside home, can't bear off\n draw_draughts()\n return \n \n if(new_point-old_point == die_1_num and is_red) or (old_point-new_point == die_1_num and not is_red):\n if(doubles == False) or (die_2_num != 0):\n die_1_num = 0\n else: \n die_2_num = die_1_num\n doubles = False\n elif(new_point-old_point == die_2_num and is_red) or (old_point-new_point == die_2_num and not is_red):\n if(doubles == False) or (die_1_num != 0):\n die_2_num = 0\n else: \n die_1_num = die_2_num\n doubles = False\n else: #Can't move there on this roll\n draw_draughts()\n return\n update_dice()\n #Update board_array\n if is_red:\n board_array[old_point][0] -= 1\n board_array[new_point][0] += 1\n if(board_array[new_point][1] == 1): #Handle hits\n board_array[new_point][1] -= 1\n board_array[25][1] += 1\n else:\n board_array[old_point][1] -= 1\n board_array[new_point][1] += 1\n if(board_array[new_point][0] == 1): #Handle hits\n board_array[new_point][0] -= 1\n board_array[0][0] += 1\n\n draw_draughts()\n if(die_1_num == 0 and die_2_num == 0):\n comp_turn()",
"def Enmove(self):\r\n if self.vel > 0:\r\n if self.rect.x + self.vel < self.path[1]:\r\n self.rect.x += self.vel #Moving enemy towards end of path\r\n else:\r\n if self.flipped: #flip enemy and move along opposite direction\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.flipped = False\r\n self.vel = -self.vel\r\n else:\r\n if self.rect.x - self.vel > self.path[0]:\r\n self.rect.x += self.vel #Moving enemy back to starting point\r\n else:\r\n if not self.flipped: #determining whether image should be flipped\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.flipped = True \r\n self.vel = -self.vel",
"def ray(self):\n return self._ray",
"def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
shoot_ray shoots a ray from a given row and column if possible
|
def shoot_ray(self, origin_row, origin_column):
# get the square object at row x column
origin = self._board.get_board_square((origin_row, origin_column))
# check that it is a valid "edge" to send a ray from
origin_check = origin.is_edge()
# if it's not then return false
if origin_check == False:
return False
# if we pass the origin check, shoot a new Ray.Ray object from row x column
new_ray = Ray.Ray(origin_row, origin_column)
# let the square we shot from know it's an origin square
origin.set_originating_ray(new_ray)
# Deduct 1 from the score since we now have one entry point
self.set_score(-1)
# while the ray object has a direction (it will be set to None when the ray reaches an endpoint)
# send it to the helper function that will move it
while new_ray.get_direction() != None:
self.move_ray(new_ray)
# if we hit an exit point (other than through reflection) deduct the point for that
terminus = new_ray.get_terminal_location()
# check that the terminal point is an edge (hitting an atom returns None as terminus)
if terminus != None:
# check that the terminus is not a reflection, which shouldn't be counted twice
terminal_square = self._board.get_board_square(terminus)
terminal_square.set_terminating_ray(new_ray)
if terminus != (origin_row, origin_column):
self.set_score(-1)
return terminus
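
A hypothetical usage sketch follows; the BlackBoxGame class name and its atom-list constructor are assumptions rather than anything from the source, and only the three kinds of return value mirror shoot_ray above.

# Hypothetical usage sketch; BlackBoxGame and its constructor are assumed.
game = BlackBoxGame([(3, 2), (1, 7), (4, 6), (8, 8)])
result = game.shoot_ray(0, 3)            # shoot from a border square
if result is False:
    print("not a valid edge square to shoot from")
elif result is None:
    print("the ray hit an atom")
else:
    print("the ray exited at", result)   # a (row, column) exit tuple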
|
[
"def shoot_ray(self, row, column):\n\n # check if ray is being shot from corner square\n if (row == 0 or row == 9) and (column == 0 or column == 9):\n return False\n\n # check if ray is being shot from non-border square\n if row in range(1, 9) and column in range(1, 9):\n return False\n\n if self._ray_color is None:\n self._ray_color = 0\n else:\n self._ray_color += 1\n\n self.adjust_score(row, column, self._ray_color) # adjust score for entry ray position\n self._ray_status = 'Play' # set flag variable for ray status\n\n if column == 0 or column == 9: # if shooting from horizontal position\n if column == 0: # if ray is moving to the right\n self.horiz_move_right(row, column)\n elif column == 9: # if ray is moving to the left\n self.horiz_move_left(row, column)\n\n if row == 0 or row == 9: # if shooting from vertical position\n if row == 0: # if ray is moving down\n self.vert_move_down(row, column)\n elif row == 9: # if ray is moving up\n self.vert_move_up(row, column)\n\n if self._ray_status == \"Hit\": # if ray hits an atom\n return None\n\n elif self._ray_status == \"Exit\": # if ray is a miss\n self.adjust_score(self._ray_row, self._ray_column, self._ray_color) # adjust score with exit ray position\n return self._ray_row, self._ray_column",
"def shoot_ray(self, row, column):\n # check if row/column is an allowed entry point\n if (row, column) not in self._allowed_entry_points:\n return False\n\n # add entry to entry/exit point list and deduct point if entry hasn't already been used\n if (row, column) not in self._entry_and_exit_points:\n self._entry_and_exit_points.append((row, column))\n self._score -= 1\n\n # determine which direction ray will initially be traveling\n direction = self.check_direction(row, column)\n\n # Check the reflection edge case where atom is along a border and ray is shot\n # right next to said atom\n if self.check_edge_case_reflection(row, column, direction):\n return (row, column)\n\n # If we haven't hit a return statement then follow the rest of the ray's path\n # Call the correct method based on the initial direction that the ray is traveling\n # Exit point will hold tuple of exit point or None if a hit occurred\n if direction == 'right':\n exit_point = self.travel_right(row, column)\n elif direction == 'left':\n exit_point = self.travel_left(row, column)\n elif direction == 'up':\n exit_point = self.travel_up(row, column)\n elif direction == 'down':\n exit_point = self.travel_down(row, column)\n\n # If exit point was not a hit and isn't already an entry/exit point, subtract 1\n # from the score and add exit_point to entry/exit point list\n if exit_point is not None and exit_point not in self._entry_and_exit_points:\n self._entry_and_exit_points.append(exit_point)\n self._score -= 1\n return exit_point",
"def rayShooting():\r\n \r\n \r\n if nbRay==1:\r\n maxi=1\r\n mini=1\r\n peaceofAngle=angleMax\r\n #to trace one ray at angleMax\r\n else:\r\n maxi=(nbRay-1)/2\r\n mini=-maxi\r\n peaceofAngle=2*angleMax/(nbRay-1)\r\n #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n tot=0 #to count the number of peace of ray\r\n indice=0 #to browse raysIndex\r\n\r\n raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n \r\n for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n \r\n rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n #the ray we will trace\r\n ray,compt=traceRay(rayon)\r\n tot+=(compt+1)\r\n\r\n \r\n raysIndex[indice]=tot #the rays index contains the indice just above\r\n #of the end of the i th ray\r\n\r\n raysMatrix=np.vstack((raysMatrix,ray))\r\n #the form of the ray matrix is a stack of peace of rays describe by\r\n #a,b,c,x1,reflexion. the polynome of the peace of ray being ax^2+bx+c and the\r\n #abscisses of the limiting point being x1, reflexion indicating if a reflexion happened\r\n #when we meet a 5-uple with a coefficient b or c infinite it means\r\n #a new ray begin\r\n \r\n indice+=1\r\n print(\"ray at indice\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n \r\n print(\"the total number of peaces of ray is :\", tot)\r\n\r\n return(raysMatrix,raysIndex)",
"def shoot(self, (x, y)):\n return self.__wt.shoot(self, (x, y))",
"def where_to_shoot():\n # Global variables being modified inside this function\n global letters\n global grid\n\n # Setting some variables to store user input etc.\n coord_correct = False\n row = -1\n column = -1\n while coord_correct is False:\n placement = input(\"Enter row and column fx D3: \\n\")\n placement = placement.upper()\n if len(placement) <= 1 or len(placement) > 2:\n print(\"Error: Only enter one row and column such fx D3\")\n continue\n row = placement[0]\n column = placement[1]\n if not row.isalpha() or not column.isnumeric():\n print(\"Error: Enter letter for row and number for column fx D3\")\n continue\n row = letters.find(row)\n if not (-1 < row < grid_size):\n print(\"Error: Enter letter for row and number for column fx D3\")\n continue\n column = int(column)\n if not (-1 < column < grid_size):\n print(\"Error: Enter letter for row and number for column fx D3\")\n continue\n if grid[row][column] == \"0\" or grid[row][column] == \"X\":\n print(\"Location fired at previously. Try again\")\n continue\n if grid[row][column] == \".\" or grid[row][column] == \"#\":\n coord_correct = True\n\n # sends the coordinates back to the fire() function\n return row, column",
"def _trace_ray(self, start, piece, ray):\n res_moves = []\n\n for end in ray:\n\n sym = piece.lower()\n del_x = abs(end - start) % 8\n move = [Game.i2xy(start) + Game.i2xy(end)]\n tgt_owner = self.board.get_owner(end)\n\n # Abort if the current player owns the piece at the end point\n if tgt_owner == self.state.player:\n break\n\n # Test castling exception for king\n if sym == 'k' and del_x == 2:\n gap_owner = self.board.get_owner((start + end) // 2)\n out_owner = self.board.get_owner(end - 1)\n rights = {62: 'K', 58: 'Q', 6: 'k', 2: 'q'}.get(end, ' ')\n if (tgt_owner or gap_owner or rights not in self.state.rights\n or (rights.lower() == 'q' and out_owner)):\n # Abort castling because missing castling rights\n # or piece in the way\n break\n\n if sym == 'p':\n # Pawns cannot move forward to a non-empty square\n if del_x == 0 and tgt_owner:\n break\n\n # Test en passant exception for pawn\n elif del_x != 0 and not tgt_owner:\n ep_coords = self.state.en_passant\n if ep_coords == '-' or end != Game.xy2i(ep_coords):\n break\n\n # Pawn promotions should list all possible promotions\n if (end < 8 or end > 55):\n move = [move[0] + s for s in ['b', 'n', 'r', 'q']]\n\n res_moves.extend(move)\n\n # break after capturing an enemy piece\n if tgt_owner:\n break\n\n return res_moves",
"def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))",
"def obj_ray_cast(obj, matrix):\n \n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n \n # cast the ray\n hit, normal, face_index = obj.ray_cast(ray_origin_obj, ray_target_obj)\n \n if face_index != -1:\n return hit, normal, face_index\n else:\n return None, None, None",
"def point_along_ray(eye, ray_dir, ray_dist):\n return eye[np.newaxis, np.newaxis, :] + ray_dist[:, :, np.newaxis] * ray_dir.transpose(1, 0)[np.newaxis, ...]",
"def obj_ray_cast(obj, matrix):\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None",
"def maybe_shoot(self):\n res = self.space.segment_query_first((self.tank.body.position[0] - \\\n 0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\\\n 0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\\\n 10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \\\n 10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())\n if res is not None:\n try:\n if hasattr(res, 'shape'):\n if isinstance(res.shape.parent, gameobjects.Tank):\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n elif isinstance(res.shape.parent, gameobjects.Box):\n if res.shape.parent.boxmodel.destructable is True:\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n except:\n pass",
"def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h",
"def _cast_ray(self, point):\n\n ray_direction_cam_frame = self.K_inv @ np.hstack([point[0], point[1], 1])\n point_on_image_plane_world_frame = self.C2W @ np.hstack([ray_direction_cam_frame, 1])\n point_on_image_plane_world_frame = point_on_image_plane_world_frame / point_on_image_plane_world_frame[3]\n\n # p1 - point in camera center, p2 - point on image plane\n p1 = self.position\n p2 = point_on_image_plane_world_frame[:2]\n color = (0, 0, 0)\n for wall in self.environment.map.walls:\n # q1, q2 - wall vertices\n q1 = wall.vertex1\n q2 = wall.vertex2\n t = intersect_ray_segment(p1, p2, q1, q2)\n if t is not None:\n # Check that point is in front of the camera\n intersection_point = q1 + t * (q2 - q1)\n direction = np.dot(p2 - p1, intersection_point - p2)\n if direction > 0:\n color = wall.get_color_at(t)\n return color",
"def obj_ray_cast(obj, matrix):\n\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted();\n ray_origin_obj = matrix_inv * ray_origin;\n ray_target_obj = matrix_inv * ray_target;\n ray_direction_obj = ray_target_obj - ray_origin_obj;\n\n # cast the ray\n try:\n hit, normal, face_index = obj.ray_cast(ray_origin_obj, ray_target_obj);\n except ValueError:\n result, hit, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj);\n \n if face_index != -1:\n return hit, normal, face_index;\n else:\n return None, None, None;",
"def ray_depth(bathy_grid, beam_angle, x0, y0, beam_dx, beam_dy, resolution, \\\n seed_depth=0):\n # No need to trace straight down\n if beam_angle == 0:\n return (x0, y0, bathy_grid.get_depth(x0, y0))\n\n ba_rad = np.radians(beam_angle)\n ray_z = float(seed_depth)\n if seed_depth != 0:\n beam_len = ray_z / np.cos(ba_rad)\n ray_x = x0 + beam_dx * np.sin(ba_rad) * beam_len\n ray_y = y0 + beam_dy * np.sin(ba_rad) * beam_len\n else:\n ray_x = x0\n ray_y = y0\n\n # Trace downward\n while ray_z < bathy_grid.get_depth(ray_x, ray_y):\n ray_z = ray_z + np.cos(ba_rad) * resolution\n ray_x = ray_x + beam_dx * np.sin(ba_rad) * resolution\n ray_y = ray_y + beam_dy * np.sin(ba_rad) * resolution\n \n # Trace upward if needed\n if seed_depth != 0 and abs(ray_z - bathy_grid.get_depth(ray_x, ray_y)) > \\\n (np.cos(ba_rad) * resolution * 1.5):\n while ray_z > bathy_grid.get_depth(ray_x, ray_y):\n ray_z = ray_z - np.cos(ba_rad) * resolution\n ray_x = ray_x - beam_dx * np.sin(ba_rad) * resolution\n ray_y = ray_y - beam_dy * np.sin(ba_rad) * resolution\n\n return (ray_x, ray_y, bathy_grid.get_depth(ray_x, ray_y))",
"def react_to_shot_result(self, x: int, y: int, hit_a_ship: bool) -> Any:\n raise NotImplementedError(\"Please implement react_to_shot_result.\")",
"def ray_tracing(\n x: float, y: float, cal: Calibration, mm: MultimediaPar\n) -> Tuple[np.ndarray, np.ndarray]:\n # Initial ray direction in global coordinate system\n tmp1 = np.r_[x, y, -1 * cal.int_par.cc]\n tmp1 = unit_vector(tmp1)\n start_dir = np.empty(3, dtype=float)\n matmul(start_dir, cal.ext_par.dm, tmp1, 3, 3, 1, 3, 3)\n\n primary_point = np.r_[cal.ext_par.x0, cal.ext_par.y0, cal.ext_par.z0]\n\n tmp1 = np.r_[cal.glass_par.vec_x, cal.glass_par.vec_y, cal.glass_par.vec_z]\n glass_dir = unit_vector(tmp1)\n c = vec_norm(tmp1) + mm.d[0]\n\n # Project start ray on glass vector to find n1/n2 interface.\n dist_cam_glass = vec_dot(glass_dir, primary_point) - c\n tmp1 = vec_dot(glass_dir, start_dir)\n\n # avoid division by zero\n if tmp1 == 0:\n tmp1 = 1.0\n d1 = -dist_cam_glass / tmp1\n\n tmp1 = vec_scalar_mul(start_dir, d1)\n Xb = vec_add(primary_point, tmp1)\n\n # Break down ray into glass-normal and glass-parallel components. */\n n = vec_dot(start_dir, glass_dir)\n tmp1 = vec_scalar_mul(glass_dir, n)\n\n tmp2 = vec_subt(start_dir, tmp1)\n bp = unit_vector(tmp2)\n\n # Transform to direction inside glass, using Snell's law\n p = np.sqrt(1 - n * n) * mm.n1 / mm.n2[0]\n # glass parallel\n n = -np.sqrt(1 - p * p)\n # glass normal\n\n # Propagation length in glass parallel to glass vector */\n tmp1 = vec_scalar_mul(bp, p)\n tmp2 = vec_scalar_mul(glass_dir, n)\n a2 = vec_add(tmp1, tmp2)\n\n tmp1 = np.abs(vec_dot(glass_dir, a2))\n\n # avoid division by zero\n if tmp1 == 0:\n tmp1 = 1.0\n d2 = mm.d[0] / tmp1\n\n # point on the horizontal plane between n2,n3 */\n tmp1 = vec_scalar_mul(a2, d2)\n X = vec_add(Xb, tmp1)\n\n # Again, direction in next medium */\n n = vec_dot(a2, glass_dir)\n tmp2 = vec_subt(a2, tmp2)\n bp = unit_vector(tmp2)\n\n p = np.sqrt(1 - n * n)\n p = p * mm.n2[0] / mm.n3\n n = -np.sqrt(1 - p * p)\n\n tmp1 = vec_scalar_mul(bp, p)\n tmp2 = vec_scalar_mul(glass_dir, n)\n out = vec_add(tmp1, tmp2)\n\n return X, out",
"def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)",
"def shoot(self, pos_to_shoot):\n return [SHOOT, pos_to_shoot]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
atoms_left returns the number of unguessed atoms still left
|
def atoms_left(self):
return len(self._atoms)
|
[
"def atoms_left(self):\n return self._atoms_remaining",
"def _get_mark_count_left(self):\n return self._number_of_bombs - sum([sum([1 for c in row if c.is_marked]) for row in self._cells])",
"def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces",
"def get_num_cards_left(self):\n return len(self.__card_list) - self.__top",
"def number_of_their_pieces_to_left(column):\n row = __get_top_of_stack(column)\n return number_pieces_of_type_in_direction(column, row, THEM, 'left')",
"def count_left_players(definition):\n return int(parse_player_definition(definition)[1]['left_players'])",
"def get_num_nodes_remaining(self):\n return CPX_PROC.getnodeleftcnt(self._env._e, self._cplex._lp)",
"def n_atoms(self):\n return len(self.atoms)",
"def get_num_atoms(self):\n\n return len(self.atoms)",
"def n_charged_atoms(mol: Mol) -> int:\n return sum([at.GetFormalCharge() != 0 for at in mol.GetAtoms()])",
"def spaces_left(self):\n spaces = 0 # Initialize counter\n # Iterate through each location on board\n for row in range(self.rows):\n for column in range(self.columns):\n # If board location is empty\n # increment count\n if self.board[row][column] == 0:\n spaces += 1\n return spaces",
"def _contributions_left(self):\r\n if self.is_complete:\r\n return 0, 0\r\n online_left = self.online_quota - self.stats.num_online_contributions\r\n if online_left < 0:\r\n online_left = 0\r\n tickets_left = self.num_tickets_total - self.stats.num_tickets_redeemed\r\n return (online_left, tickets_left)",
"def num_atoms(self):\n return self._num_atoms",
"def num_tickets_left(self):\r\n return self._contributions_left[1]",
"def number_of_my_pieces_to_left(column):\n row = __get_top_of_stack(column)\n return number_pieces_of_type_in_direction(column, row, ME, 'left')",
"def get_num_atoms(self) -> int:\n\n return len(self._atoms)",
"def _left_encoder():\n _verify() # Check if create is connected\n return (int(robot.left_encoder_counts) - int(left_encoder_initial))",
"def neighbours_left_playable_count(self, board: np.ndarray) -> int:\n cnt = 0\n nb = self.neighbour_left()\n\n while nb.can_be_played(board):\n cnt += 1\n nb = nb - self.direction\n\n return cnt",
"def count_all_atoms(self):\n n = 0\n for atm in self.atom_order_list:\n if isinstance(atm, Atom):\n n += 1\n else:\n n += len(atm)\n return n"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test GRU gnmt encoder. time_major=True
|
def runGRUEncoder(self, encoder, num_layers):
inputs_ph = tf.placeholder(
dtype=tf.float32,
shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH))
inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None))
outputs, states = encoder.encode(
mode=tf.estimator.ModeKeys.TRAIN,
sequence_inputs=inputs_ph,
sequence_length=inputs_length_ph)
num_bi_layers = 1
num_uni_layers = num_layers - num_bi_layers
if num_uni_layers == 1:
states_bi_bw, states_uni = states
# states_bi_bw = (states_bi_bw,)
self.assertEqual(1, len(states_bi_bw))
self.assertEqual(num_uni_layers, len(states_uni))
# unlike an LSTM, whose state is a tuple of (c, h),
# a GRU state has only one element;
# states_bi_bw[0] is a single state tensor
states_list = [states_bi_bw[0]]
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
else:
states_uni = states
self.assertEqual(num_uni_layers, len(states_uni))
states_list = []
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
inputs, inputs_length = common_utils.get_encoder_test_inputs()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
outputs, states = sess.run(
[outputs, states],
feed_dict={
inputs_ph: inputs,
inputs_length_ph: inputs_length
})
self.assertAllEqual(
[common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH],
outputs.shape)
if num_uni_layers == 1:
self.assertEqual(num_layers, len(states))
self.assertAllEqual(
[num_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape)
else:
self.assertEqual(num_uni_layers, len(states))
self.assertAllEqual(
[num_uni_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape)
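
As a small illustration of the expected shape when num_uni_layers == 1, the sketch below uses plain numpy arrays in place of the session-evaluated tensors (an assumption made for readability): one backward bi-directional state plus one uni-directional state stack into [num_layers, batch, depth].

import numpy as np

batch, depth = 4, 16
states_bi_bw = (np.zeros((batch, depth)),)   # GRU state: a single tensor, not an LSTM (c, h) pair
states_uni = (np.zeros((batch, depth)),)     # one uni-directional layer
stacked = np.stack([states_bi_bw[0]] + list(states_uni))
assert stacked.shape == (2, batch, depth)    # i.e. [num_layers, BATCH_SIZE, DEPTH]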
|
[
"def test_agilent_2d_rnmrtk():\n # prepare agilent converter\n vdic, vdata = ng.varian.read(os.path.join(DATA_DIR, \"agilent_2d\"))\n uvdic = ng.varian.guess_udic(vdic, vdata)\n vC = ng.convert.converter()\n vC.from_varian(vdic, vdata, uvdic)\n\n # prepare rnmrtk converter\n rdic, rdata = ng.rnmrtk.read(os.path.join(DATA_DIR, \"rnmrtk_2d\",\n \"time_2d.sec\"))\n urdic = ng.rnmrtk.guess_udic(rdic, rdata)\n rC = ng.convert.converter()\n rC.from_rnmrtk(rdic, rdata, urdic, agilent_compatible=True)\n\n # agilent -> rnmrtk\n cdic, cdata = vC.to_rnmrtk(agilent_compatible=True)\n assert_array_equal(rdata, cdata)\n # check_rdic(rdic, cdic, 2, bad_rnmrtk_keys) # XXX do not check\n # write and readback\n tf = tempfile.mktemp(suffix='.sec', dir='.')\n ng.rnmrtk.write(tf, cdic, cdata)\n rrdic, rrdata = ng.rnmrtk.read(tf)\n assert_array_equal(cdata, rrdata)\n check_rdic(cdic, rrdic, 2)\n os.remove(tf)\n os.remove(tf.replace('.sec', '.par'))\n\n # rnmrtk -> rnmrtk\n cdic, cdata = rC.to_rnmrtk(agilent_compatible=True)\n assert_array_equal(rdata, cdata)\n check_rdic(rdic, cdic, 2, bad_rnmrtk_keys)\n # write and readback\n tf = tempfile.mktemp(suffix='.sec', dir='.')\n ng.rnmrtk.write(tf, cdic, cdata)\n rrdic, rrdata = ng.rnmrtk.read(tf)\n assert_array_equal(rdata, rrdata)\n check_rdic(rdic, rrdic, 2, bad_rnmrtk_keys)\n os.remove(tf)\n os.remove(tf.replace('.sec', '.par'))\n\n # rnmrtk -> agilent\n cdic, cdata = rC.to_varian()\n assert_array_equal(vdata, cdata)\n check_dic(vdic, cdic, bad_varian_keys)\n # write and readback\n td = tempfile.mkdtemp(dir=\".\")\n ng.varian.write(td, cdic, cdata)\n rrdic, rrdata = ng.varian.read(td)\n assert_array_equal(vdata, rrdata)\n check_dic(vdic, cdic, bad_varian_keys)\n shutil.rmtree(td)",
"def MG94GTR(**kw):\n required = dict(\n name=\"MG94GTR\",\n predicates=_gtr_preds + [_omega],\n mprob_model=\"monomer\",\n model_gaps=False,\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleCodon(**kwargs)",
"def test_rnmrtk_1d():\n\n # prepare rnmrtk converter\n rdic, rdata = ng.rnmrtk.read(\n os.path.join(DATA_DIR, \"rnmrtk_1d\", \"freq_1d.sec\"))\n urdic = ng.rnmrtk.guess_udic(rdic, rdata)\n rC = ng.convert.converter()\n rC.from_rnmrtk(rdic, rdata, urdic)\n\n # prepare Pipe converter\n pdic, pdata = ng.pipe.read(\n os.path.join(DATA_DIR, \"rnmrtk_1d\", \"test.ft\"))\n updic = ng.pipe.guess_udic(pdic, pdata)\n pC = ng.convert.converter()\n pC.from_pipe(pdic, pdata, updic)\n\n # rnmrtk -> rnmrtk\n cdic, cdata = rC.to_rnmrtk()\n assert_array_equal(rdata, cdata)\n check_rdic(rdic, cdic, 1, exclude=bad_rnmrtk_keys)\n # write and readback\n tf = tempfile.mktemp(suffix='.sec', dir='.')\n ng.rnmrtk.write(tf, cdic, cdata)\n rrdic, rrdata = ng.rnmrtk.read(tf)\n assert_array_equal(cdata, rrdata)\n check_rdic(rdic, rrdic, 1, exclude=bad_rnmrtk_keys)\n os.remove(tf)\n os.remove(tf.replace('.sec', '.par'))\n\n # rnmrtk -> pipe\n cdic, cdata = rC.to_pipe()\n assert_array_equal(pdata, cdata)\n check_pdic(pdic, cdic, bad_pipe_keys + bad_rnmrtk2pipe_keys, v=True)\n # write and readback\n tf = tempfile.mktemp(dir=\".\")\n ng.pipe.write(tf, cdic, cdata)\n rrdic, rrdata = ng.pipe.read(tf)\n assert_array_equal(pdata, rrdata)\n check_pdic(pdic, cdic, bad_pipe_keys + bad_rnmrtk2pipe_keys, v=True)\n os.remove(tf)\n\n # pipe -> pipe\n cdic, cdata = pC.to_pipe()\n assert_array_equal(pdata, cdata[:])\n check_pdic(pdic, cdic, bad_pipe_keys + bad_rnmrtk2pipe_keys, v=True)\n # write and readback\n tf = tempfile.mktemp(dir=\".\")\n ng.pipe.write(tf, cdic, cdata)\n rrdic, rrdata = ng.pipe.read(tf)\n assert_array_equal(pdata, rrdata[:])\n check_pdic(pdic, cdic, bad_pipe_keys + bad_rnmrtk2pipe_keys, v=True)\n os.remove(tf)\n\n # pipe -> rnmrtk\n cdic, cdata = pC.to_rnmrtk()\n assert_array_equal(rdata, cdata)\n check_rdic(rdic, cdic, 1, exclude=bad_rnmrtk_keys)\n # write and readback\n tf = tempfile.mktemp(suffix='.sec', dir='.')\n ng.rnmrtk.write(tf, cdic, cdata)\n rrdic, rrdata = ng.rnmrtk.read(tf)\n assert_array_equal(rrdata, rdata)\n check_rdic(rdic, rrdic, 1, exclude=bad_rnmrtk_keys)\n os.remove(tf)\n os.remove(tf.replace('.sec', '.par'))",
"def test_generate_raw(self):\n raw_result = self.raw_test_particle.generate_raw()\n decoded_raw = json.loads(raw_result)\n \n driver_time = decoded_raw[\"driver_timestamp\"]\n self.sample_raw_particle[\"driver_timestamp\"] = driver_time\n \n # run it through json so unicode and everything lines up\n standard = json.dumps(self.sample_raw_particle, sort_keys=True)\n self.assertEqual(raw_result, standard)",
"def GTR(**kw):\n required = dict(\n name=\"GTR\", predicates=_gtr_preds, mprob_model=\"conditional\", model_gaps=False\n )\n kwargs = dict(recode_gaps=True, motif_probs=None)\n kwargs.update(kw)\n kwargs.update(required)\n return substitution_model.TimeReversibleNucleotide(**kwargs)",
"def test_red_noise_add_backend(self):\n # set up signals\n pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))\n selection = Selection(selections.by_backend)\n cpl = utils.powerlaw(\n log10_A=parameter.Uniform(-18, -12)(\"log10_Agw\"), gamma=parameter.Uniform(1, 7)(\"gamma_gw\")\n )\n\n # parameters\n log10_As = [-14, -14.4, -15, -14.8]\n gammas = [2.3, 4.4, 1.8, 5.6]\n log10_Ac, gammac = -15.5, 1.33\n params = {\n \"B1855+09_red_noise_430_ASP_gamma\": gammas[0],\n \"B1855+09_red_noise_430_PUPPI_gamma\": gammas[1],\n \"B1855+09_red_noise_L-wide_ASP_gamma\": gammas[2],\n \"B1855+09_red_noise_L-wide_PUPPI_gamma\": gammas[3],\n \"B1855+09_red_noise_430_ASP_log10_A\": log10_As[0],\n \"B1855+09_red_noise_430_PUPPI_log10_A\": log10_As[1],\n \"B1855+09_red_noise_L-wide_ASP_log10_A\": log10_As[2],\n \"B1855+09_red_noise_L-wide_PUPPI_log10_A\": log10_As[3],\n \"log10_Agw\": log10_Ac,\n \"gamma_gw\": gammac,\n }\n\n Tmax = self.psr.toas.max() - self.psr.toas.min()\n tpars = [\n (30, 20, Tmax, Tmax),\n (20, 30, Tmax, Tmax),\n (30, 30, Tmax, Tmax),\n (30, 20, Tmax, 1.123 * Tmax),\n (20, 30, Tmax, 1.123 * Tmax),\n (30, 30, 1.123 * Tmax, Tmax),\n (30, 20, None, Tmax),\n ]\n\n for (nf1, nf2, T1, T2) in tpars:\n\n rn = gp_signals.FourierBasisGP(spectrum=pl, components=nf1, Tspan=T1, selection=selection)\n crn = gp_signals.FourierBasisGP(spectrum=cpl, components=nf2, Tspan=T2)\n s = rn + crn\n rnm = s(self.psr)\n\n # get the basis\n bflags = self.psr.backend_flags\n Fmats, fs, phis = [], [], []\n F2, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nf2, Tspan=T2)\n p2 = utils.powerlaw(f2, log10_Ac, gammac)\n for ct, flag in enumerate(np.unique(bflags)):\n mask = bflags == flag\n F1, f1 = utils.createfourierdesignmatrix_red(self.psr.toas[mask], nf1, Tspan=T1)\n Fmats.append(F1)\n fs.append(f1)\n phis.append(utils.powerlaw(f1, log10_As[ct], gammas[ct]))\n\n Fmats.append(F2)\n phis.append(p2)\n nf = sum(F.shape[1] for F in Fmats)\n F = np.zeros((len(self.psr.toas), nf))\n phi = np.hstack([p for p in phis])\n nftot = 0\n for ct, flag in enumerate(np.unique(bflags)):\n mask = bflags == flag\n nn = Fmats[ct].shape[1]\n F[mask, nftot : nn + nftot] = Fmats[ct]\n nftot += nn\n F[:, -2 * nf2 :] = F2\n\n msg = \"Combined red noise PSD incorrect \"\n msg += \"for {} {} {} {}\".format(nf1, nf2, T1, T2)\n assert np.all(rnm.get_phi(params) == phi), msg\n\n msg = \"Combined red noise PSD inverse incorrect \"\n msg += \"for {} {} {} {}\".format(nf1, nf2, T1, T2)\n assert np.all(rnm.get_phiinv(params) == 1 / phi), msg\n\n msg = \"Combined red noise Fmat incorrect \"\n msg += \"for {} {} {} {}\".format(nf1, nf2, T1, T2)\n assert np.allclose(F, rnm.get_basis(params)), msg",
"def test_gmtime_epoch():\n epoch_st = time.struct_time([1970, 1, 1, 0, 0, 0, 3, 1, 0])\n st = time.gmtime(0)\n assert st == epoch_st",
"def test_save_tsc_old_version(uvm_nano):\n uvm_nano.start()\n uvm_nano.snapshot_full(target_version=\"0.24.0\")\n uvm_nano.check_log_message(\"Saving to older snapshot version, TSC freq\")",
"def test_11_fallback(self):\n # Short segments\n for nsamp in range(1, 20):\n ts = self._get_ts(1, nsamp, sigma=0, dtype='int32')\n ts.encode()\n self._readback_compare(ts)\n \n # Random time vector.\n n = 200\n ts = self._get_ts(1, n, sigma=0, dtype='int32')\n ts.times = core.G3VectorTime(\n (np.random.uniform(size=n*8) * 256).astype('uint8').view(dtype='int64'))\n self._readback_compare(ts)\n\n # Random data array.\n n = 200\n ts = self._get_ts(1, n, sigma=0, dtype='int64')\n ts.data = (np.random.uniform(size=n*8) * 256).astype('uint8').view(dtype='int64').reshape(1,-1)\n self._readback_compare(ts)\n\n # Small n_det (note 1-10 weren't causing a problem by 11+ were...)\n for n_det in range(1, 20):\n ts = self._get_ts(n_det, 1, sigma=0, dtype='int64')\n ts.data = (np.random.uniform(size=n_det*8) * 256).astype('uint8') \\\n .view(dtype='int64').reshape(-1, 1)\n ts.encode()\n self._readback_compare(ts)",
"def gmst(self): # default epoch is 2000, but can be changed using epoch optional argument\n # Assume epoch 2000 January 1d 12h UT1\n epoch = self.ymd2jd(self.epoch,1,1)\n # GMST at 0h UT\n JD = self.ymd2jd(self.date_object.year, 1, 0) # beginning of the year on day 0 at 12h (noon)\n Du = (JD - 0.5) - epoch # at 0h (midnight)\n D=self.date_object.timetuple().tm_yday\n H = (self.date_object.hour*3600+self.date_object.minute*60+(self.date_object.second+self.date_object.microsecond/1e6))/3600.\n ERA = 24.*((0.7790572732640 + \\\n 0.00273781191135448*(Du*self.m_day%self.m_day)*self.m_angle) + \\\n 0.00273781191135448*D + (1./self.s_day)*H) %24.\n T=((Du*24*3600+67)/3600./24. + D + 24./self.s_day*H)/36525. #UT\n part = (0.014506 + 4612.156534*T + 1.3915817*T*T - 0.00000044*T**3 - 0.000029956*T**4 - 3.68e-8*T**5)/3600./15.\n GMST = (ERA+part)%24\n return GMST",
"def test_convert_reddit_timestamp(time, correct):\n assert google_utils.convert_reddit_timestamp(time) == correct",
"def gst_to_ut(gst, gdate):\n gdate = start_of_day(gdate)\n c = (cd_to_jd(gdate) - 2451545.0) / 36525.0\n e = (6.697374558 + c * (2400.051336 + c * 0.000025862)) % 24.0\n return ((gst - e) % 24.0) * 0.9972695663",
"def test_gru_forward_and_back():\n\n # Setup\n hidden_len = 1\n input_len = 2\n weight_init_val = 0.5\n x = np.array([0.02, -0.3])\n dh = np.array([2.1])\n init_function = lambda shape: np.full(shape, weight_init_val)\n zero_init = lambda shape : np.zeros(shape)\n gru = vanilla_draw.Gru(input_len, hidden_len, init_fctn=init_function)\n # Don't have the matrices all the same value, otherwise some bugs might\n # slip past.\n gru.Wr[(0, 2)] = gru.Wz[(0, 2)] = gru.Wp[(0, 2)] = 0.1\n dgru = vanilla_draw.Gru(input_len, hidden_len, init_fctn=zero_init)\n h, activations = vanilla_draw.gru_forward(gru, x, h=np.zeros(hidden_len))\n\n # Forward\n # Check activations against hand calculations.\n assert np.isclose(activations.r[0], 0.589040434057), 'wrong r activation.'\n assert np.isclose(activations.z[0], 0.589040434057), 'wrong z activation.'\n assert np.isclose(activations.p[0], 0.345214034135), 'wrong p activation.'\n assert np.isclose(activations.h[0], 0.141869009625), 'wrong h activation.'\n\n # Back-propagation\n \"\"\"\n\n (8) h = (1 - z).p + z.h_prev\n dz = dh * (-p + h_prev) \n = [2.1] * (-[0.34512] + [0]) \n = [-0.72495] \n dp = dh * (1 - z)\n = [2.1] * ([1] - [0.58904])\n = [0.86302]\n\n (3) z_linear = Wz[x, h_prev] + bz\n (4) z = sigmoid(z_linear)\n dz_linear = dz * z*(1 - z) \n = [-0.72495] * [0.24207] \n = [-0.17549]\n\n (6) p_linear = Wr[x, h_reset] + bp\n (7) p = tanh(p_linear)\n dp_linear = dp * (1 - p**2)\n = [0.86302] * ([1] - [0.34521]**2)\n = [0.76017]\n dxh_reset = transpose(Wp) @ dp_linear\n = [[0.5], [0.5], [0.1]] @ [0.76017] \n = [[0.38008], [0.38008], [0.076017]]\n dh_reset = dxh_reset[input_len:] = [0.076017]\n\n (5) h_reset = r.h_prev \n dr = dh_reset * h_prev\n = [0.076017] * [0]\n = [0]\n\n (1) r_linear = Wr[x, h_prev] + br\n (2) r = sigmoid(r_linear)\n dr_linear = dr * r /(1 - r)\n = [0] * ...\n = [0]\n\n # Collect dx h_prev from (8), (5), (3) and (1).\n dxh_prev = transpose(Wr) @ dr_linear\n = [[0.5], [0.5], [0.1]] @ [0]\n = [0, 0, 0]\n dxh_prev = transpose(Wz) @ dz_linear \n = [[0.5], [0.5], [0.1]] @ [-0.17549] \n = [[-0.087745], [-0.087745], [-0.017549]]\n dx = [0, 0] + [-0.087745, -0.087745] + dxh_reset[:input_len]\n = [0, 0] + [-0.087745, -0.087745] + [0.38008, 0.38008]\n = [0.29234, 0.29234]\n dh_prev = (dh * z) + [0] + [-0.017549] + (dxh_reset[input_len:] * r)\n = [2.1] * [0.58904] + [0] + [-0.017549] + [0.044777]\n = [1.26421] \n \"\"\"\n dx, dh_prev = vanilla_draw.backprop_gru(dh, gru, dgru, activations)\n assert np.isclose(dh_prev, 1.26421), 'h_prev gradient signal is wrong.'\n assert np.allclose(dx, [0.29234, 0.29234]), 'x gradient signal is wrong.'\n\n # Finally, let's check the updates to the weights.\n \"\"\"\n dWz = dz_linear @ xh_prev \n = [-0.17549] @ [0.02, -0.3, 0]\n = [[-0.0035098, 0.052647, 0]\n dWr = dr_linear @ xh_prev\n = [0] @ [0.02, -0.3, 0]\n = [[0, 0, 0]]\n dWp = dp_linear @ xh_reset\n = [0.76017] @ [0.02, -0.3, 0]\n = [[0.015203, -0.22805, 0]]\n dbz = dz_linear\n = [-0.17549]\n dbr = dr_linear\n = [0]\n dbp = dp_linear\n = [0.76017]\n \"\"\"\n assert np.allclose(dgru.Wz, [[-0.0035098, 0.052647, 0.]]), \\\n 'Gradient of Wz is wrong.'\n assert np.allclose(dgru.Wr, [[0., 0., 0.]]), 'Gradient of Wr is wrong.'\n assert np.allclose(dgru.Wp, [[0.0152033, -0.22805, 0.]]), \\\n 'Gradient of Wp is wrong.'\n assert np.isclose(dgru.bz, -0.17549), 'Gradient of bz is wrong.'\n assert np.isclose(dgru.br, 0.), 'Gradient of br is wrong.'\n assert np.isclose(dgru.bp, 0.76017), 'Gradient of bp is wrong.'",
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def test_initialization_with_encoder(self):\n encoder = encoders.build_encoder(\n config=encoders.EncoderConfig(type='mobilebert'))\n pretrainer = model_builder.build_bert_pretrainer(\n pretrainer_cfg=self.pretrainer_config,\n encoder=encoder)\n encoder_network = pretrainer.encoder_network\n self.assertEqual(encoder_network, encoder)",
"def test_gmtime(self):\n from supvisors.utils import simple_gmtime\n self.assertEqual('07:07:00', simple_gmtime(1476947220.416198))",
"def generate_simple_gru_model(hyperparameters):\n model = keras.Sequential()\n\n hp_output_dim = hyperparameters.Int('embedding_output_dim', min_value=8, max_value=32, step=8)\n model.add(keras.layers.Embedding(\n input_dim=NUMBER_OF_WORDS + 1,\n output_dim=hp_output_dim,\n input_length=MAXIMUM_SENTENCE_LENGTH\n ))\n\n hp_hidden_units = hyperparameters.Int('gru_hidden_units', min_value=8, max_value=128, step=8)\n hp_dropout_rate = hyperparameters.Float('gru_dropout', min_value=0, max_value=0.5, step=0.1)\n model.add(keras.layers.GRU(\n units=hp_hidden_units,\n dropout=hp_dropout_rate\n ))\n\n model.add(keras.layers.Dense(5))\n\n hp_learning_rate = hyperparameters.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4, 1e-5])\n model.compile(\n optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),\n loss=keras.losses.CategoricalCrossentropy(from_logits=True),\n metrics=['accuracy']\n )\n\n return model",
"def compile_gru_model(input_dim=101, output_dim=4563, recur_layers=3, nodes=1000,\n conv_context=11, conv_border_mode='valid', conv_stride=2,\n initialization='glorot_uniform', batch_norm=True, num_gpu=1):\n logger.info(\"Building gru model\")\n # Main acoustic input\n acoustic_input = Input(shape=(None, input_dim), name='acoustic_input')\n\n # Setup the network\n #conv_1d = Conv1D(nodes, conv_context, name='conv_1d',\n # padding='same', strides=conv_stride,\n # kernel_initializer=initialization,\n # activation='relu')(acoustic_input)\n conv_1d = Convolution1D(nodes, conv_context, name='conv1d',\n border_mode=conv_border_mode,\n subsample_length=conv_stride, init=initialization,\n activation='relu')(acoustic_input)\n if batch_norm:\n output = normalization.BatchNormalization(name='bn_conv_1d')(conv_1d, training=True)\n else:\n output = conv_1d\n\n for r in range(recur_layers):\n # output = GRU(nodes, activation='relu',\n # name='rnn_{}'.format(r + 1), init=initialization,\n # return_sequences=True)(output)\n output = Bidirectional(GRU(nodes, return_sequences=True),name='bi_lstm_{}'.format(r + 1))(output)\n if batch_norm:\n bn_layer = normalization.BatchNormalization(name='bn_rnn_{}'.format(r + 1),moving_mean_initializer='zeros')\n output = bn_layer(output, training=True)\n\n network_output = TimeDistributed(Dense(\n output_dim+1, name='dense', activation='softmax', init=initialization,\n ))(output)\n model = Model(input=acoustic_input, output=network_output)\n #model.conv_output_length = lambda x: conv_output_length(\n # x, conv_context, conv_border_mode, conv_stride)\n # model = ParallelModel(model, num_gpu)\n return model",
"def model_gru(self):\n model = tf.keras.models.Sequential()\n \"\"\"\"\"\"\n model.add(tf.keras.layers.GRU(128, input_dim=196, return_sequences=True))\n model.add(tf.keras.layers.Dropout(0.1))\n for i in range(4):\n model.add(tf.keras.layers.GRU(128, return_sequences=True))\n model.add(tf.keras.layers.Dropout(0.1))\n model.add(tf.keras.layers.GRU(128, return_sequences=False))\n model.add(tf.keras.layers.Dropout(0.1))\n model.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))\n\n model.compile(optimizer=tf.keras.optimizers.Adam(),\n loss=tf.keras.losses.binary_crossentropy,\n metrics=[tf.keras.metrics.binary_accuracy])\n model.summary()\n\n return model"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a CourseGraph, fetching unitary weights and edge weights from the database, creating CourseNodes for each course, and adding weighted edges between adjacent courses.
|
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4):
self._nodes = dict() # dict with courseid keys, CourseNode vals
self._max_suggestions = max_suggestions
self._max_courses = max_courses
self._cache_mult = cache_mult
db = database
# Get dict mapping courses to unitary weights
unitary_dict = db.get_unitary_dict(session)
# Get dict mapping courses to adjacent courses and weights
edge_dict = db.get_edges_dict(session)
# Create CourseNodes
for courseid in unitary_dict:
courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(), popularity=unitary_dict[courseid])
self._nodes[courseid] = courseNode
# Create course edge dict for each CourseNode
for courseid in edge_dict:
node = self._nodes[courseid] # get node of interest
adj_courses = edge_dict[courseid] # get inner dict {otherid: edge_weight}
for otherid in adj_courses:
other_node = self._nodes[otherid]
node.addEdge(other_node, adj_courses[otherid])
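
A minimal sketch of the CourseGraph.CourseNode structure that __init__ relies on follows; it is an assumption for illustration, not the source's inner class.

# Assumed sketch of the inner node class used above.
class CourseNode:
    def __init__(self, courseid, edges, popularity):
        self.courseid = courseid
        self.edges = edges            # maps an adjacent CourseNode to its edge weight
        self.popularity = popularity  # unitary weight for this course

    def addEdge(self, other_node, weight):
        self.edges[other_node] = weight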
|
[
"def build_computational_graph():\n pass",
"def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()",
"def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict",
"def create_graph(self, lat, lon):\n # Open connection to the database (nodes)\n cur = armaps.model.get_db()\n\n # Get the waypoints\n cur.execute(\n \"SELECT * FROM waypoints WHERE venue_id = %s\", \n (self.venue_id,)\n )\n waypoints = cur.fetchall()\n\n # Get the paths (edges)\n cur.execute(\n \"SELECT * FROM paths WHERE venue_id = %s\",\n (self.venue_id,)\n )\n paths = cur.fetchall()\n\n # Transform list of waypoints into dictionary with key = waypoint_id\n for waypoint in waypoints:\n self.waypoints[int(waypoint[\"waypoint_id\"])] = {\n \"lat\": float(waypoint[\"latitude\"]),\n \"lon\": float(waypoint[\"longitude\"]),\n \"waypoint_id\": int(waypoint[\"waypoint_id\"])\n }\n\n # Calculate weights of edges in graph\n for path in paths:\n # Get two nodes (waypoints) associated with edge\n inNode = int(path[\"innode\"])\n outNode = int(path[\"outnode\"])\n\n # Get the coordinates of nodes\n inNode_coords = (self.waypoints[inNode][\"lat\"], self.waypoints[inNode][\"lon\"])\n outNode_coords = (self.waypoints[outNode][\"lat\"], self.waypoints[outNode][\"lon\"])\n distance = geopy.distance.distance(inNode_coords, outNode_coords).miles\n\n # Add to graph (both ways for undirected)\n self.graph.add_edge(inNode, outNode, distance)\n self.graph.add_edge(outNode, inNode, distance)",
"def build_graph(self):\n self._create_audio_model()\n self._create_placeholders()\n self._create_embedding()\n self._initialize_embedding()\n self._create_recursive_net()\n self._create_output_layers()\n self._create_optimizer()\n self._create_summary()",
"def populated_graph_db(request) -> graph_tuple_database.Database:\n with testing_databases.DatabaseContext(\n graph_tuple_database.Database, request.param\n ) as db:\n random_graph_tuple_database_generator.PopulateDatabaseWithRandomGraphTuples(\n db, graph_count=100, graph_y_dimensionality=2\n )\n yield db",
"def create_wiki_graph(self):\n\n print 'Creating wiki corpus graph representation'\n\n for path, subdirs, files in os.walk(self.wk_path):\n\n here = os.path.split(path)[1]\n parent = os.path.split(os.path.split(path)[0])[1]\n\n self.categories.add_edge(parent, here)\n\n self.categories[parent][\"path\"] = path\n self.categories[here][\"path\"] = path\n\n for name in files:\n if fnmatch(name, \"*.yaml\") and \"Index\" not in name and \"index\" not in name: # check if there is a text file\n \n category_name = name[0:-5]\n yaml_file_path = os.path.join(\n path, category_name + \".yaml\")\n\n # yaml\n yaml_file = open(yaml_file_path, \"r\")\n docs = yaml.load_all(yaml_file)\n\n # category_name\n for doc in docs:\n cat_parent = doc[\"CategoryPath\"][0]\n\n self.categories.add_edge(\n slugify(cat_parent), slugify(category_name))\n self.categories[slugify(cat_parent)][\"path\"] = path\n self.categories[slugify(category_name)][\"path\"] = path\n\n for cat in doc[\"Categories\"][0][self.language]:\n self.categories.add_edge(\n slugify(category_name), slugify(cat))\n self.categories[slugify(cat)][\"path\"] = path\n\n print(\"The categories graph %s has %d nodes with %d edges\"\n % (self.categories.name,\n nx.number_of_nodes(self.categories),\n nx.number_of_edges(self.categories)))\n for node in nx.nodes(self.categories):\n self.get_corpus_from_node(node)\n\n pickle.dump(self.categories, open(self.graph_path, 'w'))\n\n print \"Graph saved as %s\"%(self.graph_path)",
"def generate_coocurence_graph(self):\n g = gt.Graph(directed=False)\n g.add_vertex(self.label_count)\n\n self.weights = g.new_edge_property('double')\n\n for edge, weight in self.edge_map.items():\n e = g.add_edge(edge[0], edge[1])\n if self.is_weighted:\n self.weights[e] = weight\n else:\n self.weights[e] = 1.0\n\n self.coocurence_graph = g\n\n return g",
"def get_test_graph() -> WeightedGraph:\n countries_cases = get_main_data('datasets/test_data.csv', 'new_cases')\n countries_deaths = get_main_data('datasets/test_data.csv', 'new_deaths')\n graph = WeightedGraph()\n\n for country in countries_cases:\n population = get_population('datasets/test_data.csv', country)\n graph.add_vertex(country, countries_cases[country], countries_deaths[country], population)\n\n graph.add_vertex_restrictions(country, 'face-covering-policies',\n get_policy_restrictions('test-face-covering-policies',\n country))\n graph.add_vertex_restrictions(country, 'public-campaigns-covid',\n get_policy_restrictions('test-public-campaigns-covid',\n country))\n graph.add_vertex_restrictions(country, 'public-events-cancellation',\n get_policy_restrictions('test-public-events-cancellation',\n country))\n graph.add_vertex_restrictions(country, 'school-workplace-closures',\n get_policy_restrictions('test-school-workplace-closures',\n country))\n graph.add_vertex_restrictions(country, 'stay-at-home',\n get_policy_restrictions('test-stay-at-home', country))\n graph.add_vertex_restrictions(country, 'testing-policy',\n get_policy_restrictions('test-testing-policy', country))\n graph.add_vertex_restrictions(country, 'vaccination-policy',\n get_policy_restrictions('test-vaccination-policy', country))\n\n all_vertices = graph.get_all_vertices()\n\n for country in all_vertices:\n graph.find_and_add_edge(country)\n\n return graph",
"def __create_graph(self):\n if (self.height < 2 or self.width < 2):\n print(\"INCORRECT: Set values of height or width >= 2\")\n return\n\n self.clear() # Remove all the nodes and edges of the last graph\n current_node = 0\n for i in range(self.width):\n for j in range(self.height): \n\n self.__adding_edges(i,j,current_node)\n\n #Adding neighbors to each node\n #Adding the node to the network\n #NOTE: Here if there are few attributes in a node, we can defined\n #the attributes in this part. So, we will use state=0 instead \n #of data=node\n self.add_node(current_node, pos=(i,j)) \n current_node +=1",
"def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)",
"def _construct_graph(self):\n raise NotImplementedError",
"def _build_graph(self):\n\n graph = nx.DiGraph()\n self._graph = graph\n for table in self.metadata.tables.values():\n if not is_active(table):\n continue\n\n self._graph.add_node(table.fullname)\n neighbors = self.find_neighbor_tables(table)\n for neighbor in neighbors:\n self._graph.add_node(neighbor.table.fullname)\n self._graph.add_edge(\n table.fullname,\n neighbor.table.fullname,\n join_fields=neighbor.join_fields,\n )",
"def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0",
"def create_graph(self):\n \n note_list = list(range(88))\n \n G=nx.Graph()\n G.add_nodes_from(note_list)\n \n for note1 in note_list:\n for note2 in note_list:\n d = self.note_distance(note1,note2)\n if d > 0: \n G.add_edge(note1,note2,weight=d)",
"def create_graph(infile):\n # This code works for gte.bad_40. Key modifications are in second 'for' loop.\n flow_file = open(infile)\n \n # Build multi-digraph M from infile\n M = nx.MultiDiGraph()\n \n node_demands = {}\n for line in flow_file:\n if line[0] == 'c'or line[0] == 'p':\n continue\n elif line[0] == 'n':\n s1, s2, s3 = line.strip().split()\n node_demands[s2]=int(s3) \n elif line[0] == 'a':\n s1, s2, s3, s4, s5, s6 = line.strip().split()\n M.add_edge(s3, s2, capacity=float(s5), weight=float(s6))\n \n # Build simple di-graph from M\n G = nx.DiGraph()\n \n # If an edge already exists in G, then add a new node for each subsequent edge in order to separate the edges. \n # Capacity on the new edge will be the same as original. Each of the two segments of the new edge will have original cost divided by 2\n counter_n = 1 # Set counter for new nodes \n for i,j,data in M.edges_iter(data=True):\n if G.has_edge(i,j):\n new_n = i+'_'+str(counter_n)\n G.add_edge(i, new_n, capacity = data['capacity'], weight = data['weight']/2)\n G.add_edge(new_n, j, capacity = data['capacity'], weight = data['weight']/2)\n counter_n += 1\n else:\n G.add_edge(i,j, capacity=data['capacity'], weight=data['weight'])\n\n # Create dict for node demands\n for i in G.nodes_iter():\n if i not in node_demands:\n G.node[i]['demand'] = 0\n else: \n G.node[i]['demand'] = node_demands[i]\n\n return G",
"def make_graph(self):\n\n # the root node\n self.graph.node(self.playbook_filename, style=\"dotted\", id=\"root_node\")\n\n # loop through the plays\n for play_counter, play in enumerate(self.playbook.get_plays(), 1):\n\n # the load basedir is relative to the playbook path\n if play._included_path is not None:\n self.data_loader.set_basedir(play._included_path)\n else:\n self.data_loader.set_basedir(self.playbook._basedir)\n self.display.vvv(\"Loader basedir set to {}\".format(self.data_loader.get_basedir()))\n\n play_vars = self.variable_manager.get_vars(play)\n play_hosts = [h.get_name() for h in self.inventory_manager.get_hosts(self.template(play.hosts, play_vars))]\n play_name = \"Play #{}: {} ({})\".format(play_counter, clean_name(play.get_name()), len(play_hosts))\n play_name = self.template(play_name, play_vars)\n\n self.display.banner(\"Graphing \" + play_name)\n\n play_id = \"play_\" + str(uuid.uuid4())\n\n self.graph_representation.add_node(play_id)\n\n with self.graph.subgraph(name=play_name) as play_subgraph:\n color, play_font_color = get_play_colors(play)\n # play node\n play_subgraph.node(play_name, id=play_id, style=\"filled\", shape=\"box\", color=color,\n fontcolor=play_font_color, tooltip=\" \".join(play_hosts))\n\n # edge from root node to plays\n play_edge_id = \"edge_\" + str(uuid.uuid4())\n play_subgraph.edge(self.playbook_filename, play_name, id=play_edge_id, style=\"bold\",\n label=str(play_counter), color=color, fontcolor=color)\n\n # loop through the pre_tasks\n self.display.v(\"Graphing pre_tasks...\")\n nb_pre_tasks = 0\n for pre_task_block in play.pre_tasks:\n nb_pre_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=pre_task_block, color=color,\n current_counter=nb_pre_tasks, play_vars=play_vars,\n node_name_prefix=\"[pre_task] \")\n\n # loop through the roles\n self.display.v(\"Graphing roles...\")\n role_number = 0\n for role in play.get_roles():\n # Don't insert tasks from ``import/include_role``, preventing duplicate graphing\n if role.from_include:\n continue\n\n # the role object doesn't inherit the tags from the play. 
So we add it manually.\n role.tags = role.tags + play.tags\n if not role.evaluate_tags(only_tags=self.options.tags, skip_tags=self.options.skip_tags,\n all_vars=play_vars):\n self.display.vv(\"The role '{}' is skipped due to the tags.\".format(role.get_name()))\n # Go to the next role\n continue\n\n role_number += 1\n role_name = \"[role] \" + clean_name(role.get_name())\n\n with self.graph.subgraph(name=role_name, node_attr={}) as role_subgraph:\n current_counter = role_number + nb_pre_tasks\n role_id = \"role_\" + str(uuid.uuid4())\n edge_id = \"edge_\" + str(uuid.uuid4())\n\n role_subgraph.node(role_name, id=role_id)\n # edge from play to role\n role_subgraph.edge(play_name, role_name, label=str(current_counter), color=color,\n fontcolor=color, id=edge_id)\n\n self.graph_representation.add_link(play_id, edge_id)\n self.graph_representation.add_link(edge_id, role_id)\n\n # loop through the tasks of the roles\n if self.options.include_role_tasks:\n role_tasks_counter = 0\n for block in role.compile(play):\n role_tasks_counter = self._include_tasks_in_blocks(current_play=play,\n graph=role_subgraph,\n parent_node_name=role_name,\n parent_node_id=role_id, block=block,\n color=color, play_vars=play_vars,\n current_counter=role_tasks_counter,\n node_name_prefix=\"[task] \")\n role_tasks_counter += 1\n self.display.v(\"{} roles added to the graph\".format(role_number))\n\n # loop through the tasks\n self.display.v(\"Graphing tasks...\")\n nb_tasks = 0\n for task_block in play.tasks:\n nb_tasks = self._include_tasks_in_blocks(current_play=play, graph=play_subgraph,\n parent_node_name=play_name, parent_node_id=play_id,\n block=task_block, color=color,\n current_counter=role_number + nb_pre_tasks,\n play_vars=play_vars, node_name_prefix=\"[task] \")\n\n # loop through the post_tasks\n self.display.v(\"Graphing post_tasks...\")\n for post_task_block in play.post_tasks:\n self._include_tasks_in_blocks(current_play=play, graph=play_subgraph, parent_node_name=play_name,\n parent_node_id=play_id, block=post_task_block, color=color,\n current_counter=nb_tasks, play_vars=play_vars,\n node_name_prefix=\"[post_task] \")\n\n self.display.banner(\"Done graphing {}\".format(play_name))\n self.display.display(\"\") # just an empty line\n # moving to the next play",
"def init_graph_db(self):\n LOG.info(\"[NEUTRON] Adding Neutron components to the landscape.\")\n now_ts = time.time()\n # Collect Networks\n networks = self.neutron.list_networks()\n for net in networks.get('networks', list()):\n net_id = net.get('id', \"UNDEFINED\")\n net_name = net.get('name', \"UNDEFINED\")\n self._add_network(net_id, net_name, now_ts)\n\n # Collect subnets\n subnets = self.neutron.list_subnets()\n for subnet in subnets.get('subnets', list()):\n subnet_id = subnet.get('id', \"UNDEFINED\")\n cidr = subnet.get('cidr', \"UNDEFINED\")\n network_id = subnet.get('network_id', \"UNDEFINED\")\n self._add_subnet(subnet_id, cidr, network_id, now_ts)\n\n # Collect ports\n ports = self.neutron.list_ports()\n for port in ports.get('ports', list()):\n port_id = port.get(\"id\", \"UNDEFINED\")\n mac, fixed_ip, device_id, net_id = self._get_port_info(port)\n self._add_port(port_id, mac, fixed_ip, device_id, net_id, now_ts)",
"def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the crosslistings of the top edges from a course
|
def getTopEdgesFrom(self, session, courseid):
node = self.getNode(courseid) # get CourseNode
if not node:
return []
edges = node.getEdges() # get its Edge dict
return sorted(edges.keys(), key=lambda k: edges[k], reverse=True)[:5]
|
[
"def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = set(nbr for key in courses[-1] for nbr in wall.neighbors(key))\n course = list(nbrs - seen)\n courses.append(course)\n seen.update(nbrs)\n vertices -= nbrs\n\n return courses",
"def get_crossings(self):\n return self.crossings",
"def compute_cross_edge(graph, partition):\n cross_list = []\n for n in graph.edges:\n if Partition.crosses_parts(partition, n):\n cross_list.append(n)\n return cross_list # cut edges of partition",
"def GetTopConfidenceEdges(g, dia, topn=20):\r\n edgez = {(e[0], e[1]): e[2]['z'] for e in g.edges(data=True)}\r\n edgeconf = {(e[0], e[1]): e[2]['frac_minority'] for e in g.edges(data=True)}\r\n edgenum = {(e[0], e[1]): e[2]['num_patients'] for e in g.edges(data=True)}\r\n edgez_sorted = sorted(edgez.items(), key=operator.itemgetter(1), reverse=True)[:topn]\r\n newedgez_sorted = []\r\n for e in edgez_sorted:\r\n e = list(e)\r\n edge = e[0]\r\n e.append(edgenum[e[0]])\r\n e.append(edgeconf[e[0]])\r\n newedgez_sorted.append(e)\r\n PrintCodeDescr(g, dia, newedgez_sorted, mode=\"edge\")",
"def getTopClusters(cluNet, node, ncutoff=8):\n neigh = cluNet[node]\n #make dict of weights keyed on neighbor IDs.\n edgeWeights = {n:neigh[n][\"weight\"] for n in neigh}\n #Add the node its own self with superhigh value weights\n edgeWeights[node] = 999999\n #Sort edgeWeights.items on the second element of each item, highest values descending to lowest\n sortedEdges = sorted(edgeWeights.items(), key=operator.itemgetter(1), reverse=True)\n #go back throgh and remove clusters keyed on identical proteins.\n resortedEdges = []\n lastCMID = \"\"\n for se in sortedEdges:\n wpID = se[0].split(\"|\")[2]\n if wpID != lastCMID:\n resortedEdges.append\n sortedClusters = tuple([e[0] for e in sortedEdges])[0:ncutoff]\n\n\n return (sortedClusters, sortedEdges)",
"def decreasing_cover_relations(self):\n relations = []\n for i in range(self.size(), 1, -1):\n for j in range(i - 1, 0, -1):\n if self.le(i, j):\n relations.append((i, j))\n break\n return relations",
"def get_lcc(G,S):\n if len(S) == 0:\n return (nx.Graph())\n else:\n g = nx.subgraph(G,S)\n if len(g.nodes()) > 0:\n lcc = max(connected_component_subgraphs(g), key=len)\n return (lcc)\n else:\n return(g)",
"def increasing_cover_relations(self):\n relations = []\n size = self.size()\n for i in range(1, size):\n for j in range(i + 1, size + 1):\n if self.le(i, j):\n relations.append((i, j))\n break\n return relations",
"def get_neighbour_edges(self, cur: Union[str, int]) -> list:\n\t\treturn [edge for edge in self.edges if cur in edge]",
"def edges(self):",
"def top_k_betweenness_centrality(self):\n d={}\n l=[]\n for v in vertices:\n a=self.betweenness_centrality(v)\n d[v]=a\n l.append(a)\n m=max(l)\n l1=[]\n for key in d:\n if d[key]==m:\n l1.append(key)\n\n return l1",
"def convex_hull(self):\r\n\r\n if len(self.nodes) < 2: return []\r\n\r\n # grab the topmost node (the one with the least y)\r\n topmost = sorted(self.nodes, key=lambda node:node.y)[0]\r\n\r\n segments = []\r\n # initially the current line is looking upwards\r\n current_line = Point2(topmost.x, topmost.y) - Point2(topmost.x, topmost.y - 1)\r\n current_node = topmost\r\n smallest = None\r\n\r\n node_list = list(self.nodes)\r\n\r\n while current_node and smallest != topmost:\r\n # calculate angles between current line\r\n angles = [(node, current_line.angle(current_node - Point2(node.x, node.y))) for node in node_list if node != current_node]\r\n\r\n if angles:\r\n smallest = sorted(angles, key = lambda x: x[1])[0][0]\r\n segments.append((current_node, smallest)) # add to the results\r\n\r\n # now we will be looking for next connection\r\n current_line = Point2(current_node.x, current_node.y) - Point2(smallest.x, smallest.y)\r\n current_node = smallest\r\n node_list.remove(smallest) # tiny optimization\r\n else:\r\n current_node = None\r\n\r\n\r\n return segments",
"def testTopEdges(self):\n G = nx.karate_club_graph()\n\n # top nodes for the Karate Club Graph\n top_def_edges = {'edge_betweeness': ((0, 31), (0, 6), (0, 5))}\n top_5_edges = {'edge_betweeness': ((0, 31), (0, 6), (0, 5), (0, 2), (0, 8))}\n \n top_all_edges = {'edge_betweeness': ((0, 31), (0, 6), (0, 5), (0, 2), (0, 8), (2, 32), (13, 33), (19, 33), \n (0, 11), (26, 33), (31, 33), (0, 4), (0, 10), (0, 12), (0, 19), (0, 13), \n (25, 31), (31, 32), (2, 27), (8, 33), (0, 17), (0, 21), (24, 31), (14, 33), \n (15, 33), (18, 33), (20, 33), (22, 33), (23, 33), (1, 30), (2, 9), (27, 33),\n (8, 32), (29, 33), (9, 33), (5, 16), (6, 16), (30, 33), (0, 1), (2, 7), \n (28, 33), (14, 32), (15, 32), (18, 32), (20, 32), (22, 32), (29, 32), (1, 2), \n (0, 7), (2, 28), (2, 3), (23, 32), (0, 3), (23, 25), (1, 17), (1, 21), (24, 27), \n (30, 32), (3, 13), (28, 31), (1, 19), (1, 13), (3, 12), (23, 27), (8, 30), \n (2, 8), (32, 33), (1, 3), (2, 13), (1, 7), (23, 29), (4, 6), (5, 10), \n (26, 29), (24, 25), (3, 7), (5, 6), (4, 10))}\n\n # test top_edges which outputs top 3 by default\n res_dict = top_edges(G)\n self.assertDictEqual(top_def_edges, res_dict)\n \n # test top_edges which outputs top 5 edges\n res_dict = top_edges(G, k=5)\n self.assertDictEqual(top_5_edges, res_dict)\n \n # test top_edges which outputs all edges\n res_dict = top_edges(G, k=-1)\n self.assertDictEqual(top_all_edges, res_dict)",
"def _getCollidingCourses(self):\n for courseID in range(self._courseCount):\n self._collidingCourses.append([])\n [self._collidingCourses[courseID].append(x) for x in range(self._courseCount) if x != courseID and self._haveSameStudents([courseID, x])]",
"def top_sort(self):\n unvisited = set(self.nodes)\n marked = set([])\n ret_val = []\n\n def visit(node):\n \"\"\"Visit a node; return True if all is well, False if a cycle is\n detected\"\"\"\n if node in marked:\n return False\n if node not in unvisited:\n return True\n marked.add(node)\n for nbr in self.edges[node]:\n if not visit(nbr):\n return False\n ret_val.append(node)\n marked.remove(node)\n unvisited.remove(node)\n return True\n\n while len(unvisited) != 0:\n node = unvisited.pop()\n unvisited.add(node)\n if not visit(node):\n return None\n return ret_val[::-1]",
"def ComptonCrossSection(self,E):\n return ScatterIncoherent(E)",
"def generate_top20_candidates(cosine_sim):\n top20_indices = cosine_sim[0].argsort()[:-21:-1]\n top20_cosine = [cosine_sim[0][i] for i in top20_indices]\n return top20_indices, top20_cosine",
"def _getOrderedCourses(self):\n self._orderedCourses = numpy.argsort([len(x) for x in self._availableRooms])",
"def get_vertical_cross_section(self, lon0, lat0, lon1, lat1, depth_bottom=6000.0, depth_top=0.0):\n # automatically adjust number of points\n d = gc_distance(lon0, lat0, lon1, lat1)\n d_cell = np.sqrt(self.cellarea.mean())\n npoints = int(np.ceil(d/d_cell*1000))\n print('Nearest neighbor interpolation to {} points.'.format(npoints))\n loni, lati = gc_interpolate(lon0, lat0, lon1, lat1, npoints)\n # adjust lon in range [0, 360)\n loni = np.where(loni<0.0, loni+360.0, loni)\n # select nearest neighbor\n pts = np.array(list(zip(loni,lati)))\n tree = spatial.KDTree(list(zip(self.lon, self.lat)))\n p = tree.query(pts)\n cidx = p[1]\n zidx0 = np.argmin(np.abs(self.depth-depth_top))\n zidx1 = np.argmin(np.abs(self.depth-depth_bottom))\n data = self.data[cidx,zidx0:zidx1]\n depth = self.depth[zidx0:zidx1]\n bottomdepth = self.bottomdepth[cidx]\n # calculate distance from [lon0, lat0]\n dist = np.zeros(npoints)\n for i in np.arange(npoints-1):\n dist[i+1] = gc_distance(lon0, lat0, loni[i+1], lati[i+1])\n obj = MPASOVertCrossSection(data=data, lon=loni, lat=lati, dist=dist, depth=depth, bottomdepth=bottomdepth, name=self.name, units=self.units)\n return obj"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initializes turtle instance for turtle game.
|
def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):
turtle_instance = turtle.Turtle()
turtle_instance.shape(turtle_shape)
turtle.bgcolor(bg_color)
turtle_instance.color(turtle_color)
turtle_instance.speed(turtle_speed)
return turtle_instance
|
[
"def init_turtle():\n turtle.up()\n turtle.home()",
"def __init__(self):\r\n turtle.setup()\r\n turtle.screensize(100000, 100000)\r\n self.__risi_pot = turtle.Turtle()\r\n self.__risi_prijatelje = turtle.Turtle()\r\n self.__risi_pot.color('red')\r\n self.__risi_pot.pensize(2)\r\n self.__risi_pot.speed('fast')\r\n self.__risi_prijatelje.speed('fast')",
"def __init__(self, pen):\r\n self.__pen = turtle.Turtle()#initializing variables\r\n self.__state = 0",
"def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}",
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def start():\n LOGGER.debug('start: Initializing Pyturtle.')\n pygame.init()",
"def _prepare_turtle():\n turtle.setup(width=screen_width)\n turtle.shape(turtle_shape)\n turtle.title(title)",
"def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer",
"def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up",
"def reset():\n self.turtle = Turtle()\n clear()",
"def create_turtle():\n name = turtle.Turtle()\n name.speed(0)\n name.ht()\n turtle.colormode(255)\n color = (randrange(256), randrange(256), randrange(256))\n name.color(color)\n name.fillcolor(color)\n return name",
"def add(self, turtle):\n\n self.start_turtles.append(turtle.position)\n self.turtles.append(turtle)\n self.items[turtle] = self.canvas.create_polygon(0, 0)\n self.update(turtle)",
"def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)",
"def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()",
"def initialize(self,tape):\n\t\tpass",
"def __init__(self, corners):\n # shallow copy of corners\n self.corners = corners[:]\n self._turtle = Turtle()\n self._set_perimeter()\n self._set_area()",
"def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()",
"def control_turtle():\n # create the turtle and set variables\n turt = create_turtle(0, 0)\n p = turt[0]\n x = turt[1]\n y = turt[2]\n\n # create first square\n getRGB(turt)\n p.begin_fill()\n\n for i in range(3):\n check_direction(turt, 5)\n p.left(90)\n p.end_fill()\n\n # create second square\n getRGB(turt)\n p.begin_fill()\n check_direction(turt, 10)\n p.left(90)\n check_direction(turt, 5)\n p.left(90)\n check_direction(turt, 5)\n p.left(90)\n check_direction(turt, 5)\n p.end_fill()\n\n # begin third square\n check_direction(turt, 5)\n getRGB(turt)\n p.begin_fill()\n\n # default motion begins\n while (-y_image/2 < y < y_image/2):\n square_maker(turt, i)\n\n # make sure turtle coordinates are saved\n x = turt[1]\n y = turt[2]\n print(x)\n print(y)\n\n save_image()",
"def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Defines the turtle movement for the initialized turtle instance and executes that movement.
|
def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):
turtle_name = initialize(turtle_shape, bg_color,
turtle_color, turtle_speed)
for i in range(36):
for i in range(4):
turtle_name.forward(200)
turtle_name.right(90)
turtle_name.right(10)
|
[
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def init_turtle():\n turtle.up()\n turtle.home()",
"def movement(self):",
"def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}",
"def _move_turtle(self, x, y):\n self.canvas.xcor = x\n self.canvas.ycor = y\n self.canvas.move_turtle()\n if self.interactive_mode:\n self.display_coordinates()\n if self.running_sugar:\n self.selected_turtle.spr.set_layer(TURTLE_LAYER)\n self.lc.update_label_value('xcor',\n self.canvas.xcor / self.coord_scale)\n self.lc.update_label_value('ycor',\n self.canvas.ycor / self.coord_scale)",
"def control_turtle():\n # create the turtle and set variables\n turt = create_turtle(0, 0)\n p = turt[0]\n x = turt[1]\n y = turt[2]\n\n # create first square\n getRGB(turt)\n p.begin_fill()\n\n for i in range(3):\n check_direction(turt, 5)\n p.left(90)\n p.end_fill()\n\n # create second square\n getRGB(turt)\n p.begin_fill()\n check_direction(turt, 10)\n p.left(90)\n check_direction(turt, 5)\n p.left(90)\n check_direction(turt, 5)\n p.left(90)\n check_direction(turt, 5)\n p.end_fill()\n\n # begin third square\n check_direction(turt, 5)\n getRGB(turt)\n p.begin_fill()\n\n # default motion begins\n while (-y_image/2 < y < y_image/2):\n square_maker(turt, i)\n\n # make sure turtle coordinates are saved\n x = turt[1]\n y = turt[2]\n print(x)\n print(y)\n\n save_image()",
"def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()",
"def _setup_movement_system(self):\n self.traj_object = UR5ExecTrajectory()",
"def move(self, friction = 0.0):\n try:\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n self.goto(newX, newY)\n # apply friction\n self.dx = self.dx * (1 - friction)\n self.dy = self.dy * (1 - friction)\n except:\n print(\"Error, probably because dx and dy are not properties of the turtle\")",
"def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position\n \"black\",\n 1,\n False\n )\n self.mock_update.assert_called_once_with()",
"def move(self, movement):\r\n if self.terrain:\r\n self.terrain.move(movement)",
"def setMovement(self, movement):\n self.ma = movement",
"def change_movement(self, action):\r\n if action == \"diagonal\" and self.movement != \"diagonal\":\r\n self.movement = \"diagonal\"\r\n self.x_speed = 3\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_diagonal)\r\n elif action == \"horizontal\" and self.movement != \"horizontal\":\r\n self.movement = \"horizontal\"\r\n self.x_speed = 3\r\n self.y_speed = 0\r\n self.canvas.after(50, self.move_horizontal)\r\n elif action == \"vertical\" and self.movement != \"vertical\":\r\n self.movement = \"vertical\"\r\n self.x_speed = 0\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_vertical)\r\n elif action == \"inward_outward\":\r\n self.movement = \"inward_outward\"\r\n self.canvas.after(50, self.move_inward_outward)",
"def step(self):\n \n self.steer()\n self.wobble()\n self.move()",
"def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance",
"def up():\n turtle.up()",
"def _move_straight(self, param_dis, param_speed, param_dir):\n obj_velocity_msg = Twist()\n\n # Store the start position of the turtle\n start_x = self._curr_x\n start_y = self._curr_y\n\n # Move the turtle till it reaches the desired\n # position by publishing to Velocity topic\n handle_pub_vel = rospy.Publisher(self._config_ros_pub_topic,\n Twist, queue_size=200)\n\n var_loop_rate = rospy.Rate(100)\n\n # Set the Speed of the Turtle according to the direction\n if param_dir == 'b':\n obj_velocity_msg.linear.x = (-1) * abs(int(param_speed))\n else:\n obj_velocity_msg.linear.x = abs(int(param_speed))\n\n # Move till desired distance is covered\n dis_moved = 0.0\n\n while not rospy.is_shutdown():\n\n # Send feedback to the client\n obj_msg_feedback = msgTurtleFeedback()\n\n obj_msg_feedback.cur_x = self._curr_x\n obj_msg_feedback.cur_y = self._curr_y\n obj_msg_feedback.cur_theta = self._curr_theta\n\n self._sas.publish_feedback(obj_msg_feedback)\n\n if dis_moved < param_dis:\n handle_pub_vel.publish(obj_velocity_msg)\n\n var_loop_rate.sleep()\n\n d_x = self._curr_x - start_x\n d_y = self._curr_y - start_y\n dis_moved = abs(math.sqrt((d_x ** 2) + (d_y ** 2)))\n rospy.logdebug('Distance Moved: {}'.format(dis_moved))\n else:\n break\n\n # Stop the Turtle after desired distance is covered\n obj_velocity_msg.linear.x = 0\n handle_pub_vel.publish(obj_velocity_msg)\n rospy.logdebug('Destination Reached')",
"def _move(self, steps):\n if self.direction == 0:\n self.y += steps\n elif self.direction == 1:\n self.x += steps\n elif self.direction == 2:\n self.y -= steps\n elif self.direction == 3:\n self.x -= steps",
"def pick_up_and_move_it(robot):\n robot.sound_system.speech_maker.speak(\"Moving an object\")\n robot.drive_system.spin_clockwise_until_sees_object(25,1400)\n robot.drive_system.go_forward_until_distance_is_less_than(3,25)\n robot.arm_and_claw.raise_arm()\n robot.drive_system.go(25,-25)\n time.sleep(2)\n robot.drive_system.stop()\n robot.drive_system.go_straight_for_inches_using_time(10,50)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Saves summary statistics as a csv file in the current directory and returns the output filename.
|
def save_summary_statistics_csv(
experiment_name, roi_summary_data, save_directory_path: str = ""
):
# Create directories on the path if they don't already exist
Path(save_directory_path).mkdir(parents=True, exist_ok=True)
csv_filename = f"{experiment_name} - summary statistics (generated {iso_datetime_for_filename(datetime.now())}).csv"
csv_filepath = Path(save_directory_path) / csv_filename
roi_summary_data.to_csv(csv_filepath, index=False)
print(f"Summary statistics saved to: {csv_filepath}\n")
return csv_filepath
|
[
"def save_csv(dir_out, no_of_files, result):\n try:\n np.savetxt(f\"{dir_out}_results_from_{no_of_files}-files.csv\",\n result.T, delimiter=\",\", header='Time(h), Avrg_int, SD, SE, Sum_int, Max_int')\n except:\n print(\"Existing csv file is not accessible!\")\n exit()",
"def save_csv(dir_out, list_of_files, result):\n try:\n np.savetxt(f\"{dir_out}_results_from_{len(list_of_files)}-files.csv\",\n result.T, delimiter=\",\", header='Time, Avrg_int, SD, SE, Sum_int, Max_int')\n except:\n print(\"Existing csv file is not accessible!\")\n exit()",
"def checkpoint_stats(self, stats):\n stats.to_csv(\n self.params.stat.dir + self.params.model.name + \"_\" + self.params.data.name + \".stat\",\n sep='\\t',index=False,header=True)",
"def save_stats(path, stats_df):\n utils.ensure_path_exist(path)\n stats_df.to_csv(path, index=False)",
"def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)",
"def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))",
"def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)",
"def save_to_file(self):\n file_name = 'data/out/' + self.type + '.csv'\n logger.info('Saving pre-processed data to ' + file_name)\n\n self.data.to_csv(file_name, index=False)",
"def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)",
"def save_stats(self):\n with open(self.stats_file, 'w', newline='', encoding='utf-8') as fp:\n writer = csv.DictWriter(fp, fieldnames=self.field_names)\n writer.writeheader()\n for row in self.stats.values():\n writer.writerow(row)",
"def save_to_csv(dataset):\n\n today_date = date.today().strftime(\"%d_%m_%Y\")\n file_name = \"Sports_News_\" + today_date + \".csv\"\n # dataset.to_csv(os.path.join(save_input, file_name), index=False)\n dataset.to_csv(save_input / file_name, index=False)",
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def model_summary_to_file(model, save_path):\n with open(save_path, 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + \"\\n\"))",
"def generate_csv(summaries, filename):\n with open(filename, 'wb') as f:\n header = ','.join(['ACTIVATION', 'HIDDEN SIZE', 'TRAIN LOSS', 'VAL LOSS', 'TRAIN PPX', 'VAL PPX']) + '\\n'\n f.write(header)\n\n def extract_best(summary, metric):\n return min([h.metrics[metric] for h in summary['history']])\n for summary in summaries:\n activation = summary['meta']['ACTIVATION']\n h_size = summary['meta']['NUM_HIDDEN']\n train_loss, val_loss, train_ppx, val_ppx = extract_best(summary, 'train_loss'), extract_best(summary, 'val_loss'), extract_best(summary, 'train_ppx'), extract_best(summary, 'val_ppx')\n line = \",\".join([activation] + map(lambda x: \"%.2f\" % (x), [h_size, train_loss, val_loss, train_ppx, val_ppx])) + '\\n'\n f.write(line)",
"def write_summary(lm, filename):\n predictors = lm.model.exog_names\n tvals = lm.tvalues\n pvals = lm.pvalues\n betas = lm.params\n se = lm.bse\n ci = lm.conf_int()\n r2 = lm.rsquared\n r2a = lm.rsquared_adj\n aic = lm.aic\n bic = lm.bic\n ll = lm.llf\n F = lm.fvalue\n df = lm.df_resid\n n = lm.nobs\n \n table = pd.DataFrame(dict(predictors=predictors,\n tvals=tvals,\n pvals=pvals,\n betas=betas,\n se=se,\n ci0=ci[0],\n ci1=ci[1],\n df=df,\n n=n,\n r2=r2))\n table.to_csv(filename)",
"def save_csv(csv_fn, output_dir, df_to_save):\n\n # import packages\n import os\n import pandas as pd\n\n\n if os.path.isfile(output_dir + '/' + csv_fn):\n print('Data already saved and will not be saved again')\n else:\n df_to_save.to_csv(output_dir + '/' + csv_fn, index = False)\n\n return None",
"def save_results_to_csv(save_file_path, append=True, tmp_file_path=tmp_file_path, datefmt='%d/%m/%Y %H:%M:%S'):\n # load tmp results\n res_summary = open_json(tmp_file_path, data_format=pd.DataFrame)\n\n # calculate average scores\n combis = list(product(\n ['CV', 'Val'], \n ['precision', 'recall', 'f1', 'exact match', 'loss', \n 'precision_CE', 'recall_CE', 'f1_CE', 'exact match_CE']\n ))\n for combi in combis:\n get_average(res_summary, combi)\n\n # calculate end time\n end = datetime.now()\n res_summary['endtime'] = end.strftime(datefmt)\n res_summary['timetaken'] = end - \\\n datetime.strptime(res_summary['starttime'][0], datefmt)\n\n if append and os.path.isfile(save_file_path):\n # load old file\n old_summary = pd.read_csv(save_file_path)\n # append below\n res_summary = pd.concat([old_summary, res_summary], axis=0)\n\n # save final and delete tmp file\n res_summary.to_csv(save_file_path, index=False)\n os.remove(tmp_file_path)",
"def store_performance(results, out_dir='', name='results_summary'):\n\n results_file = os.path.join(out_dir, name + '.csv')\n\n results_summary = {\n 'pop_mean_accuracies': ['%.2f' % (100 * np.mean(results[:, 1]))],\n 'pop_max_accuracies': ['%.2f' % (100 * np.max(results[:, 1]))],\n 'pop_mean_rewards': [np.mean(results[:, 0])],\n 'pop_max_rewards': [np.max(results[:, 0])],\n }\n\n df = pd.DataFrame.from_dict(results_summary)\n\n if os.path.isfile(results_file):\n old_df = pd.read_csv(results_file, sep=';')\n df = pd.concat([old_df, df], sort=True)\n df.to_csv(results_file, sep=';', index=False)",
"def save_analysis(analysisresults, analysisfile):\n with open(analysisfile, \"w\", encoding=\"utf8\") as outfile:\n analysisresults.to_csv(outfile, sep=\"\\t\", index=False)\n print(\"The analysis results have been saved to \"\n + basename(analysisfile) + \".\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
stack pandas DataFrames logically into a bigger DataFrame and reset the index of the resulting DataFrame to avoid duplicates in the index
|
def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame:
return pd.concat(dataframes).reset_index(drop=True)
|
[
"def split_and_stack(df,new_names):\n\n half = int(len(df.columns)/2)\n left = df.iloc[:, :half]\n right = df.iloc[:,half:]\n\n return pd.DataFrame(data = np.vstacks([left.values, right.values], columns = new_names))",
"def make_sub_df(src_df, index_col, cols):\n cols.append(index_col)\n return src_df[cols].drop_duplicates(index_col).set_index(index_col)",
"def window_stack(df, width=3):\n n = df.shape[0]\n a = np.hstack(list(df.values[(width-1-i):(n-i)] for i in range(0, width)))\n\n times = [ ('t' if not idx else 't-{:d}'.format(idx)) for idx in range(width) ]\n columns = pd.MultiIndex.from_product((times, df.columns), names=('time', 'location'))\n\n return pd.DataFrame(a, index=df.index[width-1:], columns=columns)",
"def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n\r\n return res",
"def fill_in_df(test, results):\n a = results.row_id.ravel()\n b = test.row_id.ravel()\n print(len(a))\n print(len(b))\n diff = set(b) - set(a)\n alist = list(diff)\n for i in range(0, len(alist)):\n f = alist[i]\n a = pd.DataFrame([[f, '8129178888 8129178888 8129178888']])\n a.columns = ['row_id', 'place_id']\n results = results.append(a)\n return results",
"def merge (*a_data) :\n i = 0\n for loc_data in a_data :\n i += 1\n if i == 1 :\n loc_new_df = loc_data\n else :\n loc_new_df = __pd.merge(loc_new_df,loc_data,left_index=True,right_index=True)\n return loc_new_df",
"def _reindex_helper(old_index, new_index, axis, npartitions, *df):\n df = pandas.concat(df, axis=axis ^ 1)\n if axis == 1:\n df.index = old_index\n elif axis == 0:\n df.columns = old_index\n\n df = df.reindex(new_index, copy=False, axis=axis ^ 1)\n return _create_blocks_helper(df, npartitions, axis)",
"def dataframe_crossjoin(df1, df2, **kwargs):\n df1['_tmpkey'] = 1\n df2['_tmpkey'] = 1\n\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\n\n df1.drop('_tmpkey', axis=1, inplace=True)\n df2.drop('_tmpkey', axis=1, inplace=True)\n\n return res",
"def drop_multindex(df):\n\n if isinstance(df.index, pd.MultiIndex):\n df_flat = df.reset_index()\n # keep index if False\n else:\n df_flat = df.copy()\n return df_flat",
"def create_shifted_df(df: pd.DataFrame, periods: int = 1) -> pd.DataFrame:\n data_df_shifted = df.shift(periods=periods)\n data_df_shifted = data_df_shifted.combine_first(df).add_suffix(\"_shifted\")\n return pd.concat([df, data_df_shifted], axis=1, join=\"inner\").reset_index(\n drop=True\n )",
"def add_augmentations(df: pd.DataFrame) -> pd.DataFrame:\n df_flip_y = df.assign(flip_y=1)\n df_flip_x = df.assign(flip_x=1)\n df_both = df.assign(flip_y=1, flip_x=1)\n name_index = df.index.name\n df_aug = (\n pd.concat(\n [df, df_flip_y, df_flip_x, df_both], ignore_index=True, sort=False\n )\n .rename_axis(name_index)\n )\n return df_aug",
"def reset_column_index(df: DataFrame, level: List[Any], drop: bool=True, inplace: bool=False):\n \n if inplace:\n if drop:\n df.columns = df.columns.droplevel(level)\n else:\n raise NotImplementedError\n return df\n else:\n if drop:\n result = df.copy()\n result.columns = df.columns.droplevel(level)\n else:\n result = df.stack(level)\n return result",
"def combined_df(self) -> pd.DataFrame:\n return pd.concat([self.data, self.latest_data.reset_index()], ignore_index=True)",
"def stack_index(self, index, on_top=True, axis=1, inplace=False):\n\n def apply_func(obj_index):\n if on_top:\n return index_fns.stack_indexes(index, obj_index)\n return index_fns.stack_indexes(obj_index, index)\n\n return self.apply_on_index(apply_func, axis=axis, inplace=inplace)",
"def stack(self, level, dropna):\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )",
"def merge_zach_dataframes(items_df, stores_df, sales_df):\n merged_df = pd.DataFrame()\n sales_df.rename(columns={'item' : 'item_id'}, inplace=True)\n merged_df = pd.merge(items_df, sales_df, how=\"left\", on=\"item_id\")\n merged_df.rename(columns={'store' : 'store_id'}, inplace=True)\n merged_df = pd.merge(merged_df, stores_df, how=\"left\", on=\"store_id\")\n return merged_df",
"def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:\n return pd.concat(serieses, axis=\"columns\").T",
"def _create_stacks(self, df: pd.DataFrame):\n\n assert len(self._stacks) is 0, 'Stacks have already been created.'\n\n for value in df[self.spread_by()].unique():\n items = df.loc[df[self.spread_by()] == value]\n self._stacks.append([value, items])",
"def _fill_missing_indexes(results):\n all_metric_indices_over_columns = all_metric_indices + all_over_columns\n all_idx = split_vars + [var_name] if melted else split_vars[:]\n all_idx += all_metric_indices\n all_idx += [comparison.condition_column] if comparison else []\n all_idx += all_over_columns\n # Metrics might have different MetricIndex and Over columns. To concat\n # metrics later, we need all metrics to have same indexes.\n for i, output in enumerate(results):\n for col in all_metric_indices_over_columns:\n if col not in output.index.names:\n output[col] = \"\"\n output.set_index(col, append=True, inplace=True)\n # To make the indexes of all metrics be in the same order.\n output = output.reset_index(all_idx).set_index(all_idx)\n\n if not melted:\n # df.unstack(df.index) is a pd.Series, not a pd.DataFrame anymore.\n # This will introduce trouble when operating with other df. So add a\n # placeholder index column in such case.\n if len(output.index.names) == len(all_metric_indices):\n output[\"\"] = 0\n output.set_index(\"\", append=True, inplace=True)\n output = output.unstack(all_metric_indices)\n\n results[i] = output\n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
stack pandas Series logically into a DataFrame
|
def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:
return pd.concat(serieses, axis="columns").T
|
[
"def stack(*series):\n _timeseriescompat_multiple(*series)\n return time_series(MA.column_stack(series), series[0]._dates,\n **_attrib_dict(series[0]))",
"def series_to_frame(series: pd.Series, new_col_names: Dict[Any, Any]) -> pd.DataFrame:\n return series.to_frame().reset_index().rename(columns=new_col_names)",
"def expand_series(ser, columns):\n return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1)",
"def binarize(series):\n name = series.name\n df = pd.DataFrame()\n for category in series.value_counts().index:\n df[category] = (series == category)\n return df",
"def make_dummies(series, astype=float):\r\n import pandas as pd\r\n \r\n unique_elements = []\r\n series = series.fillna(\"\")\r\n clean_str = lambda x: x.strip().lower().replace(' ', '_')\r\n \r\n # Create a list of unique values throughout all cells in column\r\n for row in series:\r\n unique_elements.extend([clean_str(element) for element in row if clean_str(element) not in unique_elements])\r\n \r\n \r\n def create_bool_series(cell, unique):\r\n for element in cell:\r\n if unique == clean_str(element):\r\n return True\r\n else:\r\n pass\r\n return False\r\n \r\n dataframe = pd.DataFrame()\r\n for u in unique_elements:\r\n dataframe.insert(0, u, series.apply(lambda x: create_bool_series(x, unique=u)), allow_duplicates=True) # figure out what exactly allow_dups does\r\n \r\n # reverse order of columns\r\n # Later add sortby parameter (alphabetically, etc...)\r\n return dataframe[dataframe.columns.tolist()[::-1]].astype(astype)",
"def stack(self, level, dropna):\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )",
"def appstart_series(df: pd.DataFrame, series: Union[pd.Series, List[pd.Series]],\n index: Optional[Union[int, str, List[Union[int, str]]]] = None) -> pd.DataFrame:\n\n # TODO does not optimized function!\n\n if not isinstance(series, list):\n series = [series]\n\n if index is not None:\n\n if not isinstance(index, list):\n index = [str(index)]\n\n else:\n\n if len(index) > len(series):\n index = index[:len(series)]\n\n elif len(index) < len(series):\n\n for i in range(len(series) - len(index)):\n index.append('Unnamed {}'.format(i))\n\n for i in index:\n if not isinstance(i, str):\n str(i)\n\n index = np.array(index + df.index.tolist())\n\n df_series = [df.iloc[i] for i in range(df.shape[0])]\n\n for s in df_series:\n series.append(s)\n\n return pd.DataFrame(series, index=index)",
"def split_and_stack(df,new_names):\n\n half = int(len(df.columns)/2)\n left = df.iloc[:, :half]\n right = df.iloc[:,half:]\n\n return pd.DataFrame(data = np.vstacks([left.values, right.values], columns = new_names))",
"def sample_series(self, series, append_frame=None):\n\n columns, values = self.get_readings(series)\n\n dataframe = DataFrame(values, columns=columns)\n dataframe = self.format_index(dataframe, self.ENERGY_DB_INDEX)\n\n # https://pandas.pydata.org/pandas-docs/stable/merging.html\n if append_frame is not None:\n # dataframe = pandas.concat([dataframe, input_frame], axis=1, join='inner', join_axes=[input_frame.index])\n dataframe = pandas.merge(append_frame, dataframe, on=['time', 'time'])\n # print(dataframe)\n\n return dataframe",
"def pandas_series(arr, nan_to_null=False):\n import pandas as pd\n return pd.Series(arr, copy=False)",
"def _make_series(\n n_timepoints=50,\n n_columns=1,\n all_positive=True,\n index_type=None,\n return_numpy=False,\n random_state=None,\n add_nan=False,\n return_mtype=None,\n):\n rng = check_random_state(random_state)\n data = rng.normal(size=(n_timepoints, n_columns))\n if add_nan:\n # add some nan values\n data[int(len(data) / 2)] = np.nan\n data[0] = np.nan\n data[-1] = np.nan\n if all_positive:\n data -= np.min(data, axis=0) - 1\n\n # np.ndarray case\n if return_numpy or return_mtype == \"np.ndarray\":\n if n_columns == 1:\n data = data.ravel()\n return data\n\n # pd.Series, pd.DataFrame case\n index = _make_index(n_timepoints, index_type)\n if n_columns == 1 and return_mtype is None or return_mtype == \"pd.Series\":\n return pd.Series(data.ravel(), index)\n elif return_mtype is None or return_mtype == \"pd.DataFrane\":\n return pd.DataFrame(data, index)\n\n # all other mtypes, convert from pd.DataFrame\n from sktime.datatypes import convert\n\n res = pd.DataFrame(data, index)\n res_conv = convert(res, \"pd.DataFrame\", return_mtype, \"Series\")\n return res_conv",
"def CombineSeries(*args):\r\n df = pd.concat([*args], axis=1)\r\n\r\n return df",
"def as_series(x):\n x = _check_is_1d_frame(x)\n x = x.as_data_frame(use_pandas=True)[x.columns[0]]\n return x",
"def window_stack(df, width=3):\n n = df.shape[0]\n a = np.hstack(list(df.values[(width-1-i):(n-i)] for i in range(0, width)))\n\n times = [ ('t' if not idx else 't-{:d}'.format(idx)) for idx in range(width) ]\n columns = pd.MultiIndex.from_product((times, df.columns), names=('time', 'location'))\n\n return pd.DataFrame(a, index=df.index[width-1:], columns=columns)",
"def series_from_dataframe(df, index_column: str, value_column: str=None):\n\n if len(df.columns) > 2:\n df = df[[index_column, value_column]].copy()\n else:\n df = df.copy()\n df.set_index(index_column, inplace=True)\n sr = df.squeeze()\n sr.name = value_column\n return sr",
"def append_series(df: pd.DataFrame, series: Union[pd.Series, List[pd.Series]],\n index: Optional[Union[int, str, List[Union[int, str]]]] = None) -> pd.DataFrame:\n\n if not isinstance(series, list):\n if len(df.columns.tolist()) != 0:\n columns = df.columns.tolist()\n else:\n columns = series.index.tolist()\n\n series = [series.to_numpy()]\n else:\n if len(df.columns.tolist()) != 0:\n columns = df.columns.tolist()\n else:\n columns = series[0].index.tolist()\n\n series = [\n s.to_numpy()\n for s in series\n ]\n\n if index is not None:\n\n if not isinstance(index, list):\n index = [str(index)]\n\n else:\n\n if len(index) > len(series):\n index = index[:len(series)]\n\n elif len(index) < len(series):\n\n for i in range(len(series) - len(index)):\n index.append('Unnamed {}'.format(i))\n\n for i in index:\n if not isinstance(i, str):\n str(i)\n\n index = np.array(index)\n\n other = pd.DataFrame(series, index=index, columns=columns)\n\n return df.append(other)",
"def series(self):\n import pandas as pd\n\n return pd.Series(index=self.key_list, data=self.value_list)",
"def as_series(self, arraylike: Iterable) -> pd.Series:\n return pd.Series(arraylike, index=self.data.index)",
"def as_series(self) -> \"pd.Series\":\n import pandas as pd\n\n data = {\"_row_id\": self.id, \"_row_num\": self.num, **self.as_dict()}\n series = pd.Series(data)\n return series"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load instruments from configpath
|
def _load(self) -> list[Instrument]:
logger.info("Loading config...")
self._config = yml.load(self.configpath)
instruments, modespec = self._config["instruments"], self._config["modes"]
logger.success(f"Found {len(instruments)} instruments, {len(modespec)} modes")
|
[
"def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {\n \"arg\": \"val\"\n }\n })",
"def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))",
"def load_config(self, filepath):\r\n \r\n self.chart.load_config(filepath)",
"def load_instr(self, instr):\r\n for module_name, config_regs in instr.items():\r\n module = self.modules[module_name]\r\n module.load_config_regs(config_regs)",
"def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })",
"def __load_ini_file(file_path):\n config = ConfigParser(interpolation=ExtendedInterpolation())\n config.read(file_path)\n\n for each_section in config.sections():\n for key, value in config.items(each_section):\n ConfigurationsManager().set_object_for_key(value=value, key=key)",
"def __load_instruments(self) -> dict[str, list[Instrument]]:\r\n path = r'H:\\python\\Drums\\sound_files_wav'\r\n instruments = defaultdict(list)\r\n pattern = re.compile(r\".*?\\d-(\\w+)-.*\")\r\n for file in os.listdir(r'H:\\python\\Drums\\sound_files_wav'):\r\n match_obj = re.match(pattern, file)\r\n file_path = os.path.join(path, file)\r\n instrument = Instrument(file_path)\r\n instrument_name = match_obj.group(1)\r\n instruments[instrument_name.lower()].append(instrument)\r\n return instruments",
"def load(self, directory:str, config:dict):\r\n raise NotImplementedError()",
"def load_config(self, path=None):\n if path is not None:\n self.CONFIG_FILE = path\n self.load_main_config()\n self.load_included_files()",
"def load_sample(filename, **ctx):\n filename = os.path.join(SAMPLE_DIR, filename)\n return parse_config(filename, ctx)",
"def load_config(self):\n self.data.read(self.path)",
"def load(config):\n\n if config.sys_path:\n logger.debug(\"Appending %s to sys.path.\", config.sys_path)\n sys.path.append(config.sys_path)\n logger.debug(\"sys.path is now %s\", sys.path)\n if config.lookups:\n for key, handler in config.lookups.items():\n register_lookup_handler(key, handler)\n\n return config",
"def load_experiments(self, path):\n experiments = self.load_configuration(path)\n self.parameter = experiments['parameter']",
"def update_from_path(self, ini_path):\n self.config_parser.read(ini_path)\n self.used_config_paths.append(ini_path)",
"def load_configurations() :\n\n local_path = os.path.dirname(os.path.abspath(__file__))\n print(local_path)\n file_path = local_path + os.sep + 'conf.ini'\n parser = configparser.ConfigParser()\n\n if os.path.exists(file_path) :\n config = parser.read(file_path)\n else :\n parser['PATH'] = {}\n parser['PATH']['PATH_TO_DB'] = os.path.expanduser('~/inlusio_data/InlusioDB_Juni_2015.sqlite')\n parser['PATH']['PHYSIO_PATH'] = os.path.expanduser('~/inlusio_data')\n print('Creating new configuration file!!!')\n print('Please fit conf.ini to your local data path!')\n with open(file_path, 'w') as configfile:\n parser.write(configfile)\n\n return parser",
"def load_analysis_path():\n import json\n import os\n with open(os.path.join(os.path.dirname(__file__), \"analysis_config.json\")) as my_file:\n analysis_paths = json.load(my_file)\n return analysis_paths",
"def load_config(self) -> str:",
"def load ( self ):\n files = config.get_or_fail ( 'REPO.config_files' )\n for f in files:\n self.load_file ( f )",
"def load_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getOpenFileName(self,\n \"Open Config\",\n CONFIG_DIR,\n \"Config Files (*.cfg)\")\n else:\n file_path = path\n self._load_state(file_path)\n #self.write_text(\"Loaded config @ {}\".format(file_path))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Expose unique instrument classes found in config
|
def _expose(self) -> None:
classes = {instrument.__class__ for instrument in self._config["instruments"]}
for class_ in classes:
pyro.expose(class_)
logger.success(f"Exposed {len(classes)} instrument class(es): {classes}")
|
[
"def config(self) -> InstrumentConfig:\n ...",
"def configure_instrumented_models(self):\n # Expose Pyramid configuration to classes\n from websauna.system.model.meta import Base\n Base.metadata.pyramid_config = self.config",
"def instrument_configs(self) -> list:\n from .rss import RSS\n from .salticam import Salticam\n\n # get configs\n rss = self.get_objects(Observation.RSS, RSS)\n salticam = self.get_objects(Observation.SALTICAM, Salticam)\n return rss + salticam",
"def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls",
"def instrument_type(self):\n \n raise NotImplementedError()",
"def register_config(cls):\n _configs[cls.__name__] = cls",
"def instruments(self) -> List[str]:\n if not self._instruments:\n nameof = itemgetter(\"instrument_name\")\n self._instruments = list(map(nameof, self._fetch(\"instrument\")))\n\n return self._instruments",
"def add_instrument_to_registry(inst_spec):\n if isinstance(inst_spec, dict):\n inst = inst_spec\n elif os.path.exists(inst_spec):\n f = open(inst_spec, \"r\")\n inst = json.load(f)\n f.close()\n name = inst[\"name\"]\n if name in instrument_registry:\n raise KeyError(\"The instrument with name %s is already in the registry! Assign a different name!\" % name)\n # Catch old JSON files with plate scale\n if \"plate_scale\" in inst:\n inst[\"fov\"] = inst[\"num_pixels\"]*inst[\"plate_scale\"]/60.0\n inst.pop(\"plate_scale\")\n instrument_registry[name] = inst\n mylog.info(\"The %s instrument specification has been added to the instrument registry.\" % name)\n return name",
"def add_instrument_to_registry(inst_spec):\n if isinstance(inst_spec, dict):\n inst = inst_spec\n elif os.path.exists(inst_spec):\n f = open(inst_spec, \"r\")\n inst = json.load(f)\n f.close()\n name = inst[\"name\"]\n if name in instrument_registry:\n raise KeyError(\"The instrument with name %s is already in the registry! Assign a different name!\" % name)\n # Catch older JSON files which don't distinguish between imagings and non-imagings\n if \"imaging\" not in inst:\n mylog.warning(\"Instrument specifications must now include an 'imaging' item, which \"\n \"determines whether or not this instrument specification supports \"\n \"imaging. Default is True.\")\n inst[\"imaging\"] = True\n if \"grating\" not in inst:\n mylog.warning(\"Instrument specifications must now include an 'grating' item, which \"\n \"determines whether or not this instrument specification corresponds \"\n \"to a gratings instrument. Default is False.\")\n inst[\"grating\"] = False\n if inst[\"grating\"] and inst[\"imaging\"]:\n raise RuntimeError(\"Currently, gratings instrument specifications cannot have \"\n \"'imaging' == True!\")\n if inst['imaging']:\n # Catch older JSON files without chip definitions\n if \"chips\" not in inst:\n mylog.warning(\"Instrument specifications must now include a 'chips' item, which details \"\n \"the layout of the chips if there are more that one. Assuming None for \"\n \"one chip that covers the entire field of view.\")\n inst[\"chips\"] = None\n # Catch older JSON files without aimpoint coordinates\n if \"aimpt_coords\" not in inst:\n mylog.warning(\"Instrument specifications must now include a 'aimpt_coords' item, which \"\n \"details the position in detector coordinates of the nominal aimpoint. \"\n \"Assuming [0.0, 0.0].\")\n inst[\"aimpt_coords\"] = [0.0, 0.0]\n default_set = {\"name\", \"arf\", \"rmf\", \"bkgnd\", \"fov\", \"chips\",\n \"aimpt_coords\", \"focal_length\", \"num_pixels\",\n \"dither\", \"psf\", \"imaging\", \"grating\"}\n else:\n default_set = {\"name\", \"arf\", \"rmf\", \"bkgnd\", \"focal_length\", \"imaging\", \"grating\"}\n my_keys = set(inst.keys())\n # Don't check things we don't need\n if \"dep_name\" in my_keys:\n my_keys.remove(\"dep_name\")\n if my_keys != default_set:\n missing = default_set.difference(my_keys)\n raise RuntimeError(\"One or more items is missing from the instrument specification!\\n\"\n \"Items needed: %s\" % missing)\n instrument_registry[name] = inst\n mylog.debug(\"The %s instrument specification has been added to the instrument registry.\" % name)\n return name",
"def instrumentClass(type):\n if type == NO_INSTRUMENT_TYPE: # 0\n return None\n elif type < RANGE_INSTRUMENT_TYPE: # < 16\n return SingleNoteInstrument\n elif type == RANGE_INSTRUMENT_TYPE: # 16\n return RangeInstrument\n elif type == REGIONAL_INSTRUMENT_TYPE: # 17\n return RegionalInstrument\n else:\n raise ValueError(f'Instrument type {type} is invalid.')",
"def test_register_config_decl_duplicate1(collector, config_decl):\n collector.contributions['BaseTask'] = None\n tb = {}\n config_decl.register(collector, tb)\n assert 'PyTaskConfig' in tb",
"def _serve(self) -> None:\n for instrument in self._config[\"instruments\"]:\n uri = self._daemon.register(instrument, objectId=str(instrument))\n self._services[instrument.id] = str(uri)\n logger.success(f\"Registered {instrument} at {uri}\")\n self.uri = self._daemon.register(self, objectId=self.servername)\n logger.success(f\"Registered self at {self.uri}\")",
"def test_instrument(self, nexus_base):\n assert isinstance(nexus_base.instrument, nx.NXinstrument)",
"def sensor_classes(self):\n raise NotImplementedError",
"def add_instrument(self, mount, instrument):\n pass",
"def sampled_class(self):\n raise NotImplementedError",
"def type_name(self):\n return self.config_class.__name__.replace(\"Config\", \"\")",
"def show_instrument_registry():\n for name, spec in instrument_registry.items():\n print(\"Instrument: %s\" % name)\n for k, v in spec.items():\n print(\" %s: %s\" % (k, v))",
"def group_by_instrument(self, instruments: dict, dirpath: str) -> dict:\n\n reflectances = self.get_txt_files(dirpath)\n instruments_names = instruments.keys()\n\n for reflectance in reflectances:\n for instrument in instruments_names:\n if instrument in reflectance:\n instruments[instrument].update_reflectance(reflectance)\n return instruments"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register instrument instances and self with the daemon and store their URIs

|
def _serve(self) -> None:
for instrument in self._config["instruments"]:
uri = self._daemon.register(instrument, objectId=str(instrument))
self._services[instrument.id] = str(uri)
logger.success(f"Registered {instrument} at {uri}")
self.uri = self._daemon.register(self, objectId=self.servername)
logger.success(f"Registered self at {self.uri}")
|
[
"def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)",
"def _Register(self):\r\n self._persistor.AddHandler(self)",
"def __init__(self, instrument):\n endpoint = self.ENDPOINT.format(instrument=instrument)\n super(Instruments, self).__init__(endpoint, method=self.METHOD)",
"def register(self):\n pass",
"def register(self):\n self._register_dockyard()\n self._register_docker()",
"def __init__(self):\n self.proxys = {}\n self.observers = {}\n self.uid_gen = generator.uid_generator()",
"def add_instrument(self, mount, instrument):\n pass",
"def _register_docker(self) -> None:\n api_docker = APIDocker()\n api_docker.coresys = self.coresys\n\n self.webapp.add_routes(\n [\n web.get(\"/docker/info\", api_docker.info),\n web.get(\"/docker/registries\", api_docker.registries),\n web.post(\"/docker/registries\", api_docker.create_registry),\n web.delete(\"/docker/registries/{hostname}\", api_docker.remove_registry),\n ]\n )",
"def setup_method(self):\n self.hmc, self.hmc_resources = standard_test_hmc()\n self.uris = (\n (r'/api/adapters/([^/]+)/storage-ports/([^/]+)',\n StoragePortHandler),\n )\n self.urihandler = UriHandler(self.uris)",
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])",
"def register_publisher(self, hostname, expire=-1):",
"def register(self, target, hostname, listener_type, expire=-1):",
"def setInstrument(self,instrument):\n self.instrument = instrument\n self.instrument.attach(self)",
"def register(self, name):\n self.registered[name] = self.api(name=name, **self._all_api_kwargs)\n self.registered[name].create()\n self.logger.debug('setup {name} in storage {self!r}'.format(name=name, self=self))",
"def _register_services(self):\n base_url = self.bleemeo_base_url\n registration_url = urllib_parse.urljoin(base_url, '/v1/service/')\n\n for key, service_info in self.core.services.items():\n (service_name, instance) = key\n\n entry = {\n 'listen_addresses':\n get_listen_addresses(service_info),\n 'label': service_name,\n 'exe_path': service_info.get('exe_path', ''),\n 'stack': service_info.get('stack', ''),\n 'active': service_info.get('active', True),\n }\n if instance is not None:\n entry['instance'] = instance\n\n if key in self.services_uuid:\n entry['uuid'] = self.services_uuid[key]['uuid']\n # check for possible update\n if self.services_uuid[key] == entry:\n continue\n method = requests.put\n service_uuid = self.services_uuid[key]['uuid']\n url = registration_url + str(service_uuid) + '/'\n expected_code = 200\n else:\n method = requests.post\n url = registration_url\n expected_code = 201\n\n payload = entry.copy()\n payload.update({\n 'account': self.account_id,\n 'agent': self.agent_uuid,\n })\n\n response = method(\n url,\n data=json.dumps(payload),\n auth=(self.agent_username, self.agent_password),\n headers={\n 'X-Requested-With': 'XMLHttpRequest',\n 'Content-type': 'application/json',\n 'User-Agent': self.core.http_user_agent,\n },\n timeout=REQUESTS_TIMEOUT,\n )\n if response.status_code != expected_code:\n logging.debug(\n 'Service registration failed. Server response = %s',\n response.content\n )\n continue\n entry['uuid'] = response.json()['id']\n self.services_uuid[key] = entry\n self.core.state.set_complex_dict(\n 'services_uuid', self.services_uuid\n )",
"def add_instruments(self, instruments):\n for instrument in instruments:\n self.add_instrument(instrument)",
"def register(self, instrument, on_update):\n cbs = self._callbacks[instrument]\n if not cbs:\n self.subscribe(instrument)\n cbs.add(on_update)",
"def __init__(self, udisks):\n self.log = logging.getLogger('udiskie.daemon.Daemon')\n self.state = {}\n self.udisks = udisks\n\n self.event_handlers = {\n 'device_added': [],\n 'device_removed': [],\n 'device_mounted': [],\n 'device_unmounted': [],\n 'media_added': [],\n 'media_removed': [],\n 'device_unlocked': [],\n 'device_locked': [],\n 'device_changed': [self.on_device_changed]\n }\n\n for device in self.udisks.get_all_handleable():\n self._store_device_state(device)\n\n udisks.bus.add_signal_receiver(\n self._device_added,\n signal_name='DeviceAdded',\n bus_name='org.freedesktop.UDisks')\n udisks.bus.add_signal_receiver(\n self._device_removed,\n signal_name='DeviceRemoved',\n bus_name='org.freedesktop.UDisks')\n udisks.bus.add_signal_receiver(\n self._device_changed,\n signal_name='DeviceChanged',\n bus_name='org.freedesktop.UDisks')",
"def _set_inst_attr(self, attr, value):\n\n for instrument in self.instruments:\n setattr(instrument, attr, value)\n\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Disconnect instruments and shut down the daemon
|
def shutdown(self) -> None:
logger.info("Disconnecting instruments...")
for instrument in self._config["instruments"]:
instrument.disconnect()
logger.info(f"Shutting down {self}...")
self._daemon.shutdown()
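One detail this method depends on: Daemon.shutdown() is the supported way to make a blocking requestLoop() return, and it can be invoked from another thread or via a remote call such as this shutdown method. A hedged standalone sketch, with a timer thread standing in for the real trigger:

import threading
import Pyro5.api as pyro

daemon = pyro.Daemon()
threading.Timer(0.5, daemon.shutdown).start()  # request loop exit shortly after it starts
daemon.requestLoop()                           # returns once the shutdown request is processed
print("daemon stopped")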
|
[
"def shutdown(self):\n os.remove('/tmp/mimic_daemon')\n for address, p in self._connections.iteritems():\n if not p.returncode:\n p.terminate()\n self.daemon.shutdown()",
"def stopAndDisconnectWalabot():\n wlbt.Stop()\n wlbt.Disconnect()\n print ('Termination successful')",
"def shutdown():\n laser.shutdown()\n ccd.shutdown()",
"def shutdown(event):\n elements.REMOTE_SERVER.shutdown()\n disconnect(None)",
"def on_disconnect(self):\r\n MBClient.on_disconnect(self)\r\n\r\n #make sure engine is released and stdio streams restored\r\n self.release()\r\n\r\n #Could stop any running user code here but for remote engines the user \r\n #may want to disconnect and leave it running and connect again later. \r\n #self.stop_code(quiet=True)\r",
"def device_disconnect(self):\n pass",
"def close(self):\n logger.warning('Shutting down')\n self.display.off()\n self.mqtt.disconnect()",
"def __shutdown(self):\n try:\n self._logger.debug( \"Shutting down the device.\" )\n self.conn._serial._reset_cb = None\n self.conn._serial.reboot()\n self.bcc.heartbeat = 10\n del self.bcc\n except:\n # Interpreter Shutdown Time Pokemon Exception Handling (ISTPEH):\n # gotta catch them all!\n pass",
"def Disconnect_from_ePCSim_Server(ePCSim_conn):\r\n ePCSim_conn.Disconnect()",
"def disconnect(self):\n self.arduino.close()\n self.arduino = None",
"async def test_disconnect(self):\n mock_close = AsyncMock()\n async with self.test_lock:\n with patch.object(pyinsteon.tools, \"async_close\", mock_close):\n # Failed connection\n cmd_mgr, _, _ = self.setup_cmd_tool(\n InsteonCmd,\n [\n \"disconnect\",\n \"exit\",\n ],\n )\n await cmd_mgr.async_cmdloop(\"\")\n assert mock_close.call_count == 1",
"def shutdown():\n LIB.tfg_pitc_Terminate()",
"def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')",
"def tearDown(self):\n for host in self.java_clients: self.cmd(host, 'pkill -9 java') \n self.consumer.kill()\n self.cmd(self.fuse_kafka, 'src/fuse_kafka.py stop')\n os.remove(self.kafka_config.name)\n self.data_directories_cleanup()\n self.impersonate()\n self.net.stop()",
"def _disconnect(self):\n self.socket.send_json({\"worker_id\": self.socket_id, \"message\": \"disconnect\"})\n self.socket.close()\n self.context.term()\n exit()",
"async def callback_homeassistant_stop(self, event: \"Event\") -> NoReturn:\n _LOGGER.debug(\"Hekr system is shutting down\")\n for device_id, device in self.devices.items():\n connector = device.connector\n listener = connector.listener\n if listener is not None and listener.is_running:\n _LOGGER.debug('Shutting down listener for device ID \"%s\"' % device_id)\n listener.stop()\n\n if connector.is_connected:\n _LOGGER.debug('Shutting down connector for device ID \"%s\"' % device_id)\n await connector.close_connection()",
"def plugin_shutdown():\n collectd.info('Shutting down collectd-mlab plugin.')",
"def Stop_ePCSim(ePCSim_conn):\r\n ePCSim_conn.SendCmd(\"exit\")\r\n ePCSim_conn.prompt = \"#\"\r\n ePCSim_conn.SendCmd(\"pkill egate\")\r\n ePCSim_conn.SendCmd(\"pkill edaemon\")",
"def disconnect(self):\n self._logger.info('Disconnecting from IXIA...')\n\n self.run_tcl('cleanUp')\n\n self._logger.info('Disconnected from IXIA...')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
python ~/code/xdoctest/testing/test_linenos.py test_lineno_failcase_called_code python ~/code/xdoctest/testing/test_linenos.py
|
def test_lineno_failcase_called_code():
text = _run_case(utils.codeblock(
r'''
def func(a):
"""
Example:
>>> func(0)
>>> # this doesnt do anything
>>> print('this passes')
this passes
>>> # call the failing code
>>> func(3)
"""
if a > 0:
nested_failure(a)
return a
def nested_failure(a):
if a > 0:
nested_failure(a - 1)
else:
raise Exception('fail case')
'''))
assert 'rel: 6, abs: 9,' in text
assert text
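_run_case and utils.codeblock come from xdoctest's own testing utilities; codeblock essentially dedents the triple-quoted snippet. A standalone approximation of that workflow (an assumption, not the real helper) writes the snippet to a throwaway module and runs the xdoctest CLI over it, capturing the report text that the assertions inspect:

import os
import subprocess
import sys
import tempfile
import textwrap

def run_case(source):
    # Dedent the snippet and persist it as a temporary module.
    source = textwrap.dedent(source).strip() + "\n"
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(source)
        path = f.name
    try:
        # 'python -m xdoctest <path> all' runs every doctest found in the file.
        proc = subprocess.run([sys.executable, "-m", "xdoctest", path, "all"],
                              capture_output=True, text=True)
        return proc.stdout + proc.stderr
    finally:
        os.remove(path)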
|
[
"def test_expected_failures(modpath, expected_failure):\n code = os.path.dirname(expected_failure)\n retcode, out = flake8(join(modpath, expected_failure))\n assert retcode, \"expected failure (%s), got success\" % code\n needle = \": %s \" % code\n assert needle in out\n\n with open(os.path.join(modpath, expected_failure)) as f:\n doc = ast.get_docstring(\n ast.parse(f.read(), expected_failure),\n clean=True,\n )\n\n # keep \"literal\" lines, skip shell lines\n result_check = \"\".join(\n line + \"\\n\" for line in doc.splitlines() if line.startswith(\" RST\")\n )\n if result_check:\n modpath = os.path.join(modpath, \"\")\n assert out.replace(modpath, \" \") == result_check",
"def testPythonCode1(self):\n src = textwrap.dedent(\"\"\"\n def bar PYTHONCODE\n \"\"\")\n self.TestRoundTrip(src)",
"def test_pytest_fail_notrace_collection(pytester: Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import pytest\n def some_internal_function():\n pytest.fail(\"hello\", pytrace=False)\n some_internal_function()\n \"\"\"\n )\n result = pytester.runpytest()\n result.stdout.fnmatch_lines([\"hello\"])\n result.stdout.no_fnmatch_line(\"*def some_internal_function()*\")",
"def test_pytest_fail_notrace_runtest(pytester: Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import pytest\n def test_hello():\n pytest.fail(\"hello\", pytrace=False)\n def teardown_function(function):\n pytest.fail(\"world\", pytrace=False)\n \"\"\"\n )\n result = pytester.runpytest()\n result.stdout.fnmatch_lines([\"world\", \"hello\"])\n result.stdout.no_fnmatch_line(\"*def teardown_function*\")",
"def pytest_function(line, cell):\n import pytest as pt\n import re\n # previous_commands = globals().get('In', [])\n ip = get_ipython()\n previous_commands = ip.user_ns['_ih']\n current_command_number = len(previous_commands) - 1\n #_, test_file = tempfile.mkstemp(text=True, suffix='.py', prefix='test_')\n\n # Save a filtered command history to file\n test_file = re.search(r'def (\\w+)', cell).group(1) + \".py\"\n with open(test_file, 'w') as f:\n f.write(\n '\\n'.join(\n c for c in previous_commands\n if \"get_ipython\" not in c\n and \"@register\" not in c\n and not re.match(r\"^assert .*\", c)\n and not re.match(r\"^print .*\", c)\n )\n )\n f.write(\"\\n{}\\n\".format(cell))\n\n # Run pytest on the file\n args = ['-q'] + line.split() + [test_file]\n pt.main(args)",
"def test_pytest_fail_notrace_non_ascii(pytester: Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\\\n import pytest\n\n def test_hello():\n pytest.fail('oh oh: ☺', pytrace=False)\n \"\"\"\n )\n result = pytester.runpytest()\n result.stdout.fnmatch_lines([\"*test_hello*\", \"oh oh: ☺\"])\n result.stdout.no_fnmatch_line(\"*def test_hello*\")",
"def _test():\n import doctest\n doctest.testmod()",
"def test_doctests_run(self):\n results = doctest.testmod(lab, optionflags=TESTDOC_FLAGS, report=False)\n self.assertEqual(results[0], 0)",
"def pythonDocTest(self, pythonFile, pythonPath=None, output=None, environs=None, **kwargs):\n\t\tassert os.path.exists(os.path.abspath(pythonFile)), os.path.abspath(pythonFile)\n\t\t\n\t\tif not output: output = '%s-doctest.txt'%os.path.basename(pythonFile).replace('.py','')\n\t\tp = self.startPython(\n\t\t\targuments=['-m', 'doctest', '-v', os.path.normpath(pythonFile)],\n\t\t\tenvirons=self.createEnvirons(overrides=[environs, {\n\t\t\t\t'PYTHONPATH':None if not pythonPath else os.pathsep.join(pythonPath or [])}]),\n\t\t\tstdout=output, \n\t\t\tstderr=output+'.err', \n\t\t\tdisplayName='Python doctest %s'%os.path.basename(pythonFile),\n\t\t\tignoreExitStatus=True,\n\t\t\tabortOnError=False, \n\t\t\t**kwargs\n\t\t\t)\n\t\tmsg = 'Python doctest for %s'%(os.path.basename(pythonFile))\n\t\ttry:\n\t\t\tmsg += ': '+self.getExprFromFile(output, '\\d+ passed.*\\d+ failed') # appears whether it succeeds or fails\n\t\texcept Exception: \n\t\t\tmsg += ': failed to execute correctly'\n\t\ttry:\n\t\t\tmsg += '; first failure is: '+self.getExprFromFile(output, '^File .*, line .*, in .*')\n\t\texcept Exception:\n\t\t\tpass # probably it succeeded\n\t\t\n\t\tif p.exitStatus == 0:\n\t\t\tself.addOutcome(PASSED, msg)\n\t\telse:\n\t\t\tself.addOutcome(FAILED, msg)\n\t\t\tself.logFileContents(output+'.err') # in case there are any clues there\n\t\t\t\n\t\t\t# full doctest output is quite hard to read, so try to summarize just the failures \n\t\t\t\n\t\t\tfailures = []\n\t\t\tlines = [] # accumulate each test\n\t\t\twith openfile(os.path.join(self.output, output), encoding=locale.getpreferredencoding()) as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.rstrip()\n\t\t\t\t\tif line=='Trying:': # start of a new one, end of previous one\n\t\t\t\t\t\tif lines and lines[-1]!='ok':\n\t\t\t\t\t\t\tfailures.append(lines)\n\t\t\t\t\t\tlines = [line]\n\t\t\t\t\telif line == 'ok': # ignore if passed; needed if last test was a pass\n\t\t\t\t\t\tlines = []\n\t\t\t\t\telse:\n\t\t\t\t\t\tlines.append(line)\n\t\t\t\tif lines and lines[-1]!='ok':\n\t\t\t\t\tfailures.append(lines)\n\t\t\t\t\n\t\t\tfor failure in failures:\n\t\t\t\tlog.info('-'*20)\n\t\t\t\tfor line in failure:\n\t\t\t\t\tlog.warning(' %s'%line.rstrip())\n\t\t\t\tlog.info('')",
"def test_line_numbers(self):\n self._check_initialized()\n configuration = self._style_checker_configuration()\n error_handler = self._error_handler(configuration,\n line_numbers=[50])\n confidence = 5\n\n # Error on non-modified line: no error.\n self._call_error_handler(error_handler, confidence, line_number=60)\n self.assertEquals(0, self._error_count)\n self.assertEquals([], self._error_messages)\n\n # Error on modified line: error.\n self._call_error_handler(error_handler, confidence, line_number=50)\n self.assertEquals(1, self._error_count)\n self.assertEquals(self._error_messages,\n [\"foo.h(50): message [whitespace/tab] [5]\\n\"])",
"def test_pass_with_docstring(self):\n pass",
"def test_doctest_invalid_import(capsys: \"CaptureFixture\") -> None:\n main([\"doctest\", INVALID_IMPORT_NOTEBOOK])\n\n _, err = capsys.readouterr()\n assert \"ModuleNotFoundError: No module named 'thisdoesnotexist'\" in err",
"def test_multiLineSyntaxError(self):\r\n err = StringIO()\r\n lines = [\r\n 'bad line of source',\r\n 'more bad lines of source',\r\n ]\r\n reporter = Reporter(None, err)\r\n reporter.syntaxError('foo.py', 'a problem', 3, len(lines[0]) + 5,\r\n '\\n'.join(lines))\r\n self.assertEqual(\r\n (\"foo.py:3: a problem\\n\" +\r\n lines[-1] + \"\\n\" +\r\n \" ^\\n\"),\r\n err.getvalue())",
"def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)",
"def test_removed_xreadlines(self):\n # NICE_TO_HAVE\n code = \"import os\\nwith open(os.path.realpath(__file__)) as f:\" \\\n \"\\n\\tf.{0}\"\n old, good1, good2 = 'xreadlines', 'readline', 'readlines'\n suggs = [quote(good1), quote(good2), \"'writelines'\"]\n old_code, new_code1, new_code2 = format_str(code, old, good1, good2)\n before, after = before_and_after((3, 0))\n self.runs(old_code, before)\n self.throws(old_code, ATTRIBUTEERROR, suggs, after)\n self.runs(new_code1)\n self.runs(new_code2)",
"def pytest_doctest_prepare_content(content):",
"def test_syntax(self):\n self.assertEqual(1, 1)",
"def test_log_mock():\n import doctest\n from calcloud import log\n\n doctest_result = doctest.testmod(log)\n assert doctest_result[0] == 0, \"More than zero doctest errors occurred.\" # test errors\n assert doctest_result[1] >= 17, \"Too few tests ran, something is wrong with testing.\" # tests run",
"def test_previous_line(self):\n before_b = \"\"\"\\\n a\n\n b\n \"\"\"\n after_b = \"\"\"\\\n a\n\n b\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.0\", \"3.0\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"previous-line\",\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add to the list of describing adjectives.
|
def add_adjectives(self, adjective):
self.adjectives += [adjective]
|
[
"def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)",
"def add_adnotation(self, adnotation):\n adnotations = list([self.decoding_dict[v] for v in self.get_item_list()])\n adnotations.append(adnotation)\n adnotations.sort(key = lambda item: item.pos)\n self.decoding_dict = dict()\n res = list()\n for a in adnotations:\n text = str(len(res) + 1) + '. ' + a.text\n self.decoding_dict[text] = a\n res.append(text)\n self.clear()\n self.add_item_list(res)",
"def advices(\n self,\n ) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[\n global___Advice\n ]:",
"def add_many_descriptors(self, descriptors):",
"def add(self, thought):\n pass",
"def addObjective(self, objective):\n \n self.objectives.append(objective)\n self.module_order.append(objective)",
"def add(self, concept):\n raise NotImplementedError()",
"def addExample(self, example):\n self.exampleList.append(example)",
"def add(self, PlugLead):\n\n self.check_conflicts(PlugLead)\n self.plugleads.append(PlugLead)",
"def add_disease(self, disease):\n self.diseases.append(disease)",
"def add_to_initiative(self):\n\t\treturn",
"def addObjective(self, *args):\n return _libsbml.ListOfObjectives_addObjective(self, *args)",
"def add_experience(doc, experience):\n doc = add_title(doc, \"EXPERIENCE PROFESSIONNELLE\")\n for exp in experience :\n # ADD JOB TITLE LINE\n p = doc.add_paragraph(exp['title'])\n p.paragraph_format.space_before = Cm(0.2)\n p.paragraph_format.space_after = Cm(0)\n p.paragraph_format.line_spacing = 1\n for run in p.runs:\n run.bold = True\n \n # ADD COMPANY NAME, ADRESS, DATES\n p = doc.add_paragraph(exp['company'] + ', '+ (9*\"\\t\").join([exp['site'], exp['date']]))\n p.paragraph_format.line_spacing = 1\n p.paragraph_format.space_before = Cm(0) \n p.paragraph_format.space_after = Cm(0) \n for run in p.runs:\n run.italic = True\n \n # ADD CONTENT\n if exp['text'] != \"\" :\n p = doc.add_paragraph(exp['text'])\n p.paragraph_format.line_spacing = 1\n \n # ADD BULLETPOINTS\n for task in exp['tasks'] :\n p = doc.add_paragraph(task, style='List Bullet')\n p.paragraph_format.line_spacing = 1\n p.paragraph_format.space_before = Cm(0)\n p.paragraph_format.space_after = Cm(0)\n \n doc.add_paragraph('Environnement : ' + ', '.join(exp['Environnement']))\n\n return doc",
"def add_aov(self, aov: AOV) -> None:\n self.aovs.append(aov)",
"def AddConcept(self, concept):\n self.concepts.append(concept)",
"def add_episode_experience(self, states, next_states, rewards, actions, dones):\n self.states.append(states)\n self.next_states.append(next_states)\n self.rewards.append(rewards)\n self.actions.append(actions)\n self.dones.append(dones)",
"def __add__ (self):\r\n\t\tif cate not in self._cate:\r\n\t\t\traise KeyError(\"La catégorie {} n'existe pas dans la base de donnée\".format(cate))\r\n\t\telse:\r\n\t\t\tfor cate, aliment in self.items():\r\n\t\t\t\tself._cate[cate] = aliment",
"def add(self, li):\n # determine the total number of interactions\n for i in li:\n if i.idx != []:\n self.nimax = max(self.nimax,max((i.idx)))+1\n\n for i in li:\n self.addi(i)",
"def addParagraph(self, p: Paragraph):\n self.paragraphs.append(p)\n for i in range(p.sentenceCount()):\n self.addSentence(p.getSentence(i))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the list of describing adjectives. The list is shuffled first because generally this is used to get a random adjective.
|
def get_adjectives(self):
random.shuffle(self.adjectives)
return self.adjectives
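Note that random.shuffle reorders self.adjectives in place, so every call permanently changes the stored order as a side effect. If the goal is just one random adjective, random.choice avoids that mutation; the following is an alternative sketch, not what the original class does:

import random

def random_adjective(adjectives):
    # Pick a single adjective without reordering the caller's list.
    # Assumes the list is non-empty; random.choice raises IndexError otherwise.
    return random.choice(adjectives)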
|
[
"def get_adjectives(lyrics):\n doc = nlp(lyrics.lower())\n all_adjectives = [token.lemma_ for token in doc if token.pos_ == \"ADJ\"]\n return all_adjectives",
"def adjectives_sorted(lyrics):\n adjectives = get_adjectives(lyrics)\n sorted_adjectives = Counter(adjectives)\n return sorted_adjectives",
"def get_feature_descriptions(self):\n return flatten_to_list(map(lambda x: (x.get_feature_name(), x.get_feature_description()),\n\t\t\t\t\tself.feature_generators\n\t\t\t\t\t))",
"def descriptions(self):\n return self._descriptions",
"def getAllDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.extend(worldItems[item][DESCWORDS])\r\n return list(set(descWords))",
"def _read_advantages(root):\n output_list = []\n for _, value in enumerate(root[0][3]):\n output_list.append(Advantage(value))\n return output_list",
"def get_advice():\n json_response = random_adviceslip()\n advice = parse_advice(json_response=json_response)\n return advice",
"def list_ideas():\n pass",
"def get_objectives(self):\n return copy.deepcopy(self.objectives), self.gates_names",
"def get_advisories(self):\n\n advisories = []\n\n for i in range(len(self.__data['advisories'])):\n data = requests.get(self.__data['advisories'][i]['links']['self']['href'], headers=getHeaders()).json()\n this = {}\n this['id'] = data['id']\n this['name'] = data['name']\n advisories.append(this)\n\n return advisories",
"def augment_list() -> List: \n l = [(RandomBrightnessAdjust,0.1,1.1),\n (RandomHorizontalFlip, 0.5, 0.5),\n (RandomRotate, -30, 30),\n (RandomShear, -0.2, 0.2), \n (RandomContrastAdjust,0.1,1.1),\n (RandomGammaCorrection,0.05,0.95),\n (RandomSaturationAdjust,0.1,1.1),\n (RandomHueAdjust,-0.5,0.5),\n (RandomShapren,0.,1.),\n (RandomGaussianBlur,0.,2.),\n ]\n \n return l",
"def get_associated_adjectives(word, n):\n # Get out if this isn't a single word or if this is already an\n # adjective\n if \" \" in word or is_word_adjective(word):\n return [word]\n\n # Instantiate Datamuse API client\n datamuse_client = datamuse.Datamuse()\n\n results = [hit[\"word\"] for hit in datamuse_client.words(rel_jjb=word, max=n)]\n\n return [word] + results",
"def defaultdesc(self):\n return random.choice((\n \"Carries a little bit of everything treasure.\",\n \"Greater and Lesser artifacts of all kinds.\",\n \"Everything your momma said you'd need.\",\n \"General treasure: a wholesale possibility\",\n \"Beware of Aisle 6: Traps!\",\n \"Blue Light Special: Lightning Rods!\",\n ))",
"def generate_all_descriptions(env_params):\n\n p = env_params.copy()\n\n # Get the list of admissible attributes and split them by name attributes (type and categories) and other different attributes.\n name_attributes = env_params['name_attributes']\n colors_attributes = env_params['colors_attributes']\n positions_attributes = env_params['positions_attributes']\n drawer_door_attributes = env_params['drawer_door_attributes']\n any_all_attributes = env_params['any_all_attributes']\n rgbb_attributes = env_params['rgbb_attributes']\n\n\n all_descriptions = ()\n if 'Throw' in p['admissible_actions']:\n throw_descriptions = []\n for i in range(env_params['max_nb_objects'] + 1):\n throw_descriptions.append('Throw {} objects on the floor'.format(i))\n all_descriptions += tuple(throw_descriptions)\n\n if 'Open' in p['admissible_actions']:\n open_descriptions = []\n for d in drawer_door_attributes:\n open_descriptions.append('Open the {}'.format(d))\n all_descriptions += tuple(open_descriptions)\n \n\n if 'Close' in p['admissible_actions']:\n close_descriptions = []\n for d in drawer_door_attributes:\n close_descriptions.append('Close the {}'.format(d))\n all_descriptions += tuple(close_descriptions)\n\n\n if 'Grasp' in p['admissible_actions']:\n grasp_descriptions = []\n for c in colors_attributes:\n grasp_descriptions.append('Grasp any {} object'.format(c))\n for ca in colors_attributes + ('any',):\n for n in name_attributes:\n grasp_descriptions.append('Grasp {} {}'.format(ca, n))\n all_descriptions += tuple(grasp_descriptions)\n\n if 'Move' in p['admissible_actions']:\n move_descriptions = []\n for c in colors_attributes:\n move_descriptions.append('Move any {} object'.format(c))\n for ca in colors_attributes + ('any',):\n for n in name_attributes:\n move_descriptions.append('Move {} {}'.format(ca, n))\n all_descriptions += tuple(move_descriptions)\n\n if 'Put' in p['admissible_actions']:\n put_descriptions = []\n for a in any_all_attributes:\n for c in colors_attributes:\n for pos in positions_attributes:\n put_descriptions.append('Put {} {} object {}'.format(a, c, pos))\n for ca in colors_attributes + any_all_attributes:\n for n in name_attributes:\n for pos in positions_attributes:\n put_descriptions.append('Put {} {} {}'.format(ca, n, pos))\n all_descriptions += tuple(put_descriptions)\n\n\n if 'Hide' in p['admissible_actions']:\n hide_descriptions = []\n for a in any_all_attributes:\n for c in colors_attributes:\n hide_descriptions.append('Hide {} {} object'.format(a, c))\n for ca in colors_attributes + any_all_attributes:\n for n in name_attributes:\n hide_descriptions.append('Hide {} {}'.format(ca, n))\n all_descriptions += tuple(hide_descriptions)\n\n\n if 'Turn on' in p['admissible_actions']:\n turn_on_descriptions = []\n for r in rgbb_attributes:\n turn_on_descriptions.append('Turn on the {} light'.format(r))\n all_descriptions += tuple(turn_on_descriptions)\n\n\n if 'Turn off' in p['admissible_actions']:\n turn_off_descriptions = []\n for r in rgbb_attributes:\n turn_off_descriptions.append('Turn off the {} light'.format(r))\n all_descriptions += tuple(turn_off_descriptions)\n\n\n if 'Make' in p['admissible_actions']:\n make_descriptions = []\n for c in colors_attributes:\n make_descriptions.append('Make the panel {}'.format(c))\n all_descriptions += tuple(make_descriptions)\n\n if 'Paint' in p['admissible_actions']:\n color_descriptions = []\n for a in any_all_attributes:\n for c1 in colors_attributes:\n for c2 in sorted(tuple(set(colors_attributes) - set(list(c1)))):\n 
color_descriptions.append('Paint {} {} object {}'.format(a, c1, c2))\n for c1 in colors_attributes:\n for n in name_attributes:\n for c2 in sorted(tuple(set(colors_attributes) - set([c1]))):\n color_descriptions.append('Paint {} {} {}'.format(c1, n, c2))\n for a in any_all_attributes:\n for n in name_attributes:\n for c2 in colors_attributes:\n color_descriptions.append('Paint {} {} {}'.format(a, n, c2))\n all_descriptions += tuple(color_descriptions)\n\n\n\n\n train_descriptions = []\n test_descriptions = []\n for descr in all_descriptions:\n to_remove = False\n for w in p['words_test_set_def']: # words_test_set_def is the set of occurrences that is reserved to the testing set.\n if w in descr:\n to_remove = True\n break\n if not to_remove:\n train_descriptions.append(descr)\n else:\n test_descriptions.append(descr)\n \n train_descriptions = tuple(sorted(train_descriptions))\n test_descriptions = tuple(sorted(test_descriptions))\n\n return train_descriptions, test_descriptions, all_descriptions",
"def _choose_babble_phrases(self) -> tuple:\n noun_choices = ('singular nouns', 'plural nouns')\n noun_choice = self.random_element(noun_choices)\n\n adjective_choices = (\n 'adjectives starting with consonant',\n 'adjectives starting with vowel')\n\n if noun_choice == 'singular nouns':\n article_choice = self.random_element(self.article_choices)\n else:\n article_choice = 'the'\n\n if article_choice == 'an':\n adjective_choice = 'adjectives starting with vowel'\n elif article_choice == 'a':\n adjective_choice = 'adjectives starting with consonant'\n else:\n adjective_choice = self.random_element(adjective_choices)\n\n return (\n self.technobabble['verbs'],\n article_choice,\n self.technobabble[adjective_choice],\n self.technobabble[noun_choice])",
"def getEssentialList(self):\n return self.essentials",
"def card_fields_in_order(self) -> List[str]:\n card_in_anki_order = [self.word, self.pronunciation, self.sentence,\n self.definitions, self.book_title, self.author]\n return card_in_anki_order",
"def add_adjectives(self, adjective):\n self.adjectives += [adjective]",
"def get_matches_description(self):\n description = []\n for match in self.matches:\n description.append(match.opponents)\n\n return description"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the noun, including all its describing adjectives, as a string.
|
def full_string(self):
return "{}: {}".format(str(self.word), " ".join([str(adj) for adj in self.adjectives]))
|
[
"def sentence():\r\n return nounPhrase() + \" \" + verbPhrase()",
"def getNouns(self):\n return self.nouns",
"def nounPhrase():\r\n return random.choice(articles) + \" \" + random.choice(nouns)",
"def replaceNouns(self):\n textacy.extract.named_entities\n return self.sentence",
"def verbPhrase():\r\n return random.choice(verbs) + \" \" + nounPhrase() + \" \" + prepositionalPhrase()",
"def get_noun(self, line):\n nlp = spacy.load(\"en_core_web_sm\")\n for token in nlp(line):\n if token.pos_ == \"NOUN\":\n return token.text",
"def en_noun(t, label):\r\n s = label\r\n p = []\r\n is_uncountable = False\r\n \r\n # http://en.wiktionary.org/wiki/Template:en-noun\r\n head = t.arg(\"head\", label)\r\n p1 = t.arg(0)\r\n p2 = t.arg(1)\r\n \r\n if p1 == \"-\":\r\n # uncountable\r\n is_uncountable = True\r\n \r\n if p2 == \"s\":\r\n # ends by s\r\n p.append(head + \"s\")\r\n \r\n elif p2 is not None:\r\n # word\r\n p.append(p2)\r\n \r\n elif p1 == \"es\":\r\n # add es\r\n p.append(head + \"es\")\r\n \r\n elif p1 is not None:\r\n # use term\r\n p.append(p1)\r\n \r\n elif p1 is None and p2 is None:\r\n p.append(head+\"s\")\r\n\r\n for k,a in t.args.items():\r\n if not a.is_named():\r\n if k == 0 or k == 1:\r\n continue\r\n \r\n p.append(a.as_string())\r\n \r\n return (s, p, is_uncountable)",
"def get_lemmatised_dlh_proper_nouns():\n file_path = os.path.join(PACKDIR,\n Work.DLH.get_main_directory(),\n constants.LEMMATISED_DLH_PROPER_NOUNS)\n return utils.get_annotated_proper_nouns_txt(file_path)",
"def noun(self) -> int:\n assert self._dram, \"no noun - empty dram\"\n return self._dram[1]",
"def getNouns(words):\n nounTags = [\"NN\",\"NNS\",\"NNP\",\"NNPS\",\"FW\"]\n return tagWords(words, nounTags)",
"def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())",
"def to_adverb(self):\n\n if 'AdverbPhrase' in self.variants:\n return self.variants['AdverbPhrase']\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=\"with \" + self.text)",
"def noun_string(data_org):\n chains = []\n tokens = word_tokenize(data_org)\n #tokenize to prepare for tagging\n w_tag = dict(nltk.pos_tag(tokens))\n chain = []\n for w, tag in w_tag.items():\n #find all nouns based on treebank format\n if tag.startswith('N'):\n chain.append(w)\n else:\n if len(chain) >= 3:\n chains.append(\" \".join(chain))\n chain = []\n\n #move information to dataframe for printing to excel\n df_noun_string = pd.DataFrame({'Noun Strings (3+ Nouns in a row)': chains}, columns = ['Noun Strings (3+ Nouns in a row)'])\n return df_noun_string",
"def get_poem_name(self):\n nouns = self.get_noun_list()\n adjectives = self.get_adj_list()\n num_nouns = len(nouns)\n num = random.randint(0, num_nouns-1)\n noun = nouns[num]\n num_adj = len(adjectives)\n num = random.randint(0, num_adj-1)\n adj = adjectives[num]\n name = adj + \" \" + noun\n return name",
"def noun_id(item):\n return get_name(item) + '-noun-lex'",
"def get_compound_noun( ngram ):\n try:\n pattern = re.compile( '((?: ?\\\\b[^\\\\s]+(?:/NN.?/[a-z]+/[\\\\d]+)){2,})' )\n match = re.search( pattern, ngram )\n if match:\n compound = ''\n contains_root = False\n tokens = match.group().strip().split(' ')\n for t in tokens:\n # embed()\n items = t.split('/')\n compound += ( items[0] + ' ' )\n if items[3] == 0:\n contains_root = True\n if contains_root:\n return compound\n else:\n return None\n else:\n return None\n \n except ValueError:\n return None",
"def random_noun():\n return petname.Name()",
"def get_article_str(article_sents):\n article_str = \"\"\n for nlp_sent in article_sents:\n article_str += (' ' + nlp_sent.text + ' ')\n return article_str",
"def generalNote_to_string(gn):\n out_string = \"\"\n # add generalNote type (Rest or Note)\n if gn.isRest:\n out_string += \"R\"\n else:\n out_string += \"N\"\n # add notehead information (4,2,1,1/2, etc...). 4 means a black note, 2 white, 1 whole etc...\n type_number = Fraction(duration.convertTypeToNumber(gn.duration.type))\n if type_number >= 4:\n out_string += \"4\"\n else:\n out_string += str(type_number)\n # add the dot\n n_of_dots = gn.duration.dots\n for _ in range(n_of_dots):\n out_string += \"*\"\n return out_string"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse a noun object from a data file containing nouns and their describing adjectives.
|
def parse(text):
parts = text.split(' ')
noun = Noun(parts[0], int(parts[1]))
parts = parts[2:]
while len(parts) > 0:
noun.add_adjectives(Word(parts[0], int(parts[1])))
parts = parts[2:]
return noun
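A hedged usage sketch. The data-file format is inferred from the parser itself: space-separated word/number pairs, where the first pair names the noun and each later pair is an adjective. The sample line and the minimal Word/Noun stand-ins are assumptions; the real classes (with the add_adjectives and full_string methods shown earlier) live elsewhere in the source.

class Word:
    # Minimal stand-in: a word plus an integer weight.
    def __init__(self, word, weight):
        self.word = word
        self.weight = weight
    def __str__(self):
        return self.word

class Noun(Word):
    # Minimal stand-in combining the noun methods shown above.
    def __init__(self, word, weight):
        super().__init__(word, weight)
        self.adjectives = []
    def add_adjectives(self, adjective):
        self.adjectives += [adjective]
    def full_string(self):
        return "{}: {}".format(str(self.word), " ".join(str(adj) for adj in self.adjectives))

def parse(text):
    parts = text.split(' ')
    noun = Noun(parts[0], int(parts[1]))
    parts = parts[2:]
    while len(parts) > 0:
        noun.add_adjectives(Word(parts[0], int(parts[1])))
        parts = parts[2:]
    return noun

noun = parse("dragon 7 fierce 3 ancient 2")  # hypothetical line from the data file
print(noun.full_string())                    # dragon: fierce ancient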
|
[
"def extractNouns(filepath, debug=False):\n try:\n text = open(filepath).read()\n except:\n print(\"No such file found. Aborting...\")\n exit()\n \n is_noun = lambda pos: pos[:2] == 'NN'\n # do the nlp stuff\n tokenized = nltk.word_tokenize(text)\n nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)] \n return nouns",
"def ParserIdentifyNoun(self,StartPos,EndPos):\r\n\r\n \r\n #-------------------\r\n # Create Empty Lists\r\n #-------------------\r\n\r\n # We have to create lists for one object's adjectives and nouns. We\r\n # use a list for nouns (even though an object only has one) because\r\n # it's possible for more than one object to have the same noun.\r\n #\r\n # The Return list returns all objects found (the command list might\r\n # have listed multiple objects).\r\n\r\n AdjectiveList = []\r\n NounList = []\r\n ReturnList = []\r\n \r\n \r\n #----------------------------------\r\n # Loop through each word in command\r\n #----------------------------------\r\n\r\n # In English: \"For each word in the Active command list from the\r\n # starting postion up to (but not including) the ending position,\r\n # do the following...\"\r\n #\r\n # We take advantage of the fact that all non-verb/preposition words\r\n # in the command pass through this function eventually. The \r\n \r\n for word in self.ActiveCommandList[StartPos:EndPos]:\r\n \r\n #-------------------\r\n # Word is adjective?\r\n #-------------------\r\n\r\n # If the word is an adjective, look up all objects that have\r\n # that adjective and append them to the adjective's list.\r\n\r\n if self.AdjsDict.has_key(word):\r\n AdjectiveList = Union(AdjectiveList,self.AdjsDict[word])\r\n DebugTrace(word + \" is Adjective\")\r\n\r\n \r\n #--------------\r\n # Is word noun?\r\n #--------------\r\n\r\n # When the word is a noun we basically append all objects that\r\n # have that noun, then (if there were adjectives) intersect the\r\n # noun and adjective list. Then we append all the objects in\r\n # the trimmed down NounList to the ReturnList (the list of\r\n # objects returned by this function).\r\n\r\n if self.NounsDict.has_key(word):\r\n DebugTrace(word + \" is Noun\")\r\n NounList = Union(NounList,self.NounsDict[word])\r\n\r\n if len(AdjectiveList) > 0:\r\n NounList = Intersect(NounList,AdjectiveList)\r\n\r\n ReturnList.append(NounList)\r\n AdjectiveList = []\r\n NounList = []\r\n\r\n \r\n #----------------\r\n # Is Word Adverb?\r\n #----------------\r\n\r\n # If the word is in the adverbs dictionary append the entry(s) to the \r\n # current adverb list. Normally an adverb will be only one object, but\r\n # it doesn't hurt to have more than one adverb object use the same\r\n # word.\r\n \r\n if self.AdverbsDict.has_key(word):\r\n self.CurrentAdverbList = Union(self.CurrentAdverbList,self.AdverbsDict[word])\r\n DebugTrace(word + \" is Adverb\")\r\n\r\n\r\n \r\n #--------------------------\r\n # Return Found Objects List\r\n #--------------------------\r\n\r\n # Now it's time to return the list of objects we parsed from the\r\n # section of the active command list. Note it's very possible for\r\n # this function to return an empty list.\r\n\r\n return ReturnList",
"def load_file(filename):\n conll_file = conll.CoNLLFile(filename)\n data = conll_file.get(['lemma'], as_sentences=True)\n return data",
"def extract_nouns_from_stanford_pos():\n noun_list_after_chunk = []\n\n pos_tagged_text = database.fetach_pos_tagged_sentence()\n\n chunk_reg_express = r\"\"\"NP: {<JJ>*<NN.*>}\"\"\" \n chunk_parsar = nltk.RegexpParser(chunk_reg_express)\n\n for review_id, pos_tagged_content in pos_tagged_text:\n \n pos_tagged_list = eval(pos_tagged_content)\n\n chunked = chunk_parsar.parse(pos_tagged_list)\n noun_list_per_sentence = []\n for subtree in chunked.subtrees(filter=lambda chunk_label: chunk_label.label() == 'NP'): \n noun_list_per_sentence.append(\" \".join(word for word, pos in subtree.leaves() if word not in noun_list_per_sentence))\n\n if noun_list_per_sentence:\n combine_value = (review_id, noun_list_per_sentence)\n noun_list_after_chunk.append(combine_value)\n\n # Filtra stopwords da lista de aspectos candidatos\n noun_list_without_stopwords = pre_processing.filter_stopwords(noun_list_after_chunk)\n database.insert_nouns_list_per_sentence_into_db(noun_list_without_stopwords)\n database.insert_single_candidate_aspect_per_row(noun_list_without_stopwords)",
"def noun_phrase_chunking(part_of_speech_data):\n\n grammar = r\"\"\"\n NP: {<DT|JJ|NN.*>+}\n PP: {<IN><NP>}\n VP: {<VB.*><NP|PP|CLAUSE>+$}\n CLAUSE: {<NP><VP>}\n \"\"\"\n\n grammar2 = r\"\"\"\n NP: {<DT|NN>+} # Chunk sequences of NN and DT\n {<DT><JJ><NN>} # Chunk det+adj+noun\n \"\"\"\n\n return RegexpParser(grammar).parse(part_of_speech_data).draw()",
"def parse(self, word):\n word = self.son.segs(word)\n son_map = self._sonority_map(word)\n son_map = self._mark_offglides(son_map)\n son_map = self._adjust_anom_fric_cod(son_map)\n son_map = self._adjust_anom_fric_ons(son_map)\n ons_son = self._initial_onset(son_map)\n cod_son = self._final_coda(son_map)\n ons = self.from_map(ons_son, word)\n cod = self.from_reverse_map(cod_son, word)\n return (ons, cod)",
"def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def extract_nouns(article_content):\r\n #load spacy for English\r\n doc = nlp(article_content)\r\n compound_val = '';#For compound words, Ex: First token is Donald, and second token is Trump.\r\n #It would be better to do a wiki search for \"Donald Trump\" than \"Donald\" and \"Trump\" separately \r\n dict_nouns = {};\r\n for token in doc:\r\n #print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,\r\n # token.shape_, token.is_alpha, token.is_stop, token.ent_type_)\r\n isCapitalToken = token.text.isupper();#To show words like CNN as all case capital in UI.\r\n #recommendations and heuristics : Fine tuning candidates\r\n #Candidates are chosen with this algorithm:\r\n #1) If word is a proper noun and not too short word length \r\n #2)If word is a noun that is not too short and not belonging to WH words like who, what, where.\r\n #3)We don't need Date nouns.\r\n #4)We don't want proper nouns who don't have a definite entity type.\r\n token_val = token.text.rstrip().lstrip().lower();\r\n if (token.pos_ == 'PROPN' and len(token.text) > 2 and token.ent_type_ != \"\") or \\\r\n (token.pos_ == 'NOUN' and token.tag_ != 'WP' and len(token.text) > 3 ):\r\n if(token.ent_type_ != 'DATE' and token.ent_type_ != 'TIME'):\r\n wr = RankedWord(token_val, (token.pos_ == 'PROPN'), isUpper = isCapitalToken)\r\n if (wr.getword() not in dict_nouns):\r\n dict_nouns [wr.getword()] = wr;\r\n elif token_val in dict_nouns:\r\n #Some tokens like a date could be considered Proper Noun in some context and noun in some content\r\n #In those case, remove it\r\n del dict_nouns[token_val];\r\n elif token_val in dict_nouns:\r\n del dict_nouns[token_val];#remove if it is not a noun in another context\r\n \r\n #Searching for compound values to get more specific results\r\n if (token.pos_ == 'NOUN' or token.pos_ == 'PROPN') and token.dep_ == 'compound':\r\n compound_val += ' ' + token.text;#First time compound word, save it\r\n elif compound_val != '':#Earlier word was compound\r\n if token.pos_ != \"PART\":\r\n compound_val += \" \" + token.text; \r\n compound_val = compound_val.lstrip().rstrip();\r\n rw = RankedWord(compound_val, (token.pos_ == 'PROPN'));\r\n wiki_phrase = search_wiki(rw, article_content, dict_nouns);\r\n if wiki_phrase is not None:\r\n rw = RankedWord(wiki_phrase, (token.pos_ == 'PROPN'));\r\n dict_nouns[rw.getword()] = rw;\r\n compound_val = '';\r\n else:\r\n compound_val = '';#clearing once compound done\r\n return dict_nouns;",
"def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'",
"def parse_label(self):\n # TODO: make this work with attached labels as well as\n # stand alone labels.\n # Save the RAW full text of the label to self._raw\n input_stream = FileStream(self.infile)\n lexer = ODLv21Lexer(input_stream)\n tokens = CommonTokenStream(lexer)\n\n parser = ODLv21Parser(tokens)\n parse_tree = parser.label()\n self._parse_tree = parse_tree\n visitor = Pds3LabelVisitor()\n visitor.visit(parse_tree)\n return visitor.root_dict",
"def _load_prond_from_file(fname: str) -> dict:\r\n \r\n prond = {}\r\n with open(fname, 'r', errors='ignore') as f:\r\n \r\n line = f.readline().strip()\r\n while line:\r\n \r\n if line[0].isalpha():\r\n word, pron = line.split(' ')\r\n \r\n # Ignore secondary pronunciations\r\n # Lowercase all\r\n if not word.endswith(')'):\r\n prond[word.lower()] = pron.split()\r\n\r\n line = f.readline().strip()\r\n \r\n return prond",
"def parse(las_file):\n io_stream = io.TextIOWrapper(las_file)\n \n entry_date = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n entry_filename = 'las_file-' + entry_date + '.las'\n\n entry = SectionInfo()\n entry.filename = entry_filename\n section = ''\n\n for line in io_stream.readlines():\n\n line = line.rstrip()\n\n if not line:\n continue\n\n # Lines beginning with '~' denote the next section header.\n if line[0] == '~':\n section = line\n continue\n # Skip comment lines.\n elif line[0] == '#':\n continue\n\n # LAS standard option 'OTHER' section\n if section[1] == 'O': \n entry.value = line\n entry.section = section\n # The rest of the standard metadata sections\n elif section[1] in ['V', 'W', 'C', 'P']:\n entry = parse_formatted_section_line(section, line, entry)\n # the data section and non-standard sections\n else:\n # print(\"Non-Metadata-Section: [{}]: [{}]\".format(section[0:2], line))\n continue\n\n # Write entry to db\n entry.save()\n\n # Initialize next entry\n entry = SectionInfo()\n entry.filename = entry_filename\n\n return entry_filename",
"def get_lemmatised_dlh_proper_nouns():\n file_path = os.path.join(PACKDIR,\n Work.DLH.get_main_directory(),\n constants.LEMMATISED_DLH_PROPER_NOUNS)\n return utils.get_annotated_proper_nouns_txt(file_path)",
"def translate_nouns_from_reddit_posts(self) -> None:\n titles = self.download_post_titles()\n english_nouns = RedditNounTranslator.extract_nouns_from_titles(titles)\n translated_noun_pairs = self.make_translated_noun_pairs(english_nouns)\n self.write_pairs_to_output_file(translated_noun_pairs)",
"def _load_phrase_labels(self):\n self.d_phr2label = {}\n with codecs.open(self.annotation_file, encoding='utf-8') as s_label_file:\n count = 0\n for line in s_label_file:\n count += 1\n if count > self.annotation_count:\n break\n (label, phrase) = line.strip(\"\\n\").split(\"\\t\")\n # only useful labels are y and n\n if label in (\"y\", \"n\"):\n self.d_phr2label[phrase] = label",
"def parse_file(fname, path, label):\n record = {'class': label}\n f = open(path, 'r')\n record['attribute'] = extract_words(f)\n record['name'] = fname.decode(\"utf-8\")\n return record",
"def setNouns(self):\n self.nouns = [noun[0].lower() for noun in self.getTokenPOSTags() if noun[1][0] == \"N\" and noun[0].lower() in self.tokens]",
"def get_noun_phrases(blob):\n return blob.noun_phrases"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the self.guessed_by and self.metaphors_used data as a readable string.
|
def get_str_metadata(self):
return "\n".join(["Guessed by {}".format(self.guessed_by), "{} metaphors used".format(self.metaphors_used)])
|
[
"def to_strings(self):\n str1 = \"Matches: {0}\".format(self.matches)\n str2 = \"Inliers: {0}\".format(self.inliers)\n str3 = \"Inlier ratio: {0:.2f}\".format(self.ratio)\n str4 = \"Keypoints: {0}\".format(self.keypoints)\n str5 = \"FPS: {0:.2f}\".format(self.fps)\n return str1, str2, str3, str4, str5",
"def get_human_readable(self):\n\n def yesno(key):\n if getattr(self, key) and getattr(self, key) > 0:\n return \"Y\"\n else:\n return \"N\"\n\n keys = (\n \"pvs1\",\n \"ps1\",\n \"ps2\",\n \"ps3\",\n \"ps4\",\n \"pm1\",\n \"pm2\",\n \"pm3\",\n \"pm4\",\n \"pm5\",\n \"pm6\",\n \"pp1\",\n \"pp2\",\n \"pp3\",\n \"pp4\",\n \"pp5\",\n \"ba1\",\n \"bs1\",\n \"bs2\",\n \"bs3\",\n \"bs4\",\n \"bp1\",\n \"bp2\",\n \"bp3\",\n \"bp4\",\n \"bp5\",\n \"bp6\",\n \"bp7\",\n )\n result = \", \".join([\"%s: %s\" % (key.upper(), yesno(key)) for key in keys])\n result += \", ACMG classification: %s\" % self.class_auto\n if self.class_override:\n result += \", ACMG class. override: %s\" % self.class_override\n return result",
"def get_pair_str(self):\n return self.sub_contest.reported_winner + '-' + self.sub_contest.reported_loser",
"def __str__(self):\n header = [\n ' GnoweeHeuristics:']\n header += [('Population = {}').format(self.population)]\n header += [('Sampling Method = {}').format(self.initSampling)]\n header += [('Discovery Fraction = {}').format(self.fracMutation)]\n header += [('Elitism Fraction = {}').format(self.fracElite)]\n header += [('Levy Fraction = {}').format(self.fracLevy)]\n header += [('Levy Alpha = {}').format(self.alpha)]\n header += [('Levy Gamma = {}').format(self.gamma)]\n header += [('Levy Independent Samples = {}').format(self.n)]\n header += [('Levy Scaling Parameter = {}').format(self.scalingFactor)]\n header += [('Constraint Violaition Penalty = {}').format(self.penalty)]\n header += [('Max # of Generations = {}').format(self.maxGens)]\n header += [('Max # of Function Evaluations = {}').format(self.maxFevals)]\n header += [('Convergence Tolerance = {}').format(self.convTol)]\n header += [('Stall Limit = {}').format(self.stallLimit)]\n header += [('Optimal Convergence Tolerance = {}').format(self.optConvTol)]\n header += [' Attributes Inhereted from ProblemParameters:']\n header += [('{}').format(ProblemParameters.__str__(self))]\n return ('\\n').join(header) + '\\n'",
"def __str__(self):\n return (str(self.chromosome_id) + '. Chromosome: Genes: ' + str(\n self.genes) + '; Fitness: ' + str(self.fitness_value))",
"def __str__(self):\n return \"{}\".format(self._matches.keys())",
"def __str__(self):\n sorted_table = InferenceUtils.get_n_best(self._table, max(len(self._table), 1))\n\n result = []\n for key, value in sorted_table.items():\n result.append('P(%s):=%f\\n' % (str(key), value))\n\n return ''.join(result)[:-1] if len(result) > 0 else ''",
"def statistics():\n\n # Get statistics class variable\n stats = MontyHallGame.stats\n\n s1 = \"Changed and won: {} out of {}\".format(\n stats[\"changed\"][\"won\"], stats[\"changed\"][\"won\"] + stats[\"changed\"][\"lost\"]\n )\n\n s2 = \"Not changed and won: {} out of {}\".format(\n stats[\"not_changed\"][\"won\"],\n stats[\"not_changed\"][\"won\"] + stats[\"not_changed\"][\"lost\"],\n )\n\n return f\"{s1}\\n{s2}\"",
"def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)",
"def toJSON(self):\r\n guesses = [{'results':[{'guess': charResult.guessChar, 'result':charResult.result} for charResult in result.results]} for result in self.game.currentRound.guesses]\r\n return {'game':{'id':self.game.id,\r\n 'points':self.game.points,\r\n 'wordLength':self.game.currentRound.wordLength,\r\n 'triesLeft':self.game.currentRound.triesLeft,\r\n 'roundComplete':self.game.currentRound.completed,\r\n 'hasNextRound':self.game.hasNextRound(),\r\n 'guesses':guesses}}",
"def info(self) -> str:\n return GraphInfoFormatter(self).info",
"def to_string(self):\n\n return PrivateObservation.__hand_dict[self.encode]",
"def __str__(self):\n return \"{\" + (\", \".join(\"%s: %s\"%(ngram, value) for (ngram, value) in self.items())) + \"}\"",
"def __str__(self):\n result = ('---> Population - Generation: ' + str(self.generation)\n + '<--- \\n')\n result += 'Fittest Chromosome: \\n' + str(self.fittest_chromosome)\n\n for chromosome in self.chromosomes:\n result += str(chromosome) + '\\n'\n\n return result",
"def __str__(self):\n return Hand.__str__(self) + '\\nHand Rank: ' + self.get_full_label()",
"def get_driver_info(self):\n text = ''\n if self.assigned_vehicle and self.assigned_driver:\n text += str(self.assigned_vehicle.get_driver_str(\n self.assigned_driver)) + '\\n'\n return text",
"def __str__(self):\n result = \", \".join(map(str, self.cards))\n result += \"\\n \" + str(self.getPoints()) + \" points\"\n return result",
"def __str__(self):\n gene_values = self.values()\n msg = '[' + ','.join( [self.gene_format.format(x) for x in gene_values] ) + ']'\n if self.has_fitness():\n fitness = self.fitness\n msg += '->' + self.fitness_format.format(fitness)\n return msg",
"def __str__(self):\n\n out_string = \"Pose ID: \" + str(self.id)\n out_string += \"\\nDetection confidence: \" + str(self.confidence) + \"\\nKeypoints name-position:\\n\"\n # noinspection PyUnresolvedReferences\n for name, kpt in zip(Pose.kpt_names, self.data.tolist()):\n out_string += name + \": \" + str(kpt) + \"\\n\"\n return out_string"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Quick plot of a `tick.base.TimeFunction`
|
def plot_timefunction(time_function, labels=None, n_points=300, show=True,
ax=None):
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
else:
show = False
if time_function.is_constant:
if labels is None:
labels = ['value = %.3g' % time_function.border_value]
t_values = np.arange(10).astype('float')
ax.plot(t_values, time_function.value(t_values), label=labels[0])
else:
if labels is None:
interpolation_to_legend = {
TimeFunction.InterLinear: 'Linear',
TimeFunction.InterConstLeft: 'Constant on left',
TimeFunction.InterConstRight: 'Constant on right'
}
border_to_legend = {
TimeFunction.Border0:
'border zero',
TimeFunction.BorderConstant:
'border constant at %.3g' % time_function.border_value,
TimeFunction.BorderContinue:
'border continue',
TimeFunction.Cyclic:
'cyclic'
}
labels = [
'original points',
'%s and %s' %
(interpolation_to_legend[time_function.inter_mode],
border_to_legend[time_function.border_type])
]
original_t = time_function.original_t
if time_function.border_type == TimeFunction.Cyclic:
cycle_length = original_t[-1]
original_t = np.hstack((original_t, original_t + cycle_length,
original_t + 2 * cycle_length))
t_values = _extended_discrete_xaxis(original_t, n_points=n_points)
ax.plot(time_function.original_t, time_function.original_y, ls='',
marker='o', label=labels[0])
ax.plot(t_values, time_function.value(t_values), label=labels[1])
ax.legend()
if show is True:
plt.show()
return ax.figure
|
[
"def test_plot_time_data():\n fig, ax = GlobalData.plot_time_data(timeStart=-1e-3, timeEnd=1e-3, units='ms', show_fig=False)\n return fig",
"def cistime_py():\n timing.plot_scalings(compare='python')",
"def plotTime(data,rate):\n t = np.arange(len(data))*1.0/rate\n \n #Plot time domain\n pl.plot(t, data)\n pl.ylabel(\"Amplitude\")\n pl.xlabel(\"Time(s)\")\n pl.show()",
"def yggtime_py():\n timing.plot_scalings(compare='python')",
"def plot_a_func_time(aes, times, which_are_final_bodies=None,year_unit='kyr',title=None): \n year_unit_dict = {\"Myr\":1.e6,\"kyr\":1.e3}\n\n fig = pp.figure()\n\n for i in range(len(aes)):\n pp.plot(np.divide(times[i],year_unit_dict[year_unit]),aes[i],color='blue',linewidth=0.5)\n\n if which_are_final_bodies != None:\n for i in range(len(which_are_final_bodies)):\n #print \" final body plotting as red: \" + str(i)\n pp.plot(np.divide(times[which_are_final_bodies[i]],year_unit_dict[year_unit]),aes[which_are_final_bodies[i]],color='red')#,color='blue',linewidth=1.5)\n\n pp.xscale(u'log')\n \n if title != None:\n pp.title(title)\n\n pp.xlabel(\"Time (\"+year_unit+\")\")\n pp.ylabel(\"Semimajor axis (AU)\")\n\n\n return fig",
"def graph_timings(ax, timings):\n\tx_values, y_values = zip(*timings)\n\treturn ax.plot(x_values, y_values, \"b-\", label=\"Time\")",
"def plot_times(self, train_time, title=None, xmin=None, xmax=None,\n ymin=None, ymax=None, ax=None, show=True, color=None,\n xlabel=True, ylabel=True, legend=True, chance=True,\n label='Classif. score'):\n if not np.array(train_time).dtype is np.dtype('float'):\n raise ValueError('train_time must be float | list or array of '\n 'floats. Got %s.' % type(train_time))\n\n return plot_gat_times(self, train_time=train_time, title=title,\n xmin=xmin, xmax=xmax,\n ymin=ymin, ymax=ymax, ax=ax, show=show,\n color=color, xlabel=xlabel, ylabel=ylabel,\n legend=legend, chance=chance, label=label)",
"def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None, ax=None):\n data = tarray\n numSamples, numRows = tarray.shape\n# data = np.random.randn(numSamples,numRows) # test data\n# data.shape = numSamples, numRows\n if seconds:\n t = seconds * np.arange(numSamples, dtype=float)/numSamples\n# import pdb\n# pdb.set_trace()\n if start_time:\n t = t+start_time\n xlm = (start_time, start_time+seconds)\n else:\n xlm = (0,seconds)\n\n else:\n t = np.arange(numSamples, dtype=float)\n xlm = (0,numSamples)\n\n ticklocs = []\n if ax is None:\n ax = plt.subplot(111)\n plt.xlim(*xlm)\n # xticks(np.linspace(xlm, 10))\n dmin = data.min()\n dmax = data.max()\n dr = (dmax - dmin)*0.7 # Crowd them a bit.\n y0 = dmin\n y1 = (numRows-1) * dr + dmax\n plt.ylim(y0, y1)\n\n segs = []\n for i in range(numRows):\n segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))\n # print \"segs[-1].shape:\", segs[-1].shape\n ticklocs.append(i*dr)\n\n offsets = np.zeros((numRows,2), dtype=float)\n offsets[:,1] = ticklocs\n\n lines = LineCollection(segs, offsets=offsets,\n transOffset=None,\n )\n\n ax.add_collection(lines)\n\n # set the yticks to use axes coords on the y axis\n ax.set_yticks(ticklocs)\n # ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])\n # if not plt.ylabels:\n plt.ylabels = [\"%d\" % ii for ii in range(numRows)]\n ax.set_yticklabels(ylabels)\n\n plt.xlabel('time (s)')",
"def test_multi_plot_time():\n fig, ax = optoanalysis.multi_plot_time(GlobalMultiData, SubSampleN=1, units='us', xlim=[-1000, 1000], LabelArray=[\"Reference\", \"Cooled\"], show_fig=False)\n return fig",
"def plottf(tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,scale='log',\r\n normalize='n',):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=10*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=10*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(tfarray/np.max(abs(tfarray)))\r\n else:\r\n plottfarray=abs(tfarray)\r\n \r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.08\r\n plt.rcParams['figure.subplot.right']=.99\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n \r\n \r\n plt.figure(fignum)\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n vmin=vmin,vmax=vmax,cmap=cmap,interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc+starttime,\r\n tlst[-1]/tinc+starttime,flst[0],flst[-1]),aspect=aspect,\r\n cmap=cmap,interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n plt.show()",
"def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")",
"def compare_timed(fs, args):\n for f in fs:\n plt.plot(args, [timed(f, arg) for arg in args], label=f.__name__)\n plt.legend()\n plt.grid(True)\n plt.show()",
"def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)",
"def plotAll(fx,tfarray,tlst,flst,fignum=1,starttime=0,timeinc='hrs',\r\n dt=1.0,title=None,vmm=None,cmap=None,aspect=None,interpolation=None,\r\n cbori=None,cbshrink=None,cbaspect=None,cbpad=None,normalize='n',\r\n scale='log'):\r\n \r\n #time increment\r\n if timeinc=='hrs':\r\n tinc=3600/dt\r\n elif timeinc=='min':\r\n tinc=60/dt\r\n elif timeinc=='sec':\r\n tinc=1/dt\r\n else:\r\n raise ValueError(timeinc+'is not defined')\r\n #colormap\r\n if cmap==None:\r\n cmap='jet'\r\n else:\r\n cmap=cmap\r\n #aspect ratio\r\n if aspect==None:\r\n aspect='auto'\r\n else:\r\n aspect=aspect\r\n #interpolation\r\n if interpolation==None:\r\n interpolation='gaussian'\r\n else:\r\n interpolation=interpolation\r\n #colorbar orientation\r\n if cbori==None:\r\n cbori='vertical'\r\n else:\r\n cbori=cbori\r\n #colorbar shinkage\r\n if cbshrink==None:\r\n cbshrink=.99\r\n else:\r\n cbshrink=cbshrink\r\n #colorbar aspect\r\n if cbaspect==None:\r\n cbaspect=20\r\n else:\r\n cbaspect=cbaspect\r\n #colorbar pad\r\n if cbpad==None:\r\n cbpad=.1\r\n else:\r\n cbpad=cbpad\r\n \r\n #scale\r\n if scale=='log':\r\n zerofind=np.where(abs(tfarray)==0)\r\n tfarray[zerofind]=1.0\r\n if normalize=='y':\r\n plottfarray=20*np.log10(abs(tfarray/np.max(abs(tfarray))))\r\n else:\r\n plottfarray=20*np.log10(abs(tfarray))\r\n elif scale=='linear':\r\n if normalize=='y':\r\n plottfarray=abs(plottfarray/np.max(abs(plottfarray)))**2\r\n else:\r\n plottfarray=abs(tfarray)**2\r\n \r\n t=np.arange(len(fx))*dt+starttime*dt\r\n FX=np.fft.fft(padzeros(fx))\r\n FXfreq=np.fft.fftfreq(len(FX),dt)\r\n \r\n #set some plot parameters\r\n plt.rcParams['font.size']=10\r\n plt.rcParams['figure.subplot.left']=.13\r\n plt.rcParams['figure.subplot.right']=.98\r\n plt.rcParams['figure.subplot.bottom']=.07\r\n plt.rcParams['figure.subplot.top']=.96\r\n plt.rcParams['figure.subplot.wspace']=.25\r\n plt.rcParams['figure.subplot.hspace']=.20\r\n #plt.rcParams['font.family']='helvetica'\r\n \r\n fig=plt.figure(fignum)\r\n \r\n #plot FFT of fx\r\n fax=fig.add_axes([.05,.25,.1,.7])\r\n plt.plot(abs(FX[0:len(FX)/2]/max(abs(FX)))**2,FXfreq[0:len(FX)/2],'-k')\r\n plt.xlim(0,1)\r\n plt.ylim(0,FXfreq[len(FX)/2-1])\r\n fax.xaxis.set_major_locator(MultipleLocator(.5))\r\n \r\n #plot TFD\r\n pax=fig.add_axes([.25,.25,.75,.7])\r\n if vmm!=None:\r\n vmin=vmm[0]\r\n vmax=vmm[1]\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,vmin=vmin,vmax=vmax,cmap=cmap,\r\n interpolation=interpolation)\r\n else:\r\n plt.imshow(plottfarray,extent=(tlst[0]/tinc,tlst[-1]/tinc,\r\n flst[0],flst[-1]),aspect=aspect,cmap=cmap,\r\n interpolation=interpolation)\r\n plt.xlabel('Time('+timeinc+')',fontsize=12,fontweight='bold')\r\n plt.ylabel('Frequency (Hz)',fontsize=12,fontweight='bold')\r\n if title!=None:\r\n plt.title(title,fontsize=14,fontweight='bold')\r\n plt.colorbar(orientation=cbori,shrink=cbshrink,pad=cbpad,aspect=cbaspect)\r\n \r\n #plot timeseries\r\n tax=fig.add_axes([.25,.05,.60,.1])\r\n plt.plot(t,fx,'-k')\r\n plt.axis('tight')\r\n plt.show()",
"def plot_dt_signal(x, title=None):\n pylab.figure()\n pylab.stem(range(len(x)), x)\n pylab.title(title)\n pylab.xlabel(\"samples\")",
"def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time",
"def test_HawkesKernelTimeFunc_time_function(self):\n t_values = np.linspace(-1, 100, 1000)\n np.testing.assert_array_equal(\n self.hawkes_kernel_time_func.time_function.value(t_values),\n self.time_function.value(t_values))\n\n self.hawkes_kernel_time_func = \\\n HawkesKernelTimeFunc(t_values=self.t_values, y_values=self.y_values)\n np.testing.assert_array_equal(\n self.hawkes_kernel_time_func.time_function.value(t_values),\n self.time_function.value(t_values))",
"def visualize_time_data(time, index=None):\n length = time[0].shape[0]\n index = np.array(index or range(length))\n time_x = time[0].iloc[index]\n time_y = time[1].iloc[index]\n for key,val in eval(CFGS[\"DATA\"][\"PLOTTIMECOL\"]).items():\n for i in range(val[0], val[1]+1):\n idx = time_x[key] == i\n plt.plot(index[idx], time_y[idx], \"--\")\n plt.title(key)\n # plt.legend([\"springer\", \"summer\", \"fall\", \"winter\"])\n plt.show()",
"def tick(self, tick):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates mapping from water measurements column names to indices of the given header.
|
def get_water_index_map(archive, header):
column_re = {
'surface': {
'flow': 'pretok',
'level': 'vodostaj'
},
'ground': {
'altitude': 'nivo',
'level': 'vodostaj'
}
}
column_map = {key: -1 for key in column_re[archive].keys()}
empty = True
# Do regex search of every db column for every CSV file column heading.
for i, column in enumerate(header):
for column_name in column_re[archive].keys():
if re.search(column_re[archive][column_name], column, re.IGNORECASE):
if column_map[column_name] != -1:
continue
column_map[column_name] = i
empty = False
return None if empty else column_map
|
[
"def indices(header):\n return dict((n,i) for i,n in enumerate(header))",
"def _create_field_header_index_dictionary(header):\n field_header_index_dict = {}\n for name in header:\n if name in call_data_field_dict.keys():\n field_header_index_dict[name] = header.index(name)\n\n return field_header_index_dict",
"def _create_id_header_index_dictionary(header):\n id_header_index_dict = {}\n for name in header:\n if name in call_data_identifier_dict.keys():\n id_header_index_dict[name] = header.index(name)\n\n return id_header_index_dict",
"def set_name_index_map(self):\n columns = self.get_processed_data().columns\n return {columns[i]: i for i in range(len(columns))}",
"def genColumnIds( colHeaders ):\n # try to generate a reasonable field name from the given columnName:\n colNames = []\n colNameMap = {}\n for i, cdesc in enumerate(colHeaders):\n words = re.findall( '\\w+', cdesc )\n if len(words)==0 or words[0] in colNameMap:\n cid = \"col\" + str(i)\n else:\n cid = words[0]\n colNames.append( ( cid, cdesc ) )\n colNameMap[ cid ] = True\n return colNames",
"def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol",
"def map_col_header_to_col_num(self):\n if self.general_WS:\n return\n from PyQt5.QtCore import pyqtRemoveInputHook\n\n # Or for Qt5\n # from PyQt5.QtCore import pyqtRemoveInputHook\n\n from pdb import set_trace\n pyqtRemoveInputHook()\n set_trace()\n self.col_num_map={}\n header_values = [x.lower() for x in self.wks.row_values(1)]\n key_values =[x.lower() for x in [\"Company Name\",\"title\",\"first\",\"Last Name\",\"E-Mail\",\"chosen\",\"contacted\",\"replied\",\"linkedin\",\"website\",\"results\", \"Date contacted\"]]\n #c=[i for i, item in enumerate(header_values) find_most_similar_item_in_list(item.lower(),key_values)]\n for item in key_values:\n self.c.append(find_most_similar_item_in_list(item.lower(),header_values))\n try:\n self.date_contacted = self.wks.col_values(self.c[11]+1)\n self.col_num_map.update({'Date contacted': self.c[11]+1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'Date contacted' column\")\n try:\n self.results = self.wks.col_values(self.c[10]+1)\n self.col_num_map.update({'results': self.c[10]+1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'results' column\")\n\n try:\n self.website = self.wks.col_values(self.c[9]+1)\n self.col_num_map.update({'website': self.c[9]+1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'website' column\")\n\n try:\n self.companies = self.wks.col_values(self.c[0]+1)\n self.col_num_map.update({'companies': self.c[0]+1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'companies' column\")\n try:\n self.linkedin = self.wks.col_values(self.c[8] + 1)\n self.col_num_map.update({'linkedin': self.c[8] + 1})\n except:\n warnings.warn(\"google sheet -\" + self.sheet_name + \"- does not have 'linkedin' column\")\n try:\n self.titles = self.wks.col_values(self.c[1]+1)\n self.col_num_map.update({'titles': self.c[1] + 1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'titles' column\")\n try:\n self.first_name= self.wks.col_values(self.c[2]+1)\n self.col_num_map.update({'first_name': self.c[2] + 1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'first name' column\")\n try:\n self.last_names = self.wks.col_values(self.c[3]+1)\n self.col_num_map.update({'last_names': self.c[3] + 1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'last names' column\")\n try:\n self.emails = self.wks.col_values(self.c[4]+1)\n self.col_num_map.update({'emails': self.c[4] + 1})\n\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'emails' column\")\n try:\n self.chosen = self.wks.col_values(self.c[5]+1)\n self.col_num_map.update({'chosen': self.c[5] + 1})\n except:\n if not self.master_list:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'chosen' column\")\n if self.master_list:\n try:\n self.contacted = self.wks.col_values(self.c[6] + 1)\n self.col_num_map.update({'contacted': self.c[6] + 1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'contacted' column\")\n try:\n self.replied = self.wks.col_values(self.c[7] + 1)\n self.col_num_map.update({'replied': self.c[7] + 1})\n except:\n warnings.warn(\"google sheet -\"+self.sheet_name+\"- does not have 'replied' column\")",
"def header_index(arg, header):\n for i, ele in enumerate(header):\n if match(arg, ele):\n return i\n raise NameError('column not found')",
"def get_indices(filename='data/raw/housing.csv'):\n data = pd.read_csv(filename, nrows=0)\n columns = list(data)\n return [columns.index(col) for col in (\"total_rooms\", \"total_bedrooms\", \"population\", \"households\")]",
"def find_indeces(self, header):\n indeces = {'T': None, 'WV': None, 'WK': None, 'BZ': None, 'SPR': None,\n 'WBER': None, 'ABG.': None, 'UNG.': None, 'SPOE': None,\n 'FPOE': None, 'OEVP': None, 'GRUE': None, 'NEOS': None,\n 'WWW': None, 'ANDAS': None, 'GFW': None, 'SLP': None,\n 'WIFF': None, 'M': None, 'FREIE': None}\n for index, item in enumerate(header):\n indeces[item] = index\n return indeces",
"def get_table_indices(self):\n table_indices = {}\n for i in range(self.table.rowCount()):\n table_indices[self.table.verticalHeaderItem(i).text()] = i\n\n for i in range(self.table.columnCount()):\n table_indices[self.table.horizontalHeaderItem(i).text()] = i\n\n return table_indices",
"def build_col_index(fn,sheet):\n df = pd.read_excel(fn,sheet)\n\n # Store the sheet's original header in list\n cols_src = df.columns.values.tolist()\n\n # Index the relevant columns\n cols_index = []\n for col in settings.core_data_cols:\n cols_index.append(cols_src.index(col))\n return cols_index",
"def get_header_indices(filepath):\n\theaders = get_header_list(filepath, sort=False)\n\treturn {h: i for i, h in enumerate(headers)}",
"def file_columns(header):\n\n return [i for i, h in zip(range(0, len(header)), header) if\n is_file_column(h)]",
"def get_headers(self, headers, *args):\n if not headers:\n return {}\n\n header_indices = {}\n for index, header_info in enumerate(self.header_data):\n for arg in args:\n header_info = header_info[arg]\n \n if header_info in headers:\n header_indices[header_info] = index\n\n return header_indices",
"def create_matrix_mapping(train_mh, unk_vec_id):\n mh_index_map = {}\n matrix_idx = 0\n for vector_idx in train_mh:\n if vector_idx == unk_vec_id:\n unk_matrix_id = matrix_idx\n mh_index_map[vector_idx] = matrix_idx\n matrix_idx += 1\n return mh_index_map, unk_matrix_id",
"def column_info(colum_header):\n commas = colum_header.count(',')\n if commas == 0:\n return (column_name(clean_token(colum_header)))\n\n (key, units, location) = colum_header.split(',')\n key = column_name(clean_token(key))\n units = clean_token(units)\n location = clean_token(location)\n return (key, units, location)",
"def get_column_positions(table):\n \n column_names = get_columns(table)\n column_dict = {}\n for index, value in enumerate(column_names):\n column_dict[value] = index\n return column_dict",
"def create_index_mapping(self):\n print(self.df_movie)\n self.indices = pd.Series(self.df_movie.index, index=self.df_movie['title']).drop_duplicates()\n return self.indices"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|