query_id            stringlengths    32    32
query               stringlengths    9     4.01k
positive_passages   listlengths      1     1
negative_passages   listlengths      88    101
query_id: 5ee7897f9c3469042c9481cb0aaa95db
query: Create a Convolutional Layer
[ { "docid": "538739d29b1eff228e06813a471a4179", "score": "0.0", "text": "def conv2d(inputs, filters, kernel_size = 1, strides = 1, pad = 'VALID', name = None):\r\n\twith tf.name_scope(name):\r\n\t\tkernel = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([kernel_size,kernel_size, inputs.get_shape().as_list()[3], filters]), name= 'weights')\r\n\t\tconv = tf.nn.conv2d(inputs, kernel, [1,strides,strides,1], padding=pad, data_format='NHWC')\r\n\t\twith tf.device('/cpu:0'):\r\n\t\t\ttf.summary.histogram('weights_summary', kernel, collections = ['train'])\r\n\t\treturn conv", "title": "" } ]
[ { "docid": "b070c84c9fc1b5e2fad7e952f9059ce4", "score": "0.70827186", "text": "def new_convolutional_layer(input, num_input_channels, size_filter, num_filter, pooling=True, name=\"Conv_layer\"):\n with tf.name_scope(name):\n shape = [size_filter, size_filter, num_input_channels, num_filter] # tvar filtru pro konvoluci=vah (format TF)\n # tvorba pocatecnich vah (hodnot filtru)\n w = tf.Variable(tf.truncated_normal(shape, stddev=0.05), name=\"Vahy\")\n # tvorba pocatecniho prahu, 1 pro kazdy filtr\n b = tf.Variable(tf.constant(0.05, shape=[num_filter]), name=\"Prah\")\n\n # 2D konvoluce\n layer = tf.nn.conv2d(input=input,\n filter=w,\n strides=[1, 1, 1, 1], # krok = 1 ve vsech dimenzich (cislo obr, x osa, y osa, input-chan.)\n padding='SAME') # vstupni obraz bude oramovan nulami, aby mel vystup stejnou velikost\n layer += b # pridani prahu k vysledku konvoluce -> hodnota pridana ke kazdemu kanalu filtru\n\n # Pooling vrstva\n if pooling:\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1], # 2x2 max-pooling\n strides=[1, 2, 2, 1], # posun o 2 pixely po x i y\n padding='SAME')\n\n # ReLU vrstva - vypocita max(x, 0) pro kazdy pixel x\n layer = tf.nn.relu(layer) # relu(max_pool(x)) == max_pool(relu(x)) -> ReLU az po poolingu -> uspora operaci\n return layer, w # Vystup = vrstva a vahy filtru (pro zobrazeni vah)", "title": "" }, { "docid": "3ea81ccfa5207fc284f78a69c1c40108", "score": "0.6901321", "text": "def VGGConvLayer(n_filt, size=3, vgg_layer=None):\n if vgg_layer is not None:\n weights = get_weights_vgg16(vgg_layer)\n else:\n weights = None\n\n return Convolution2D(n_filt, size, size,\n weights=weights,\n activation='relu',\n border_mode='same',\n name='vgg_' + str(vgg_layer),\n trainable=True)", "title": "" }, { "docid": "11ba16ae8af5cd1405bb9ce4cedcc919", "score": "0.6896145", "text": "def __init__(self, input, num_filters, filter_size, stride=(1, 1), pad=(0, 0), activation=rectify):\n\n self.input = input\n self.output = layers.Conv2DLayer(self.input, num_filters, filter_size, stride=stride, pad=pad,\n W=initialize_parameters()[0], b=initialize_parameters()[1],\n nonlinearity=activation)", "title": "" }, { "docid": "06e8a9e8b07d6f4f5c543251f535770a", "score": "0.6860142", "text": "def _conv2d(prev_layer, layer, layer_name):\n W, b = _weights(layer, layer_name)\n W = tf.constant(W)\n b = tf.constant(np.reshape(b, (b.size)))\n return tf.nn.conv2d(prev_layer, filter = W, strides = [1,1,1,1], padding = 'SAME') + b", "title": "" }, { "docid": "78bb459c03e3bcd18e68b64f50b6eddd", "score": "0.684092", "text": "def convolutional_layer(previous, name, params, train=False, has_bn=False):\n fields = dict(num_output=int(params[\"filters\"]),\n kernel_size=int(params[\"size\"]))\n if \"stride\" in params.keys():\n fields[\"stride\"] = int(params[\"stride\"])\n if int(params.get(\"pad\", 0)) == 1: # use 'same' strategy for convolutions\n fields[\"pad\"] = fields[\"kernel_size\"]//2\n if has_bn:\n fields[\"bias_term\"] = False\n\n if train:\n fields.update(weight_filler=dict(type=\"gaussian\", std=0.01),\n bias_filler=dict(type=\"constant\", value=0))\n\n return cl.Convolution(previous, name=name, **fields)", "title": "" }, { "docid": "1fa92ae57dfbe0e99b26c27672ab80d5", "score": "0.6840586", "text": "def conv_layer(self, x, n=64, conv_1_1=False, dropout_layer=False, batch_norm=True):\n\tx = Conv2D(n, (3, 3), activation='relu', padding='same')(x)\n\tx = Conv2D(n, (3, 3), activation='relu', padding='same')(x)\n\tx = MaxPooling2D(pool_size=(2,2), strides=(2,2))(x)\n\tif batch_norm:\n\t x = 
BatchNormalization()(x)\n \tif dropout_layer:\n\t x = Dropout(0.5)(x)\n\treturn x", "title": "" }, { "docid": "40d5371ac141a46ef960729a68ef9fdd", "score": "0.6839389", "text": "def __init__(self,\n nChannelsPrevious,\n nChannels,\n kernelSize,\n padding=0,\n bias=True,\n **kwargs):\n\n ConstrainedLayer.__init__(self,\n nn.Conv1d(nChannelsPrevious, nChannels,\n kernelSize, padding=padding,\n bias=bias),\n **kwargs)", "title": "" }, { "docid": "7df29141e46d6bc5a58ecc4da32cdb42", "score": "0.6836834", "text": "def conv_layer(input_layer, kernel_dim, filter, activation=None, BN=False, pool=None, p_kernel=None, p_stride=None):\n # todo: Design the simplest convolution layer\n # Find the doc of mx.sym.Convolution by help command\n # Do you need BatchNorm?\n # Do you need pooling?\n # What is the expected output shape?\n\n # conv layer\n l = mx.sym.Convolution(data=input_layer, kernel=kernel_dim, num_filter=filter)\n\n if activation is not None:\n l = mx.sym.Activation(data=l, act_type=activation)\n if BN:\n l = mx.sym.BatchNorm(l)\n if pool is not None:\n l = mx.sym.Pooling(data=l, pool_type=pool, kernel=p_kernel, stride=p_stride)\n return l", "title": "" }, { "docid": "e2d235de18435628fe6e0eccdc9d3ed8", "score": "0.6825162", "text": "def layer_conv2(x, shape, batch_size, kernel_size=(3,3), strides=(1,1), dilation_rate=(1,1), activation='relu', name=None):\n \n return(tf.keras.layers.Conv2D(batch_size, kernel_size, strides, padding='valid', dilation_rate=(1,1), activation='relu', input_shape=shape[1:])(x))", "title": "" }, { "docid": "70ad0117e4acfd2a17de34ef3a97c06e", "score": "0.6817081", "text": "def create_convolution_layers(self):\n self.K = [None]\n self.character_embeddings = self.alphabet_size\n with tf.variable_scope(\"RNN\", initializer=tf.contrib.layers.xavier_initializer()):\n for window in xrange(1, self.max_conv_window+1):\n self.K.append(tf.get_variable(\"K%d\" % window,\n [window, self.character_embeddings, 1, self.kernels*window]))\n \n def cnn(w):\n W = tf.one_hot(w+2, self.alphabet_size, on_value=1.)\n # W = tf.gather(self.C_hat, w+3)\n W = tf.reshape(W, [-1, self.word_length, self.character_embeddings, 1])\n stride = [1, 1, self.character_embeddings, 1]\n \n W_hat = []\n for window in xrange(1, self.max_conv_window+1):\n W_window = tf.nn.conv2d(W, self.K[window], stride, \"VALID\")\n W_window = tf.reduce_max(W_window, axis=[1, 2])\n W_hat.append(W_window)\n \n W_hat = tf.concat(axis=1, values=W_hat)\n return tf.nn.relu(W_hat)\n \n self.f_x_cnn = cnn\n return", "title": "" }, { "docid": "8dbb8572b24ba96b82bb8eca77c285b1", "score": "0.6801169", "text": "def __init__(self,\n nChannelsPrevious,\n nChannels,\n kernelSize,\n padding=0,\n bias=True,\n transposed=False,\n **kwargs):\n if transposed:\n ConstrainedLayer.__init__(self,\n nn.ConvTranspose2d(nChannelsPrevious, nChannels,\n kernelSize, padding=padding,\n bias=bias),\n **kwargs)\n else:\n ConstrainedLayer.__init__(self,\n nn.Conv2d(nChannelsPrevious, nChannels,\n kernelSize, padding=padding,\n bias=bias),\n **kwargs)", "title": "" }, { "docid": "1bf48f419653c4583306844ac2237406", "score": "0.6780961", "text": "def conv_layer(inputs,\n filters=32,\n kernel_size=3,\n strides=1,\n use_maxpool=True,\n postfix=None,\n activation=None):\n\n x = Conv2D(filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n kernel_initializer='he_normal',\n name=\"conv_\"+postfix,\n padding='same')(inputs)\n x = BatchNormalization(name=\"bn_\"+postfix)(x)\n x = Activation('relu', name='relu_'+postfix)(x)\n if use_maxpool:\n x = 
MaxPooling2D(name='pool'+postfix)(x)\n return x", "title": "" }, { "docid": "3d27de9b7b4a155a55f189a9e04ab7d3", "score": "0.6763744", "text": "def convolution_layer(self, X, W):\n\n Bias = W['B']\n W = W['W']\n\n F,_, C, K = W.shape\n M, J, _, _ = X.shape\n\n N = J - F + 1\n\n \"\"\"\n F - Filter size\n C - Number of channels\n K - Number of filters\n M - number of training examples (batches)\n J - image size\n \"\"\"\n\n y = np.zeros((M,N,N,K))\n\n W = np.flip(W, axis=0) #flipping filters for convolution not coleration process\n W = np.flip(W, axis=1)\n\n\n for m in range(M): #every batch\n for k in range(K): #every filter\n for c in range(C): #every channel of input\n patch = X[m, :, :, c]\n filter = W[:,:,c,k]\n convolved = self.convolve2d( patch, filter,border_mode='valid')\n y[m,:,:,k] += convolved\n\n y = y + Bias[0,:]\n\n return y", "title": "" }, { "docid": "e465ba141e2b9209ef14b5e6e11ed25b", "score": "0.6760026", "text": "def __init__(self, channels: int, kernel_size: int, bias: bool = True) -> None:\n super(ConvolutionModule, self).__init__()\n # kernerl_size should be a odd number for 'SAME' padding\n assert (kernel_size - 1) % 2 == 0\n\n self.pointwise_conv1 = nn.Conv1d(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.depthwise_conv = nn.Conv1d(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n groups=channels,\n bias=bias,\n )\n self.norm = nn.BatchNorm1d(channels)\n self.pointwise_conv2 = nn.Conv1d(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.activation = Swish()", "title": "" }, { "docid": "7e03d2d20096c058bfd6eba6bb95d6e1", "score": "0.67475873", "text": "def conv_factory(x, init_form, nb_filter, filter_size_block, dropout_rate=None, weight_decay=1E-4):\n #x = Activation('relu')(x)\n x = Conv1D(nb_filter, filter_size_block,\n init=init_form,\n activation='relu',\n border_mode='same',\n bias=False,\n W_regularizer=l2(weight_decay))(x)\n if dropout_rate:\n x = Dropout(dropout_rate)(x)\n\n return x", "title": "" }, { "docid": "9ed2eae9465e087518b18939c45c1c61", "score": "0.6742302", "text": "def make_conv(input_tensor, kernel_params, strides, scopename, init_w=(0.0, 0.1), padding=\"SAME\" ):\n init_k = tf.truncated_normal(kernel_params, mean=init_w[0], stddev=init_w[1])\n weights = tf.Variable(initial_value=init_k, name=\"weights\")\n biases = tf.constant(0.0, tf.float32, [kernel_params[-1]], name=\"biases\")\n conv = tf.nn.conv2d(input_tensor, weights, strides, padding=padding)\n value = tf.nn.bias_add(conv, biases)\n debug(\"C [%s]: %s c %s + %s = %s\" % (scopename,str(input_tensor.shape), str(weights.shape), str(biases.shape), str(value.shape)))\n return tf.nn.relu(value, name = scopename)", "title": "" }, { "docid": "9be94f5703a8db271ea745e038b0c919", "score": "0.67410254", "text": "def conv_layer(inputobject, filters, biases, zero_pad_dimensions=(0,0), stride=(1,1), train=False):\n\tdef zero_pad(inputobject, zero_pad_dimensions):\n\t\t\"\"\"\n\t\tzero pad equally on x and y axis equally per axis\n\t\t\"\"\"\n\t\tif len(inputobject.shape) > 2: #multidemsional\n\t\t\tinner = np.concatenate( #concatenate along columns\n\t\t\t\t\t(np.zeros((inputobject.shape[0],zero_pad_dimensions[0],inputobject.shape[2])),\n\t\t\t\t\tinputobject,\n\t\t\t\t\tnp.zeros((inputobject.shape[0],zero_pad_dimensions[0],inputobject.shape[2]))), axis=1)\n\t\t\treturn np.concatenate( #concatenate along rows\n\t\t\t\t(np.zeros((zero_pad_dimensions[1],inner.shape[1], 
inner.shape[2])),\n\t\t\t\tinner,\n\t\t\t\tnp.zeros((zero_pad_dimensions[1],inner.shape[1], inner.shape[2]))), axis=0)\n\t\telse: #uni-dimensional\n\t\t\tinner = np.concatenate(\n\t\t\t\t\t(np.zeros((zero_pad_dimensions[0],inputobject.shape[1])),\n\t\t\t\t\tinputobject,\n\t\t\t\t\tnp.zeros((zero_pad_dimensions[0],inputobject.shape[1]))), axis=1)\n\t\t\treturn np.concatenate(\n\t\t\t\t(np.zeros((zero_pad_dimensions[1],inner.shape[1])),\n\t\t\t\tinner,\n\t\t\t\tnp.zeros((zero_pad_dimensions[1],inner.shape[1]))), axis=0)\n\n\n\tdef convolute(inputobject, filters, biases, stride):\n\t\t\"\"\"\n\t\tconvolute filters across image and return result\n\t\t\"\"\"\n\t\toutput = np.zeros(((inputobject.shape[0]-filters.shape[1])//stride[0],(inputobject.shape[1]-filters.shape[2])//stride[1], filters.shape[0]))\n\t\tfor i in range(filters.shape[0]):\n\t\t\tfor j in range((inputobject.shape[0]-filters.shape[1])//stride[0]): #rows\n\t\t\t\tfor k in range((inputobject.shape[1]-filters.shape[2])//stride[1]): #columns\n\t\t\t\t\toutput[j,k,i] = (biases[i] + np.vdot(\n\t\t\t\t\t\tinputobject[np.ix_(np.arange(j*stride[0], j*stride[0] + filters.shape[1]), np.arange(k*stride[1], k*stride[1] + filters.shape[2]))],\n\t\t\t\t\t\tfilters[i]))\n\t\treturn output\n\n\n\treturn convolute(zero_pad(inputobject, zero_pad_dimensions), filters, biases, stride)", "title": "" }, { "docid": "ebb797a23d30c467d9e3e0de02528906", "score": "0.6718607", "text": "def conv_layer(x, num_channels_out, spatial_stride=2):\n num_channels_in = x.get_shape().as_list()[-1]\n conv_strides = [1, spatial_stride, spatial_stride, 1]\n W_shape = [5, 5, num_channels_in, num_channels_out]\n W = tf.Variable(tf.truncated_normal(\n W_shape,\n mean=0.0,\n stddev=5e-2,\n dtype=tf.float32,\n seed=None,\n name=None\n ))\n b = tf.Variable(tf.zeros([num_channels_out]))\n conv = tf.nn.conv2d(x, W, conv_strides, 'SAME')\n conv_with_bias = conv + b\n return conv_with_bias", "title": "" }, { "docid": "9b6e1506fea54e3fa127ff26dc07d5d1", "score": "0.668993", "text": "def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1), layer_name='conv1'):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n filters = int(filters * alpha)\n x = KL.Conv2D(filters, kernel,\n padding='same',\n use_bias=False,\n strides=strides,\n name=layer_name)(inputs)\n x = KL.BatchNormalization(axis=channel_axis, name=layer_name + '_bn')(x)\n return KL.Activation(relu6, name=layer_name + '_relu')(x)", "title": "" }, { "docid": "dd5edff6444c8b1fe5b9ec6d73b41c41", "score": "0.6660116", "text": "def __init__(self, input, num_filters, filter_size, stride=(2, 2), padding=(0, 0), activation=rectify):\n\n self.input = input\n self.output = layers.TransposedConv2DLayer(self.input, num_filters, filter_size, stride=stride, crop=padding,\n W=initialize_parameters()[0], b=initialize_parameters()[1],\n nonlinearity=activation)", "title": "" }, { "docid": "b91358bf6acebab4db4de99f1a807d66", "score": "0.6654239", "text": "def make_initial_layer(self):\n\n layer = nn.ModuleList([nn.ModuleList([]) for i in range(self.ns)]) # stores the strided convolutions\n\n for scale in range(self.ns):\n if scale == 0:\n \"\"\" first scale is produced by a convolution with stride of 0 \"\"\"\n conv = nn.Conv2d(\n in_channels = self.n_channels_in,\n out_channels = self.k0 * self.gr ** scale,\n kernel_size = (3,3),\n stride = 1,\n padding = 1\n )\n\n\n bn = nn.BatchNorm2d(conv.out_channels)\n\n else:\n \"\"\" subsequent scales are produced by convolutions with scale 'self.sf ** i' \"\"\"\n 
prev_out_channels = layer[scale - 1][0][-1].num_features\n conv = nn.Conv2d(\n in_channels = prev_out_channels,\n out_channels = self.k0 * self.gr ** scale,\n kernel_size = (3,3),\n padding = 1,\n stride = self.sf #** scale\n )\n\n bn = nn.BatchNorm2d(conv.out_channels)\n\n operations = nn.ModuleList([conv, bn])\n layer[scale].append(operations)\n return layer", "title": "" }, { "docid": "ad3f9239ed76e31fe27a8015920de548", "score": "0.6614213", "text": "def add_convolution(self, name, kernel_channels, output_channels, height,\n width, stride_height, stride_width, border_mode, groups, W, b, has_bias, \n is_deconv = False, output_shape = None, \n input_name = 'data', output_name = 'out', \n dilation_factors = [1,1],\n padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0,\n same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY'):\n spec = self.spec\n nn_spec = self.nn_spec\n\n # Add a new layer\n spec_layer = nn_spec.layers.add()\n spec_layer.name = name\n spec_layer.input.append(input_name)\n spec_layer.output.append(output_name)\n spec_layer.convolution.MergeFromString(b'') # hack to set empty message\n\n # Set the layer params\n spec_layer_params = spec_layer.convolution\n spec_layer_params.isDeconvolution = is_deconv\n \n if is_deconv and output_shape:\n spec_layer_params.outputShape.append(output_shape[0])\n spec_layer_params.outputShape.append(output_shape[1])\n \n spec_layer_params.outputChannels = output_channels\n spec_layer_params.kernelChannels = kernel_channels\n spec_layer_params.kernelSize.append(height)\n spec_layer_params.kernelSize.append(width)\n spec_layer_params.stride.append(stride_height)\n spec_layer_params.stride.append(stride_width)\n\n if border_mode == 'valid':\n height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()\n height_border.startEdgeSize = padding_top\n height_border.endEdgeSize = padding_bottom\n width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()\n width_border.startEdgeSize = padding_left\n width_border.endEdgeSize = padding_right\n elif border_mode == 'same':\n if not (same_padding_asymmetry_mode == 'BOTTOM_RIGHT_HEAVY' or same_padding_asymmetry_mode == 'TOP_LEFT_HEAVY'):\n raise ValueError(\"Invalid value %d of same_padding_asymmetry_mode parameter\" % same_padding_asymmetry_mode)\n spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode)\n else:\n raise NotImplementedError(\n 'Border mode %s is not implemented.' 
% border_mode)\n\n spec_layer_params.nGroups = groups\n spec_layer_params.hasBias = has_bias\n\n # Assign weights\n weights = spec_layer_params.weights\n\n # Weight alignment: MLModel Spec requires following weight arrangement: \n # is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups\n # is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels \n if not is_deconv:\n Wt = W.transpose((3,2,0,1))\n Wt = Wt.flatten()\n else:\n Wt = W.transpose((2,3,0,1)).flatten()\n for idx in range(Wt.size):\n weights.floatValue.append(float(Wt[idx]))\n\n # Assign biases\n if has_bias:\n bias = spec_layer_params.bias\n for f in range(output_channels):\n bias.floatValue.append(float(b[f]))\n \n # add dilation factors\n spec_layer_params.dilationFactor.append(dilation_factors[0])\n spec_layer_params.dilationFactor.append(dilation_factors[1])", "title": "" }, { "docid": "cba3d981aa303e83b3163a8fb59450b2", "score": "0.6613258", "text": "def add_convolutional_layer(layers, count, params, train=False):\n layer_name = \"conv{0}\".format(count)\n has_batch_norm = (params.get(\"batch_normalize\", '0') == '1')\n\n layers.append(convolutional_layer(layers[-1], layer_name, params,\n train, has_batch_norm))\n if has_batch_norm:\n layers.append(batchnorm_layer(layers[-1], \"{0}_bn\".format(layer_name),\n train))\n layers.append(cl.Scale(layers[-1], name=\"{0}_scale\".format(layer_name),\n scale_param=dict(bias_term=True)))\n if params[\"activation\"] != \"linear\":\n layers.append(activation_layer(layers[-1], count, params[\"activation\"]))", "title": "" }, { "docid": "4c635300e2542177aea381be81339225", "score": "0.6595456", "text": "def create_conv_layer(input_, n_in, n_out, name='conv', bias=True,\n norm=False):\n with tf.variable_scope(name):\n W = weight_variable([3, 1, n_in, n_out])\n if bias:\n b = bias_variable([n_out])\n\n conv = conv2d(input_, W)\n if bias:\n conv = conv + b\n if norm:\n relu = tf.nn.relu(batch_norm(conv, n_out), name='relu')\n else:\n relu = tf.nn.relu(conv, name='relu')\n return W, relu", "title": "" }, { "docid": "c6b70d9321c94472be3d5ac87d0f5a90", "score": "0.6582357", "text": "def create_bconv_layer(\n weights, strides, padding, transpose=True, fused_multiply=None, fused_add=None\n):\n strides = [1, strides[0], strides[1], 1]\n padding = padding.upper()\n\n # Here the weights are still HWIO\n dotproduct_size = weights.shape[0] * weights.shape[1] * weights.shape[2]\n\n filter_format = \"HWIO\"\n if transpose:\n # Transpose: change from HWIO to OHWI\n weights = np.moveaxis(weights, 3, 0)\n filter_format = \"OHWI\"\n weights = np.sign(np.sign(weights) + 0.5)\n\n out_channels = weights.shape[0]\n\n if fused_multiply is None:\n fused_multiply = np.full(shape=(out_channels), fill_value=1)\n elif len(fused_multiply.shape) != 1 or fused_multiply.shape[0] != out_channels:\n raise Exception(\n f\"ERROR: Argument fused_multiply should have shape ({weights.shape[0]}) but has shape {fused_multiply.shape}\"\n )\n\n if fused_add is None:\n fused_add = np.full(shape=(out_channels), fill_value=0)\n elif len(fused_add.shape) != 1 or fused_add.shape[0] != out_channels:\n raise Exception(\n f\"ERROR: Argument fused_add should have shape ({weights.shape[0]}) but has shape {fused_add.shape}\"\n )\n\n # The bconv will do the following:\n # output = fused_add[channel] + fused_multiply[channel] * popcount\n # We use this to implement two things:\n # - `y1 = n - 2 * popcount` (the 
backtransformation to -1,+1 space)\n # - `y2 = a + b * y1` (optional fused batchnorm)\n # Together they become\n # `y = (a + b*n) + (-2b) * popcount\n fused_add = fused_add + dotproduct_size * fused_multiply\n fused_multiply = -2 * fused_multiply\n\n def bconv_op(x):\n y = bconv2d64(\n x,\n weights,\n fused_multiply,\n fused_add,\n strides,\n padding,\n data_format=\"NHWC\",\n filter_format=filter_format,\n )\n return y\n\n return bconv_op", "title": "" }, { "docid": "ee614ff08bab6ff637663c6dfa8d7d95", "score": "0.65370893", "text": "def simple_conv(x, k):\n if len(x.get_shape()) == 4:\n y = tf.nn.conv2d(x, k, [1, 1, 1, 1], padding='VALID')\n elif len(x.get_shape()) == 5:\n y = tf.nn.conv3d(x, k, [1, 1, 1, 1, 1], padding='VALID')\n return y", "title": "" }, { "docid": "c62564b1ffb3bb119a3c9cb54c646bed", "score": "0.65345985", "text": "def generateLayer(self, input_tensor):\n\n\t\t# Get the previous tensor shape\n\t\tinput_shape = input_tensor.get_shape().as_list()[1:]\n\n\t\t# Create the convolution weights and bias\n\t\tfilter_shape = self.kernel_shape + (input_shape[2], self.num_kernels)\n\t\tweights = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.05))\n\t\tbias = tf.Variable(tf.constant(0.0, shape=(self.num_kernels,)))\n\n\t\ttry:\n\t\t\tself.tensor = self.activation(tf.nn.conv2d(input_tensor, weights, (1,) + self.stride + (1,), 'VALID') + bias)\n\t\texcept:\n\t\t\tprint \"Error!\"\n\t\t\tprint \"Input Shape:\", input_tensor.get_shape()\n\t\t\tprint \"Kernel Shape:\", self.kernel_shape\n\t\t\tprint \"Num Kernels:\", self.num_kernels\n\n\n\t\treturn self.tensor", "title": "" }, { "docid": "8f0758f304cb1ad656bbba7b2f4af791", "score": "0.653291", "text": "def create_bn_conv(input: KerasTensor, filter: int, kernel_size: int, dilation_rate: int = 1, pre_name: str = '',\r\n idx: int = 0, padding='same', activation: str = 'relu') -> KerasTensor:\r\n conv = layers.Conv2D(filter, (kernel_size, 1), padding=padding,\r\n dilation_rate=(dilation_rate, dilation_rate), activation=activation,\r\n name=f\"{pre_name}_conv{idx}\")(input)\r\n bn = layers.BatchNormalization(name=f\"{pre_name}_bn{idx}\")(conv)\r\n return bn", "title": "" }, { "docid": "0192067b7e40199bc4789f4fa274c2ac", "score": "0.65262264", "text": "def _Conv(self, name, filter_shape, stride=(1, 1), padding='SAME',\n use_bn=True, activation=None):\n # TODO(zhifengc): Revisit whether BatchNormLayer should apply gamma when the\n # following activation is a relu.\n if isinstance(stride, tuple):\n filter_stride = stride\n elif isinstance(stride, int):\n filter_stride = (stride, stride)\n else:\n raise ValueError(\n 'Input stride not a tuple or int. 
Is a {}'.format(type(stride)))\n norm = self._BN('bn', filter_shape[3]) if use_bn else self._Identity(name)\n return self._Seq(\n name,\n self._ConvPlain('conv', filter_shape, filter_stride, padding),\n norm,\n self._Activation('activation', activation_fn_or_name=activation))", "title": "" }, { "docid": "54929ed0cb5f855da53fa11fb96ef779", "score": "0.652046", "text": "def conv_bn_layer(name, input, filter_size, num_filters,\n stride, padding, channels=None,\n active_type=ReluActivation()):\n\n tmp = img_conv_layer(name=name + \"_conv\",\n input=input,\n filter_size=filter_size,\n num_channels=channels,\n num_filters=num_filters,\n stride=stride,\n padding=padding,\n act=LinearActivation(),\n bias_attr=False)\n return batch_norm_layer(name=name + \"_bn\",\n input=tmp,\n act=active_type)\n #use_global_stats=False,", "title": "" }, { "docid": "3521f9eb9510d5da779002deb6df8795", "score": "0.6504659", "text": "def toConv2D(self,**args):\n\t\tbias = self.getBias()\n\t\tweights = self.getWeights()\n\t\tkernel_size = (weights.shape[0],weights.shape[1])\n\t\tfilters = weights.shape[3]\n\t\treturn Conv2D(filters, kernel_size, bias_initializer=tf.keras.initializers.Constant(bias),\n\t\t\t\t\t\t\t\t\t\tkernel_initializer=tf.keras.initializers.Constant(weights), **args)", "title": "" }, { "docid": "b1dc749175783bc8b34b54d3df930ed7", "score": "0.6495615", "text": "def basic_conv():\n hparams = common_hparams.basic_params1()\n hparams.hidden_size = 64\n hparams.batch_size = 8\n hparams.num_hidden_layers = 3\n hparams.optimizer = \"Adam\"\n hparams.learning_rate_constant = 0.0002\n hparams.learning_rate_warmup_steps = 500\n hparams.learning_rate_schedule = \"constant * linear_warmup\"\n hparams.label_smoothing = 0.05\n hparams.initializer = \"uniform_unit_scaling\"\n hparams.initializer_gain = 1.0\n hparams.weight_decay = 0.0\n hparams.add_hparam(\"num_compress_steps\", 2)\n return hparams", "title": "" }, { "docid": "156141e4fa0f6d5205c3f90672601108", "score": "0.6488081", "text": "def convolve(self, input, **kwargs):\n raise NotImplementedError(\"BaseConvLayer does not implement the \"\n \"convolve() method. 
You will want to \"\n \"use a subclass such as Conv2DLayer.\")", "title": "" }, { "docid": "45738bf69d072417b55b2b4e9ce546dd", "score": "0.64718235", "text": "def build_model(self, img_input: TensorType) -> TensorType:\n filters = int(32 * self.alpha)\n shape = (-1, 1, 1, int(1024 * self.alpha))\n\n # Conv 1 block\n x = layers.zero_padding(img_input, padding=((0, 1), (0, 1)), name='conv1_pad')\n x = layers.conv(x, filters_out=filters, kernel_size=3, padding='valid', add_bias=False, stride=2, name='conv1')\n x = layers.norm(x, axis=-1, name='conv1_bn')\n x = layers.relu(x, name='conv1_relu')\n\n # Depthwise convolutions\n x = self._depthwise_conv_block(x, 64, self.alpha, depth_multiplier=1, block_id=1)\n x = self._depthwise_conv_block(x, 128, self.alpha, depth_multiplier=1, strides=2, block_id=2)\n x = self._depthwise_conv_block(x, 128, self.alpha, depth_multiplier=1, block_id=3)\n x = self._depthwise_conv_block(x, 256, self.alpha, depth_multiplier=1, strides=2, block_id=4)\n x = self._depthwise_conv_block(x, 256, self.alpha, depth_multiplier=1, block_id=5)\n x = self._depthwise_conv_block(x, 512, self.alpha, depth_multiplier=1, strides=2, block_id=6)\n x = self._depthwise_conv_block(x, 512, self.alpha, depth_multiplier=1, block_id=7)\n x = self._depthwise_conv_block(x, 512, self.alpha, depth_multiplier=1, block_id=8)\n x = self._depthwise_conv_block(x, 512, self.alpha, depth_multiplier=1, block_id=9)\n x = self._depthwise_conv_block(x, 512, self.alpha, depth_multiplier=1, block_id=10)\n x = self._depthwise_conv_block(x, 512, self.alpha, depth_multiplier=1, block_id=11)\n x = self._depthwise_conv_block(x, 1024, self.alpha, depth_multiplier=1, strides=2, block_id=12)\n x = self._depthwise_conv_block(x, 1024, self.alpha, depth_multiplier=1, block_id=13)\n\n # Include top\n x = layers.global_avg_pool(x)\n x = layers.reshape(x, shape=shape, name='reshape_1')\n x = layers.conv(x, filters_out=self.num_classes, kernel_size=1, padding='same', name='conv_preds',\n add_bias=False)\n x = layers.reshape(x, shape=(-1, self.num_classes), name='reshape_2')\n x = layers.softmax(x, name='act_softmax')\n return x", "title": "" }, { "docid": "15c3808031939495e3805fbab2b93f88", "score": "0.6471251", "text": "def __init__(self, emb_size, output_channels, kernel_size=5, padding=1):\r\n \r\n super(CNN, self).__init__()\r\n \r\n self.conv1D=nn.Conv1d(in_channels=emb_size,out_channels=output_channels,\r\n kernel_size=kernel_size,padding=padding,\r\n bias=True)", "title": "" }, { "docid": "ce950c3d7bfd5de2629cf94b647f80b7", "score": "0.6470516", "text": "def OneConvLayer(input_shape):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n\n model.add(Dense(128, activation='relu'))\n model.add(Dense(9, activation='softmax'))\n return model", "title": "" }, { "docid": "f59742675eb899463cc43ef148f43a68", "score": "0.6467779", "text": "def conv( in_channels , out_channels , kernel_size , stride = 1 , padding = 0 , activation_fn= None , use_batchnorm = False , pre_activation = False , bias = True , weight_init_fn = None ):\n if not pre_activation and use_batchnorm:\n assert not bias\n\n layers = []\n if pre_activation :\n if use_batchnorm:\n layers.append( nn.BatchNorm2d( in_channels ) )\n if activation_fn is not None:\n layers.append( activation_fn() )\n conv = nn.Conv2d( in_channels , out_channels , kernel_size , stride , padding , bias = bias )\n if weight_init_fn is 
None:\n weight_init_fn = get_weight_init_fn( activation_fn )\n try:\n weight_init_fn( conv.weight )\n except:\n print( conv.weight )\n layers.append( conv )\n if not pre_activation :\n if use_batchnorm:\n layers.append( nn.BatchNorm2d( out_channels ) )\n if activation_fn is not None:\n layers.append( activation_fn() )\n return nn.Sequential( *layers )", "title": "" }, { "docid": "8da929002cc399e96eb78139dd3395b9", "score": "0.64653265", "text": "def ThreeConvLayer(input_shape):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n model.add(MaxPooling2D((2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.3))\n model.add(Dense(9, activation='softmax'))\n return model", "title": "" }, { "docid": "56f8add5325b71aa9ad52a97e9f16164", "score": "0.646313", "text": "def build_3DCNN(self):\n with tf.variable_scope(\n \"2plus1Dconvolution\",\n reuse=self.reuse,\n initializer=self.initializer,\n regularizer=None,\n ):\n input_layer_ = self.input_layer\n for i, num_filter in enumerate(self.config[\"num_filters\"]):\n spatial_num_filter = num_filter[0]\n temporal_num_filter = num_filter[1]\n\n # spatial convolution\n spatial_conv_layer = tf.layers.conv3d(\n inputs=input_layer_,\n filters=spatial_num_filter,\n kernel_size=[\n 1,\n self.config[\"spatial_filter_size\"][i],\n self.config[\"spatial_filter_size\"][i],\n ],\n padding=\"same\",\n activation=tf.nn.relu,\n )\n # temporal convolution\n temporal_conv_layer = tf.layers.conv3d(\n inputs=spatial_conv_layer,\n filters=temporal_num_filter,\n kernel_size=[self.config[\"temporal_filter_size\"][i], 1, 1],\n padding=\"same\",\n activation=tf.nn.relu,\n )\n\n pooling_layer = tf.layers.max_pooling3d(\n inputs=temporal_conv_layer,\n pool_size=[2, 2, 2],\n strides=[2, 2, 2],\n padding=\"same\",\n )\n input_layer_ = pooling_layer\n\n self.cnn_output_raw = input_layer_\n (\n batch_size_by_seq_length,\n new_temporal_dim,\n cnn_height,\n cnn_width,\n num_filters,\n ) = self.cnn_output_raw.shape.as_list()\n self.cnn_output_flat = tf.reshape(\n self.cnn_output_raw,\n [-1, new_temporal_dim * cnn_height * cnn_width * num_filters],\n )\n\n # Stack a dense layer to set CNN representation size.\n # Densely connected layer with <num_hidden_units> output neurons.\n # Output Tensor Shape: [batch_size, num_hidden_units]\n self.cnn_output_flat = tf.layers.dense(\n inputs=self.cnn_output_flat,\n units=self.config[\"num_hidden_units\"],\n activation=tf.nn.relu,\n )\n # CNN OUTPUT FLAT shape [batch_size * seq_len, num_hidden_units]\n # CNN OUTPUT shape [batch_size, seq_len, num_hidden_units]\n self.cnn_output = tf.reshape(\n self.cnn_output_flat,\n [self.batch_size_op, self.seq_len_op, self.config[\"num_hidden_units\"]],\n )", "title": "" }, { "docid": "2fb102b022a6679676d66baebf6cabb7", "score": "0.6456036", "text": "def conv2d(x, filters, num_row, num_col, padding='same', strides=(1, 1)):\n x = layers.Conv2D(\n filters, (num_row, num_col),\n strides=strides,\n padding=padding,\n use_bias=False)(x)\n x = layers.Activation('relu')(x)\n return x", "title": "" }, { "docid": "4e2a75d64ec623c51714b5ad673c92c2", "score": "0.6449423", "text": "def bn_conv_layer(inputs,\n activation = 'relu',\n filters = 4,\n kernel_size = 
(3,3),\n dropout = 0.5,\n bottleneck = True,\n bottleneck_factor=4,\n name = 'conv'):\n out = inputs \n\n # Placing batch norm BEFORE activation\n # some results get improvements when placing\n # it after. \n out = BatchNormalization()(out)\n out = Activation(activation)(out)\n\n if bottleneck:\n out = Conv2D(filters=filters*bottleneck_factor,\n kernel_size=(1,1),\n strides = (1,1),\n activation = None,\n padding='same',\n name = name+'/bottle_neck')(out)\n \n out = Dropout(dropout)(out)\n out = BatchNormalization()(out)\n out = Activation(activation)(out)\n\n out = Conv2D(filters=filters,\n kernel_size=kernel_size,\n strides = (1,1),\n activation = None,\n padding='same',\n name = name+'/conv')(out)\n out = Dropout(dropout)(out)\n \n return out", "title": "" }, { "docid": "6d9ad07ac8cab3b604295b064f7ac40c", "score": "0.6428845", "text": "def create_network(self, X):\n with tf.variable_scope('initial_causal_conv'):\n initial_conv_result = tf.nn.conv2d(X, self.variables[\n 'initial_filter'],\n padding=\"SAME\", strides=[1,1,1,1])\n residual = initial_conv_result\n\n # create dilated stack results\n skip_list = list()\n with tf.variable_scope(\"dilated_stack\"):\n for i, dilation in enumerate(self.dilations):\n residual, skip_result = self._dilated_stack(residual, dilation,\n i)\n skip_list.append(skip_result)\n\n # post-processing\n # addition --> Relu --> convolution --> Relu --> convolution\n with tf.variable_scope(\"post_processing\"):\n total_output = sum(skip_list)\n relu1 = tf.nn.tanh(total_output)\n conv1 = tf.nn.conv2d(relu1, self.variables['post_1'],\n padding=\"SAME\", strides=[1,1,1,1])\n\n relu2 = tf.nn.tanh(conv1)\n conv2 = tf.nn.conv2d(relu2, self.variables['post_2'],\n padding=\"SAME\", strides=[1,1,1,1])\n \n return conv2", "title": "" }, { "docid": "63be2c433dba5022fc19b6cde39fa29a", "score": "0.6419761", "text": "def make_layer(self, layer_index):\n\n layer = nn.ModuleList([None for i in range(self.ns)]) # initialise the layer\n\n for row in range(layer_index, self.ns):\n convs = nn.ModuleList([]) # stores the convolutions\n\n # === strided convolution h_hat ===\n h_hat_num_channels_in = (self.k0 * self.gr ** (row - 1)) + (layer_index - 1) * (2 * self.k0 * self.gr ** (row - 1))\n h_hat = self.h_hat(\n n_channels_in = h_hat_num_channels_in,\n scale_index = row\n )\n\n # === regular convolution h ===\n h_num_channels_in = (self.k0 * self.gr ** row) + (layer_index - 1) * (2 * self.k0 * self.gr ** row)\n h = self.h(\n n_channels_in = h_num_channels_in,\n scale_index = row\n )\n\n convs.append(h_hat)\n convs.append(h)\n\n layer.insert(row, convs)\n\n return layer", "title": "" }, { "docid": "5b57da98ed37c7f88fca0038bf917948", "score": "0.6414823", "text": "def new_conv_layer(\n previous_input_layer, num_input_channels, filter_size, num_filters,\n use_pooling=True):\n # Shape of the filter-weights for the convolution.\n # This format is determined by the TensorFlow API.\n shape = [filter_size, filter_size, num_input_channels, num_filters]\n\n # Create new weights aka. filters with the given shape.new_conv_layer\n weights = new_weights(shape=shape)\n\n # Create new biases, one for each filter.\n biases = new_biases(length=num_filters)\n\n # Create the TensorFlow operation for convolution.\n # Note the strides are set to 1 in all dimensions.\n # The first and last stride must always be 1,\n # because the first is for the image-number and\n # the last is for the input-channel.\n # But e.g. 
strides=[1, 2, 2, 1] would mean that the filter\n # is moved 2 pixels across the x- and y-axis of the image.\n # The padding is set to 'SAME' which means the input image\n # is padded with zeroes so the size of the output is the same.\n layer = tf.nn.conv2d(input=previous_input_layer,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n # Add the biases to the results of the convolution.\n # A bias-value is added to each filter-channel.\n layer += biases\n\n # Use pooling to down-sample the image resolution?\n if use_pooling:\n # This is 2x2 max-pooling, which means that we\n # consider 2x2 windows and select the largest value\n # in each window. Then we move 2 pixels to the next window.\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Rectified Linear Unit (ReLU).\n # It calculates max(x, 0) for each input pixel x.\n # This adds some non-linearity to the formula and allows us\n # to learn more complicated functions.\n layer = tf.nn.relu(layer)\n\n # Note that ReLU is normally executed before the pooling,\n # but since relu(max_pool(x)) == max_pool(relu(x)) we can\n # save 75% of the relu-operations by max-pooling first.\n\n # We return both the resulting layer and the filter-weights\n # because we will plot the weights later.\n return (layer, weights)", "title": "" }, { "docid": "340fe9d61888235a3fd9cace04971f6b", "score": "0.64048904", "text": "def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n alpha=0.5,\n dilation=1,\n groups=False,\n bias=False):\n\n super(OctConv2d, self).__init__()\n\n assert isinstance(in_channels, int) and in_channels > 0\n assert isinstance(out_channels, int) and out_channels > 0\n assert isinstance(kernel_size, int) and kernel_size > 0\n assert stride in {1, 2}, \"Only strides of 1 and 2 are currently supported\"\n\n if isinstance(alpha, tuple):\n assert len(alpha) == 2\n assert all([0 <= a <= 1 for a in alpha]), \"Alphas must be in interval [0, 1]\"\n self.alpha_in, self.alpha_out = alpha\n else:\n assert 0 <= alpha <= 1, \"Alpha must be in interval [0, 1]\"\n self.alpha_in = alpha\n self.alpha_out = alpha\n\n # in_channels\n in_ch_hf = int((1 - self.alpha_in) * in_channels)\n self.in_channels = {\n 'high': in_ch_hf,\n 'low': in_channels - in_ch_hf\n }\n\n # out_channels\n out_ch_hf = int((1 - self.alpha_out) * out_channels)\n self.out_channels = {\n 'high': out_ch_hf,\n 'low': out_channels - out_ch_hf\n }\n\n # groups\n self.groups = {\n 'high': 1,\n 'low': 1\n }\n\n if type(groups) == bool and groups:\n if self.alpha_out > 0 and self.in_channels['high'] <= self.out_channels['high']:\n self.groups['high'] = in_ch_hf\n\n if self.alpha_in > 0 and self.in_channels['low'] <= self.out_channels['low']:\n self.groups['low'] = in_channels - in_ch_hf\n\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.bias = bias\n\n self.pool = nn.AvgPool2d(kernel_size=(2, 2), stride=2)\n\n self.conv_h2h = nn.Conv2d(in_channels=self.in_channels['high'],\n out_channels=self.out_channels['high'],\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n groups=self.groups['high'],\n bias=bias) \\\n if not (self.alpha_in == 1 or self.alpha_out == 1) else None\n\n self.conv_h2l = nn.Conv2d(in_channels=self.in_channels['high'],\n out_channels=self.out_channels['low'],\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n groups=self.groups['high'],\n bias=bias) \\\n if not (self.alpha_in == 
1 or self.alpha_out == 0) else None\n\n self.conv_l2h = nn.Conv2d(in_channels=self.in_channels['low'],\n out_channels=self.out_channels['high'],\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n groups=self.groups['low'],\n bias=bias) \\\n if not (self.alpha_in == 0 or self.alpha_out == 1) else None\n\n self.conv_l2l = nn.Conv2d(in_channels=self.in_channels['low'],\n out_channels=self.out_channels['low'],\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n groups=self.groups['low'],\n bias=bias) \\\n if not (self.alpha_in == 0 or self.alpha_out == 0) else None", "title": "" }, { "docid": "dd17a56930036932417ebbea04ad452d", "score": "0.63973284", "text": "def __init__(self,\n num_layers,\n conv_kernel_size_list,\n num_conv_heads,\n conv_hidden_size,\n filter_size,\n conv_type=\"lightweight\",\n glu_after_proj=True,\n conv_weight_dropout_rate=0.,\n ffn_activation=\"relu\",\n ffn_dropout_rate=0.,\n layer_postprocess_dropout_rate=0.,\n layer_postprocess_epsilon=1e-6,\n name=None):\n super(LightConvolutionEncoder, self).__init__(\n num_layers=num_layers, conv_kernel_size_list=conv_kernel_size_list,\n num_conv_heads=num_conv_heads, conv_hidden_size=conv_hidden_size,\n filter_size=filter_size, ffn_activation=ffn_activation,\n ffn_dropout_rate=ffn_dropout_rate, conv_type=conv_type,\n glu_after_proj=glu_after_proj,\n conv_weight_dropout_rate=conv_weight_dropout_rate,\n layer_postprocess_dropout_rate=layer_postprocess_dropout_rate,\n layer_postprocess_epsilon=layer_postprocess_epsilon,\n name=name or self.__class__.__name__)\n self._stacking_layers = []", "title": "" }, { "docid": "e734f13bbedb21bc1004def808933ef0", "score": "0.6387087", "text": "def create_layer():\r\n pass", "title": "" }, { "docid": "7e631d9039725978871658753b1c18c8", "score": "0.63865274", "text": "def convnet(I):\n pass", "title": "" }, { "docid": "d8a799d016b2e638e4fb5bc42e5266d1", "score": "0.63824487", "text": "def conv_layer(self, inp, filters, kernel_size):\n out = Conv2D(filters, kernel_size=(kernel_size, kernel_size), strides=1, padding=\"same\",\n use_bias=Config.USE_BIAS,\n kernel_regularizer=l2(Config.REGULARIZER_CONST))(inp)\n out = BatchNormalization()(out)\n out = LeakyReLU()(out)\n return out", "title": "" }, { "docid": "75cab339494dac26b4cfe35197f32e6d", "score": "0.6376361", "text": "def _init_conv_layer(key, n_h, n_w, n_cout, n_cin):\n k1, k2 = random.split(key)\n W = random.normal(k1, (n_h, n_w, n_cin, n_cout))\n b = random.normal(k2, (n_cout,))\n return W, b", "title": "" }, { "docid": "96f15031542f3e3327b28aaaabee2760", "score": "0.63732946", "text": "def compile_conv_layer(self, layer, isFirst = False):\r\n\r\n array_size = self.acc_cfg[\"ARRAY_SIZE\"]\r\n instructions = []\r\n\r\n N = layer.nifm #input channel\r\n W = layer.wifm #input width\r\n H = W #input height\r\n K = layer.hfil #kernel width\r\n S = layer.htrd #stride\r\n O = layer.nofm #out channel\r\n R = layer.hofm #output height\r\n C = layer.wofm #output width\r\n\r\n gemm_array_a_rows = R*C\r\n gemm_array_a_cols = K*K*N\r\n\r\n gemm_array_b_rows = K*K*N\r\n gemm_array_b_cols = O\r\n\r\n if(isFirst):\r\n load_IA_inst = self.gen_load_inst(\"IA\",N*W*H, self.tag)\r\n instructions.append(load_IA_inst)\r\n\r\n instructions += self.partition_and_generate_gemm_inst(gemm_array_a_rows,gemm_array_a_cols, \\\r\n gemm_array_b_rows,gemm_array_b_cols, array_size)\r\n\r\n return instructions", "title": "" }, { "docid": "04b20c642e343d3d489a17985dea82c4", "score": "0.63500065", "text": "def construct(self, x):\n residual = 
x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out", "title": "" }, { "docid": "f03ab50bc01c3c1e66085669d5f21620", "score": "0.6345483", "text": "def createConvNetFn():\n IMG_MODEL = ImagenetModel(\"/projects/korpora/mscoco/coco/imagenet-vgg-verydeep-16.mat\")\n x = tensor.tensor4(\"input\", dtype=\"float32\")\n y_hat = x\n for layer in IMG_MODEL.layers[:-1]:\n y_hat = layer.apply(y_hat)\n predict = theano.function([x], y_hat, allow_input_downcast=True)\n return predict", "title": "" }, { "docid": "b7861798979de2fbec2758ce2f2136c9", "score": "0.63427657", "text": "def conv_layer(inputs,\n activation = 'relu',\n filters = 4,\n kernel_size = (3,3),\n dropout = 0.5,\n bottleneck = True,\n bottleneck_factor = 4,\n name = 'conv'):\n out = inputs \n # Check bottleneck\n if bottleneck:\n out = Conv2D(filters=filters*bottleneck_factor,\n kernel_size=(1,1),\n strides = (1,1),\n activation = activation,\n padding='same',\n name = name+'/bottle_neck')(out)\n out = Dropout(dropout)(out)\n \n out = Conv2D(filters=filters,\n kernel_size=kernel_size,\n strides = (1,1),\n activation = activation,\n padding='same',\n name = name+'/conv')(out)\n out = Dropout(dropout)(out)\n \n return out", "title": "" }, { "docid": "3f5ed2c62f01fa8903bb160babd70d9b", "score": "0.63422185", "text": "def _create_four_conv_layers(self, input_layer, filter=256, padding_flag='same'):\n sub_layer_1 = Conv2D(filter, 3, padding=padding_flag)(input_layer)\n sub_layer_1 = self._add_batch_norm_activation(sub_layer_1)\n sub_layer_2 = Conv2D(filter, 3, padding=padding_flag)(sub_layer_1)\n sub_layer_2 = self._add_batch_norm_activation(sub_layer_2)\n sub_layer_3 = Conv2D(filter, 3, padding=padding_flag)(sub_layer_2)\n sub_layer_3 = self._add_batch_norm_activation(sub_layer_3)\n sub_layer_4 = Conv2D(filter, 3, padding=padding_flag)(sub_layer_3)\n sub_layer_4 = self._add_batch_norm_activation(sub_layer_4)\n output_layer = MaxPooling2D()(sub_layer_4)\n\n return output_layer", "title": "" }, { "docid": "a8afcfb4652908f44daf8ffbbed02ffb", "score": "0.63417304", "text": "def construct(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.deconv_layers(x)\n\n return x", "title": "" }, { "docid": "be033be0daf71bcc8a1e5f077f23558a", "score": "0.6334813", "text": "def conv_layer(input, input_channels, filter_size, filters, name):\n with tf.variable_scope(name) as scope:\n filter_shape = [filter_size, filter_size, input_channels, filters]\n # constructed filters (weights and biases), in the form of a tf.Variable\n # non-zero initialization to prevent equal weights of nodes\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.05))\n b = tf.Variable(tf.constant(0.05, shape=[filters]))\n\n # stride -> [batch, x, y, channel]\n layer = tf.nn.conv2d(input=input, filter=W, strides=[1, 1, 1, 1], padding=\"SAME\")\n # add bias after convolution\n layer += b\n return layer", "title": "" }, { "docid": "83e7364a110a5ef42221b987c611d1ec", "score": "0.6331063", "text": "def __conv1_block(input_, weight_decay=0.0):\n\n x = Conv2D(16, kernel_size=3, padding='same', use_bias=False,\n kernel_regularizer=regularizers.l2(weight_decay))(input_)\n return x", "title": "" }, 
{ "docid": "490fcdef1e4bf57d20f638eb6e2487ce", "score": "0.6328286", "text": "def __conv_block(self, x, nb_filter, dropout_rate=None, apply_batch_norm=False):\n if apply_batch_norm:\n x = BatchNormalization(axis=self.concat_axis, epsilon=1.1e-5)(x)\n\n x = Conv2D(filters=nb_filter, kernel_size=(3, 3), padding=\"same\", activation=\"relu\")(x)\n if dropout_rate:\n x = Dropout(dropout_rate)(x)\n return x", "title": "" }, { "docid": "c9c20783c3215ae005d9fe9976eaaab8", "score": "0.6323348", "text": "def _make_conv_layer(channels, use_bn=False, kernel_size=3, stride=1, padding=0):\n in_channels = channels[0]\n layers = []\n for out_channels in channels[1:]:\n layers.append(nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=stride, pad_mode=\"pad\", padding=padding))\n if use_bn:\n layers.append(nn.BatchNorm2d(out_channels))\n layers.append(nn.ReLU())\n in_channels = out_channels\n return nn.SequentialCell(layers)", "title": "" }, { "docid": "88398478701fb9196f0d7194064d5cfd", "score": "0.6320488", "text": "def __init__(self, embed_size,channel_size, kernel_size):\n\n super(CNN,self).__init__()\n\n self.embed_size = embed_size\n self.conv = nn.Conv1d(embed_size,channel_size, kernel_size)", "title": "" }, { "docid": "affbdc51af7ea702cdd13730c275bec8", "score": "0.63079", "text": "def TwoConvLayer(input_shape):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(9, activation='softmax'))\n return model", "title": "" }, { "docid": "fff2cfe98118c821ec8a54f23855937b", "score": "0.63004917", "text": "def conv2d(self, x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "ad8aaa2e6bf87bfb4769f0dcb71e6bf6", "score": "0.6298599", "text": "def Convolution2D(n_filters, FL, FLredundant, activation=None,\n init=None, W_regularizer=None, border_mode=None):\n return Conv2D(n_filters, FL, activation=activation,\n kernel_initializer=init,\n kernel_regularizer=W_regularizer,\n padding=border_mode)", "title": "" }, { "docid": "a0df162f5739411311c357222c0ae81b", "score": "0.62927514", "text": "def add_convolution(\n self,\n name,\n kernel_channels,\n output_channels,\n height,\n width,\n stride_height,\n stride_width,\n border_mode,\n groups,\n W,\n b,\n has_bias,\n is_deconv=False,\n output_shape=None,\n input_name=\"data\",\n output_name=\"out\",\n dilation_factors=[1, 1],\n padding_top=0,\n padding_bottom=0,\n padding_left=0,\n padding_right=0,\n same_padding_asymmetry_mode=\"BOTTOM_RIGHT_HEAVY\",\n **kwargs\n ):\n\n if isinstance(input_name, tuple):\n input_names = list(input_name)\n elif isinstance(input_name, list):\n input_names = input_name\n else:\n input_names = [input_name]\n spec_layer = self._add_generic_layer(name, input_names, [output_name])\n\n # Set the layer params\n spec_layer_params = spec_layer.convolution\n spec_layer_params.isDeconvolution = is_deconv\n\n if is_deconv and output_shape:\n spec_layer_params.outputShape.append(output_shape[0])\n spec_layer_params.outputShape.append(output_shape[1])\n\n spec_layer_params.outputChannels = output_channels\n spec_layer_params.kernelChannels = kernel_channels\n spec_layer_params.kernelSize.append(height)\n spec_layer_params.kernelSize.append(width)\n 
spec_layer_params.stride.append(stride_height)\n spec_layer_params.stride.append(stride_width)\n\n border_mode = (\n border_mode.lower()\n if isinstance(border_mode, str)\n else border_mode\n )\n same_padding_asymmetry_mode = (\n same_padding_asymmetry_mode.upper()\n if isinstance(same_padding_asymmetry_mode, str)\n else same_padding_asymmetry_mode\n )\n\n if border_mode == \"valid\":\n height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()\n height_border.startEdgeSize = padding_top\n height_border.endEdgeSize = padding_bottom\n width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()\n width_border.startEdgeSize = padding_left\n width_border.endEdgeSize = padding_right\n elif border_mode == \"same\":\n if not (\n same_padding_asymmetry_mode == \"BOTTOM_RIGHT_HEAVY\"\n or same_padding_asymmetry_mode == \"TOP_LEFT_HEAVY\"\n ):\n raise ValueError(\n \"Invalid value %d of same_padding_asymmetry_mode parameter\"\n % same_padding_asymmetry_mode\n )\n spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(\n same_padding_asymmetry_mode\n )\n else:\n raise NotImplementedError(\n \"Border mode %s is not implemented.\" % border_mode\n )\n\n spec_layer_params.nGroups = groups\n spec_layer_params.hasBias = has_bias\n\n # add dilation factors\n spec_layer_params.dilationFactor.append(dilation_factors[0])\n spec_layer_params.dilationFactor.append(dilation_factors[1])\n\n # If weight comes from another tensor just return\n if len(input_names) > 1:\n return\n\n # Weight assignments\n quantization = len(kwargs) > 0 and ('quantization_type' in kwargs and kwargs.get('quantization_type') is not None)\n if quantization:\n _verify_quantization_arguments(\n weight=W, output_channels=output_channels, **kwargs\n )\n\n nbits = kwargs.get(\"nbits\", 8)\n num_weights = (output_channels * kernel_channels * height * width) / groups\n if nbits < 8:\n byte_arr = _np.frombuffer(W, dtype=_np.uint8)\n W = _unpack_to_bytes(byte_arr, num_weights, nbits)\n else:\n W = _np.frombuffer(W, dtype=_np.uint8)\n\n if is_deconv:\n W = _np.reshape(\n W, (height, width, kernel_channels, output_channels / groups)\n )\n else:\n W = _np.reshape(W, (height, width, kernel_channels, output_channels))\n\n # Weight alignment: MLModel Spec requires following weight arrangement:\n # is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups\n # is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels\n if not is_deconv:\n Wt = W.transpose((3, 2, 0, 1))\n Wt = Wt.flatten()\n else:\n Wt = W.transpose((2, 3, 0, 1)).flatten()\n\n # Assign weights\n weights = spec_layer_params.weights\n if not quantization: # no quantization\n weights.floatValue.extend(Wt.flatten())\n else: # there is quantization\n W_bytes = bytes()\n if nbits == 8:\n W_bytes += Wt.flatten().tobytes()\n else:\n W_bytes += _convert_array_to_nbit_quantized_bytes(\n Wt.flatten(), nbits\n ).tobytes()\n _fill_quantized_weights(weights_message=weights, W=W_bytes, **kwargs)\n\n # Assign biases\n if has_bias:\n bias = spec_layer_params.bias\n for f in range(output_channels):\n bias.floatValue.append(float(b[f]))\n\n return spec_layer", "title": "" }, { "docid": "cd156d4564cd0cee01456b3d450e0faf", "score": "0.6285066", "text": "def conv2d_1(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 8, 1], padding='SAME')", "title": "" }, { "docid": "9408d8be52ee8f0fd7e661b3fd5a00c5", "score": 
"0.62836486", "text": "def cnn(x):\n relu = tf.nn.relu # shorthand\n keep_prob = tf.placeholder(tf.float32)\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n # Layer 1 (convolutional)\n W_conv1 = weights([5, 5, 1, 32]) # Output: 24x24x32\n b_conv1 = bias([32])\n h_conv1 = relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool(h_conv1)\n\n # Layer 2 (convolutional)\n W_conv2 = weights([5, 5, 32, 64]) # Output: 19x19x64\n b_conv2 = bias([64])\n h_conv2 = relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool(h_conv2)\n\n # Layer 3 (fully connected)\n W_fc1 = weights([7 * 7 * 64, 1024])\n b_fc1 = bias([1024])\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n h_fc1 = relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Apply dropout to prevent overfitting\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n # Last layer (fully connected)\n W_fc2 = weights([1024, 10])\n b_fc2 = bias([10])\n y_hat = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n return y_hat, keep_prob", "title": "" }, { "docid": "98e957713cc1516355398c91c7ddcf30", "score": "0.6276944", "text": "def contr_arm(input_tensor, filters, kernel_size):\n\n x = Conv2D(filters, kernel_size, padding='same')(input_tensor)\n x = bn_relu(x)\n\n x = Conv2D(filters, kernel_size, padding='same')(x)\n x = bn_relu(x)\n x = channel_layer(x)\n\n x1 = Conv2D(filters, 1, padding='same')(input_tensor)\n x1 = bn_relu(x1)\n\n x = keras.layers.add([x, x1])\n x = Activation(\"relu\")(x)\n return x", "title": "" }, { "docid": "85ac9ad6d91ca7efeab90bf89f7068e3", "score": "0.6275546", "text": "def CNN_encoder():\n\tinp = Input(shape = (224,224,3), name='Gray_Image_Input')\n\tvgg_model = VGG16(include_top=False,input_tensor=inp,input_shape=(224,224,3),weights=\"imagenet\")\n\tfor layer in vgg_model.layers:\n\t\tlayer.trainable = False\n\tblock5_pool = vgg_model.output \n\tfc1to_conv = Conv2D(filters=128,kernel_size=(7,7),name='fc1to_conv')(block5_pool)\n\tencoder_output = Flatten()(fc1to_conv)\n\tencoder_model = Model(inputs=inp, outputs=encoder_output)\n\n\treturn encoder_model", "title": "" }, { "docid": "c175bbaf1b779c54395e8459f4cd4de5", "score": "0.6263468", "text": "def Convolution(img, param, bias, fncs, channel, padding, stride):\n ##### 課題1-(a). 
畳み込み層の計算を完成させる\n image_padded = np.pad(img, [(0, 0), (padding, padding),\n (padding, padding)], 'constant')\n H, W = img.shape[1], img.shape[2]\n filter_h = param.shape[1]\n filter_w = param.shape[2]\n out_h = (H + 2*padding - filter_h)//stride + 1\n out_w = (W + 2*padding - filter_w)//stride + 1\n conv = np.zeros((channel, out_h, out_w))\n for c in range(channel):\n for i in range(out_h):\n for j in range(out_w):\n for p in range(filter_h):\n for q in range(filter_w):\n conv[c, i, j] += param[c, p, q] * image_padded[c,\n stride * (i-1)+p+1, stride*(j-1)+q+1]+bias[c]\n conv = fncs(conv)\n return conv", "title": "" }, { "docid": "03dee7a0e39bf354619f84b27b154f7b", "score": "0.6250911", "text": "def conv_layer(input_tensor, kernel_size_x, kernel_size_y,\n input_feat_maps, output_feat_maps, stride, layer_name, act=tf.nn.relu,is_training=True,use_batch_norm=True):\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([kernel_size_x,kernel_size_y,input_feat_maps,output_feat_maps])\n variable_summaries(weights, layer_name + '/weights')\n with tf.name_scope('biases'):\n biases = bias_variable([output_feat_maps])\n variable_summaries(biases, layer_name + '/biases')\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.nn.conv2d(input_tensor,weights,\n strides=[1,stride,stride,1],padding='SAME') + biases\n tf.summary.histogram(layer_name + '/pre_activations', preactivate)\n if use_batch_norm:\n with tf.name_scope('batch_norm'):\n batch_norm = batch_norm_conv(preactivate, output_feat_maps, phase_train=is_training,scope=layer_name+'_batch_norm')\n tf.summary.histogram(layer_name + '/batch_norm', batch_norm)\n else:\n batch_norm = preactivate\n if act:\n activations = act(batch_norm, name='activation')\n else:\n activations = batch_norm\n tf.summary.histogram(layer_name + '/activations', activations)\n return activations", "title": "" }, { "docid": "43ad5bef8917b76bd77f88af67a96c29", "score": "0.6246325", "text": "def create_my_cnn(Y_train):\n\n my_vgg = K.applications.vgg16.VGG16(include_top=False,\n weights='imagenet',\n input_shape=(32, 32, 3),\n classes=Y_train.shape[1])\n\n # Freezing most of the input layers\n for lyr in my_vgg.layers:\n if (lyr.name[0:5] != 'block'):\n lyr.trainable = False\n\n new_model = K.Sequential()\n new_model.add(my_vgg)\n new_model.add(K.layers.Flatten())\n new_model.add(K.layers.Dense(256,\n activation='relu',\n kernel_initializer='he_uniform'))\n new_model.add(K.layers.Dense(10, activation='softmax'))\n new_model.summary()\n\n return new_model", "title": "" }, { "docid": "7a0333986b4ab8373bd199932546849f", "score": "0.6234279", "text": "def __init__(self, in_channels, out_channels, kernel_size, downscale_factor, use_convolution=False):\n super(SuperPixel, self).__init__()\n self.downscale_factor = downscale_factor\n self.use_convolution = use_convolution\n padding = (kernel_size - 1) // 2\n self.conv_layer = nn.Conv2d(in_channels=in_channels * downscale_factor ** 2,\n out_channels=out_channels,\n kernel_size=kernel_size,\n padding=padding)", "title": "" }, { "docid": "1aee87d439c962096f26e20228a180fc", "score": "0.62304777", "text": "def conv2d(name, layer_input, stride, padding,\n kernel_size = None, input_channel = None, output_channel = None,\n add_bias = True, activation_param = None, add_dropout = False,\n weight_initializer = None, bias_initializer = None,\n param = None, ops = 
None):\n weight_name = kernel_name_conv2d(name)\n bias_name = bias_name_conv2d(name)\n if (param is not None and weight_name in param.keys()):\n weight = param[weight_name]\n else:\n weight = None\n if (param is not None and add_bias and bias_name in param.keys()):\n bias = param[bias_name]\n else:\n bias = None\n conv_op, conv_param = conv2d_layer(name, layer_input,\n stride, padding, kernel_size, input_channel, output_channel,\n weight, bias, add_bias,\n weight_initializer = weight_initializer,\n bias_initializer = bias_initializer)\n if (ops is not None):\n ops[name] = conv_op\n if (param is not None):\n param[weight_name] = conv_param['weight']\n if (add_bias):\n param[bias_name] = conv_param['bias']\n if (activation_param is not None):\n conv_op, activation_type = activation_function(conv_op,\n activation_param)\n if (ops is not None):\n ops[activation_op_name(name, activation_type)] = conv_op\n if (add_dropout):\n dropout_rate_id = dropout_rate_name(name)\n dropout_rate_ph = tf.placeholder(tf.float32, [], dropout_rate_id)\n if (ops is not None):\n ops[dropout_rate_id] = dropout_rate_ph\n conv_op = tf.nn.dropout(conv_op, 1 - dropout_rate_ph)\n dropout_name = dropout_op_name(name)\n if (ops is not None):\n ops[dropout_name] = conv_op\n return conv_op, conv_param", "title": "" }, { "docid": "6ae0d2ab6ba52986582ea5953f6e3321", "score": "0.62283623", "text": "def conv2d_block(input_tensor, n_filters, kernel_size=3):\n # first layer\n x = input_tensor\n for i in range(2):\n x = tf.keras.layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),\n kernel_initializer='he_normal', padding='same')(x)\n x = tf.keras.layers.Activation('relu')(x)\n\n return x", "title": "" }, { "docid": "cc2ea56144eb66ff489aa2a447f655fc", "score": "0.622732", "text": "def create_CNN(data, config):\n\n \n input_layer = tf.reshape(data, [-1,config.data_size,1])\n print('Input:',input_layer.shape)\n output_size = config.data_size\n\n with tf.variable_scope('conv1'):\n conv1 = tf.layers.conv1d(inputs=input_layer,filters=config.conv1_filters, kernel_size=config.conv1_kernel, padding='same',activation=tf.nn.relu)\n\n with tf.variable_scope('pool1'):\n pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=config.pool1_size, strides=config.pool1_size)\n\n output_size //= config.pool1_size\n\n with tf.variable_scope('conv2'):\n conv2 = tf.layers.conv1d(inputs=pool1,filters=config.conv2_filters, kernel_size=config.conv2_kernel,padding='same',activation=tf.nn.relu)\n\n with tf.variable_scope('pool2'):\n pool2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=config.pool2_size, strides=config.pool2_size)\n output_size //= config.pool2_size\n \n with tf.variable_scope('flatten'):\n pool2_flat = tf.reshape(pool2, [-1, int(output_size)*config.conv2_filters])\n\n with tf.variable_scope('dense'):\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n with tf.variable_scope('dropout'):\n dropout = tf.layers.dropout(inputs=dense, rate=config.dropout)\n\n with tf.variable_scope('logits'):\n logits = tf.layers.dense(inputs=dropout, units=config.num_classes)\n \n return logits", "title": "" }, { "docid": "714df7fc58bb17f0a3e4c1d655fb8f92", "score": "0.6225749", "text": "def _conv(x, filter_size, out_channel, strides, name=\"conv\"):\n in_shape = x.get_shape()\n with tf.variable_scope(name):\n # Main operation: conv2d\n kernel = tf.get_variable('kernel',\n [filter_size, filter_size, in_shape[3],\n out_channel], tf.float32,\n initializer=tf.random_normal_initializer(\n stddev=np.sqrt(\n 2.0 / 
filter_size / filter_size / out_channel)))\n if kernel not in tf.get_collection(WEIGHT_DECAY_KEY):\n tf.add_to_collection(WEIGHT_DECAY_KEY, kernel)\n if strides == 1:\n conv = tf.nn.conv2d(x, kernel, [1, strides, strides, 1],\n padding='SAME')\n else:\n kernel_size_effective = filter_size\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n x = tf.pad(x, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end],\n [0, 0]])\n conv = tf.nn.conv2d(x, kernel, [1, strides, strides, 1],\n padding='VALID')\n return conv", "title": "" }, { "docid": "ed2e30dc5895e5e2a63cfff53008877f", "score": "0.62255985", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name='convolution')", "title": "" }, { "docid": "5c931b882e919d9a032f0d360d11f018", "score": "0.62199414", "text": "def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n weight = weight_variable()\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n weight_init=weight, has_bias=False, pad_mode=\"valid\")", "title": "" }, { "docid": "5c931b882e919d9a032f0d360d11f018", "score": "0.62199414", "text": "def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):\n weight = weight_variable()\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n weight_init=weight, has_bias=False, pad_mode=\"valid\")", "title": "" }, { "docid": "4ccd1f18a394b7f90f25c7e3ea067346", "score": "0.6218711", "text": "def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n t_kernel_size=1,\n t_stride=1,\n t_padding=0,\n t_dilation=1,\n bias=True):\n super(ConvTemporalGraphical, self).__init__()\n self.kernel_size = kernel_size\n # TODO: is this multiple channels with kernel size is necessary\n self.conv = nn.Conv2d(\n in_channels,\n out_channels * kernel_size,\n kernel_size=(t_kernel_size, 1),\n padding=(t_padding, 0),\n stride=(t_stride, 1),\n dilation=(t_dilation, 1),\n bias=bias\n )", "title": "" }, { "docid": "4f03ea9f9bbe680a314db2e82df52d54", "score": "0.62172204", "text": "def __init__(self, in_channels, out_channels, downscale_factor, kernel_size=9, use_convolution=False):\n super(SuperPixel1D, self).__init__()\n self.downscale_factor = downscale_factor\n self.use_convolution = use_convolution\n padding = (kernel_size - 1) // 2\n self.conv_layer = nn.Conv1d(in_channels=in_channels * downscale_factor,\n out_channels=out_channels,\n kernel_size=kernel_size,\n padding=padding)", "title": "" }, { "docid": "320a01986854ef3bec2789500fdb8050", "score": "0.6214917", "text": "def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):\n layers = []\n layers.append(nn.Conv2d(c_in, c_out, k_size, stride, pad))\n if bn:\n layers.append(nn.BatchNorm2d(c_out))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "cf46c51649f129f5874104c59bc296e4", "score": "0.6213215", "text": "def cnn_model():\n # keras model\n action_space = np.zeros(3)\n model = keras.Sequential()\n model.add(keras.layers.InputLayer(input_shape=(8,) + INPUT_SHAPE))\n # make channels first\n model.add(keras.layers.Permute((2, 3, 1)))\n model.add(keras.layers.Convolution2D(16, (8, 8), padding='same', activation='relu'))\n model.add(keras.layers.MaxPool2D(pool_size=(2, 2)))\n model.add(keras.layers.Convolution2D(32, (4, 4), padding='same', activation='relu'))\n model.add(keras.layers.MaxPool2D(pool_size=(2, 2)))\n model.add(keras.layers.Convolution2D(64, (2, 2), padding='same', 
activation='relu'))\n model.add(keras.layers.MaxPool2D(pool_size=(2, 2)))\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(32, activation='relu'))\n model.add(keras.layers.Dense(3, activation='linear'))\n return model", "title": "" }, { "docid": "7a42cd61c1f8ec8261b03f1fde75d295", "score": "0.6212543", "text": "def convert_separable_to_conv2d(self):\n self._convert_layer(\"SeparableConv\", \"Convolutional\")\n pass", "title": "" }, { "docid": "6d6ad27ab835cf087d4f131a96083534", "score": "0.6207925", "text": "def mk_convlayer(spec, layerin, mkreg):\n \n nfilt = spec[1]\n dimfilt = spec[2]\n return tf.layers.conv2d(layerin, nfilt, dimfilt,\n padding='SAME', activation=tf.nn.relu,\n kernel_regularizer=mkreg())", "title": "" }, { "docid": "2c3508690e20128f2598349595fa92dc", "score": "0.62059885", "text": "def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size=-1,\n stride=1,\n dilation=1,\n bias=False,\n kernel_generator=None,\n expand_coordinates=False,\n dimension=None,\n ):\n MinkowskiConvolutionBase.__init__(\n self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n dilation,\n bias,\n kernel_generator,\n is_transpose=False,\n expand_coordinates=expand_coordinates,\n dimension=dimension,\n )\n self.reset_parameters()", "title": "" }, { "docid": "2f66f6127a4fd76be7303e62af3a7982", "score": "0.62036884", "text": "def conv_1x1(layer, layer_name):\n return tf.layers.conv2d(inputs = layer,\n filters = NUMBER_OF_CLASSES,\n kernel_size = (1, 1),\n strides = (1, 1),\n name = layer_name)", "title": "" }, { "docid": "69e38dd46d414622b264e397512e73d2", "score": "0.6202899", "text": "def conv2d(x, w):\n return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding=\"SAME\")", "title": "" }, { "docid": "507c227302ef4ff60f31e787a3833258", "score": "0.61993164", "text": "def build_conv(mid_size, drate, filters):\n\n layers = list()\n for layer_id, filters_ in enumerate(filters):\n if layer_id == 0: # Input layer\n layers.append(nn.Conv3d(mid_size, filters_,\n kernel_size=3,\n padding=1))\n layers.append(nn.Dropout(drate))\n layers.append(nn.LeakyReLU(negative_slope=0.1))\n elif layer_id < len(filters) - 1:\n layers.append(nn.Conv3d(previous, filters_,\n kernel_size=3,\n padding=1))\n layers.append(nn.Dropout(drate))\n layers.append(nn.LeakyReLU(negative_slope=0.1))\n else:\n layers.append(nn.Conv3d(previous, filters_,\n kernel_size=3,\n padding=1))\n previous = filters_\n return nn.Sequential(*layers)", "title": "" }, { "docid": "bfad83f13a9573dd7c8f6b2c7e6cb512", "score": "0.6199215", "text": "def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "a2a9b6e601145e5fad2f5fddd1ff7b5f", "score": "0.6197255", "text": "def conv_3d(self):\n # Model.\n model = Sequential()\n model.add(Conv3D(\n 32, (3,3,3), activation='relu', input_shape=self.input_shape\n ))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(64, (3,3,3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(128, (3,3,3), activation='relu'))\n model.add(Conv3D(128, (3,3,3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(256, (2,2,2), activation='relu'))\n model.add(Conv3D(256, (2,2,2), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Dropout(0.5))\n model.add(Dense(1024))\n model.add(Dropout(0.5))\n 
model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model", "title": "" }, { "docid": "04c1341ca42e9012c6dd28732b52e1a9", "score": "0.61949813", "text": "def _conv_block(in_channels, out_channels, act, bn, dropout, *args, **kwargs):\n modules = nn.ModuleList([nn.Conv1d(in_channels, out_channels, *args, **kwargs)])\n if bn:\n modules.append(nn.BatchNorm1d(out_channels))\n if act == 'relu':\n modules.append(nn.ReLU())\n elif act == 'sigmoid':\n modules.append(nn.Sigmoid())\n if dropout:\n modules.append(nn.Dropout(p=.2))\n\n net = nn.Sequential(*modules)\n\n # # Weight initialization\n # def init_weights(m):\n # if type(m) == nn.Conv1d:\n # torch.nn.init.kaiming_normal_(m.weight)\n # net.apply(init_weights)\n return net", "title": "" }, { "docid": "4506dbf555707a21561d26a5b96f7ba5", "score": "0.61935896", "text": "def FourConvLayer(input_shape):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n model.add(BatchNormalization())\n\n model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.25))\n\n model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n model.add(Dense(128, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n model.add(Dense(9, activation='softmax'))\n return model", "title": "" }, { "docid": "69986ef6b82bfd45632538d3a2925004", "score": "0.619161", "text": "def __init__(self, n_filters, kernel_size=2, name=None, activation=tf.nn.leaky_relu, batch_norm=False, padding=None):\n\n if batch_norm:\n name += '_bn'\n super(PaddedConv1dTransposed, self).__init__(name=name)\n\n if padding is None:\n padding = 'valid'\n self._padding_overwrite_ = False\n else:\n self._padding_overwrite_ = True\n\n self.n_filters = n_filters\n self.kernel_size = kernel_size\n\n self.padding = [[0, 0], [1, 1], [0, 0]] # adds only a zero at the end of the time-dimension\n self.conv = tfkl.Conv1D(filters=self.n_filters, kernel_size=self.kernel_size, activation=activation, padding=padding)\n\n if batch_norm:\n self.batch_norm = tfkl.BatchNormalization()\n else:\n self.batch_norm = None", "title": "" }, { "docid": "a65cc50f2af1ff3c50419159d27e3ecb", "score": "0.6189539", "text": "def build(self, input_shape):\r\n super(Conv3D, self).build(input_shape)\r\n\r\n # TODO(b/177662019): tf.nn.conv3d with depthwise kernels on CPU\r\n # in eager mode may produce incorrect output or cause a segfault.\r\n # To avoid this issue, compile the op to TF graph using tf.function.\r\n self._convolution_op = tf.function(\r\n self._convolution_op, experimental_compile=True)", "title": "" }, { "docid": "460267810e460f8f87b4b05c7919741c", "score": "0.6188089", "text": "def resnet_block(input_features, nb_features=64, kernel_size=3):\n y = Conv2D(nb_features, kernel_size=kernel_size, padding='same')(input_features)\n y = Activation('relu')(y)\n y = Conv2D(nb_features, kernel_size=kernel_size, padding='same')(y)\n \n y = Add()([y, input_features])\n y = Activation('relu')(y)\n \n return y", "title": "" }, { "docid": "1ac197113f969ff7cf16f130a3ff87f1", "score": 
"0.61852986", "text": "def conv_forward(A_prev, W, b, hparameters):\n # Retrieve dimensions from A_prev's shape\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve dimensions from W's shape \n (f, f, n_C_prev, n_C) = W.shape\n \n # Retrieve information from \"hparameters\"\n stride = hparameters[\"stride\"]\n pad = hparameters[\"pad\"]\n \n # Compute the dimensions of the CONV output volume using the formula. \n n_H = int((n_H_prev-f+2*pad)/stride)+1\n n_W = int((n_W_prev-f+2*pad)/stride)+1\n \n # Initialize the output volume Z with zeros. \n Z = np.zeros((m,n_H,n_W,n_C))\n \n # Create A_prev_pad by padding A_prev\n A_prev_pad = zero_pad(A_prev, pad)\n \n for i in range(m): # loop over the batch of training examples\n a_prev_pad = A_prev_pad[i,:,:,:] # Select ith training example's padded activation\n for h in range(n_H): # loop over vertical axis of the output volume\n for w in range(n_W): # loop over horizontal axis of the output volume\n for c in range(n_C): # loop over channels (= #filters) of the output volume\n \n # Find the corners of the current \"slice\" \n vert_start = h+h*(stride-1)\n vert_end = h+h*(stride-1)+f\n horiz_start = w+w*(stride-1)\n horiz_end = w+w*(stride-1)+f\n # Use the corners to define the (3D) slice of a_prev_pad\n a_slice_prev = a_prev_pad[vert_start:vert_end,horiz_start:horiz_end,:]\n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron.\n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:,:,:,c], b[:,:,:,c])\n\n # Making sure your output shape is correct\n assert(Z.shape == (m, n_H, n_W, n_C))\n \n # Save information in \"cache\" for the backprop\n cache = (A_prev, W, b, hparameters)\n \n return Z, cache", "title": "" }, { "docid": "f476d6308bd331951c7e85c30e836b48", "score": "0.6183348", "text": "def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):\n layers = []\n layers.append(nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=False))\n if bn:\n layers.append(nn.BatchNorm2d(c_out))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "37527c991646c135dd4c7060b80865fb", "score": "0.61825836", "text": "def _simple_conv_3d(x, k):\n y = tf.nn.conv3d(x, k, [1, 1, 1, 1, 1], padding='VALID')\n return y", "title": "" }, { "docid": "232d3081587a8350e95c6cdc6abd7f69", "score": "0.6175277", "text": "def CNN(input_shape, filters=[64, 64], filter_sizes=[5, 5], dropout=True, batch_norm=True, maxpool_size=5, activation='relu'):\n\tmodel = Sequential()\n\tmodel.add( layers.InputLayer(input_shape = (input_shape,)) )\n\tmodel.add( layers.Reshape((input_shape, 1)) )\n\n\tmodel = basic_CNN(model, filters=filters, filter_sizes=filter_sizes, dropout=dropout, batch_norm=batch_norm, activation=activation)\n\n\tmodel.add( layers.Flatten() )\n\tmodel.add( layers.Dense(128) )\n\tmodel.add( layers.Activation(activation) )\n\n\treturn model", "title": "" } ]
718f6daa719d8feb086d37b9a2ee67ff
with this hook you can do something very generic to a response after all processing.
[ { "docid": "0bd3658fd421c84cde654d2b2673beeb", "score": "0.72350335", "text": "def finalize_response(self, response):\n return response", "title": "" } ]
[ { "docid": "5096446644aa74e0296f4e3833d8e85c", "score": "0.7852177", "text": "def process_response(self, request, response):\n pass", "title": "" }, { "docid": "4f1dd571ef4f5cfee7912e94b204d348", "score": "0.76349527", "text": "def _post_process(self, response: requests.Response) -> None:\n pass", "title": "" }, { "docid": "c84200419c0020e1b01f955ea52408c2", "score": "0.75736195", "text": "def process_response(self, request, response):\n # Do something with response, possibly using request.\n\n return response", "title": "" }, { "docid": "0928739b837ae5da334a57978fdc77b7", "score": "0.7525179", "text": "def process_response(self, request, response):\n return response", "title": "" }, { "docid": "97c159a7ea894500abd427e127f915c2", "score": "0.7509945", "text": "def after_request(response):\n return response", "title": "" }, { "docid": "e4b13780c272d333377a64df91db1862", "score": "0.7427153", "text": "def after_handler(self, handler, response):", "title": "" }, { "docid": "4e524493f8574d7f0829ca025084c49f", "score": "0.73497856", "text": "def process_response(self, request, response):\n\n return response", "title": "" }, { "docid": "717db595e2b4fa31bf687f29bc935079", "score": "0.72763747", "text": "def after_request(self, name, response):\n if isinstance(response, ResourceResponse):\n return response.render()\n else:\n return response", "title": "" }, { "docid": "45d1fad381e946d1b4084ae66da95fd5", "score": "0.7232948", "text": "def process_response(self, response):\n return response", "title": "" }, { "docid": "45d1fad381e946d1b4084ae66da95fd5", "score": "0.7232948", "text": "def process_response(self, response):\n return response", "title": "" }, { "docid": "f2046d59e884d477bd003f359c8050d2", "score": "0.7136301", "text": "def handle(self, response):\n\t\tpass", "title": "" }, { "docid": "76743072f6894da0f669d06c7af234e5", "score": "0.71348846", "text": "def process_response(self, req, resp, resource):\n pass", "title": "" }, { "docid": "77e0ae0253a55e136e57bea62475659e", "score": "0.71281475", "text": "def _ProcessResponse(self):\n self._ProcessCommon()\n # future 130-specific processing.", "title": "" }, { "docid": "d423c008e60d9132d58e4d200cbd207f", "score": "0.701415", "text": "def process_response(self, body):\n raise NotImplementedError", "title": "" }, { "docid": "3edbad6d278c1a4bd72e12f6f9cc3550", "score": "0.6994964", "text": "def after_request(response):\n http_log_entry(response=response)\n return response", "title": "" }, { "docid": "9b304f0d5c79609257230fb1a6dd46d7", "score": "0.6974545", "text": "def _process_response(self, response): # pylint: disable=R0201\n return response", "title": "" }, { "docid": "9031e625e81d3ae646e85fa3d075e1e5", "score": "0.69073784", "text": "def finalize_response(\n self,\n request: Request,\n response: Response,\n *args: Any,\n **kwargs: Any,\n ) -> Response:\n if (\n request.method != \"GET\"\n and \"basket\" in request.GET\n and status.is_success(response.status_code)\n ):\n response = self.get_basket_response()\n return super().finalize_response(request, response, *args, **kwargs)", "title": "" }, { "docid": "1beecf4bc5df40dc99185b34af9bc3db", "score": "0.6855585", "text": "def on_response(self, request: Request, response: Response) -> Response:\n return response", "title": "" }, { "docid": "45cfbb05d173b7091c1aa148bdb32ec0", "score": "0.68401104", "text": "def process_response(\n self, response: ResponseType, context: Context\n ) -> ResponseType:\n return response", "title": "" }, { "docid": "dc639ee12e60f9afb554e185fec65236", "score": 
"0.67367554", "text": "def process_template_response(self, request, response):\n return response", "title": "" }, { "docid": "1051b8ec482013da63e32a4b663f20b7", "score": "0.6716907", "text": "def _after_response(self):\n if (self.return_type == 'Paged List' and\n int(self.paging_info['number_of_pages']) > 1):\n qs.logger.critical('Receieved too may responses', self._log_dict())", "title": "" }, { "docid": "70bd5dc50be178418428c708c10aa4e7", "score": "0.66954136", "text": "def handle_response(self, response):\n self.status_code = response.getcode()\n self.response = response.read()\n response.close()", "title": "" }, { "docid": "1c048164d44ac6c493d8f96d2232dd85", "score": "0.66725516", "text": "def handle_callback(self, response):\n pass", "title": "" }, { "docid": "f26bceac9241eae1cc19ff00a8dd93dd", "score": "0.6610144", "text": "def process_request(self, req, resp):\n pass", "title": "" }, { "docid": "f26bceac9241eae1cc19ff00a8dd93dd", "score": "0.6610144", "text": "def process_request(self, req, resp):\n pass", "title": "" }, { "docid": "9f545cbb8bd6c61d237cd670f0d5a0e2", "score": "0.658178", "text": "def process_response(self, req, resp, resource, req_succeeded):\n pass", "title": "" }, { "docid": "8d2f156f876cb3ee4d974e90064633f0", "score": "0.6579313", "text": "def response(self, request):\n raise NotImplementedError()", "title": "" }, { "docid": "f1141bfd1dc092d9dd2d51eac1e83cf9", "score": "0.65613925", "text": "def _handleResponse(self, response, uuid):\n\n pass", "title": "" }, { "docid": "4f88db8859fe60706e3223c85c03e511", "score": "0.65349734", "text": "def terminate_response(self):\n self.response.out.write = lambda *args: None\n self.get = lambda *args: None\n self.post = lambda *args: None", "title": "" }, { "docid": "4f88db8859fe60706e3223c85c03e511", "score": "0.65349734", "text": "def terminate_response(self):\n self.response.out.write = lambda *args: None\n self.get = lambda *args: None\n self.post = lambda *args: None", "title": "" }, { "docid": "fd96d6141cc78dedf66b1416475f726b", "score": "0.65282446", "text": "def after_request(self, name, response):\n base = {'error' : None, 'content' : {}, 'params' : dict(request.args)}\n try:\n data = ujson.loads(response.get_data())\n except ValueError:\n return response\n\n user = data.pop('user', 'unset')\n if not ('poll' in request.full_path and response.status_code == 206):\n #127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] \"GET /apache_pb.gif HTTP/1.0\" 200 2326 \"http://www.example.com/start.html\" \"Mozilla/4.08 [en] (Win98; I ;Nav) rid=aabbcc\"\n r_time = datetime.strftime(datetime.utcnow(), \"%d/%b/%Y:%H:%M:%S -0000\")\n try:\n client_ip = request.headers.getlist(\"X-Forwarded-For\")[0]\n except IndexError:\n client_ip = request.remote_addr\n logger.info('{0} - {1} [{2}] \"{3} {4} {5}\" {6} {7} \"{8}\" \"{9} rid={10}\"'.format(client_ip,\n user,\n r_time,\n request.method,\n request.full_path,\n request.environ.get('SERVER_PROTOCOL'),\n response.status_code,\n response.content_length,\n request.referrer,\n request.user_agent.string,\n request.headers.get('X-REQUEST-ID', None)))\n base.update(data)\n response.set_data(ujson.dumps(base))\n response.headers['Content-Type'] = 'application/json'\n response.headers.add('Link', '<{0}{1}?describe=true>; rel=help'.format(const.VLAB_URL, self.route_base))\n return response", "title": "" }, { "docid": "af8e9e09779bfbef7d7873459e11dd44", "score": "0.6493371", "text": "def handle_resp(self, orig_cont, rq):\n raise NotImplementedError()", "title": "" }, { "docid": 
"5e0947fdfb73a88aefae90e0951ba16b", "score": "0.64489007", "text": "def responseComplete(self):", "title": "" }, { "docid": "9a5095e9c4b6025715dde839dace8d92", "score": "0.63816017", "text": "def response_callback(resp, *args, **kwargs):\n pass", "title": "" }, { "docid": "be67ce82eff7e50031284c8661182a92", "score": "0.6354188", "text": "def onResponse(self, client, tid, response):\n pass", "title": "" }, { "docid": "162cd29ce0c6a49b5bb70edbf3f02445", "score": "0.63433677", "text": "def handle_response(self, response):\n super(TaskDetailsRequest, self).handle_response(response)\n if self.status_code == 200:\n self.response = json.loads(self.response)", "title": "" }, { "docid": "d7ac5f1f076f97ca397bf8dcd79860ab", "score": "0.6321134", "text": "def after_request_func(response):\n\n print(\"after_request is running!\")\n return response", "title": "" }, { "docid": "3a11aa2eb811d423039bcea737a43d5e", "score": "0.62605166", "text": "def customize_response(self, *args, **kwargs):\n resp = func(self, *args, **kwargs)\n if isinstance(resp, requests.models.Response):\n return resp\n for key, val in resp.iteritems():\n if key != 'status':\n time_str = datetime.strftime(datetime.now(),\n settings.DATETIME_MICRO_FORMAT)\n if isinstance(val, list):\n for obj in val:\n if isinstance(obj, dict):\n obj['fetch_time'] = time_str\n elif isinstance(val, dict):\n val['fetch_time'] = time_str\n content = {\"response\": {\"status\": \"\"},\n \"service\": {\"version\": settings.APP_VERSION}}\n content.get(\"response\").update(resp)\n return content", "title": "" }, { "docid": "26c07ba57a21dbbfd77c38471aa36318", "score": "0.6259748", "text": "def _handle_response(self):\n content = self.cache['restbase']['info']['content-type']\n if content.startswith('text/html'):\n html = self.cache['restbase']['response']\n if isinstance(html, bytes):\n html = html.decode('utf-8')\n self.data['html'] = html\n return\n\n response = self._load_response('restbase')\n\n http_status = self.cache['restbase']['info']['status']\n if http_status == 404:\n raise LookupError(self.cache['restbase']['query'])\n\n if self.params.get('endpoint') == '/page/':\n msg = \"RESTBase /page/ entry points: %s\" % response.get('items')\n utils.stderr(msg)\n del self.cache['restbase']\n return\n\n return response", "title": "" }, { "docid": "fddcc3e2599ede06ea32cfbe43566946", "score": "0.6248427", "text": "def process_response(self, request, response): # pylint: disable=unused-argument\n self._batch_report()\n return response", "title": "" }, { "docid": "e4dd0b0ce807cc27ddfde26de8ae4973", "score": "0.6240409", "text": "def handle_response(self, response):\n super(TaskTransitionsRequest, self).handle_response(response)\n if self.status_code == 200:\n self.response = json.loads(self.response)", "title": "" }, { "docid": "c73a777ca7dd0bcdf0cf81a0c4f77273", "score": "0.6239022", "text": "def returnResponseToHandler(func):\n def customize_response(self, *args, **kwargs):\n \"\"\"Return or create custom response.\"\"\"\n resp = func(self, *args, **kwargs)\n if isinstance(resp, requests.models.Response):\n return resp\n for key, val in resp.iteritems():\n if key != 'status':\n time_str = datetime.strftime(datetime.now(),\n settings.DATETIME_MICRO_FORMAT)\n if isinstance(val, list):\n for obj in val:\n if isinstance(obj, dict):\n obj['fetch_time'] = time_str\n elif isinstance(val, dict):\n val['fetch_time'] = time_str\n content = {\"response\": {\"status\": \"\"},\n \"service\": {\"version\": settings.APP_VERSION}}\n content.get(\"response\").update(resp)\n 
return content\n return customize_response", "title": "" }, { "docid": "3d447e61876fe8c7cef65d8f4010986a", "score": "0.62353104", "text": "def react(self, response):\n pass", "title": "" }, { "docid": "31e8c1a426d39c76bf58796cc8af063d", "score": "0.62312853", "text": "def process_response(self, id, result):\r\n raise NotImplementedError('process_response not implemented in BaseService')", "title": "" }, { "docid": "b8bafc703519cbc77ba415097430f1f1", "score": "0.62236917", "text": "async def process_response(self, spider, request, response):\n middleware_result = await self._middleware_process_response(spider, request, response)\n if isinstance(middleware_result, http.Response):\n response = middleware_result\n elif not middleware_result:\n return\n\n await self.signals.send(signals.response_received, spider=spider, request=request, response=response)\n\n callback = response.callback or spider.process_response\n\n try:\n await self._execute_spider_callback(spider, callback, response)\n except:\n await self.signals.send_async(signals.spider_error, spider=spider, response=response,\n exc_info=sys.exc_info())\n self.logger.exception('Error while executing callback %s, spider %s, %s %s',\n callback.__name__, spider, response.method, response.url)", "title": "" }, { "docid": "9dd65b03d35cab611766d2c5fa41c095", "score": "0.62151885", "text": "def after_request(response):\n api_logger = logging.getLogger(\"app.api\")\n api_logger.info(\n \"%s %s %s %s\", request.remote_addr, request.method, request.path, response.status\n )\n return response", "title": "" }, { "docid": "4bfb3d968a187d95b6052bf3a625c91b", "score": "0.61800027", "text": "def handleResponsePart(self, data):\n \n self.factory.handleResponse(data.strip())", "title": "" }, { "docid": "eb700f3f88435221208a0adbdb9136fa", "score": "0.61770403", "text": "def processResponseBody(self, data):\n pass", "title": "" }, { "docid": "b58b96438ff8eba1b682ab854c5dcd23", "score": "0.61648315", "text": "def process_request(self, request):", "title": "" }, { "docid": "4f944acbcead53dca2ac289def2e8821", "score": "0.61628085", "text": "def _had_response(self):\n pass", "title": "" }, { "docid": "955d3de4f3d107550e0c4f7a0f45a8ff", "score": "0.6160812", "text": "def __after_get__(self, request: Request) -> None:", "title": "" }, { "docid": "926f80536716995a026163fc3ed840e5", "score": "0.61593986", "text": "def process_response(self, request, response):\n if 'countthis' in request.META:\n index = response.content.upper().find('</BODY>')\n\n if index != -1:\n countthisbug = self.generate_bug(request.META['countthis'])\n response.content = response.content[:index] + countthisbug + response.content[index:]\n\n return response", "title": "" }, { "docid": "1158103e7f4d2d7061242a7b05da15a7", "score": "0.6152583", "text": "def _on_response(self, response):\r\n try:\r\n # Does the session want to invoke any relevant hooks?\r\n # This allows a session to detect problems in the session and\r\n # abort the operation.\r\n if hasattr(self._session, \"_on_http_grid_response\"):\r\n self._session._on_http_grid_response(response)\r\n\r\n # Process the HTTP error, if any.\r\n if isinstance(response, AsynchronousException):\r\n response.reraise()\r\n\r\n # If we're expecting a raw response back, then just hand the\r\n # request object back and finish here.\r\n self._file_like_object = io.StringIO(response.body.decode(\"UTF-8\"))\r\n df = pd.read_csv(self._file_like_object)\r\n self._state_machine.response_ok(result=df)\r\n return\r\n\r\n except: # Catch all exceptions for the 
caller.\r\n self._log.debug(\"Parse fails\", exc_info=1)\r\n self._state_machine.exception(result=AsynchronousException())", "title": "" }, { "docid": "638f8edca8480ae9d915961b03e2bdb9", "score": "0.61452866", "text": "def after_request(response: Response) -> Response:\n try:\n access_logger = getLogger(\"access\")\n request_method = request.method\n request_headers = dict(request.headers)\n request_url = f\"{request.scheme}://{request.remote_addr}{request.full_path}\"\n request_payload = json.loads(request.get_json()) if request.get_json() else \"{}\"\n\n response_headers = dict(response.headers)\n response_payload = json.loads(response.response[0])\n response_status = response.status\n message = (\n f\"{request_method} | {request_url} | {response_status} | {request_headers} \"\n f\"| {request_payload} | {response_headers} | {response_payload}\"\n )\n access_logger.info(message)\n except Exception as e:\n exception_handler(e)\n\n return response", "title": "" }, { "docid": "8e4940f3526ba8e9f10b3c49e809aa82", "score": "0.614155", "text": "def response( self ):\n pass", "title": "" }, { "docid": "305efe24e803a00a82b6b956fc1f9a9e", "score": "0.61180043", "text": "def process_response(self, request, response):\n for mw in reversed(self.middlewares):\n rv = mw.process_response(request, response)\n if rv is not None:\n return rv\n return response", "title": "" }, { "docid": "54522a47b1f5f3f56fd01eb5162223d1", "score": "0.6099178", "text": "def _process_response(self, response, action_result):\n\n # store the r_text in debug data, it will get dumped in the logs if the action fails\n if hasattr(action_result, 'add_debug_data'):\n action_result.add_debug_data({'r_status_code': response.status_code})\n action_result.add_debug_data({'r_text': response.text})\n action_result.add_debug_data({'r_headers': response.headers})\n\n # Process each 'Content-Type' of response separately\n\n # Process a json response\n if 'json' in response.headers.get('Content-Type', ''):\n return self._process_json_response(response, action_result)\n\n if 'text/javascript' in response.headers.get('Content-Type', ''):\n return self._process_json_response(response, action_result)\n\n # Process an HTML response, Do this no matter what the API talks.\n # There is a high chance of a PROXY in between phantom and the rest of\n # world, in case of errors, PROXY's return HTML, this function parses\n # the error and adds it to the action_result.\n if 'html' in response.headers.get('Content-Type', ''):\n return self._process_html_response(response, action_result)\n\n # it's not content-type that is to be parsed, handle an empty response\n if not response.text:\n return self._process_empty_response(response, action_result)\n\n # everything else is actually an error at this point\n message = \"Can't process response from server. 
{0}\".format(\n MS_AZURE_ERR_MSG.format(status_code=response.status_code, err_msg=response.text.replace('{', '{{').replace('}', '}}')))\n\n return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)", "title": "" }, { "docid": "fb4fca076c7957aa3e01bbe28d866ac9", "score": "0.60929155", "text": "def alterResponse(self):\n\t\traise NotImplemented", "title": "" }, { "docid": "1362e879f0b5235f6e892ccdea189121", "score": "0.60820997", "text": "def after_static_request(self, url: str, response):", "title": "" }, { "docid": "ba56d71018944dc64633d1518a788303", "score": "0.60744375", "text": "def response(self, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "316b511ba0812eaec0f1cc35ef9fd0b4", "score": "0.60734725", "text": "def hook_post_receive(response: str, silence_errors: bool) -> str:\n return response", "title": "" }, { "docid": "72f3566d662fb3ba54e27f009255c1b2", "score": "0.6072947", "text": "def do_after_activation(self, response):\n for func in reversed(self.after_activation_funcs):\n resp = func(response)\n if resp is not None:\n response = resp\n return response", "title": "" }, { "docid": "99eedc835f22043bd1a65469c625d035", "score": "0.6071462", "text": "def get_response(self, request):\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "57ee1ea3dbbef9848f13444ea0a474f9", "score": "0.6029054", "text": "def add_response(self, resp):\n if not resp or not issubclass(resp, BaseResponse):\n raise TypeError(\"custom response must be subclass of `pre_request.BaseResponse`\")\n\n self.response = resp", "title": "" }, { "docid": "b7951db1368b6d9d5882a50b365b3fd5", "score": "0.60062", "text": "def process(self, response):\n super().process(response)\n self.process_properties()\n return self.update_and_return_response()", "title": "" }, { "docid": "1dd6f7b7fd537bd293aab6c20683be0e", "score": "0.6001716", "text": "def process_request(self, request):\n pass", "title": "" }, { "docid": "1dd6f7b7fd537bd293aab6c20683be0e", "score": "0.6001716", "text": "def process_request(self, request):\n pass", "title": "" }, { "docid": "f0c1153cf3370b46123bcd0fe14a8ea4", "score": "0.5988644", "text": "def on_response(\n self,\n request: PipelineRequest[HTTPRequestType],\n response: PipelineResponse[HTTPRequestType, HTTPResponseType],\n ) -> Union[None, Awaitable[None]]:", "title": "" }, { "docid": "72623ef0c87c0502e5f3761e93d0bbb6", "score": "0.5973529", "text": "def on_response_callback(response):\n response_received.append(response)", "title": "" }, { "docid": "90900d8b9a1570167fdb1f3a6af50772", "score": "0.5971281", "text": "def post_processing(self):\n\t\tpass", "title": "" }, { "docid": "231c26e46e4c9a61cfe1fa6bb840b0d1", "score": "0.5963052", "text": "def response(self):\n pass", "title": "" }, { "docid": "3cf0d1410a94f75ba42aac018776c225", "score": "0.5961588", "text": "def handle(self, response):\n assert threading.current_thread().name == \"MainThread\", \\\n \"KoCodeIntelService.handle() should run on main thread!\"\n self.debug(\"handling: %s\", json.dumps(response))\n req_id = response.get(\"req_id\")\n if not req_id:\n # unsolicited response, look for a handler\n try:\n command = str(response.get(\"command\", \"\"))\n if not command:\n raise ValueError(\"Invalid response frame %s\" % (\n json.dumps(response),))\n meth = getattr(self, \"do_\" + command.replace(\"-\", \"_\"), None)\n if not meth:\n raise ValueError(\n \"Unknown unsolicited response \\\"%s\\\"\" % (command,))\n meth(response)\n except:\n log.exception(\"Error handling 
unsolicited response\")\n return\n callback, request, sent_time = self.requests.get(\n req_id, (None, None, None))\n if not request:\n try:\n log.error(\n \"Discard response for unknown request %s (command %s): have %s\",\n req_id, response[\"command\"],\n sorted(self.requests.keys()))\n except KeyError:\n log.error(\n \"Discard response for unknown request %s (%r): have %s\",\n req_id, response,\n sorted(self.requests.keys()))\n return\n command = request.get(\"command\", \"\")\n assert response.get(\"command\", command) == command, \\\n \"Got unexpected response command %s from request %s\" % (\n response.get(\"command\"), command)\n if \"success\" in response:\n self.debug(\"Removing completed request %s\", req_id)\n del self.requests[req_id]\n else:\n # unfinished response; update the sent time so it doesn't time out\n self.requests[req_id] = (callback, request, time.time())\n\n if callback:\n callback(request, response)", "title": "" }, { "docid": "969fea330e39ce2d4342aab542b1b112", "score": "0.5961303", "text": "def _handle_response(self, response):\n if isinstance(response, Exception):\n raise response\n wandb.termerror('Droppped streaming file chunk (see wandb/debug.log)')\n logging.error(\"dropped chunk %s\" % response)\n elif response.json().get(\"limits\"):\n parsed = response.json()\n self._api.dynamic_settings.update(parsed[\"limits\"])", "title": "" }, { "docid": "35445495c463d3f8a430776032572292", "score": "0.5953586", "text": "def process_response(self, request, response):\n if not self.should_cache(request, response):\n # We don't need to update the cache, just return\n return response \n\n response = self.patch_headers(response)\n self.set_cache(request, response)\n\n return response", "title": "" }, { "docid": "1e24aecee3e6d33a000868c0fcfa19bc", "score": "0.59449106", "text": "def handle(self, request):", "title": "" }, { "docid": "b3aab360a49181583025c6756434ce68", "score": "0.59439594", "text": "def handle_response(self, response):\n headers, content = response\n\n # Temporary hack to catch Gremlin Plugin exceptions that return 200 status\n # See https://github.com/neo4j/community/issues/343\n # Example: '\"java.lang.IllegalArgumentException: Unknown property type on...\"'\n if re.search(b\"^\\\"java.(.*).Exception:\", content):\n # raise error...\n server_error(response)\n \n response_handler = RESPONSE_HANDLERS.get(headers.status)\n response_handler(response)", "title": "" }, { "docid": "49ae038f32e415ef9546f8bad24c8bb4", "score": "0.5924775", "text": "def after_request(response):\n print(\"you should see this after each request\") # optional -- to illustrate that this code runs after each request\n models.DATABASE.close()\n return response # go ahead and send response back to client\n # (in our case this will be some JSON)", "title": "" }, { "docid": "1803f28ad8355723552d0f2d779e412a", "score": "0.59242296", "text": "def _handle_response(self, resp):\n\n self.headers['response'] = resp.headers\n\n # Roll over request to prepare for new one\n self._reset_uri()\n\n # 200 - ok, 201 - created\n if resp.status_code != 200 and resp.status_code != 201:\n if (resp.status_code == 304):\n return []\n else:\n raise APIHTTPError(resp.status_code, self.requested_uri)\n\n if \"json\" == self.req_format:\n self.response = json.loads(resp.content.decode('utf8'))\n self.response_json = json.dumps(self.response)\n else:\n self.response = resp.content.decode('utf8')\n\n return self", "title": "" }, { "docid": "7015fb66a1598b4a3c6f0a4bd7fe3a18", "score": "0.59222126", "text": "def 
post_call(self, ctxt, result, action, post_mod, pre_mod):\n\n return result", "title": "" }, { "docid": "c896be62db54042ee68429b808a9f69d", "score": "0.5918553", "text": "def handle_success(\n response: Any, response_holder: ResponseHolder, event: threading.Event\n):\n\n response_holder.success = response\n event.set()", "title": "" }, { "docid": "b6e4460b63507c00305dec0e500375c4", "score": "0.5917617", "text": "def response(self, response):\n\n self._response = response", "title": "" }, { "docid": "a9277848d3c944ca698196910f204635", "score": "0.5911263", "text": "def response_hook(optional=True):\n def _hook(hook):\n return APIHooks().add_response_hook(hook, optional)\n return _hook", "title": "" }, { "docid": "0905631a0e64c65a96fe0e795489eb08", "score": "0.59000134", "text": "def set_response(self, response):\n self.response = response", "title": "" }, { "docid": "7578a9b0e4b387519a6f40750815020b", "score": "0.58935404", "text": "def response(self, code):\n\t\tpass", "title": "" }, { "docid": "559ea9c19d6d52a3410451e298f71a90", "score": "0.5893341", "text": "def after_request(\n self, region: str, endpoint_name: str, method_name: str, url: str, response\n ):", "title": "" }, { "docid": "657f9031e3fed5f2a4b1c3bf9aaf98a7", "score": "0.5880931", "text": "def chunkResponseEnded(self, rest):\n\t\tself._responseDecoder = None\n\t\tself._chunkProcessing = False\n\t\tlogger.debug('%s: CHUNKED -> Off' % self)\n\t\tif rest:\n\t\t\tlogger.error('%s: REST_DATA_AFTER_CHUNK (len=%s)' % (self, len(rest)))\n\t\tself.handleResponseEnd()", "title": "" }, { "docid": "c92e7f4df12cd07634d2013bfad928e0", "score": "0.5869255", "text": "def after_request(response):\n # Set a custom header\n try:\n response.headers['X-Boe-Header'] = Configurator().boe_header\n except:\n pass\n\n return response", "title": "" }, { "docid": "c14826e2ae002e8c253436b771313beb", "score": "0.5867032", "text": "def parse_response(self, response: Any) -> Any:\n pass", "title": "" }, { "docid": "e74c8a7959443038f3154fb1370817e5", "score": "0.5864144", "text": "def on_response(self, event):\n request_handler = event.get('request_handler')\n\n if not hasattr(request_handler, 'run'):\n return\n\n content_type = request_handler.get_header('Content-Type')\n\n if len(content_type) < 9 or content_type[0:9] != 'text/html':\n return\n\n if request_handler.request.headers.get('Surrogate-Capability'):\n return\n\n if request_handler.get_query_argument('_element', default=False) == 'no-debug':\n return\n\n ## inject the toolbar to the response\n content = self.templating.get_template('element.plugins.profiler:profiler/toolbar_js.html').render({\n 'token': str(request_handler.run.id)\n })\n\n chunk = request_handler.get_chunk_buffer().decode('utf-8')\n request_handler.reset_chunk_buffer()\n\n k = chunk.rfind('</body>')\n\n request_handler.write(chunk[:k])\n request_handler.write(content)\n request_handler.write(chunk[k:])\n\n for name, collector in self.profiler.collectors.iteritems():\n collector.on_response(request_handler, request_handler.run)", "title": "" }, { "docid": "a63270d1d884de7ab6069f833ed847d9", "score": "0.58549845", "text": "def _process_response(self, r, action_result):\n\n if hasattr(action_result, 'add_debug_data'):\n action_result.add_debug_data({'r_status_code': r.status_code})\n action_result.add_debug_data({'r_text': r.text})\n action_result.add_debug_data({'r_headers': r.headers})\n\n if 'json' in r.headers.get('Content-Type', ''):\n return self._process_json_response(r, action_result)\n\n if 'html' in 
r.headers.get('Content-Type', ''):\n return self._process_html_response(r, action_result)\n\n if not r.text:\n return self._process_empty_response(r, action_result)\n\n message = \"Can't process response from server. Status Code: {0} Data from server: {1}\".format(\n r.status_code,\n self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))\n )\n\n return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)", "title": "" }, { "docid": "16a1be7182977ba376cb668ecb523ba9", "score": "0.5843436", "text": "def __call__(self, request):\n Application.current_app = self\n self.request = request\n\n try:\n request.auto_finish = True\n result = self.route_request(request)\n if request.auto_finish:\n self.parse_output(result)\n\n except Exception as err:\n # This should hopefully never happen, but it *could*.\n try:\n body, status, headers = self.handle_500(request, err)\n except Exception:\n # There's an error with our handle_500.\n log.exception(\"There was a problem handling a request, \"\n \"and a problem running Application.handle_500 \"\n \"for %r.\" % self)\n body, status, headers = error(500, request=request)\n\n # If an exception happens at *this* point, it's destined. Just\n # show the ugly page.\n\n if not 'Content-Length' in headers:\n headers['Content-Length'] = len(body)\n\n request.send_status(status)\n request.send_headers(headers)\n request.write(body)\n request.finish()\n\n finally:\n if hasattr(request, '_hooks'):\n hks = request._hooks.get('request_teardown')\n if hks:\n for hf in hks:\n try:\n hf(request)\n except Exception as err:\n # Log the exception, but continue.\n log.exception(\"There was a problem handling a \"\n \"request teardown hook for: %r\",\n request)\n\n if hasattr(request, '_converted_match'):\n del request._converted_match\n\n Application.current_app = None\n self.request = None", "title": "" }, { "docid": "df13e11269b3d5dd0c63213960e178dd", "score": "0.5831189", "text": "def _after_request(self, response):\n # Do not trace if the url is blacklisted\n if utils.disable_tracing_url(flask.request.url, self.blacklist_paths):\n return response\n\n try:\n tracer = execution_context.get_opencensus_tracer()\n tracer.add_attribute_to_current_span(\n HTTP_STATUS_CODE,\n str(response.status_code))\n except Exception: # pragma: NO COVER\n log.error('Failed to trace request', exc_info=True)\n finally:\n return response", "title": "" }, { "docid": "f326b13aef8cba120ebdf06ca3860e8c", "score": "0.58268654", "text": "def after_request(response):\n status = response.status\n if status[0] == '2':\n # new_token = generate_token()\n token = request.headers.get('token', generate_token())\n\n # update_token(cursor, connection, token, new_token)\n response.headers[\"token\"] = token\n\n return response", "title": "" }, { "docid": "126880130263a3c5e6bbd530ba05329b", "score": "0.5824949", "text": "def on_get(self, req, resp):\n self.build_response(req, resp)", "title": "" }, { "docid": "29b0ddeca2a4ec407efdc255226eb54c", "score": "0.5813653", "text": "def after_request(response):\n clean_path = create_clean_request_path()\n request = get_request()\n\n # Count number of times request is called\n METRICS_REQUEST_COUNT.labels(\n request.method, clean_path, response.status_code\n ).inc()\n\n # Count latency of request\n request_latency = time.time() - request._prometheus_metrics_request_start_time\n if not math.isinf(request_latency) and not math.isnan(request_latency):\n METRICS_REQUEST_LATENCY.labels(request.method, clean_path).observe(\n 
request_latency\n )\n else:\n print(request_latency)\n return response", "title": "" }, { "docid": "bccfc892fad25e3f50985bce6b0bb2b3", "score": "0.58087206", "text": "def set_http_response(step, r):\n\n step.context.api = dict()\n step.context.api['response'] = dict()\n step.context.api['response']['status'] = r.status_code\n step.context.api['response']['text'] = r.text\n\n try:\n step.context.api['response']['json'] = r.json()\n except ValueError:\n # No JSON no problem, should be by design.\n pass\n\n # Clears any existing HTTP request headers to prevent poisoning the next request.\n global http_headers_to_send\n http_headers_to_send = dict()", "title": "" }, { "docid": "386b5631938ca19b7b9f78d0adae793c", "score": "0.5807492", "text": "def response(self, value):\n self.__response = value\n self.__executed = True\n if self.response is None:\n logger.error('response is none')\n return\n self.response.encoding = self.__encoding\n logger.info(self.response.text)\n if self.__data_type == DataType.JSON:\n self.__data = self.response.json()\n elif self.__data_type == DataType.TEXT:\n self.__data = self.response.text\n else:\n self.__data = self.response.content", "title": "" }, { "docid": "f999401f58721209459e49a306ce2b3e", "score": "0.57931805", "text": "def handle_response(response):\n if isinstance(response, Exception):\n print(response)\n return response\n else:\n return response", "title": "" }, { "docid": "4d319586ce1cd3744d8f8498b5354201", "score": "0.57931", "text": "def on_post(self, req, resp):\n self.on_get(req, resp)", "title": "" }, { "docid": "12bf5fd96fdd6bd2508f72be4da02a76", "score": "0.5786451", "text": "def handle_response(self, client, response, status, data):\n queue_time = (\n self.queue_end_time - self.queue_start_time).total_seconds()\n request_time = (\n self.request_end_time - self.request_start_time).total_seconds()\n logging.info('Url: %s. Method: %s. Params: %s. Status: %s. Data: %s. '\n 'Queue Time: %s. Request Time: %s.', self.url,\n self.method, str(self.params), status, data, queue_time,\n request_time)", "title": "" } ]
97acf9a4daf2ff4a1fa2ca4a7cafbccb
Function to receive json
[ { "docid": "5c6438353ee5cedec71e2dcd9d6ef8a1", "score": "0.5765529", "text": "def receiveJson(sock):\n buffer = sock.recv(4)\n if buffer:\n print('Data received')\n json_length = struct.unpack(\"!i\", buffer)[0]\n # Reference: https://stackoverflow.com/a/15964489/9798310\n buffer = bytearray(json_length)\n view = memoryview(buffer)\n while json_length:\n nbytes = sock.recv_into(view, json_length)\n view = view[nbytes:]\n json_length -= nbytes\n\n json_string = buffer.decode(\"utf-8\")\n return json.loads(json_string)", "title": "" } ]
[ { "docid": "dfb868339faad755f062796bbcfa141b", "score": "0.78848916", "text": "def read_json():", "title": "" }, { "docid": "30707fc438c9e879ada201232f998f06", "score": "0.70540917", "text": "def json(self):", "title": "" }, { "docid": "a2b5a2de1b87e4397e2128a5226af038", "score": "0.695341", "text": "def receiveJSON(self):\n\n data = self.getRequest()\n data += self.getRestOfRequest()\n try:\n return json.loads(data)\n except json.JSONDecodeError as e:\n log(\"Decode error when receiving json: \" + str(e))\n return None", "title": "" }, { "docid": "610ba65092b5b1f0837522abf2ed11f7", "score": "0.66711223", "text": "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "d87e6652aa76599898f60d706fb6653d", "score": "0.6606561", "text": "def get_json(url):\n f = urllib2.urlopen(url)\n #response_text = f.read()\n #response_data = json.loads(response_text)\n #return response_data[stop][0]\n return json.loads(f.read())\n pass", "title": "" }, { "docid": "2538fdf4664683c2bdf79f826b6b90bc", "score": "0.66023225", "text": "def from_json(json):", "title": "" }, { "docid": "94e8ea80062f5182c8fba9e672ab92f5", "score": "0.65253246", "text": "def jsondata():\n payload = web.data().decode(\"utf-8\")\n return json.loads(payload)", "title": "" }, { "docid": "b5a7a3ad7accc0b60d073c76073b5261", "score": "0.6524833", "text": "def input_fn(request_body: str, content_type: str): \n print('Deserializing the input data.')\n\n if content_type == JSON_CONTENT_TYPE:\n data = json.loads(request_body)\n return data\n\n raise Exception('Requested unsupported ContentType in content_type: {}'.format(content_type))", "title": "" }, { "docid": "b64eeb9028bbe57db0d31dbf2a755cee", "score": "0.65107", "text": "def RequestJSON(self):\n if self.request.body:\n return json.loads(self.request.body)", "title": "" }, { "docid": "17e815bb00a3e110c74e483bb44e9283", "score": "0.6503726", "text": "async def receive_json(self):\n result = await self.receive()\n if isinstance(result, bytes):\n result = result.decode()\n return json.loads(result)", "title": "" }, { "docid": "dfead469b15d4534f16e8a6c36279d72", "score": "0.64488935", "text": "def receive(self):\n with open(self.path_to_json_file) as data_file:\n json_to_dict = json.load(data_file)\n\n if isinstance(json_to_dict, dict):\n return json_to_dict\n else:\n raise Exception(\"Errors found at the json file to import!\")", "title": "" }, { "docid": "d2af6293c556ac12593367924a61f3b8", "score": "0.63886684", "text": "def json(self):\r\n if 'json' not in self.environ.get('CONTENT_TYPE', ''):\r\n raise BadRequest('Not a JSON request')\r\n try:\r\n return loads(self.data)\r\n except Exception:\r\n raise BadRequest('Unable to read JSON request')", "title": "" }, { "docid": "612bb516a5d40b570c88a2995b24cd87", "score": "0.6344149", "text": "def get_data(self):\n return json.loads(self.request.body.decode('utf-8'))", "title": "" }, { "docid": "0b7bbdb4e6f047224ffe705e39ba9a0d", "score": "0.6343926", "text": "def get(self, *args, **kwargs):\n string = super().get(*args, **kwargs)\n return json.loads(string)", "title": "" }, { "docid": "c09ea49334e5ccdf8a4114c6968c4082", "score": "0.6311027", "text": "def load_json(self):\n try:\n self.request.arguments = json.loads(self.request.body)\n except ValueError:\n msg = \"Could not decode JSON: %s\" % self.request.body\n logger.debug(msg)\n raise tornado.web.HTTPError(400, msg)", "title": "" }, { "docid": "e6b6b4fb324be70c53d9983ecf778906", "score": "0.62970424", "text": "def _raw_load_json(self, 
req: falcon.Request):\n if not is_json_request(req) or req.content_length in (None, 0):\n return core.missing\n body = req.stream.read(req.content_length)\n if body:\n return core.parse_json(body)\n return core.missing", "title": "" }, { "docid": "8cfdba0e97046dc358c273ab2664dde4", "score": "0.62749326", "text": "def get_json_data(self,url):\n response = requests.get(url)\n return response.json()", "title": "" }, { "docid": "25cd3f881482d7710d497f48f761ccf1", "score": "0.62726593", "text": "def json(self):\n return self.json_data", "title": "" }, { "docid": "25cd3f881482d7710d497f48f761ccf1", "score": "0.62726593", "text": "def json(self):\n return self.json_data", "title": "" }, { "docid": "25cd3f881482d7710d497f48f761ccf1", "score": "0.62726593", "text": "def json(self):\n return self.json_data", "title": "" }, { "docid": "82d785c0892540f96531e02b50db1251", "score": "0.6254141", "text": "def json(self):\n return self.get(\"json\")", "title": "" }, { "docid": "f73ec0900554ac5d53679233863b4266", "score": "0.62458086", "text": "def handleRequest(self, json):\n\n if self.type == \"person\":\n return self.handlePersonRequest(json)\n\n elif self.type == \"co2\":\n return self.handleCO2Request(json)", "title": "" }, { "docid": "b019d82774c1efff20dd5b6f2485699b", "score": "0.62108713", "text": "def sendRequest(url, data):\r\n result = urllib2.urlopen(url, data).read()\r\n jres = json.loads(result)\r\n return jres", "title": "" }, { "docid": "4b3306a3dd59e2faf4fd21444767ba1a", "score": "0.6206903", "text": "def _do_json_method(self, method, url, *args, **kwargs):\n\t\tdata, headers = method(url, *args, **kwargs)\n\t\tif data:\n\t\t\tif not data.startswith(self._JSON_PREFIX):\n\t\t\t\tlogger.error(\"JSON response from Gerrit URL %s does not begin with \" \"%r: %s\", url, self._JSON_PREFIX, data)\n\t\t\telse:\n\t\t\t\tdata = json.loads(data[self._JSON_PREFIX_LENGTH :].decode(\"utf-8\"))\n\t\treturn data, headers", "title": "" }, { "docid": "dfce8a59774d0ebbc4ba9d4e26747105", "score": "0.61909497", "text": "def json(text):\n return jsonlib.loads(text)", "title": "" }, { "docid": "dfb2d4997d15c6b6b51bb3e1d263e78f", "score": "0.61885035", "text": "def retrieve_json(self):\n\n\t\ttry:\n\t\t\tresponse = urllib.request.urlopen(self.url + self.param)\n\t\t\tpayload = response.read()\n\t\t\tjsonPayload = json.loads(payload.decode('utf-8'))\n\t\t\tcurlFeed = CurlFeed(\"App1\", \"Success\", \"Retrieved JSON payload from URL\")\n\t\t\tcurlFeed.send()\n\t\t\treturn jsonPayload\n\t\texcept:\n\t\t\t# Catch all exceptions\n\t\t\te = sys.exc_info()[0]\n\t\t\tprint(\"Error:%s\"%e)\n\t\t\tjsonPayload = {}\n\t\t\tlogging.error(e)\n\t\t\tcurlFeed = CurlFeed(\"App1\", \"Failed\", \"Failed to retrieve JSON payload from URL\")\n\t\t\tcurlFeed.send()\n\t\t\treturn jsonPayload", "title": "" }, { "docid": "6b1c089b89241115830046cb6232fbba", "score": "0.61346084", "text": "def post(self):\n data=request.json\n pass", "title": "" }, { "docid": "8f5dd72ded73458080f57f511aea17a6", "score": "0.61303943", "text": "def do_json(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"do_json\")", "title": "" }, { "docid": "25481ed0db2129c6b9a6fb2da1555ec7", "score": "0.6129421", "text": "def json(self,url):\n try:\n return json.loads(self.get(url))\n except:\n return None", "title": "" }, { "docid": "8c35537b0cb4688969e9cf807efb66da", "score": "0.6104055", "text": "def json(self):\n if self.mimetype == 'application/json':\n return from_json(self.data)", "title": "" }, { "docid": "7793f991846af3e4c970a40caae0a5d4", "score": 
"0.6103817", "text": "def _parse_json(self, request):\n content_type = request.getHeader('Content-Type')\n if request.method == 'POST' and content_type == 'application/json':\n content = request.content.read()\n return json.loads(content, cls=JSONDecoder)\n return None", "title": "" }, { "docid": "675810eee455faaa8d4f3c55dc550a1b", "score": "0.60961205", "text": "def json_data(name, default=None):", "title": "" }, { "docid": "53d99cb95e8b517d419e6b4ba6b3abc3", "score": "0.6080984", "text": "def json(self):\n return self.json_data", "title": "" }, { "docid": "28de682fdc6682bff378b82c349edb28", "score": "0.6067897", "text": "def json(self, **kwargs: Any) -> Any:\n content = self.get_content(strict=False)\n if content is None:\n raise TypeError(\"Message content is not available.\")\n else:\n return json.loads(content, **kwargs)", "title": "" }, { "docid": "a6bc1b33b522cd489fd0b3d8f8ad49fd", "score": "0.60400736", "text": "def json(self):\n return self.fake_json", "title": "" }, { "docid": "f2ff7602393409c15fd12a0ce16fea9e", "score": "0.6031762", "text": "def receive_message(self):\n\n data = self.reader()\n if type(data) != str:\n # Turn non-string to string or die trying\n data = json.dumps(data)\n return None, data", "title": "" }, { "docid": "1eef6e1503ac31316c5a7ee41b61477a", "score": "0.60201544", "text": "def fetchJson(url: str) -> T:\n try:\n response = urllib.request.urlopen(url)\n except:\n return\n\n data = json.loads(response.read())\n\n return data", "title": "" }, { "docid": "3bd0b2df8a127a318737e58b11c46b68", "score": "0.5989263", "text": "def main(which,data): \n \n \n url=geturl(which,data)\n #print(url) \n info=urllib.request.urlopen(url)\n \n #returns corresponding strings\n infotxt=info.read().decode('utf-8')\n #returns json objects taken from list \n objson = json.loads(infotxt) \n \n articles=getdict(objson)\n \n return articles", "title": "" }, { "docid": "1bf96628c85b78bb0387af49bc64bdab", "score": "0.59883004", "text": "def _request(url, data=None):\n res = urllib2.urlopen(url, data)\n return json.loads(res.read())", "title": "" }, { "docid": "1bf96628c85b78bb0387af49bc64bdab", "score": "0.59883004", "text": "def _request(url, data=None):\n res = urllib2.urlopen(url, data)\n return json.loads(res.read())", "title": "" }, { "docid": "2248547c816e6b96ede56b4b75be04f5", "score": "0.59833926", "text": "def get_json( self, url ):\n\n # attempt to fetch the JSON response from the host\n response = self._get_url( url )\n\n # make sure the host says it sent JSON data\n if response.ctype[ 0 ] == 'application/json':\n\n # attempt to parse the response data into a native dict/list/etc\n return json.loads( response.read() )\n\n # host did not send the appropriate Content-Type header\n raise http_error(\n 'Invalid Content-Type (%s) received for JSON request.\\n%s' % (\n response.ctype[ 0 ],\n response.read()\n )\n )", "title": "" }, { "docid": "e09877c236006b9d53607d70d46bed07", "score": "0.59775794", "text": "def json(self):\n data = self.data\n\n if data:\n # There's actual data here, so parse it and return it.\n return json.loads(data.decode('utf-8'))\n\n # Return whatever falsey value we received.\n return data", "title": "" }, { "docid": "87f44ec6063bf075c241191446fa8ee1", "score": "0.59719783", "text": "def JsonObj(data):\n return json.loads(str(data))", "title": "" }, { "docid": "cfcdfc4c6573cb03df4fd06a144386a9", "score": "0.59662604", "text": "def library(strictly_json):\n print(strictly_json.get_json())", "title": "" }, { "docid": "880678053017e5a7d66933eab4223fc5", 
"score": "0.5964272", "text": "def getJsonparsedData(url):\n response = urlopen(url)\n data = response.read().decode('utf-8')\n return json.loads(data)", "title": "" }, { "docid": "31cd04a9b7bf13f877ba95dfdd78ceb5", "score": "0.59615", "text": "def readJSON(self):\n if json_filename is not None:\n with open(json_filename) as jsf:\n self.json_data = json.load(jsf, encoding='utf-8')\n else:\n response = urllib2.urlopen(json_url).read().decode('utf-8')\n self.json_data = json.loads(response)", "title": "" }, { "docid": "c2be1815be137404682f15c71f30d887", "score": "0.5958691", "text": "def _parse(self, data):\n return json.loads(data)", "title": "" }, { "docid": "cae036d51ae3bc7b0cdf08ba596b8934", "score": "0.5952775", "text": "def on_get(self, req, resp):\n data = json.loads(req.stream.read())\n print(data)\n\n content = {\n 'name': 'Black Ruben Coat',\n 'brand': 'Nudie Jeans',\n 'price': '650',\n 'currency': 'CAD',\n 'code': '192078M176001',\n 'image': 'https://img.ssensemedia.com/images//192078M176001_1/nudie-jeans-black-ruben-coat.jpg'\n }\n output = {}\n if data['method'] == 'get-price':\n output['value'] = content['name']\n resp.body = json.dumps(output)", "title": "" }, { "docid": "0c1d7f95a9401fc8f86a5d319047c0a0", "score": "0.5951803", "text": "def parse(cls, api, json):\r\n raise NotImplementedError", "title": "" }, { "docid": "f29a90ebe107c061e0dbed715412348d", "score": "0.5941156", "text": "def json(self):\n if __debug__:\n _assert_have_json()\n if self.mimetype == 'application/json':\n return json.loads(self.data)", "title": "" }, { "docid": "edb47925ddb6521888e3873d1a1bf204", "score": "0.5941088", "text": "def post_json(self, *args, **kwargs):\n values = request.json\n result = self.handle_post(values, *args, **kwargs)\n if \"payload\" in result:\n return jsonify(result[\"payload\"]), result.get(\"status_code\", 200)\n return (jsonify({\"message\": result.get(\"message\", \"no output\")}),\n result.get(\"status_code\", 200))", "title": "" }, { "docid": "294c8a8fdfb2a2e44acb9bfedf782c78", "score": "0.59393394", "text": "def _get_json(self, url, params = None):\n if params is None:\n return json.load(self._open_url(url))\n else:\n return json.load(self._open_url(url % params))", "title": "" }, { "docid": "eb44856963abe4385d4bd0da1e53a9bb", "score": "0.59376633", "text": "async def json(self) -> Any:\n if not (self.is_json and self.reports_json):\n raise aiohttp.ContentTypeError(\n self.request_info, (), message='Dummy')\n return self._data", "title": "" }, { "docid": "e739f3cb64c16d541479096b583fed42", "score": "0.5934577", "text": "def json(self):\n return self.body", "title": "" }, { "docid": "b1c63b618c2a6fd4984e57ca2ffb230f", "score": "0.5931823", "text": "def __json__(self):\n #TODO implement the __json__ function\n pass", "title": "" }, { "docid": "2a3387b2abe7a33bb64c2db4122c11b2", "score": "0.5928871", "text": "def get_json(self) -> Any:\n return self.response.json()", "title": "" }, { "docid": "0efea6025defbf8e637c9a00616d5f23", "score": "0.59275854", "text": "def _post(self, url, data):\n print url\n\n req = urllib2.Request(url, json.dumps(data).encode('utf-8'))\n response = urllib2.urlopen(req)\n res = response.read()\n if isinstance(res, bytes):\n res = res.decode('utf-8')\n try:\n return json.loads(res)\n except ValueError:\n # probably empty response so no JSON to decode\n return res", "title": "" }, { "docid": "0640dbdd7996faeebe99f0a41bdad19c", "score": "0.5926509", "text": "def read_from_json(self, message):\n with open(self.file_name) as f:\n info = json.load(f)\n 
print(f\"{message} {info}\")\n return info", "title": "" }, { "docid": "2a737e8c4a32a7a567f67cf3ab1b2a67", "score": "0.59148777", "text": "def fetch_json(url, method=GET, data=None, headers=None, verify=None,\r\n timeout=5.0):\r\n request = fetch(**locals())\r\n\r\n return json.loads(request.text)", "title": "" }, { "docid": "709470b2fc7bd2ce942c9ce2a1631b29", "score": "0.59116644", "text": "def receive_json(self, content):\n type_ = content['type']\n payload = content.get('payload', {})\n meta = content.get('meta', {})\n\n if type_ == \"SESSION.CONNECT\":\n self.session_connect(**payload)\n\n elif type_ == \"SESSION.CLOSE\":\n self.session_close()\n\n elif type_ == \"SESSION.DELETE\":\n self.session_delete(**payload)\n\n elif type_ == \"SESSION.LIST\":\n self.session_list(**payload)\n\n else:\n if not self.session:\n self.send_json(\n type_,\n {\"error\": \"No sessions selected\"},\n error=True\n )\n return\n async_to_sync(self.channel_layer.group_send)(\n str(self.session.id), {\n \"type\": type_.lower(),\n **payload,\n **meta\n }\n )", "title": "" }, { "docid": "2186e3feaed53f771a31079c078db374", "score": "0.59047264", "text": "def get_json(in_take):\n if exists(str(in_take)):\n with open(in_take, 'r') as read_file:\n return load(read_file)\n elif isinstance(in_take, dict):\n return [in_take]\n elif isinstance(in_take, list):\n return in_take\n elif isinstance(in_take, str):\n return loads(in_take)\n raise ValueError('Input type is not valid.')", "title": "" }, { "docid": "7f79511e08b01b88bf1b98ea70adc457", "score": "0.5904294", "text": "def test_parse_json(self) -> None:\n sample_data = b'{\"jsonrpc\":\"2.0\",\"method\":\"initialized\",\"params\":{}}Content-Length: 7273\\r\\n\\r\\n{\"jsonrpc\":\"2.0\",\"method\":\"textDocument/didOpen\",\"params\":{\"textDocument\":{\"uri\":\"file:///example/main.py\",\"languageId\":\"python\",\"version\":1,\"text\":\"# Example file text.\"}}}' # noqa\n parsed_data = _parse_json_rpc(sample_data)\n self.assertEqual(2, len(parsed_data))\n self.assertEqual(\n {\"jsonrpc\": \"2.0\", \"method\": \"initialized\", \"params\": {}}, parsed_data[0]\n )", "title": "" }, { "docid": "a08517301e71d62e8e834139699c47ef", "score": "0.5889491", "text": "def jsonread():\n try: \n with open(JSONPATH) as json_file:\n data = json.load(json_file)\n except FileNotFoundError:\n print(\"path of jsonconfig is not valid\")\n exit()\n except json.decoder.JSONDecodeError as e:\n print(\"Format error in json file, see:\")\n print(e)\n exit()\n except Exception as e:\n print(e)\n exit()\n return translator(data)", "title": "" }, { "docid": "c06552306e227b3e60a7a92a129f94a6", "score": "0.5881073", "text": "def rawdatahandler(self, action=\"None\", path=None, silent=True, jsonflag=False):\n if action == \"GET\":\n rawdata = self.rdmc.app.get_handler(get_path=path, silent=silent)\n\n if jsonflag is True:\n rawdata = json.loads(rawdata.read)\n\n return rawdata", "title": "" }, { "docid": "ff753083ab8c7ecda686faa848d8df28", "score": "0.5878717", "text": "def rundata(self, strjson):\n\n d = json.loads(strjson)\n return self.api.data.post(d)", "title": "" }, { "docid": "34b19257a8105c164484b891b752dcae", "score": "0.5857172", "text": "def get_json_from_remote_server(func, **kwargs):\n rawjson = func(**kwargs)\n if rawjson is None:\n # If the request failed we already logged in in PiwikRequest;\n # no need to get into the exception handler below.\n return {}\n try:\n data = json.loads(rawjson)\n if isinstance(data, dict) and data.get('result') == 'error':\n current_plugin.logger.error('The 
Piwik server responded with an error: %s', data['message'])\n return {}\n return data\n except Exception:\n current_plugin.logger.exception('Unable to load JSON from source %s', rawjson)\n return {}", "title": "" }, { "docid": "3a93a4291bebb068626145b5488eaba6", "score": "0.5847482", "text": "def request(corpus, req_type, param):\n if 'outputformat' not in param:\n param['outputformat'] = 'json'\n url = make_url(corpus, req_type, param)\n #print(url)\n f = urlopen(url)\n return json.loads(f.read().decode('utf-8'))", "title": "" }, { "docid": "b16dc23d3090199af4df469a0f934ec8", "score": "0.58441347", "text": "def fetch_json(self):\n return requests.get(self.json_url).json()", "title": "" }, { "docid": "f51bdb13186aad3ac1768694d59567a8", "score": "0.5843328", "text": "def post_payload(request):\n if request.method == 'POST':\n data = request.body\n data = data.decode()\n data = json.loads(data)\n print(data)\n return JsonResponse({\"status\": \"ok\"}, status=201)\n return HttpResponse('post only my guy', status=400)", "title": "" }, { "docid": "424778a0ec3726e138c6062377310274", "score": "0.5843133", "text": "def load_json(self):\n try:\n self.request.arguments = json.loads(self.request.body)\n except ValueError:\n msg = f\"Could not decode JSON: {self.request.body}\"\n logger.debug(msg)\n raise APIBadRequest(msg)", "title": "" }, { "docid": "4e1709ffa4fd778b3963d31abd5bc8bb", "score": "0.5841473", "text": "def json_url_reader(url):\n # added user agent so that coindesk is not blocking as rogue request\n req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n fin = urllib.request.urlopen(req)\n fin = fin.read().decode('utf-8')\n return json.dumps(fin)", "title": "" }, { "docid": "162a882ed6e8c5242d1b50fc0e820ad5", "score": "0.5840565", "text": "async def json(\n self,\n *args: Any,\n loads: JSONDecoder = json_loads,\n **kwargs: Any,\n ) -> Any:\n return await super().json(*args, loads=loads, **kwargs)", "title": "" }, { "docid": "d937834f840b6e26a5337d5ccce96f4f", "score": "0.58341855", "text": "def json(cls, res, *args, **kwargs):\r\n return parse_json(res.text, *args, **kwargs)", "title": "" }, { "docid": "5b3f54c767940296368b5618e9f0f24f", "score": "0.5827512", "text": "def get_json(url):\n import urllib.request\n import json\n\n request = urllib.request.urlopen(url)\n response_text = request.read().decode('utf-8')\n response_data = json.loads(response_text)\n return response_data", "title": "" }, { "docid": "4340b52225862469c84ed05d0932c9a1", "score": "0.58267385", "text": "def json_action(fn, *args, **kwargs):\n\n if not request().is_json: return fn(*args, **kwargs)\n\n try:\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return fn(*args, **kwargs)\n except JsonError as err:\n raise err\n except cherrypy.HTTPError as err:\n raise JsonError(err.status, err.message)\n except (db.BadKeyError, db.BadValueError) as err:\n raise JsonError(400, err.message)\n except oauth.OAuthRequestError as err:\n raise JsonError(403, \"OAuth authentication failed.\")\n except Exception as err:\n raise JsonError(500, err.message)", "title": "" }, { "docid": "208f8eb4bb801020f907d631e1d31c68", "score": "0.58265203", "text": "def recv_json(self, connection):\n as_str = self.recv_str(connection)\n if as_str is None:\n return None\n else:\n as_str = as_str.split('\\n')\n json_messages = []\n for s in [i for i in as_str if len(i) > 0]:\n try:\n json_messages.append(json.loads(s))\n except json.JSONDecodeError:\n self._print_message(\n 'Error decoding this json: {}'.format(s))\n return 
None\n return json_messages", "title": "" }, { "docid": "996588f86fd21f641c6fce415807ccdc", "score": "0.5820991", "text": "def test_post_json(self):\n # Spawn Local Server with Hug\n with open(\"fixtures/basic.json\") as json_data:\n data = json.load(json_data)\n string_data = json_data.read()\n\n #header = {'content-type': 'application/json'}\n #response = hug.test.call(\"POST\",receive, url=\"http://127.0.0.1:8000/\", body=json.dumps(data), headers=header)\n #print(response.status)\n #assert response.status == HTTP_200\n r = requests.post(\"http://127.0.0.1:8000/\",\n json=json.dumps(data))\n assert r.text == \"\"\"{\"content_type\": \"text\", \"chat_id\": 22959774, \"response\": \"_NOACTION_\"}\"\"\"", "title": "" }, { "docid": "6d4f40cedd11b0cddba1fc825fc5384f", "score": "0.58203113", "text": "def json(self):\n return self._json", "title": "" }, { "docid": "3492b161c8d913b7bc53581ce656b695", "score": "0.58161616", "text": "def import_json(self, json_in):\n if \"id\" in json_in and \"parity\" in json_in and \"len\" in json_in and \"message\" in json_in:\n self.id = json_in[\"id\"]\n self.parity = json_in[\"parity\"]\n self.len = json_in[\"len\"]\n self.message = json_in[\"message\"]\n self.content = json.loads(self.message)", "title": "" }, { "docid": "9599a19e6c636d77b598b56cac4f34de", "score": "0.58124113", "text": "def get_data():\n data = None\n try:\n data = request.json\n if data == {}:\n data = request.data\n except:\n traceback.print_exc()\n data = {}\n\n return data", "title": "" }, { "docid": "1c4bcb21379580b10563e50e8b1fe57e", "score": "0.58080333", "text": "def parseJson(self, artistJson, artistEntry):\r\n pass", "title": "" }, { "docid": "31d14bc634d820b12e8bde4a38073671", "score": "0.57905906", "text": "def get_json(url):\n f = urllib.request.urlopen(url)\n response_text = f.read().decode(\"utf-8\")\n response_data = json.loads(response_text)\n return response_data", "title": "" }, { "docid": "31d14bc634d820b12e8bde4a38073671", "score": "0.57905906", "text": "def get_json(url):\n f = urllib.request.urlopen(url)\n response_text = f.read().decode(\"utf-8\")\n response_data = json.loads(response_text)\n return response_data", "title": "" }, { "docid": "31a8a7d4a8bd2fe21ea7abaf44e4a63b", "score": "0.5776854", "text": "def _json_parsed(fun):\n def decorator(*args, **kwargs):\n return json.loads(fun(*args, **kwargs).text)\n\n return decorator", "title": "" }, { "docid": "47876f4e67e6b87cb3ace9b9bf9ce634", "score": "0.5772918", "text": "def post_json( self, url, data ):\n\n # send the POST request, fetch a JSON response (hopefully)\n response = self._post_url(\n url,\n body = json.dumps( data ),\n mimetype = 'application/json'\n )\n\n # make sure the host says it sent JSON data\n if response.ctype[ 0 ] == 'application/json':\n\n # attempt to parse the response data into a native dict/list/etc\n return json.loads( response.read() )\n\n # host did not send the appropriate Content-Type header\n raise http_error(\n 'Invalid Content-Type (%s) received for JSON request.\\n%s' % (\n response.ctype[ 0 ],\n response.read()\n )\n )", "title": "" }, { "docid": "7c4f5c1bfd7bd292ca05102f1fd37602", "score": "0.576201", "text": "def json_get(self, *args, **kwargs):\n return self._do_json_method(self.http_get, *args, **kwargs)", "title": "" }, { "docid": "1f74fa91ce0a4fb8410f01be856c1a26", "score": "0.5757319", "text": "def _json_payload(request):\n try:\n return request.json_body\n except ValueError:\n raise PayloadError()", "title": "" }, { "docid": "265eb86917125ada2448c6a88c36be0f", "score": 
"0.5757108", "text": "def json_loads(src: str) -> Any:\n if not isinstance(src, str):\n return None\n\n context = ContextContainer._get_context()\n assert context\n\n if context and context.revision >= Revision.THREE.value:\n step_cost: int = _get_api_call_step_cost(context, ScoreApiStepRatio.JSON_LOADS)\n step: int = step_cost + step_cost * len(src.encode(CHARSET_ENCODING)) // 100\n\n context.step_counter.consume_step(StepType.API_CALL, step)\n\n return json.loads(src)", "title": "" }, { "docid": "0809ef792bc62c84ae77351ab604a730", "score": "0.575595", "text": "def get_json(url):\r\n def open_connection(url):\r\n \"\"\" Opening an HTTP connection to the desired site and returning an HTTP object. \"\"\"\r\n http_response_obj = urllib.request.urlopen(url)\r\n return http_response_obj\r\n \r\n def acquire_json_decode(http_response_obj):\r\n \"\"\" Reading in the JSON, decoding it and returning a JSON object. \"\"\"\r\n json_obj = http_response_obj.read().decode(\"utf-8\")\r\n return json_obj\r\n \r\n print(\"Acquiring HTTP Response Object...\\n\")\r\n http_response_obj = open_connection(url)\r\n print(\"Acquiring the JSON feed...\\n\")\r\n json_obj = acquire_json_decode(http_response_obj)\r\n return json_obj", "title": "" }, { "docid": "705ee6097c8922c912edbc62b6e586e5", "score": "0.57558584", "text": "def is_json(text):", "title": "" }, { "docid": "a6692e767521503522f6821138ccf0ec", "score": "0.57550067", "text": "def test_json_function_reads_data(self):\n result = file_writing.json_function('test_unIpass\\\\test.json', 'r')\n\n self.assertEqual(result, 'test_data')", "title": "" }, { "docid": "e9909ba3c01cdc8e3cb6056866affa9a", "score": "0.57514757", "text": "def json_method(fn):\n return __network_method(fn, JSONSerialisation)", "title": "" }, { "docid": "6a28a0183c81fd5ccafea72e6aa9d60b", "score": "0.5751327", "text": "def read_json(input_file):\n with open(input_file, mode='rb') as json_file:\n data = json.load(json_file)\n return data", "title": "" }, { "docid": "4bd7eb95c30fc8b6a0456919c2c826f5", "score": "0.5745009", "text": "def post_json(url, j):\r\n connection = http.client.HTTPConnection(SERVER_URL)\r\n headers = {\"Authorization\": \"ApiKey {}\".format(API_KEY), \"Content-type\": \"application/json\"}\r\n connection.request(\"POST\", url, j, headers=headers)\r\n response = connection.getresponse()\r\n content = response.read().decode()\r\n print(\"POST {} {} ({} bytes)\".format(response.status, url, len(content)))\r\n #print(content)\r\n if response.status != 200:\r\n print(content)\r\n raise ValueError(\"POST to {} was not successful. Sent JSON '{}'\".format(url, j))\r\n return json.loads(content)", "title": "" }, { "docid": "16bf9f6de1d27d905b675a08d89cdb3f", "score": "0.5742287", "text": "def get_json( url, config = None ):\n\n # create a basic HTTP client\n client = http( config )\n\n # request the JSON document, and return the result\n return client.get_json( url )", "title": "" }, { "docid": "cdbd7f6956e03eafd8ecbdd6358466e9", "score": "0.57273984", "text": "def from_valid_json(jobj):", "title": "" }, { "docid": "f9c883532581df9295c211f5afdbf01f", "score": "0.5720388", "text": "def fetch_json(args, out, url, data=None):\n content = None\n\n # 1. request and basic authentication\n req = urllib2.Request(url, data=data)\n auth = '%s:%s' % (args.user, args.password)\n auth = auth.encode('base64')\n # By default long base64 string are broken into multiple lines. Join them into single line. 
Also drop the trailing \\n.\n auth = auth.replace('\\n','')\n req.add_header(\"Authorization\", \"Basic %s\" % auth)\n\n # 2. http fetch\n resp = urllib2.urlopen(req)\n print ' %s %s (%s)' % (resp.code, resp.msg, resp.headers.get('content-length','n/a'))\n content = resp.read()\n out.write(content)\n\n # 3. parse json\n content = json.loads(content)\n\n return content", "title": "" }, { "docid": "f4b5a37a5a3165c03c992af9b591a010", "score": "0.5718205", "text": "def getjson(url):\n from workflow import web\n r = web.get(url)\n log.debug('[%s] %s', r.status_code, r.url)\n r.raise_for_status()\n return r.json()", "title": "" }, { "docid": "11a333d36b036704d8a239181dfe3edd", "score": "0.57174677", "text": "def test_receive_json(peers):\n connection = peers[0].connect(peers[1].address_name, data_type=\"json\")\n\n data_test = \"2easy4u\"\n connection.send(data_test)\n\n time.sleep(.1)\n\n assert len(datas) == 1 and datas[0] == data_test", "title": "" }, { "docid": "adc780129836a6e3a7a2fd78e09f427f", "score": "0.57137823", "text": "def do_json(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"do_json\")", "title": "" } ]
f928e10b71127be588db7080aadddf59
new request with added url prefix
[ { "docid": "500302353444e52592c399ad0c5b42b1", "score": "0.7734129", "text": "def add_prefix(self, prefix: str) -> 'Request':\n return replace(self, url=prefix + self.url)", "title": "" } ]
[ { "docid": "5a983c427ab4ff4da4b887f4601c21f8", "score": "0.6477151", "text": "def process_request(self, request):\n self.request = request\n site = get_current_site(request)\n if site.id > 1:\n prefix = \"_{0}\".format(site.id)\n self.request.urlconf = settings.ROOT_URLCONF + prefix", "title": "" }, { "docid": "8085660e9402da51301f82b3b3428918", "score": "0.64046407", "text": "def get_prefix_url(request):", "title": "" }, { "docid": "b121cd52b0ec7f0df8969bdd199a1ac9", "score": "0.63772404", "text": "def preQuery(self):\n self.request_url = self.url\n pass", "title": "" }, { "docid": "b1bc83e0515e3140ea0569440f7049a4", "score": "0.6298312", "text": "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "title": "" }, { "docid": "608b7f21f791f2de25f9142f672ebc4b", "score": "0.6177108", "text": "def add_request(self, request):\r\n\t\tself.requestCt = self.requestCt + 1\r\n\t\tfound = False\r\n\t\tfor req in self.requests:\r\n\t\t\tif \treq.service == request.service and \\\r\n\t\t\t\treq.host == req.host and \\\r\n\t\t\t\treq.port == req.port:\r\n\t\t\t\t\treq.timestamp = request.timestamp\r\n\t\t\t\t\tfound = True\r\n\t\t\t\t\tbreak\r\n\t\tif not found:\r\n\t\t\tself.requests.append(request)\r\n\t\t\tself.last_modified = time.time()", "title": "" }, { "docid": "fc8eb1fd4a96f4c416537b5133d11bde", "score": "0.6159193", "text": "def add_request(self, request):\n\t\tself.requestCt = self.requestCt + 1\n\t\tfound = False\n\t\tfor req in self.requests:\n\t\t\tif \treq.service == request.service and \\\n\t\t\t\treq.host == req.host and \\\n\t\t\t\treq.port == req.port:\n\t\t\t\t\treq.timestamp = request.timestamp\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\t\tif not found:\n\t\t\tself.requests.append(request)\n\t\t\tself.last_modified = time.time()", "title": "" }, { "docid": "9bc16c7fb9ffca30dc6f606c2ad57eb7", "score": "0.6141393", "text": "def urlfor( request, *args, **kwargs ):", "title": "" }, { "docid": "94d52f620440caee89dee26e901016aa", "score": "0.606293", "text": "def newMCRequest(self, request):\n raise NotImplementedError", "title": "" }, { "docid": "2fc5a844b56d12e3ec6116e97b316bcf", "score": "0.60610163", "text": "def start_requests(self):\n requests = super(ClarksProductSpider, self).start_requests()\n\n for req in requests:\n new_url = req.url.replace('+', '%20')\n req = req.replace(url=new_url)\n yield req", "title": "" }, { "docid": "a3306772a515d8fd2ed9acaaee1ef20e", "score": "0.600169", "text": "def normalize_request(self, request):\n if not self.url.startswith('/'):\n self.url = '/' + self.url\n request.path = self.url\n request.path_info = self.url\n request.META['PATH_INFO'] = self.url\n return request", "title": "" }, { "docid": "d36aebef53166081e6f9383e50b43441", "score": "0.5991233", "text": "def set_url_prefix(self, new_prefix):\n old_prefix = self._url_prefix\n self._url_prefix = new_prefix\n return old_prefix,self._url_prefix", "title": "" }, { "docid": "98a05fbb6ce1e036f047abd0c176285c", "score": "0.5954496", "text": "def urlpath( request, *args, **kwargs ):", "title": "" }, { "docid": "5e9afe35a6224d450861234e5c610bb9", "score": "0.5852522", "text": "def newRequest(self):\n return Request( )", "title": "" }, { "docid": "d532f62eed93c45b14c145b935c81b22", "score": "0.5842452", "text": "def __call__(self, request):\n if self.where == \"qs\":\n parts = urlparse(request.url)\n qs = parse_qs(parts.query)\n qs[self.qs_key] = self.token\n request.url = urlunparse(\n (\n 
parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(qs),\n parts.fragment,\n )\n )\n elif self.where == \"header\":\n request.headers[\"Authorization\"] = \"Bearer {}\".format(self.token)\n return request", "title": "" }, { "docid": "565d9fb2eb91d8e8694fe43c1a5aa355", "score": "0.5812543", "text": "def __init__( request ):", "title": "" }, { "docid": "37401a03312c7d48f5c45a96c6777d12", "score": "0.5803095", "text": "def newProcessingRequest(self, request):\n raise NotImplementedError", "title": "" }, { "docid": "2f309419ecc29aee72c6f7c02dff5514", "score": "0.5729534", "text": "def process_request_starts(self, request):\n pass", "title": "" }, { "docid": "a086408ccc4fee11b0bbf1d3115d2e02", "score": "0.57173485", "text": "def process_request(self, request, spider):\n new_url_matches = re.findall(r'^.+\\/dp\\/[A-Z][A-Z0-9]+', request.url)\n if new_url_matches:\n logging.info(f'Changing url from {request.url} to {new_url_matches[0]}')\n request = request.replace(url=new_url_matches[0])\n return request\n return None", "title": "" }, { "docid": "21e9e4d65e9b75e783c366ba1f8baa9b", "score": "0.56708056", "text": "def __update_request(self, request_dict, namespace, apikey):\n request_dict['namespace'] = namespace if namespace else self.namespace\n request_dict['apikey'] = apikey if apikey else self.apikey", "title": "" }, { "docid": "91d496b2ced3e9bf4bf8c57acfa76bfd", "score": "0.5661592", "text": "def testBuildRequest_Prefix(self):\n self.Reinitialize(urllib.urlencode([('prefix_integer_field', '10'),\n ('prefix_string_field', 'a string'),\n ('prefix_enum_field', 'VAL1'),\n ]),\n self.content_type)\n\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(\n parameter_prefix='prefix_')\n request = url_encoded_mapper.build_request(self.service_handler,\n Request1)\n self.assertEquals(10, request.integer_field)\n self.assertEquals('a string', request.string_field)\n self.assertEquals(Enum1.VAL1, request.enum_field)", "title": "" }, { "docid": "5577ad7aabebaa97d298eab9f76a17e5", "score": "0.565153", "text": "def new_request(self, **kwargs):\n url = self.config[\"base_url\"]\n\n if kwargs.get(\"user_id\") is not None:\n url = url + kwargs[\"user_id\"]\n\n self.req = request.Request(host=self.config[\"host\"], protocol=constant.HTTP, url=url,\n method=kwargs[\"method\"], time_out=kwargs[\"timeout\"])\n\n return self", "title": "" }, { "docid": "d0586d82718554d92daba7db72bc5b56", "score": "0.5642726", "text": "def _set_request(self, request):\n self._request = request", "title": "" }, { "docid": "5bc5998a42ef4d962ccedce076597857", "score": "0.5637646", "text": "def append_to_request(self, request_base, request_object):\n\n pass", "title": "" }, { "docid": "5cf10e671278d968ed8979d6dba57901", "score": "0.56290793", "text": "def request(self, url, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "7e297a4fb0f8b303bdc5283eb90c84ca", "score": "0.5615185", "text": "def add_to_request_dict(request, request_dict, params):\n\n if not is_good_request(request, params):\n return\n\n if params['ignore_www']:\n if re.match(r\"www\\.\", request.url):\n # Remove www. 
from url ????????????????????\n request.url = request.url[4:]\n\n request_dict[request.url][0] += 1\n request_dict[request.url][1] += int(request.responce_time)", "title": "" }, { "docid": "ebdadd698cbcddd930591175f46f3170", "score": "0.55845016", "text": "def get_request(func):\r\n func.request = True\r\n return func", "title": "" }, { "docid": "91cd77b5ee1dceb93c719903c7428c43", "score": "0.55770254", "text": "def req():\n return Request()", "title": "" }, { "docid": "0fd25485f8aed8978bc0ec5a58190de2", "score": "0.5572234", "text": "def build_request(url, headers, body, initial_request: Request) -> Request:\n updated_request = Request(\n method=initial_request.method,\n url=url,\n headers=headers,\n content=body\n )\n\n if hasattr(initial_request, 'extensions'):\n updated_request.extensions = initial_request.extensions\n\n return updated_request", "title": "" }, { "docid": "7a383129ab474e0215a86fcfe6f21a64", "score": "0.55722153", "text": "def url(self, request_path=\"\"):\n return f\"{self.scheme}://{self.host}/{request_path}\"", "title": "" }, { "docid": "cfce33fa15353c3551ed8d1e1ba03b2b", "score": "0.55594367", "text": "def StartRequestHook(ref, args, request):\n del ref\n del args\n start_request = GetMessagesModule().StartNodeRequest()\n request.startNodeRequest = start_request\n return request", "title": "" }, { "docid": "9dcabf459d0567b0a78623fe8b9b8b0f", "score": "0.55541414", "text": "def _CreateRequest(self, url, data=None):\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\n req = urllib2.Request(url, data=data)\n if self.host_override:\n req.add_header(\"Host\", self.host_override)\n for key, value in self.extra_headers.iteritems():\n req.add_header(key, value)\n return req", "title": "" }, { "docid": "e90eba2364f1388ded61300739c4ebd7", "score": "0.5518329", "text": "def _create_normal_request(self, url):\r\n request = self.factory.get(url)\r\n request.user = AnonymousUser()\r\n middleware = SessionMiddleware()\r\n middleware.process_request(request)\r\n request.session.save()\r\n MakoMiddleware().process_request(request)\r\n return request", "title": "" }, { "docid": "2d93c7911d8a99bd868e249ec882477d", "score": "0.55134034", "text": "def setRequest(request):\n setLocal('request', request)", "title": "" }, { "docid": "3f357111de637f986dfae5bdc3b29e48", "score": "0.5512349", "text": "def __init__(self, request):\n self.arguments = {}\n for k, v in request.GET.items():\n self.arguments.setdefault(k, []).append(v)\n\n self.full_url = lambda: request.url\n self.host = request.host\n self.path = request.path", "title": "" }, { "docid": "c4affaa7d7659e2a10a460462aab44aa", "score": "0.5511405", "text": "def __prepare_request(self, page):\n\t\t# Replace the page parameter if it exists and add it if it doesn't exist\n\t\tpage_regex = re.compile(\"(?<=[\\\\&\\\\?]page=)\\\\d*\")\n\t\tregex_res = page_regex.subn(str(page), self.__response.request.url)\n\n\t\tif regex_res[1] == 0:\n\t\t\tself.__response.request.prepare_url(self.__response.request.url, {\"page\": str(page)})\n\t\telse:\n\t\t\tself.__response.request.url = regex_res[0]\n\n\t\treturn self.__response.request", "title": "" }, { "docid": "1a5a099a4ae1e56611f7378a6f0789f0", "score": "0.54521024", "text": "def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req = urllib2.Request(url, data=data, headers={\"Accept\": \"text/plain\"})\r\n if self.host_override:\r\n req.add_header(\"Host\", self.host_override)\r\n for key, value in 
self.extra_headers.iteritems():\r\n req.add_header(key, value)\r\n return req", "title": "" }, { "docid": "a9d1327d47dc519e311fd51610fbb352", "score": "0.54322356", "text": "def url_for_request(self, method, extras):\n raise NotImplementedError(\"Should be overriden by subclass\")", "title": "" }, { "docid": "3c9931efeb821ea17f4a51e5c32b2a41", "score": "0.54322124", "text": "def _update_request_uri_query(self, request):\n if \"?\" in request.path:\n request.path, _, query_string = request.path.partition(\"?\")\n if query_string:\n query_params = query_string.split(\"&\")\n for query in query_params:\n if \"=\" in query:\n name, _, value = query.partition(\"=\")\n request.query.append((name, value))\n\n request.path = url_quote(request.path, \"/()$=',\")\n\n # add encoded queries to request.path.\n if request.query:\n request.path += \"?\"\n for name, value in request.query:\n if value is not None:\n request.path += \"{}={}{}\".format(name, url_quote(value, \"/()$=',\"), \"&\")\n request.path = request.path[:-1]\n\n return request.path, request.query", "title": "" }, { "docid": "a447377cad972df90670cbf95f9fc052", "score": "0.5428896", "text": "def start_requests(self):\n yield scrapy.Request(url=self.start_urls[0])", "title": "" }, { "docid": "efe35ee26226aa1cf7e9d26b56975eb9", "score": "0.5428687", "text": "def build_url(self, request, action, **query):\n base = urlparse.urljoin(request['base_url'], self.auth_prefix + '/' + action)\n return appendArgs(base, query)", "title": "" }, { "docid": "bd7de1156a126917549ce4d4f46d23cb", "score": "0.5418175", "text": "def __call__(self, request):", "title": "" }, { "docid": "2bfa13ad5a7035339da105c3a1ba33fa", "score": "0.54169506", "text": "def request(self, **request):\n # Fall back to defaults as in the superclass's implementation:\n path = request.get('PATH_INFO', self.defaults.get('PATH_INFO', '/'))\n locale, shortened = split_path(path)\n if not locale:\n request['PATH_INFO'] = '/%s/%s' % (settings.LANGUAGE_CODE,\n shortened)\n return super(LocalizingClient, self).request(**request)", "title": "" }, { "docid": "78e3dd88c99148954cf4ccfc76088b24", "score": "0.5404444", "text": "def request(query):", "title": "" }, { "docid": "5452922f90faf31235beed51c78a88eb", "score": "0.53977257", "text": "def __init__(self, url):\n self.url = url\n self.admin_url = os.path.join(url, \"__admin\")\n self.admin_mapping_url = os.path.join(self.admin_url, \"mappings\")\n self.mapping_reset_url = os.path.join(self.admin_mapping_url, 'reset')\n self.requests_url = \"%s/requests\" % self.admin_url", "title": "" }, { "docid": "edd06be4c1be31082e07b0d8b4374bc7", "score": "0.537775", "text": "def process_request(self, req):\n req.context = self.make_context(req)", "title": "" }, { "docid": "635ef02029c1bef6925fe4949bfeb424", "score": "0.5368029", "text": "def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)", "title": "" }, { "docid": "635ef02029c1bef6925fe4949bfeb424", "score": "0.5368029", "text": "def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)", "title": "" }, { "docid": "635ef02029c1bef6925fe4949bfeb424", "score": "0.5368029", "text": "def _send_request(self, request: HttpRequest, **kwargs: Any) -> 
HttpResponse:\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)", "title": "" }, { "docid": "5f03e6c1fa602550aed2b5e92d69ec1c", "score": "0.5366282", "text": "def add_request_to_queue(self,request):\n self.queue.append(request)", "title": "" }, { "docid": "fc97c4216b9d875f2d24da34c513a1a4", "score": "0.53641003", "text": "def match_request(self):\n try:\n url_rule, self.request.view_args = \\\n self.url_adapter.match(return_rule=True)\n self.request.url_rule = url_rule\n except HTTPException as e:\n self.request.routing_exception = e", "title": "" }, { "docid": "96bd3e01016545742a285281601e959a", "score": "0.5352603", "text": "def make_subrequest(request, path):\n env = request.environ.copy()\n if path and '?' in path:\n path_info, query_string = path.split('?', 1)\n path_info = unquote(path_info)\n else:\n path_info = unquote(path)\n query_string = ''\n env['PATH_INFO'] = path_info\n env['QUERY_STRING'] = query_string\n subreq = request.__class__(env, method='GET', content_type=None,\n body=b'')\n subreq.remove_conditional_headers()\n # XXX \"This does not remove headers like If-Match\"\n return subreq", "title": "" }, { "docid": "c54083fec59830575c9a773794983691", "score": "0.5347475", "text": "def url_shortner(self):", "title": "" }, { "docid": "a8426a73f087d43ff24f259c13016183", "score": "0.53361577", "text": "def inject_new_request(self):\n\n self.__current_request_mock = CoreRequestMock()", "title": "" }, { "docid": "593508fb55d990a7f4068b34356e97fc", "score": "0.5334191", "text": "def form_api_path(self, request_host: RequestHost, api_path_prefix: str, endpoint: str) -> str:\n api_url = self.base_url(request_host)\n\n if api_path_prefix:\n api_url += api_path_prefix\n\n return f\"{api_url}{endpoint}\"", "title": "" }, { "docid": "a0e2ff263d9f8d14beb9e904d9bf7873", "score": "0.53330654", "text": "def start_request(self, environ):\n registry = environ['paste.registry']\n registry.register(self.request_local, self.request_local_class(environ))\n self.request_local.default_view = self._default_view", "title": "" }, { "docid": "bf9129f8c5f59aea2412ade447864072", "score": "0.53316885", "text": "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s%s' % (self.endpoint, self.methodname)\n else:\n return '%s%s?%s' % (self.endpoint, self.methodname, kwargs)", "title": "" }, { "docid": "3aeb23b5bdb9dd6a55a1ed957e2b0b84", "score": "0.532345", "text": "def process_request(self, request):\n if request.path.split(\"/\")[1] == \"secondary\":\n request.urlconf = \"django29673.secondary.urls\"\n\n return self.get_response(request)\n\n # To make tests pass, comment the return statement and uncomment this\n # block:\n # try:\n # return self.get_response(request)\n # finally:\n # if getattr(request, \"urlconf\", None) is not None:\n # set_urlconf(None)", "title": "" }, { "docid": "76881a4b21652aa82be1bf23bd82b68e", "score": "0.5317213", "text": "def new_url(**kwargs):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/gslb/zone/{zone_name}/service/{service-port}+{service-name}\"\n f_dict = {}\n f_dict[\"service-port\"] = \"\"\n f_dict[\"service-name\"] = \"\"\n f_dict[\"zone_name\"] = kwargs[\"zone_name\"]\n\n return url_base.format(**f_dict)", "title": "" }, { "docid": "59a3195c9f06b720e0a57017a009acd6", "score": "0.53165096", "text": "def start_request(self):\n now = _now()\n self._last_request_no += 1\n # 
Instantiate a request_info object and fill its start_time\n new_request = self._request_info_class(\n request_no=self._last_request_no, start_time=now,\n _request_finalizer=self._finalize_request)\n # Add currently running request\n self._current_requests[self._last_request_no] = new_request\n\n if now - self._last_autoclean_time > self.AUTOCLEAN_INTERVAL:\n # Avoid memory leak even if client sometimes don't finish requests\n self._clean_outdated()\n\n return new_request", "title": "" }, { "docid": "2aeb61dd8023ec02a64afd27b7d289c5", "score": "0.5307173", "text": "def test_create_namespaced_build_request_clone(self):\n pass", "title": "" }, { "docid": "743cf3d3c93041412a137bdfa147a6f6", "score": "0.5292813", "text": "def log_request(self, r):\n\n token = r.headers.get(self.header, None)\n r.token = token\n self.requests.append(r)\n if r.token:\n self.log.debug('[%s] %s', token or '/', r.url)", "title": "" }, { "docid": "92b04797d02614cce4f9345143fcc7ce", "score": "0.52876717", "text": "def add_prefix(self, name, uri):\n\n self.prefixes.append('%s: %s' % (name, uri))", "title": "" }, { "docid": "0c1f06b5b1eb76800914521af2a0c36f", "score": "0.5258539", "text": "def set_short_url_base(url):", "title": "" }, { "docid": "f312fd7157391fb56fdd74d669b1a349", "score": "0.5257657", "text": "def _request(self, *args):\n raise NotImplementedError", "title": "" }, { "docid": "5e5bfe189cbc59db3618d0779a49b93a", "score": "0.5257135", "text": "def request_add_host(request, address):\n\n request.setdefault('headers', {})\n request['headers'].setdefault('Host', address)\n\n return request", "title": "" }, { "docid": "75df52637f8c7b9d19917df95426c7bb", "score": "0.52544945", "text": "def prefix(self, prefix):\n self._path_prefix = prefix", "title": "" }, { "docid": "afbc0d087cd40abed071c7a76c444756", "score": "0.5242554", "text": "def start_requests(self):\n use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)\n add_urls = self.server.sadd if use_set else self.server.lpush\n add_urls(self.redis_key, *self.start_urls)\n return self.next_requests()", "title": "" }, { "docid": "fb1f0d3a194da354b8adaa1ae791e95b", "score": "0.52402014", "text": "def __init__(self, request: object) -> None:\n super().__init__({}, request, URL, Api)", "title": "" }, { "docid": "a66f7c3ccfe558f210d043f737df72b5", "score": "0.5227911", "text": "def _make_url(self):\n ...", "title": "" }, { "docid": "4505c16959818b604c3065776e8c2160", "score": "0.5221241", "text": "def start_request(self):\n self.session_manager.start_request()", "title": "" }, { "docid": "4b8c6e31cf89c7632342ef4e8bf0025a", "score": "0.5214959", "text": "def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')", "title": "" }, { "docid": "205d6e690134f000ec627329a4be986c", "score": "0.5212387", "text": "def process_request(self, req, resp, resource, params):", "title": "" }, { "docid": "3b0aaa036508cb4d667b90bb755f92f4", "score": "0.5200916", "text": "def rebuild_request(self) -> Quotecast.Request:\n\n references = self.references\n request = Quotecast.Request()\n\n for vwd_id, metric in references.values():\n request.subscriptions[vwd_id].append(metric)\n\n return request", "title": "" }, { "docid": "478c85e4221db1458a8b4dcbd54334f7", "score": "0.51992905", "text": "def prepare_request(self, request):\r\n request = super(OpenStackApiSession, self).prepare_request(request)\r\n\r\n if not self._authenticating:\r\n request.headers.update({\r\n \"X-Auth-Token\": self._token_x_subject,\r\n 
\"Content-Type\": 'application/json'\r\n })\r\n\r\n logger.debug(\"URL: %s\", request.url)\r\n\r\n return request", "title": "" }, { "docid": "d9175c20f2ead71059a5b2384a6197f3", "score": "0.51864296", "text": "def link_prefix(request):\n return request.application_url", "title": "" }, { "docid": "ae08cc845e86c9f98634c3c0ed3dd7c9", "score": "0.5186363", "text": "def _prepare_url(self):\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' + '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url", "title": "" }, { "docid": "5af288073fda15a5f37ea3bc03dce80f", "score": "0.5186299", "text": "def send_request(self, request):\n # Below line is a debug to show what the full request URL is. Useful in testing multitenancy API calls\n #print(\"KARTIK : CONN OBJECT : send_request called with URL: '\"+self._url_prefix + request.endpoint+\"'\")\n #POORVA: changed url-prefix because only admin has right to update spot-region in geo-fabric present in any non-mm tenant\n if '_tenant' in request.endpoint and '_fabric' in request.endpoint:\n find_url = self._url_prefix.find('/_tenant')\n find_url += 1\n url = self._url_prefix[0:find_url]\n final_url = url + request.endpoint\n else:\n final_url = self._url_prefix + request.endpoint\n\n return self._http_client.send_request(\n method=request.method,\n url=final_url,\n params=request.params,\n data=request.data,\n headers=request.headers,\n auth=self._auth,\n )", "title": "" }, { "docid": "85d3d3a37a6a3f57ce6c625333e3725e", "score": "0.51816934", "text": "def start_requests(self) -> scrapy.http.Request:\n for i, query in enumerate(self.queries):\n url = self.query_url(query)\n yield scrapy.Request(url,\n meta={'cookiejar': i,\n 'queries': [query]},\n dont_filter=True)", "title": "" }, { "docid": "e73d6107c2127ac36dc35a93d4594906", "score": "0.5170789", "text": "def __call__(self, requestStr):\n return self.connection.Request(requestStr)", "title": "" }, { "docid": "8613eb58b007a54e40b586d36f35daba", "score": "0.5168427", "text": "def __extend_uri(prefixes, short):\n for prefix in prefixes:\n if short.startswith(prefix):\n return short.replace(prefix + ':', prefixes[prefix])\n return short", "title": "" }, { "docid": "d1263302e570c00b49a866f0f75d1923", "score": "0.5164553", "text": "def make_request_from_data(self, data):\n new_url = 'https://www.instagram.com/explore/tags/%s/' % data\n if '://' in new_url:\n return Request(new_url, dont_filter=True, meta={'keyword': data})\n else:\n self.logger.error(\"Unexpected URL from '%s': %r\", self.redis_key, new_url)", "title": "" }, { "docid": "3441f37cbf8a95dc243276a8e5a58fc0", "score": "0.5159658", "text": "def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/event-notification/kafka/server\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)", "title": "" }, { "docid": 
"a81aa6f047e27d0e936f1c817263c27d", "score": "0.5150599", "text": "def mangle_request(self, request):\n data = request.get_data()\n for regex, string in self._manglers['q']['b']:\n data = regex.sub(string, data)\n\n header_string = str(request.get_headers())\n \n for regex, string in self._manglers['q']['h']:\n header_string = regex.sub(string, header_string)\n \n headers_inst = Headers.from_string(header_string)\n\n request.set_headers(headers_inst)\n request.add_data(data)\n return request", "title": "" }, { "docid": "60c21543fd5b524f2b3b7fb9da556f1b", "score": "0.5144946", "text": "def getChild(self, name, request):\r\n request.prepath = []\r\n request.postpath.insert(0, name)\r\n # re-establishes request.postpath so to contain the entire path\r\n return self.wsgi_resource", "title": "" }, { "docid": "9b85c697234450b5a1c92d11db3356fa", "score": "0.5144088", "text": "def _generate_request_url(endpoint: str, request_path: str) -> str:\n if endpoint is None or request_path is None:\n raise ValueError(\"endpoint and request_path are required.\")\n if urllib_parse.urlparse(request_path).path != request_path:\n raise ValueError('Incorrect format for request_path: {request_path}'.format(**{'request_path': request_path}))\n return endpoint + request_path", "title": "" }, { "docid": "13dea143e8542828a85216f6d4592311", "score": "0.514312", "text": "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "title": "" }, { "docid": "13dea143e8542828a85216f6d4592311", "score": "0.514312", "text": "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "title": "" }, { "docid": "13dea143e8542828a85216f6d4592311", "score": "0.514312", "text": "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "title": "" }, { "docid": "13dea143e8542828a85216f6d4592311", "score": "0.514312", "text": "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "title": "" }, { "docid": "13dea143e8542828a85216f6d4592311", "score": "0.514312", "text": "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "title": "" }, { "docid": "13dea143e8542828a85216f6d4592311", "score": "0.514312", "text": "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "title": "" }, { "docid": "13dea143e8542828a85216f6d4592311", "score": "0.514312", "text": "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "title": "" }, { "docid": "54f5aa0f17871b0e6f5454558c556693", "score": "0.5140402", "text": "def test_request_url(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': None, 'limit': None})\n s = r._get_response(0, 0)\n assert_equals(httpretty.last_request().path, '/test?limit=0&offset=0')", "title": "" }, { "docid": 
"6f52fca0d15afaba11bd582d165d2113", "score": "0.5139332", "text": "def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/fw/template/logging/{name}\"\n f_dict = {}\n \n f_dict[\"name\"] = \"\"\n\n return url_base.format(**f_dict)", "title": "" }, { "docid": "b28d09660f6036f32ee248e41f245585", "score": "0.5128043", "text": "def onRequestStart(self, api, request):\n logging.info('Request start ({})'.format(request))", "title": "" }, { "docid": "5726f68377b21db5a7db53b6d28217fd", "score": "0.5126555", "text": "def adapt_request(self, original_request, listen_port):\n return original_request.replace(\"localhost:%d\" % listen_port, \"%s:%s\" % (self.address, self.port))", "title": "" }, { "docid": "455b3e507cddaad1703a7891f77943df", "score": "0.512223", "text": "def InvocationAddRequest(builder, request):\n return AddRequest(builder, request)", "title": "" }, { "docid": "ce1e404d64c21ffaac2875c69327967f", "score": "0.51144826", "text": "def route( request, c ):", "title": "" }, { "docid": "d307ed200a7e53488c41ffe5a643257d", "score": "0.51133466", "text": "def appurl( instkey, name, **matchdict ) :", "title": "" }, { "docid": "a29c7ddfad9bbb58107591a03586e975", "score": "0.5111879", "text": "def request(self, **request):\n environ = {\n 'HTTP_COOKIE': self.cookies,\n 'PATH_INFO': '/',\n 'QUERY_STRING': '',\n 'REQUEST_METHOD': 'GET',\n 'SCRIPT_NAME': '',\n 'SERVER_NAME': 'testserver',\n 'SERVER_PORT': 80,\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'wsgi.version': (1,0),\n 'wsgi.url_scheme': 'http',\n 'wsgi.input': FakePayload(''),\n 'wsgi.errors': self.errors,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': False,\n 'wsgi.run_once': False,\n }\n environ.update(self.defaults)\n environ.update(request)\n return WSGIRequest(environ)", "title": "" } ]
6e6569e4b674b3abb5a1fb9d542546fa
Recovers all the containers of an instance including metasploit msfrpcd.
[ { "docid": "2a532e88e44d753f7eda991a95711b40", "score": "0.6858903", "text": "def recover_containers(containers):\n port = None\n\n for container in containers: # start all the containers\n container.start()\n container.reload()\n\n for ports in container.ports.values():\n for port_configuration in ports:\n port = port_configuration[\"HostPort\"]\n if port:\n break\n exit_code, _ = container.exec_run(f\"./msfrpcd -P 123456 -S -p {port}\")\n if exit_code:\n raise ApiException(error_msg=f\"Failed recovering container {container.id}\", error_code=500)", "title": "" } ]
[ { "docid": "4e5206faee73d635ed0330cac7c2af7c", "score": "0.6030402", "text": "def restart_vpp_in_all_containers(self):\n for container in self.containers:\n self.engine.container = self.containers[container]\n self.engine.restart_vpp()", "title": "" }, { "docid": "6c5e264d51d78f698af528d02fc793dc", "score": "0.5665045", "text": "async def rebuild(self) -> None:\n with suppress(DockerError):\n await self.instance.stop()\n await self.start()", "title": "" }, { "docid": "10d6237307a2b8d8f7fd4b431b67577d", "score": "0.558642", "text": "def _old_images_clean_up(self, ctx: ExecutionContext, service: ServiceDeclaration, clear_images: bool):\n\n if not clear_images:\n yield\n return\n\n self.io().debug('Finding all container names for service \"%s\"' % service.get_name())\n images = self.get_all_images_for_service(ctx, service)\n\n self.io().debug('OK, images collected')\n yield\n\n for image in images:\n # docker build was run locally\n if image is None:\n continue\n\n try:\n self.io().info('Trying to clean up image \"%s\"' % image)\n self.containers(ctx).rm_image(image, capture=True)\n except:\n self.io().warn('Cannot clean up image: \"%s\" [ignored, may be in use]' % image)", "title": "" }, { "docid": "4a4f478a59fc05da3af546f34a146409", "score": "0.54434687", "text": "def garbage_collect(self):\n self.logger.info(\"Running garbage collector\")\n p = subprocess.Popen([self.registry_bin, \"garbage-collect\", self._save_config(), \"--delete-untagged=true\"], stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n retval = self._track_process(p, wait=True)\n \n if retval != 0:\n raise Exception(\"registry garbage-collect failed\")", "title": "" }, { "docid": "3cc086681c493c03dd89c53543fe398a", "score": "0.5413362", "text": "def _clean_all(self, client):\n if os.path.isdir(self.deploy.TMP_FOLDER):\n shutil.rmtree(self.deploy.TMP_FOLDER)\n\n client.containers.prune()\n\n for container in client.containers.list():\n if \"ict\" in container.name:\n container.kill()", "title": "" }, { "docid": "fe521438c8ab6759654f58fba8769a84", "score": "0.53846735", "text": "def setUp(self):\n containers = docker.ps(\"-qa\").split()\n for container in containers:\n delete_container(container)\n\n self.ip = get_ip()\n self.start_etcd()", "title": "" }, { "docid": "e2e3bef5430b7c60f277d08fab8bda4c", "score": "0.5328558", "text": "def reboot_instances(self):\n with open(f'WebApp/DeploymentLogs/{self.protocol_name}.log', 'w+') as output_file:\n print(f'Rebooting {self.protocol_name} instances', file=output_file)\n regions = self.protocol_config['cloudProviders']['AWS']['regions']\n\n for idx in range(len(regions)):\n region_name = regions[idx][:-1]\n instances = self.describe_instances(region_name, self.protocol_name)\n client = self.session.client('ec2', region_name=region_name)\n client.reboot_instances(InstancesIds=instances)", "title": "" }, { "docid": "69d21d67f225044f20a40b6c498ef51d", "score": "0.52637434", "text": "def main():\n for container in EVENTS:\n try:\n container_stop = False\n if \"start\" in container[\"status\"]:\n try:\n container_event_id = container[\"Actor\"][\"ID\"]\n container_inspect = CLIENT.inspect_container(container_event_id)\n container_id = container_inspect[\"Id\"]\n container_name = container_inspect[\"Name\"].replace(\"/\", \"\")\n\n container_cap_drop = container_inspect[\"HostConfig\"][\"CapDrop\"]\n container_cap_add = container_inspect[\"HostConfig\"][\"CapAdd\"]\n container_privileged = container_inspect[\"HostConfig\"][\"Privileged\"]\n container_security_opt = 
container_inspect[\"HostConfig\"][\n \"SecurityOpt\"\n ]\n\n no_security_opt = \"%s: no security options has been set\" % (\n container_name\n )\n priv_not_allowed_log = \"%s: privileged set but not allowed\" % (\n container_name\n )\n priv_no_policy_log = \"%s: privileged set but no policy\" % (\n container_name\n )\n cap_drop_log = \"%s: all capabilities not dropped\" % (container_name)\n cap_add_all_log = \"%s: capability ALL has been set\" % (\n container_name\n )\n stop_container_log = \"%s: stopping container\" % (container_name)\n\n try:\n if CONF[\"debug\"]:\n print((\"container_name: \", container_name))\n print((\"containerStatus: \", container[\"status\"]))\n print((\"container_event_id: \", container_event_id))\n print(\n (\n \"container_inspect: \",\n CLIENT.inspect_container(container_event_id),\n )\n )\n print((\"containerID: \", container_inspect[\"Id\"]))\n print((\"container_cap_drop: \", container_cap_drop))\n print((\"container_cap_add: \", container_cap_add))\n print((\"container_security_opt: \", container_security_opt))\n print((\"container_privileged: \", container_privileged))\n print((\"container_stop: \", container_stop))\n\n except NameError:\n pass\n\n try:\n if container_privileged is not False:\n if not CONF[container_name][\"privileged\"]:\n syslog.syslog(priv_not_allowed_log)\n container_stop = True\n\n except KeyError:\n syslog.syslog(priv_no_policy_log)\n container_stop = True\n\n try:\n if container_security_opt is None:\n if CONF[container_name][\"security_opt_required\"]:\n syslog.syslog(no_security_opt)\n container_stop = True\n\n if CONF[\"debug\"]:\n print(\n (\n \"container_security_opt: \",\n container_security_opt,\n )\n )\n\n except KeyError:\n syslog.syslog(no_security_opt)\n container_stop = True\n\n try:\n if container_cap_drop is not None:\n for cap_drop in container_cap_drop:\n if (\n \"all\" not in cap_drop.lower()\n and not CONF[container_name][\"cap_drop_required\"]\n ):\n syslog.syslog(cap_drop_log)\n container_stop = True\n\n if CONF[\"debug\"]:\n print((\"cap_drop: \", cap_drop.lower()))\n print(\n (\n \"cap_dropRequired: \",\n CONF[container_name][\n \"cap_drop_required\"\n ],\n )\n )\n\n except KeyError:\n syslog.syslog(cap_drop_log)\n container_stop = True\n\n try:\n if container_cap_drop is None:\n if CONF[container_name][\"cap_drop_required\"]:\n syslog.syslog(cap_drop_log)\n container_stop = True\n\n except KeyError:\n syslog.syslog(cap_drop_log)\n container_stop = True\n\n if container_cap_add is not None:\n for cap_add in container_cap_add:\n if \"all\" in cap_add.lower():\n syslog.syslog(cap_add_all_log)\n container_stop = True\n\n if container_stop and container_stop is not False:\n client_stop = \"%s\" % (container_id)\n\n try:\n if CONF[\"debug\"]:\n print((\"container_stop: \", container_stop))\n print((\"CLIENT.stop sent to \", client_stop))\n\n except NameError:\n pass\n\n try:\n CLIENT.stop(client_stop)\n syslog.syslog(stop_container_log)\n\n except UnboundLocalError as exception:\n print(exception)\n\n except KeyError as exception:\n print(exception)\n\n except UnboundLocalError as exception:\n print(exception)\n\n except KeyError as exception:\n print(exception)\n\n except KeyError:\n pass", "title": "" }, { "docid": "00b665264a82d495cf0515128e03273c", "score": "0.5256297", "text": "def refresh(self):\n current_containers = self._client.containers(all=True)\n self.clear()\n self.update(name[1:] for container in current_containers for name in container['Names'])", "title": "" }, { "docid": 
"148283ba9a7d944c2629189c1f545bb8", "score": "0.5252766", "text": "def tearDown(self):\n if do_teardown():\n containers = docker.ps(\"-qa\").split()\n for container in containers:\n delete_container(container)", "title": "" }, { "docid": "cde38b38f2edaf8991c51760da609d24", "score": "0.5213369", "text": "def container_monitor(self):", "title": "" }, { "docid": "663e83310ae419bb6078fe7ed9764e28", "score": "0.520288", "text": "def clean():\n\n # Check minikube status and delete if minikube is running\n print 'Checking minikube status...'\n try:\n status = minikube_adm.check_status()\n if status['minikube'] == \"Running\" or status['minikube'] == \"Stopped\":\n print 'Deleting minikube...'\n try:\n minikube_adm.teardown()\n except Exception as err:\n print str(err)\n else:\n print 'Machine not present.'\n except Exception as err:\n print str(err)\n\n # Remove all docker containers\n print\n print 'Removing docker containers...'\n try:\n minikube_adm.clear_containers()\n except Exception as err:\n print str(err)\n\n print\n print 'Removing', NDM_TEST_YAML_NAME, '...'\n if isfile(NDM_TEST_YAML_PATH+NDM_TEST_YAML_NAME):\n removefile(NDM_TEST_YAML_PATH+NDM_TEST_YAML_NAME)\n print 'Removed.'\n else:\n print 'Not present.'", "title": "" }, { "docid": "f6ea9b7398b9b3e7cf45590864d067ac", "score": "0.51689714", "text": "def containers_reset(configdir=None):\n _configure(configdir=configdir)\n IT.DockerFactory.reset()", "title": "" }, { "docid": "fc17d87c0586b7edf4e37a98686dde7e", "score": "0.51647896", "text": "def resetall():\n killall()\n local('vagrant provision')\n resetdb(delete_images=True, load_images=True)", "title": "" }, { "docid": "ffc2c138fa06a47838ccd3a0d740c428", "score": "0.5155797", "text": "def restart_all():\n OpenstackLoadbalancerCharm.singleton.restart_all()", "title": "" }, { "docid": "fc0eabde0776cfbc7b84c74dc3e0442f", "score": "0.5147618", "text": "def cleanup_images(min_age_days=0, simulate=False):\n\n # two helper functions for working with the shortened image ids/names that the container listing outputs\n def image_matches_abbreviated_id(image, abbreviated_id):\n # match for short id\n if image['Id'].startswith(abbreviated_id):\n return True\n # match for name / tag\n if not abbreviated_id.startswith(\"unknown\") and abbreviated_id in image['RepoTags']:\n return True\n return False\n \n def image_exists(abbreviated_id):\n for image in images:\n if image_matches_abbreviated_id(image, abbreviated_id):\n return True\n return False\n \n images = api_client.images()\n # get all running and non-running containers\n containers = api_client.containers(trunc=False, all=True)\n # the container list has abbreviated image ids like 2524da18912f\n used_image_ids_abbreviated = [container['Image'] for container in containers]\n for short_id in used_image_ids_abbreviated:\n assert image_exists(short_id), \"unknown abbreviated image id {} for container, cannot find image\".format(short_id)\n \n def is_used_by_containers(image):\n \"is an image currently used by a container?\"\n for short_id in used_image_ids_abbreviated:\n if image_matches_abbreviated_id(image, short_id):\n return True\n return False\n \n for image in images:\n if image['RepoTags'] != [u'<none>:<none>']:\n # image is tagged, skip\n continue\n if is_used_by_containers(image):\n # image is in use, skip\n continue\n if image['Created'] > time.time() - 60*60*24*min_age_days:\n # image is too young, skip\n continue\n \n if simulate:\n print_bold(\"would delete unused old image {}\".format(image['Id']))\n else:\n 
print_bold(\"deleting unused old image {}\".format(image['Id']))\n exec_verbose(\"docker rmi \" + image['Id'])", "title": "" }, { "docid": "9a88e347c1febfb79e2577a1d9baf05e", "score": "0.51401055", "text": "def clean(hard):\n docker_clean(hard)", "title": "" }, { "docid": "c39bc80bd02d1b51f7ac18401a58a15f", "score": "0.5111838", "text": "def test_get_containers(self):\n pass", "title": "" }, { "docid": "2430819623863d87600994fe27aee285", "score": "0.51060826", "text": "def dockercomposerestart(ctx):\n _run('docker-compose restart')", "title": "" }, { "docid": "4a093a334b7c391eabbb309058806b5a", "score": "0.5081483", "text": "def cleanup(self):\n if self.reuse_containers:\n log.warning(\"Container reuse enabled: Skipping environment cleanup\")\n return\n\n self.down()", "title": "" }, { "docid": "0db9ed319c2a2bfa04dd58b3f18b9924", "score": "0.5059242", "text": "def remount(ctx):\n click.echo('Sudo needed to remount instances')\n password = getpass.getpass()\n for instance in ctx.obj['instances']:\n click.echo('Remounting instance {}'.format(instance))\n inst_path = os.path.join(CFG['system']['InstancesDir'], instance)\n with sh.contrib.sudo(password=password, _with=True):\n sh.mount(\"-o\" \"remount\", inst_path)", "title": "" }, { "docid": "e10ff9dedd548f7f2518ca361e7d9ffa", "score": "0.5058462", "text": "def purge():\n chain_docker_cmds('docker ps -q', 'docker rm -f')\n chain_docker_cmds('docker images -f dangling=true -q', 'docker rmi -f')\n chain_docker_cmds('docker network ls -q', 'docker network rm')", "title": "" }, { "docid": "53dcb3174a1b15d6b8c933861f12e844", "score": "0.50543106", "text": "def restore_instances(conn):\n sg = get_security_group(conn)\n key_pair = get_key_pair(conn)\n snapshots = get_snapshots()\n\n images = conn.get_all_images(owners=[\"self\"])\n image_name_mapping = {} # Mapping of image name to instance id\n output.debug(\"Launching instances from this account's AMIs...\")\n for image in images:\n reservation = image.run(\n min_count=1, max_count=1,\n security_groups=[sg.name], key_name=key_pair.name,\n instance_type=EC2_DEFAULT_INSTANCE_TYPE,\n placement=EC2_DEFAULT_EBS_AZ,\n monitoring_enabled=True)\n time.sleep(EC2_DEFAULT_WAIT_INTERVAL)\n instance = reservation.instances[0]\n conn.create_tags([instance.id], {\"Name\": image.name})\n image_name_mapping[image.name] = instance.id\n\n instances = get_instances(conn, True, \"pending\")\n output.debug(\"Waiting for all instances running...\")\n for instance in instances:\n while instance.update() == \"pending\":\n time.sleep(EC2_DEFAULT_WAIT_INTERVAL)\n assert instance.update() == \"running\"\n\n instances = get_instances(conn, True, \"running\")\n\n addresses = get_addresses()\n output.debug(\"Associating public IPs into restored instances...\")\n\n for instance in instances:\n name = instance.tags.get(\"Name\", \"-\")\n ip = addresses[name]\n conn.associate_address(instance.id, ip)\n\n output.debug(\"Creating EBS data volumes from snapshots...\")\n snapshots_dict = get_snapshots()\n snapshot_ids = [item for item in snapshots_dict]\n snapshots = conn.get_all_snapshots(snapshot_ids=snapshot_ids,\n owner=\"self\")\n for snapshot in snapshots:\n volume = snapshot.create_volume(EC2_DEFAULT_EBS_AZ)\n while volume.update() != \"available\":\n time.sleep(EC2_DEFAULT_WAIT_INTERVAL)\n name = snapshots_dict[snapshot.id]\n instanced_id = image_name_mapping[name]\n output.debug(\"Attaching EBS data volume for instance %s...\" % name)\n volume.attach(instanced_id, EC2_DEFAULT_DATA_DEVICE)\n\n # Delete snapshots of data 
volumes\n delete_all_snapshots(conn, snapshot_ids)\n db.write_data(DB_FILES[\"snapshots\"], [])", "title": "" }, { "docid": "2748bbe160dd2420acd09272c2162dc6", "score": "0.5054047", "text": "def _discover_instances(self, context):\n # Call the Compute Discovery Driver to get a list of existing VM's\n drv_instances = self.driver.discover_instances(context)\n # Generate the UUID's for the VM's and determine which are Managed\n self._generate_instance_uuids(context, drv_instances)\n return drv_instances", "title": "" }, { "docid": "87da65626334e96e0bd35ea506b358c2", "score": "0.50448513", "text": "def reboot():\n _reboot_instance()", "title": "" }, { "docid": "aac82cb248a86af7266aadb38e38a3a4", "score": "0.501779", "text": "def test_spawn_fail_cleanup_1(self):\n vdi_recs_start = self._list_vdis()\n stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)\n self.assertRaises(xenapi_fake.Failure,\n self._test_spawn, 1, 2, 3)\n # No additional VDI should be found.\n vdi_recs_end = self._list_vdis()\n self._check_vdis(vdi_recs_start, vdi_recs_end)", "title": "" }, { "docid": "dca6ed01ac21c8190883674418a5c120", "score": "0.49937835", "text": "def clean_running_containers_and_related_resources(self):\n LOG.debug(\"Terminating all running warm containers\")\n for function_name, container in self._containers.items():\n LOG.debug(\"Terminate running warm container for Lambda Function '%s'\", function_name)\n self._container_manager.stop(container)\n self._clean_decompressed_paths()\n self._observer.stop()", "title": "" }, { "docid": "3d220a2362effa26961285dab305564e", "score": "0.49919683", "text": "def fresh_restart(ctx, ignore_system_prune=False):\n run_command(ctx, f\"docker-compose --project-name {PROJECT_NAME} kill\", in_container=False)\n if not ignore_system_prune:\n run_command(ctx, \"docker system prune\", in_container=False)\n run_command(ctx, f\"docker volume rm -f {PROJECT_NAME}_{PROJECT_NAME}-data\", in_container=False)\n run_command(ctx, f\"docker-compose --project-name {PROJECT_NAME} build\", in_container=False)\n run_command(ctx, f\"docker-compose --project-name {PROJECT_NAME} up -d\", in_container=False)", "title": "" }, { "docid": "738764bc32433bf5b9930ef3b4ddb8f4", "score": "0.49760756", "text": "async def cleanup(self) -> None:\n try:\n await self.stop(await self.docker.containers.get(self.name))\n finally:\n return", "title": "" }, { "docid": "87012d0836fd28ad167ffc3fea14d814", "score": "0.49733013", "text": "def stop_media_machinery(c):\n c.run('docker-compose down --remove-orphans')", "title": "" }, { "docid": "0f0b4e0bb020b8e51251e85c9d3b1c18", "score": "0.4970779", "text": "def clean_intallations(cls):\n logging.debug(\"Start cleaning images...\\n\")\n\n # Get ongoing installations dictionary\n ongoing_intallations = cls.get_ongoing_installations()\n\n # For each image in the ongoing installation file\n for image_name in ongoing_intallations.keys():\n\n # If the image status is 'CORRUPTED', clean the image\n if cls.get_image_status(image_name) == cls.ImageStatus.CORRUPTED:\n\n logging.warning('Image \\'' + image_name + '\\' corrupted, trying to clean image')\n\n # Importing image class for cleaning\n image_class = cls.get_class(ongoing_intallations[image_name]['image_class'])\n\n # Use the image class clean static method to clean image\n image_class.clean(image_name)\n\n # Unregister image from ongoing installations\n cls.unregister_installation(image_name)\n\n logging.warning('Image \\'' + image_name + '\\' cleaned\\n')", "title": "" }, { "docid": 
"c269115bfd63e049c642339c95f587dc", "score": "0.49591768", "text": "def _clean(self):\n if self.container:\n self.container.destroy()\n self.look_missed_request(\n self.container.daos.result.stderr)\n if self.pool:\n self.pool.destroy(force=1)\n self.look_missed_request(self.pool.dmg.result.stderr)", "title": "" }, { "docid": "1fee11fcef871cc1bf2d4fc79d9e8bc2", "score": "0.495117", "text": "def clean_all_containers():\n print('\\n---\\nSelected to stop all active containers.')\n run_command('docker container stop $(docker container ls -aq)')\n\n print('\\n---\\nSelected to remove all active containers.')\n run_silent('docker container rm $(docker container ls -aq)')", "title": "" }, { "docid": "1fe0bd3e04b0c23885590255ee618cae", "score": "0.4935521", "text": "def reset(self):\n for ddpg_agent in self.maddpg_agent:\n ddpg_agent.reset()", "title": "" }, { "docid": "a1d12fff15b1bdd3bd9329853a61adc0", "score": "0.49264887", "text": "def containers(forge):\n forge.execute(forge.bake)", "title": "" }, { "docid": "845249aec41ed2192c6346b4b714104f", "score": "0.4901409", "text": "def start_vpp_in_all_containers(self):\n for container in self.containers:\n self.engine.container = self.containers[container]\n # We need to install supervisor client/server system to control VPP\n # as a service\n self.engine.install_supervisor()\n self.engine.start_vpp()", "title": "" }, { "docid": "74fe493f3d6bba2c10f009e52d14845f", "score": "0.489848", "text": "def test_registry_shutdown_and_recovery_node(self, nodes):\n\n # Pull and push images to registries\n log.info(\"Pull and push images to registries\")\n image_pull_and_push(project_name=self.project_name)\n\n # Get the node list\n node_list = get_nodes(node_type=\"worker\")\n\n for node in node_list:\n\n # Stop node\n nodes.stop_nodes(nodes=[node])\n\n # Validate node reached NotReady state\n wait_for_nodes_status(\n node_names=[node.name], status=constants.NODE_NOT_READY\n )\n\n # Start node\n nodes.start_nodes(nodes=[node])\n\n # Validate all nodes are in READY state and up\n retry(\n (\n CommandFailed,\n TimeoutError,\n AssertionError,\n ResourceWrongStatusException,\n ),\n tries=60,\n delay=15,\n )(wait_for_nodes_status)(timeout=900)\n\n # Validate all storage pods are running\n retry(CommandFailed)(wait_for_storage_pods)(timeout=900)\n\n # Validate cluster health ok and all pods are running\n self.sanity_helpers.health_check(tries=40)\n\n # Validate image registry pods\n validate_registry_pod_status()\n\n # Validate image exists in registries path\n validate_image_exists()", "title": "" }, { "docid": "0117ac746ac951e9e5e44400b7331d41", "score": "0.48575994", "text": "def clean_up_dangling_images(self):\n cargoes = Image.all(client=self._client_session, filters={'dangling': True})\n for id, cargo in six.iteritems(cargoes):\n logger.info(\"Removing dangling image: {0}\".format(id))\n cargo.delete()", "title": "" }, { "docid": "b6187847bb7941bc79abaadaabb7f789", "score": "0.4857415", "text": "def full_restart(self):\n for p in self._sc2_proc:\n p.close()\n try:\n self._launch()\n self.force_restarts += 1\n except:\n self.full_restart()", "title": "" }, { "docid": "66a068a0998ebbd37cd81b1b4015f508", "score": "0.4855063", "text": "def validate_k8s_service_images(nodes, k8s_version, rke_client, kubectl):\n expectedimagesdict = build_expectedimages_dict(k8s_version, rke_client)\n print(expectedimagesdict)\n\n for node in nodes:\n containers = node.docker_ps()\n allcontainers = node.docker_ps(includeall=True)\n print(\"Container Dictionary \")\n 
print(containers)\n print(\"All containers dictionary\")\n print(allcontainers)\n sidekickservice = \"service-sidekick\"\n for key in expectedimagesdict.keys():\n servicename = key\n if servicename in containers:\n print(\"Service name\")\n print(servicename)\n print(expectedimagesdict[servicename])\n print(containers[servicename])\n assert expectedimagesdict[servicename] == \\\n containers[servicename], (\n \"K8s service '{0}' does not match config version \"\n \"{1}, found {2} on node {3}\".format(\n servicename, expectedimagesdict[servicename],\n containers[servicename], node.node_name))\n if sidekickservice in expectedimagesdict.keys():\n if sidekickservice in allcontainers:\n print(\"sidekick-service in allcontainers\")\n print(sidekickservice)\n print(expectedimagesdict[sidekickservice])\n print(allcontainers[sidekickservice])\n assert expectedimagesdict[sidekickservice] == \\\n allcontainers[sidekickservice], (\n \"K8s service '{0}' does not match config version \"\n \"{1}, found {2} on node {3}\".format(\n sidekickservice, expectedimagesdict[sidekickservice],\n allcontainers[sidekickservice], node.node_name))\n\n verify_ingress_addon_images(k8s_version, kubectl,\n \"ingress-nginx\", \"app=ingress-nginx\",\n \"app=default-http-backend\")\n verify_networking_addon_images(k8s_version, kubectl,\n \"kube-system\", \"k8s-app=canal\")\n verify_metrics_server_addon_images(k8s_version, kubectl,\n \"kube-system\", \"k8s-app=metrics-server\")\n verify_dns_addon_images(k8s_version, kubectl,\n \"kube-system\", \"k8s-app=kube-dns\")", "title": "" }, { "docid": "caa176f9037262a91a970ccb836994aa", "score": "0.4849737", "text": "def load_containers(self):\n # Scan through directories and load ones that look right\n containers = []\n for name in os.listdir(self.path):\n container_path = os.path.join(self.path, name)\n if os.path.isdir(container_path) and os.path.isfile(os.path.join(container_path, \"Dockerfile\")):\n containers.extend(Container.from_directory(self, container_path))\n self.add_containers(containers)", "title": "" }, { "docid": "6a1caf4f5e04ce4fd3953a9af848664c", "score": "0.4839353", "text": "def _ProcessInstances(instance_list):\n return [instance.RemoteInstance(gce_instance) for gce_instance in instance_list]", "title": "" }, { "docid": "bea0cfcc3ef61e4c60678cbcf699e0f6", "score": "0.48066884", "text": "def instantiate_all(self): \n self._instances.instantiate_all(self=self._instances)", "title": "" }, { "docid": "f1f991efde6092758fd43a28f9021e7c", "score": "0.4805913", "text": "def cleanup(self):\n from CAServer import casdel\n lst = self.get_pv_list()\n for item in lst:\n casdel(item)", "title": "" }, { "docid": "3ab112c8295d542bff04dd51f62b163e", "score": "0.4800534", "text": "def clean(self):\n self.clear()\n runtimes = self.list_runtimes()\n for docker_image_name, memory in runtimes:\n self.delete_runtime(docker_image_name, memory)\n\n logger.debug('Deleting all lithops configmaps')\n configmaps = self.core_api.list_namespaced_config_map(namespace=self.namespace)\n for configmap in configmaps.items:\n config_name = configmap.metadata.name\n if config_name.startswith('lithops'):\n logger.debug('Deleting configmap {}'.format(config_name))\n self.core_api.delete_namespaced_config_map(\n name=config_name,\n namespace=self.namespace,\n grace_period_seconds=0)", "title": "" }, { "docid": "081c3882668f3749dfd7780546146859", "score": "0.4796618", "text": "def _clear_container_cache(self):\n cmd = [\"docker\", \"system\", \"prune\", \"--all\", \"--force\", \"--volumes\"]\n shell(cmd)\n 
if not os.path.exists(self._app.config.container_image_cache_path):\n return\n for dirpath, _, files in safe_walk(self._app.config.container_image_cache_path):\n for f in files:\n os.unlink(os.path.join(dirpath, f))", "title": "" }, { "docid": "2707bab2f3527132514ffc3f19f1083c", "score": "0.47923505", "text": "def testProcess(self,\n mock_list_disks,\n mock_getDisksFromInstance,\n mock_CreateDiskCopy,\n mock_GetInstance):\n mock_getDisksFromInstance.return_value = FAKE_DISK_MULTIPLE\n mock_CreateDiskCopy.side_effect = FAKE_DISK_COPY\n mock_GetInstance.return_value = FAKE_INSTANCE\n mock_list_disks.return_value = {\n 'bootdisk': FAKE_BOOT_DISK,\n 'disk1': FAKE_DISK\n }\n\n test_state = state.DFTimewolfState(config.Config)\n collector = gce_disk_copy.GCEDiskCopy(test_state)\n collector.SetUp(\n 'test-analysis-project-name',\n 'test-target-project-name',\n 'fake_zone',\n 'my-owned-instance',\n None,\n True,\n True\n )\n FAKE_INSTANCE.Stop = mock.MagicMock()\n\n collector.PreProcess()\n conts = collector.GetContainers(collector.GetThreadOnContainerType(), True)\n for d in conts:\n collector.Process(d) # pytype: disable=wrong-arg-types\n # GetContainers returns the abstract base class type, but process is\n # called with the instantiated child class.\n mock_CreateDiskCopy.assert_called_with(\n 'test-target-project-name',\n 'test-analysis-project-name',\n FAKE_INSTANCE.zone,\n disk_name=d.name) # pytype: disable=attribute-error\n collector.PostProcess()\n\n FAKE_INSTANCE.Stop.assert_called_once()\n\n out_disks = collector.GetContainers(containers.GCEDisk)\n out_disk_names = [d.name for d in out_disks]\n expected_disk_names = ['disk1-copy', 'disk2-copy']\n self.assertEqual(out_disk_names, expected_disk_names)\n for d in out_disks:\n self.assertEqual(d.project, 'test-analysis-project-name')\n\n # Do it again, but we don't want to stop the instance this time.\n # First, clear the containers\n collector.GetContainers(containers.GCEDisk, True)\n mock_CreateDiskCopy.side_effect = FAKE_DISK_COPY\n collector = gce_disk_copy.GCEDiskCopy(test_state)\n collector.SetUp(\n 'test-analysis-project-name',\n 'test-target-project-name',\n 'fake_zone',\n 'my-owned-instance',\n None,\n True,\n False,\n )\n FAKE_INSTANCE.Stop = mock.MagicMock()\n\n collector.PreProcess()\n conts = collector.GetContainers(collector.GetThreadOnContainerType(), True)\n for d in conts:\n collector.Process(d) # pytype: disable=wrong-arg-types\n # GetContainers returns the abstract base class type, but process is\n # called with the instantiated child class.\n mock_CreateDiskCopy.assert_called_with(\n 'test-target-project-name',\n 'test-analysis-project-name',\n FAKE_INSTANCE.zone,\n disk_name=d.name) # pytype: disable=attribute-error\n collector.PostProcess()\n\n FAKE_INSTANCE.Stop.assert_not_called()\n out_disks = collector.GetContainers(containers.GCEDisk)\n out_disk_names = sorted([d.name for d in out_disks])\n expected_disk_names = ['disk1-copy', 'disk2-copy']\n self.assertEqual(out_disk_names, expected_disk_names)\n for d in out_disks:\n self.assertEqual(d.project, 'test-analysis-project-name')", "title": "" }, { "docid": "aeff8606af6dfcb1c781b250ff00bc71", "score": "0.47915316", "text": "def ps(): # pylint: disable=invalid-name\n api = ApiDocker()\n api.list_containers()", "title": "" }, { "docid": "97631c3221107e562e014b46d5cd1ce4", "score": "0.47758344", "text": "def recreateResources(self):\n for image in self.images:\n image._saveFiles()", "title": "" }, { "docid": "7e29661b4e94c3a5b03a62696491b6d2", "score": "0.4773798", 
"text": "def _restart(self):\n try:\n self._kill_all_units()\n for _ in range(3):\n for c in self._controller:\n c.step()\n except (protocol.ProtocolError, protocol.ConnectionError):\n self.full_restart()", "title": "" }, { "docid": "fda2788ba216c45670d6213959de90e3", "score": "0.47584322", "text": "def reap_children():\n\n for col in all_living_collectors():\n now = int(time.time())\n # FIXME: this is not robust. the asyncproc module joins on the\n # reader threads when you wait if that process has died. this can cause\n # slow dying processes to hold up the main loop. good for now though.\n status = col.proc.poll()\n if status is None:\n continue\n col.proc = None\n\n # behavior based on status. a code 0 is normal termination, code 13\n # is used to indicate that we don't want to restart this collector.\n # any other status code is an error and is logged.\n if status == 13:\n LOG.info(\"removing %s from the list of collectors (by request)\", col.name)\n col.dead = True\n elif status != 0:\n LOG.warning(\n \"collector %s terminated after %d seconds with \"\n \"status code %d, marking dead\",\n col.name,\n now - col.lastspawn,\n status,\n )\n col.dead = True\n else:\n register_collector(\n Collector(\n col.name, col.interval, col.filename, col.mtime, col.lastspawn\n )\n )", "title": "" }, { "docid": "c2193f6fda5e0c41e8a2233cc6bdbd0e", "score": "0.4752902", "text": "def execute_on_all_containers(self, command):\n for container in self.containers:\n self.engine.container = self.containers[container]\n self.engine.execute(command)", "title": "" }, { "docid": "7890cfc61bc8d2f33bcd0c88679901c6", "score": "0.47396544", "text": "def reboot_all_command():\n\n for c in pychromecast.get_chromecasts_as_dict().values():\n print(\"rebooting {}\".format(c.device.friendly_name))\n c.reboot()", "title": "" }, { "docid": "e0b193d17e0eb929ebbc0529a49d4fcc", "score": "0.47193173", "text": "def test_reboot_cloud_db_server_instance(self):\n pass", "title": "" }, { "docid": "efcd6a9bac9e0abb8dac13abcc03ab31", "score": "0.47115645", "text": "def get_unhealthy_instances(self):", "title": "" }, { "docid": "6180c8081ea9c5741f2a2fbe7d625d88", "score": "0.46950614", "text": "def discover_instances(self, context):\n driver = self.driver\n # Currently we won't throw an exception if it isn't a Discover Driver\n if not isinstance(driver, discovery_driver.ComputeDiscoveryDriver):\n drvclass = driver.__class__.__name__\n LOG.warn(_('Driver %s does not implement Discover Driver')\n % drvclass)\n return {'identifier': None, 'servers': [], 'chunking': False}\n # Call the Compute Discovery Driver to get a list of existing VM's\n all_instances = self._discover_instances(context)\n # We need to modify the Instances returned from the Driver slightly\n self._manipulate_driver_instances(all_instances, False)\n # Break up the list of VM's to a set of Chunks to be returned\n identifier = self._persist_instances_chunks(all_instances)\n # Return the first chunk of persisted volumes to the caller\n return self.get_next_instances_chunk(context, identifier)", "title": "" }, { "docid": "f9c08cfebeb206a05099e58cf9a5c9a8", "score": "0.46890274", "text": "def resolve_running_docker_containers():\n container_ids = terminal.docker_ps(ps_filter='name={}'.format(CONDUCTR_NAME_PREFIX))\n container_names = [terminal.docker_inspect(container_id, '{{.Name}}')[1:] for container_id in container_ids]\n return sorted(container_names)", "title": "" }, { "docid": "1022f12fcaacab9019085fa5241d4c6d", "score": "0.46857452", "text": "def cleanup(cls):\n 
LOGGER.debug(\"cleanup\")\n cmd = 'experiment-cli get --list --state Running,Waiting'\n experiments = call_cli(cmd)[\"items\"]\n\n for exp in experiments:\n exp_id = exp[\"id\"]\n call_cli('experiment-cli stop -i {}'.format(exp_id))", "title": "" }, { "docid": "0a45f47c5240f6e1ae619f3f55e0afb0", "score": "0.46621355", "text": "def cleanup_appliances(self):\n LOG.info(\"Cleaning up appliances\")\n for project_name in self.mapping.get_projects():\n domain_name = self.mapping.get_domain_from_project(project_name)\n glance = openstack_client.get_glance_client(\n project_name, domain_name\n )\n if not glance:\n LOG.error(\"Not authorized to manage images from the \"\n \"project: %s\" % project_name)\n continue\n try:\n img_generator = glance.images.list()\n image_list = list(img_generator)\n except Exception as err:\n LOG.error(\"Not authorized to retrieve the image list from \"\n \"the project: %s\" % project_name)\n LOG.exception(err)\n continue\n\n for image in image_list:\n if IMAGE_LIST_ID_TAG in image:\n if (IMAGE_STATUS_TAG in image and\n image[IMAGE_STATUS_TAG] == 'EOL'):\n try:\n LOG.debug(\"Deleting image '%s'\" % image['id'])\n glance.images.delete(image['id'])\n LOG.debug(\n \"Image '%s' successfully \"\n \"deleted\" % image['id']\n )\n except Exception as err:\n LOG.error(\n \"Cannot delete image '%s'\" % image['id']\n )\n LOG.error(err)", "title": "" }, { "docid": "d3ac7f2317957cec2003d9d0574e60a4", "score": "0.4659243", "text": "def Setup_Instances(self):\n\n self.notes += \"master/slave configed:\"\n print \"Setting up the instances.\"\n\n Dirs = ['data', 'tmp']\n ### We need to move these re's to the object level. \n Re_Instance = re.compile('__INSTANCE__')\n Re_Prefix = re.compile('__PREFIX__')\n Re_Extra = re.compile('__EXTRA__') \n Re_Instance_Dir = re.compile('__INSTANCE_DIR__')\n Re_Instance_Cnf = re.compile('__INSTANCE_CNF__')\n Re_DB_Port = re.compile('__DB_PORT__')\n\n Re_DB_ID = re.compile('__DB_ID__')\n Re_NO_Replicas = re.compile('__NO_REPLICAS__')\n Re_API_Port = re.compile('__API_PORT__')\n Re_API_ID = re.compile('__API_ID__')\n Re_MGM_ID = re.compile('__MGM_ID__')\n \n Start_File_Text = \"\\n cd \" + self.prefix + \"\\n\"\n Stop_File_Text = \"\\n cd \" + self.prefix + \"\\n\"\n Mysql_Alias = \"\"\n \n ### Setup the master database config and directories\n Extra_Master = \"\"\n if self.master > 0: Extra = \"log-bin=__PREFIX__/instance/__INSTANCE__/main_log\\n\"\n Extra_Master += \"port=\" + str(self.master_port) + \"\\n\"\n Extra_Master += \"server-id=1\\n\"\n Instance = \"master1\"\n\n Instance_Dir = self.prefix + \"/instance/\" + Instance\n print \"Setting up:\", Instance_Dir\n self.Command(command='mkdir -p ' + Instance_Dir)\n self.Command(command='mkdir -p ' + Instance_Dir + \"/data\")\n self.Command(command='mkdir -p ' + Instance_Dir + \"/tmp\")\n self.Command(command='chown -R mysql ' + Instance_Dir)\n self.Command(command='chown -R mysql.mysql ' + Instance_Dir)\n self.Command(command='chmod 700 ' + Instance_Dir + \"/data\")\n\n re_list = [[Re_Extra,Extra_Master], [Re_Prefix,self.prefix]]\n re_list.append([Re_Instance_Dir,Instance_Dir])\n re_list.append([Re_Instance, Instance])\n re_list.append([Re_Instance_Cnf, Instance_Dir + \"/my.cnf_\" + Instance])\n\n File = Instance_Dir + \"/my.cnf_\" + Instance\n self.Write_Template(file=File, template='my.cnf_master',re_list=re_list)\n\n File = self.prefix + \"/Start_\" + Instance\n self.Write_Template(file=File, template='start',re_list=re_list)\n\n File = self.prefix + \"/Stop_\" + Instance\n 
self.Write_Template(file=File, template='stop',re_list=re_list)\n\n self.Init_Instance(cnf=Instance_Dir + \"/my.cnf_\" + Instance)\n \n Start_File_Text += \"echo Starting \" + Instance + \"\\nbash \" + self.prefix + \"/Start_\" + Instance + \"\\n\\n\"\n Stop_File_Text += \"echo Stopping \" + Instance + \"\\nbash \" + self.prefix + \"/Stop_\" + Instance + \"\\n\\n\"\n\n MySQL_Alias = \"\"\"\nexport PATH=\"\"\" + '\"' + self.prefix + \"\"\"/bin:\"\"\" + self.prefix + \"\"\"/scripts:$PATH\"\n \"\"\"\n\n MySQL_Alias += \"alias mysql_\" + Instance + \"='\" + self.prefix + \"/bin/mysql --defaults-file=\" + Instance_Dir + \"/my.cnf_\" + Instance + \" --local-infile=1'\\n\"\n\n Write = open(Instance_Dir + \"/first_run.bash\", \"w\")\n Write.write(\"\\n\" + self.prefix + \"/bin/mysql --defaults-file=\" + Instance_Dir + \"/my.cnf_\" + Instance + \" -e \\\"GRANT REPLICATION SLAVE ON *.* TO 'root'@'127.0.0.1' IDENTIFIED BY ''\\\"\\n\")\n Write.write(self.prefix + \"/bin/mysql --defaults-file=\" + Instance_Dir + \"/my.cnf_\" + Instance + \" -e \\\"create database test_slave\\\"\\n\")\n Write.write(self.prefix + \"/bin/mysql --defaults-file=\" + Instance_Dir + \"/my.cnf_\" + Instance + \" -e \\\"create table table1 (text1 text)\\\" test_slave\\n\")\n Write.write(self.prefix + \"/bin/mysql --defaults-file=\" + Instance_Dir + \"/my.cnf_\" +Instance + \" -e \\\"insert into table1 values ('data')\\\" test_slave\\n\")\n Write.close()\n \n ### For each slave we have, do this.\n for no in range(2,self.slave + 2):\n Instance = \"slave\" + str(no)\n Instance_Dir = self.prefix + \"/instance/\" + Instance\n print \"Setting up:\", Instance_Dir\n self.Command(command='mkdir -p ' + Instance_Dir)\n self.Command(command='mkdir -p ' + Instance_Dir + \"/data\")\n self.Command(command='mkdir -p ' + Instance_Dir + \"/tmp\")\n self.Command(command='chown -R mysql ' + Instance_Dir)\n self.Command(command='chown -R mysql.mysql ' + Instance_Dir)\n self.Command(command='chmod 700 ' + Instance_Dir + \"/data\")\n\n ### This goes into the extra space in my.cnf.\n S_Extra = \"port=\" + str(self.slave_port + no) + \"\\n\"\n S_Extra += \"log-bin=__PREFIX__/instance/__INSTANCE__/main_log\\n\"\n S_Extra += \"server-id=\" + str(no) + \"\\n\"\n S_Extra += \"report-port=\" + str(self.slave_port + no) + \"\\n\"\n\n ### Setup the list of search/replace values and the re's.\n re_list = [[Re_Extra,S_Extra], [Re_Prefix,self.prefix]]\n re_list.append([Re_Instance_Dir,Instance_Dir])\n re_list.append([Re_Instance, Instance])\n re_list.append([Re_Instance_Cnf, Instance_Dir + \"/my.cnf_\" + Instance])\n re_list.append([self.re_m_port, str(self.master_port)])\n\n ## Create the text files. \n File = Instance_Dir + \"/my.cnf_\" + Instance\n self.Write_Template(file=File, template='my.cnf_slave',re_list=re_list)\n \n File = self.prefix + \"/Start_\" + Instance\n self.Write_Template(file=File, template='start',re_list=re_list)\n \n File = self.prefix + \"/Stop_\" + Instance\n self.Write_Template(file=File, template='stop',re_list=re_list)\n\n ### Intitalize the database. 
\n self.Init_Instance(cnf=Instance_Dir + \"/my.cnf_\" + Instance)\n\n ### Various entries into Start, Stop, and bash_aliases.\n Start_File_Text += \"echo Starting \" + Instance + \"\\nbash \" + self.prefix + \"/Start_\" + Instance + \"\\n\\n\"\n Stop_File_Text += \"echo Stopping \" + Instance + \"\\nbash \" + self.prefix + \"/Stop_\" + Instance + \"\\n\\n\"\n MySQL_Alias += \"alias mysql_\" + Instance + \"='\" + self.prefix + \"/bin/mysql --defaults-file=\" + Instance_Dir + \"/my.cnf_\" + Instance + \"'\\n\"\n\n ## If we have clustering, make a management server\n if self.cluster > 0:\n Instance = \"cluster_management\"\n Instance_Dir = self.prefix + \"/instance/\" + Instance\n print \"Setting up:\", Instance_Dir\n self.Command(command='mkdir -p ' + Instance_Dir)\n ## Technically, we don't need a data directory. \n self.Command(command='mkdir -p ' + Instance_Dir + \"/data\")\n self.Command(command='mkdir -p ' + Instance_Dir + \"/tmp\")\n self.Command(command='chown -R mysql ' + Instance_Dir)\n self.Command(command='chown -R mysql.mysql ' + Instance_Dir)\n self.Command(command='chmod 700 ' + Instance_Dir + \"/data\")\n \n C_Extra = \"port=\" + str(no + self.cluster_api_port) + \"\\n\"\n C_Extra += \"server-id=\" + str(no) + \"\\n\"\n\n MySQL_Alias += \"alias ndb_mgm='\" + self.prefix + \"/bin/ndb_mgm 127.0.0.1 \" + str(self.cluster_management_port) + \"'\\n\"\n\n Re_Cluster_No = re.compile('__CLUSTER_NO__')\n\n re_list = [[Re_Extra,C_Extra], [Re_Prefix,self.prefix]]\n re_list.append([Re_Instance_Dir,Instance_Dir])\n re_list.append([Re_Instance, Instance])\n re_list.append([Re_Instance_Cnf, Instance_Dir + \"/my.cnf_\" + Instance])\n re_list.append([Re_Cluster_No, str(self.cluster + 1)])\n re_list.append([Re_NO_Replicas, '2'])\n# re_list.append([Re_NO_Replicas, str(self.cluster)])\n re_list.append([Re_MGM_ID, str(self.cluster + self.slave + 2)])\n\n re_list.append([self.re_mgm_port, str(self.cluster_management_port)])\n re_list.append([self.re_m_port, str(self.master_port)])\n\n File = Instance_Dir + \"/my.cnf_\" + Instance\n self.Write_Template(file=File, template='my.cnf_cluster_management',re_list=re_list)\n \n File = Instance_Dir + \"/config.ini_cluster_management\"\n self.Write_Template(file=File, template='config.ini_mgm',re_list=re_list)\n\n Start_File_Text += \"echo Starting cluster ndb_mgmd.\\n\"\n Start_File_Text += \"su mysql -c './libexec/ndb_mgmd --config-file=./instance/cluster_management/config.ini_cluster_management'\\n\\n\"\n\n Max_No = self.cluster + self.slave + 2 + 1\n Max_No2 = Max_No + self.cluster\n ### Configure each storage node. Each storage node gets its own\n ### mysqld server as well -- later in the script. \n for no in range(2 + self.slave, self.cluster + self.slave + 2):\n Instance2 = \"cluster_ndb\" + str(no)\n Instance_Dir2 = self.prefix + \"/instance/\" + Instance2\n\n Init_File = Instance_Dir2 + \"/initialized\"\n Start_File_Text += \"\"\"\nif [ ! 
-e '\"\"\" + Init_File + \"\"\"' ]; then\n echo ''\n echo Initializing cluster \"\"\" + str(Max_No + no)+ \"\"\" \n su mysql -c './libexec/ndbd --initial -c 127.0.0.1:\"\"\" + str(self.cluster_management_port) + \"\"\":id=\"\"\" + str(Max_No + no)+ \"\"\"'\n touch \"\"\" + Init_File + \"\"\"\nelse\n echo Starting cluster db\"\"\" + str(Max_No + no) + \"\"\"\n su mysql -c './libexec/ndbd -c 127.0.0.1:\"\"\" + str(self.cluster_management_port) + \"\"\":id=\"\"\" + str(Max_No + no)+ \"\"\"'\nfi\n \\n\"\"\"\n\n print \"Setting up:\", Instance_Dir2\n self.Command(command='mkdir -p ' + Instance_Dir2)\n ### Technically, we don't need a data directory.\n self.Command(command='mkdir -p ' + Instance_Dir2 + \"/data\")\n self.Command(command='mkdir -p ' + Instance_Dir2 + \"/tmp\")\n self.Command(command='chown -R mysql ' + Instance_Dir2)\n self.Command(command='chown -R mysql.mysql ' + Instance_Dir2)\n self.Command(command='chmod 700 ' + Instance_Dir2 + \"/data\")\n \n re_list = [[Re_Prefix,self.prefix]]\n re_list.append([Re_Instance_Dir,Instance_Dir2])\n re_list.append([Re_Instance, Instance2])\n re_list.append([Re_DB_ID, str(Max_No + no)])\n re_list.append([Re_API_ID, str(Max_No2 + no)])\n\n re_list.append([self.re_mgm_port, str(self.cluster_management_port)])\n re_list.append([self.re_m_port, str(self.master_port)])\n# re_list.append([self.re_s_port, str(self.slave_port)])\n# re_list.append([self.re_api_port, str(self.cluster_api_port)])\n re_list.append([self.re_db_port, str(self.cluster_db_port + no)])\n \n\n F2 = \"/tmp/db_temp.cnf\"\n self.Write_Template(file=F2,template='config.ini_db',re_list=re_list)\n Write = open(File, 'a')\n Read = open(F2, 'r')\n for Line in Read: Write.write(Line)\n Write.close()\n Read.close()\n \n ## For each mysqld server in the cluster, do this. \n for no in range(2 + self.slave, self.cluster + self.slave + 2):\n Instance = \"cluster_mysqld\" + str(no)\n Instance_Dir = self.prefix + \"/instance/\" + Instance\n print \"Setting up:\", Instance_Dir\n self.Command(command='mkdir -p ' + Instance_Dir)\n ### Technically, we don't need a data directory ( I think). 
\n self.Command(command='mkdir -p ' + Instance_Dir + \"/data\")\n self.Command(command='mkdir -p ' + Instance_Dir + \"/tmp\")\n self.Command(command='chown -R mysql ' + Instance_Dir)\n self.Command(command='chown -R mysql.mysql ' + Instance_Dir)\n self.Command(command='chmod 700 ' + Instance_Dir + \"/data\")\n \n C_Extra = \"port=\" + str(no + self.cluster_api_port) + \"\\n\"\n C_Extra += \"log-bin=__PREFIX__/instance/__INSTANCE__/main_log\\n\"\n C_Extra += \"server-id=\" + str(no) + \"\\n\"\n\n re_list = [[Re_Extra,C_Extra], [Re_Prefix,self.prefix]]\n re_list.append([Re_Instance_Dir,Instance_Dir])\n re_list.append([Re_Instance, Instance])\n re_list.append([Re_Instance_Cnf, Instance_Dir + \"/my.cnf_\" + Instance])\n re_list.append([Re_API_ID, str(2*self.cluster+ self.slave + 2 + 1 + no)])\n\n re_list.append([self.re_mgm_port, str(self.cluster_management_port)])\n re_list.append([self.re_m_port, str(self.master_port)])\n \n File = Instance_Dir + \"/my.cnf_\" + Instance\n self.Write_Template(file=File, template='my.cnf_cluster_db',re_list=re_list)\n \n File = self.prefix + \"/Start_\" + Instance\n self.Write_Template(file=File, template='start',re_list=re_list)\n \n File = self.prefix + \"/Stop_\" + Instance\n self.Write_Template(file=File, template='stop',re_list=re_list)\n\n self.Init_Instance(cnf=Instance_Dir + \"/my.cnf_\" + Instance)\n \n Start_File_Text += \"echo Starting \" + Instance + \"\\nbash \" + self.prefix + \"/Start_\" + Instance + \"\\n\\n\"\n Stop_File_Text += \"echo Stopping \" + Instance + \"\\nbash \" + self.prefix + \"/Stop_\" + Instance + \"\\n\\n\"\n MySQL_Alias += \"alias mysql_\" + Instance + \"='\" + self.prefix + \"/bin/mysql --defaults-file=\" + Instance_Dir + \"/my.cnf_\" + Instance + \"'\\n\"\n\n\n ### After creating all the stop, start, and alias files, we want one\n ### file to glue them altogether. We create one Start and Stop file.\n ### We should add a /etc/rc.d/init.d/mysql file later. \n\n Write = open(self.prefix + \"/Start\", 'w')\n Write.write(\"export mysql_prefix='\" + self.prefix + \"'\\n\")\n Write.write(Start_File_Text)\n Write.close()\n Write = open(self.prefix + \"/Stop\", 'w')\n Write.write(Stop_File_Text)\n Write.write(\"echo Killing ndbd and then ndb_mgmd.\\n\")\n ### This is a cludge. 
\n Write.write(\"killall ndbd; sleep 2\\n\")\n Write.write(\"killall -q -9 ndbd; sleep 2\\n\")\n Write.write(\"killall ndb_mgmd; sleep 2\\n\")\n Write.write(\"killall -q -0 ndb_mgmd; sleep 2\\n\")\n \n Write.close()\n Write = open(self.prefix + \"/bash_aliases\", 'w')\n Write.write(MySQL_Alias)\n Write.close()\n \n Command = \"chmod 755 \" + self.prefix + \"/*\"\n print self.Command(command=Command)\n \n return(1)\n \n ### Setup a bunch of test scripts.", "title": "" }, { "docid": "3dcaf22848d12efa550f8d54f8707aa5", "score": "0.46531677", "text": "def test_namespace_clean(self):\n\n NUM_INSTANCES = 6\n LONG_WAIT_MINS = 5\n SHORT_WAIT_MINS = 2\n\n inst_uuids = set()\n for i in range(NUM_INSTANCES):\n new_inst = self.test_client.create_instance(\n 'test-cirros-%s' % i, 1, 1024,\n [\n {\n 'network_uuid': self.net['uuid']\n }\n ],\n [\n {\n 'size': 8,\n 'base': 'cirros',\n 'type': 'disk'\n }\n ], None, None,\n namespace=self.namespace)\n inst_uuids.add(new_inst['uuid'])\n\n # Wait for all instances to start\n for uuid in inst_uuids:\n self._await_login_prompt(uuid)\n\n # Run the test\n self.test_client.delete_all_instances(self.namespace)\n self.test_client.delete_all_networks(self.namespace, clean_wait=True)\n\n # Wait for instances to be deleted\n start_time = time.time()\n while inst_uuids:\n for uuid in copy.copy(inst_uuids):\n i = self.system_client.get_instance(uuid)\n if i['state'] in ['deleted']:\n inst_uuids.remove(uuid)\n if time.time() - start_time > LONG_WAIT_MINS * 60:\n break\n time.sleep(5)\n\n self.assertEqual(len(inst_uuids), 0,\n 'Instances not deleted: %s' % inst_uuids)\n\n start_time = time.time()\n while time.time() - start_time < SHORT_WAIT_MINS * 60:\n test_net = self.test_client.get_network(self.net['uuid'])\n if test_net['state'] in ['deleted', 'error']:\n break\n time.sleep(5)\n\n self.assertEqual(test_net['state'], 'deleted',\n 'Network not deleted by delete_all_networks()')", "title": "" }, { "docid": "efe6d30a7c6f3488c518dc8bfc2670c4", "score": "0.46451497", "text": "def inventory_instances(self, context, instances=None):\n driver, context = (self.driver, context.elevated())\n LOG.debug('Invoking Method to Refresh Instance Inventory - Begin')\n try:\n drv_insts = self.driver.list_instances()\n name_map = dict([(inst, '') for inst in drv_insts])\n # Retrieve All of the Instances from the Nova DB for the Given Host\n db_insts = self._query_db_instances(context, instances)\n db_insts = [inst for inst in db_insts if inst['name'] in name_map]\n # If this is a Discovery Driver, then let it Gather Inventory\n if isinstance(driver, discovery_driver.ComputeDiscoveryDriver):\n # We will also allow the Driver to handle Evacuated Instances\n driver.handle_evacuated_instances(context, db_insts, drv_insts)\n # Now we can actually collect Inventory on the Instances\n driver.inventory_instances(context, db_insts)\n except Exception as exc:\n LOG.warn(_('Error refreshing Instance Inventory'))\n LOG.exception(exc)\n LOG.debug('Invoking Method to Refresh Instance Inventory - End')", "title": "" }, { "docid": "881907ce8aa58b70bc2679f6f0ebc13b", "score": "0.4641112", "text": "def remove_all():\n try:\n print('Removing all...')\n subprocess.check_call('docker system prune -a --volumes -f', shell=True, stdout=subprocess.PIPE)\n print('Removing all images...')\n subprocess.check_call('docker image prune -a -f', shell=True, stdout=subprocess.PIPE)\n print('Removing volumes...')\n subprocess.check_call('docker volume prune -f', shell=True, stdout=subprocess.PIPE)\n return True\n except 
subprocess.CalledProcessError:\n return False", "title": "" }, { "docid": "8be08fa85ddaf13e8876723e76faaaab", "score": "0.46392822", "text": "def _fixTheFuckingCores(self):\r\n\r\n\t\tremovedNameSpaces = []\r\n\t\t## Remove duplicate root core namespaces\r\n\t\tgetAllNameSpaces = cmds.namespaceInfo(listOnlyNamespaces = True)\r\n\t\tfor eachNS in getAllNameSpaces:\r\n\t\t\tif eachNS.endswith('1'):\r\n\t\t\t\tprint 'Removing %s' % eachNS\r\n\t\t\t\tcmds.namespace(removeNamespace = eachNS, mergeNamespaceWithRoot = True)\r\n\t\t\t\tremovedNameSpaces.append(eachNS.replace('1', '').replace('_CORE', ''))\r\n\r\n\t\t## Remove duplicate base cores\r\n\t\tfor each in cmds.ls(type = 'core_archive'):\r\n\t\t\tif '1'in each:\r\n\t\t\t\tprint 'Cleaned rootCore %s from scene...' % each\r\n\t\t\t\tcmds.delete(each)\r\n\r\n\t\t## Now find all geo with the core name in it and proceess it for reconnection\r\n\t\tfor eachCore in removedNameSpaces:\r\n\t\t\t#print eachCore\r\n\t\t\t## Check child _hrc grps for processing\r\n\t\t\tgetAllGeo = [eachGeo for eachGeo in cmds.ls('*%s*' % eachCore) if cmds.nodeType(eachGeo) == 'transform']\r\n\t\t\tfor eachGeo in getAllGeo:\r\n\t\t\t\tself._reconnectDuplicates(eachGeo, '%s_CORE_Geoshader' % eachCore)\r\n\r\n\t\tcoreLib.cleanupDeadCoreArchives()", "title": "" }, { "docid": "3c6fe67e39c5a08f0e8d41614e7392a0", "score": "0.4638903", "text": "def reconcile(self, experiment_name, no_update=False):\n\n logger.info('Reconciling ContainerList with docker ps')\n\n active_containers = subprocess.check_output(['docker', 'ps', '-q']).decode('ascii').split('\\n')[:-1]\n\n for c_id in active_containers:\n if c_id not in self.ids:\n c = ContainerWrapper(id=c_id, updatable=not no_update,\n trial_start=self.trial_start, interval=self.interval)\n logger.info('Adding {} to ContainerList'.format(c_id))\n self.add(c)\n\n for c in self:\n if c.id not in active_containers:\n logger.info('Removing {} from ContainerList'.format(c.id))\n c.save_logs(experiment_name=experiment_name)\n self.containers.remove(c)", "title": "" }, { "docid": "66fb1e37ba8e2a4cce5e51744f7aa918", "score": "0.46335843", "text": "def reset_imgs(cls):\n for i, j in cls.tile_ids.items():\n j.reset()\n for i, j in cls.deco_ids.items():\n j.reset()", "title": "" }, { "docid": "0c29175efafd4496ef07bd9a5edab11f", "score": "0.4633326", "text": "def tearDownClass(cls):\n stop_fm = subprocess.run(f\"docker rm -f {cls.fm_container}\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True)", "title": "" }, { "docid": "41d845cbd5f7b98319d9259cd58c0184", "score": "0.4626462", "text": "def _instances():\n print \"%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s\" \\\n \" %-10s %-10s %-10s %-5s\" % ('instance', 'node', \\\n 'type', 'state', 'launched', 'image', 'kernel', 'ramdisk',\n 'project', 'user', 'zone','index')\n instances = db.instance_get_all(context.get_admin_context())\n for i in instances:\n print \"%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s\" \\\n \" %-10s %-10s %-10s %-5d\" % (\n i['hostname'], i['host'], i['instance_type'],\n i['state_description'], i['launched_at'], i['image_id'],\n i['kernel_id'], i['ramdisk_id'], i['project_id'],\n i['user_id'], i['availability_zone'], i['launch_index'])", "title": "" }, { "docid": "024e47f7afe1f175776a0777b8d0e071", "score": "0.46092033", "text": "def instances(self, skip_exist_test=False):\n self.reset_result_type()\n self._instances = True\n self._instances_skip_exist_test = skip_exist_test\n return self", "title": "" }, { "docid": 
"1ac9648a2961c3a99159d8f03bf67d16", "score": "0.4595288", "text": "def main():\n\n fill_container()", "title": "" }, { "docid": "e851c51cd93e3ff7e2ec3a36420c483c", "score": "0.4595082", "text": "def _clean_childs_cache(self):\n datastream_revisions = DataStreamRevision.objects.filter(dataset=self.dataset.id)\n\n for datastream_revision in datastream_revisions:\n DatastreamLifeCycleManager(self.user, datastream_revision_id=datastream_revision.id).clean_cache()", "title": "" }, { "docid": "5eacc6c5a260d1b1bdf92518c0606d84", "score": "0.45941693", "text": "def start_media_machinery(c, logs=False):\n c.run('docker-compose up --force-recreate -d --remove-orphans')\n if logs:\n log_media_machinery(c, follow=True)", "title": "" }, { "docid": "a583dc015fa9b419e140b18d8512ea98", "score": "0.45925763", "text": "def localnet(docker_services):\n client = docker.from_env()\n container = client.containers.run(\n \"pygate/powergate-cli:v1.2.1\",\n network_mode=\"host\",\n auto_remove=True,\n detach=True,\n tty=True,\n )\n start_time, timeout = time(), 600\n while True:\n if time() - start_time > timeout:\n logger.error(\"Setting up localnet timed out....\")\n pytest.exit(3)\n\n sleep(5)\n\n try:\n result = container.exec_run(\"pow --version\")\n if result.exit_code > 0:\n continue\n except docker.errors.ContainerError:\n continue\n break\n\n yield {\"cli\": container}\n\n logger.debug(\"Tearing down localnet...\")\n container.stop()", "title": "" }, { "docid": "3865f309e207245b70e63ece658414bb", "score": "0.4590925", "text": "def mon_util_cycle(ctx):\n findbe = False\n lc_utils = 0\n be_utils = 0\n date = datetime.now().isoformat()\n bes = []\n newbe = False\n containers = ctx.docker_client.containers.list()\n remove_finished_containers({c.id for c in containers}, ctx.util_cons)\n\n for container in containers:\n cid = container.id\n name = container.name\n pids = list_pids(container)\n key = cid if ctx.args.key_cid else name\n if cid in ctx.util_cons:\n con = ctx.util_cons[cid]\n else:\n con = Container(ctx.cgroup_driver, cid, name, pids,\n ctx.args.verbose)\n ctx.util_cons[cid] = con\n if ctx.args.control:\n if key in ctx.be_set:\n newbe = True\n ctx.cpuq.set_share(con, CpuQuota.CPU_SHARE_BE)\n else:\n ctx.cpuq.set_share(con, CpuQuota.CPU_SHARE_LC)\n con.update_cpu_usage()\n if ctx.args.record:\n with open(Analyzer.UTIL_FILE, 'a') as utilf:\n utilf.write(date + ',' + cid + ',' + name +\n ',' + str(con.utils) + '\\n')\n\n if key in ctx.lc_set:\n lc_utils = lc_utils + con.utils\n\n if key in ctx.be_set:\n findbe = True\n be_utils = be_utils + con.utils\n bes.append(con)\n\n loadavg = os.getloadavg()[0]\n if ctx.args.record:\n with open(Analyzer.UTIL_FILE, 'a') as utilf:\n utilf.write(date + ',,lcs,' + str(lc_utils) + '\\n')\n utilf.write(date + ',,loadavg1m,' + str(loadavg) + '\\n')\n\n if lc_utils > ctx.sysmax_util:\n ctx.sysmax_util = lc_utils\n ctx.analyzer.update_lcutilmax(lc_utils)\n if ctx.args.control:\n ctx.cpuq.update_max_sys_util(lc_utils)\n\n if newbe:\n ctx.cpuq.budgeting(bes, [])\n\n if findbe and ctx.args.control:\n exceed, hold = ctx.cpuq.detect_margin_exceed(lc_utils, be_utils)\n if not ctx.args.enable_hold:\n hold = False\n ctx.controllers[Contention.CPU_CYC].update(bes, [], exceed, hold)", "title": "" }, { "docid": "ebf01f1069c3897895f59118ada5fd68", "score": "0.45800298", "text": "def poll_rebooting_instances(self, timeout):\n self._vmops.poll_rebooting_instances(timeout)", "title": "" }, { "docid": "68dfe9f93733dd260ad32346e1bbbe7f", "score": "0.45710015", "text": "def reload():\n 
_restart_services()", "title": "" }, { "docid": "68d9eaea9297c638dfe43b78350a76f8", "score": "0.45685324", "text": "def ingestImageStack ( self ):\n\n for i in range(0, len(self.proj.datasetcfg.resolutions ) ):\n \n cmd = 'mysql {} {} < {}{}.res{}.sql'.format ( self.starterString, self.token, self.location, self.token, i )\n print cmd\n os.system( cmd )", "title": "" }, { "docid": "5e4d014d3f2b0fc4c6b830a09b42e61e", "score": "0.45657778", "text": "def start_daemon():\n\t# Autoscale the slave containers based on the requests every 2 minutes.\n\tscale_after(interval=120)", "title": "" }, { "docid": "e95a9340d1aaf36552d93cc6dde09da8", "score": "0.45555118", "text": "def reinitialize_services(self):\n self.start_services()\n self.init_server_connection()", "title": "" }, { "docid": "bc820b140427414ef1be8af3068f0314", "score": "0.4551566", "text": "def fill_container():\n\n pass", "title": "" }, { "docid": "315f459b6d50de962ff1cad7e6605805", "score": "0.45352766", "text": "def containers(self, containers):\n\n self._containers = containers", "title": "" }, { "docid": "9383220d3f44326f2ddc45b58929f546", "score": "0.45349756", "text": "def iterate_containers(self):\n\n for container_name in os.listdir(self.base_path):\n full_path = os.path.join(self.base_path, container_name)\n if not os.path.isdir(full_path):\n continue\n yield self._make_container(container_name)", "title": "" }, { "docid": "4e51b3b2d881c7000c0bbda7ba16ef90", "score": "0.45328972", "text": "def tear_down_all(self):\n pass", "title": "" }, { "docid": "e7a5186d06b682f5106c900b52e7db18", "score": "0.45261484", "text": "def list_containers():\n response = requests.get(f'{Global.BASE_URL}containers/json')\n return response", "title": "" }, { "docid": "0177fab0b4aceb66318f10819ce64fa8", "score": "0.4522945", "text": "def poll_rescued_instances(self, timeout):\n self._vmops.poll_rescued_instances(timeout)", "title": "" }, { "docid": "04ab7fa765d698685e9abeb9dcc43e3a", "score": "0.45123222", "text": "def cleanupall(cls):\n cl = list(cls.Register.values())\n for c in cl:\n c.cleanup()", "title": "" }, { "docid": "70c18eb9e7b23d82f4dcbd44d1c511bc", "score": "0.45106673", "text": "def nuke_confs():\n for service, remote_path, extension in server_config.SERVER_SERVICES:\n with settings(warn_only=True):\n installed_path = _get_installed_conf_path(\n service, remote_path, extension\n )\n\n sudo(\"rm -f %s\" % installed_path)\n\n if service == \"nginx\":\n sudo(\"service nginx reload\")\n elif service == \"uwsgi\":\n service_name = _get_installed_service_name(service)\n sudo(\"service %s stop\" % service_name)\n sudo(\"initctl reload-configuration\")\n elif service == \"app\":\n sudo(\"rm %s\" % server_config.UWSGI_SOCKET_PATH)", "title": "" }, { "docid": "a99c8ba4cc2c9e346430fff70b32bc98", "score": "0.45095667", "text": "def flatten(self):\n logger.debug('\\tFlattening child images')\n for snap in self.image.list_snaps():\n snap = rbd.Image(self.ioctx, self.branch, snap['name'])\n for child_pool, child_image in snap.list_children():\n logger.info('\\tFlattening {}/{}'.format(\n child_pool, child_image))\n try:\n pool = self.cluster.open_ioctx(child_pool)\n image = rbd.Image(pool, child_image)\n image.flatten()\n except:\n logger.exception(\"Error trying to flatten {}/{}\".format(\n child_pool, child_image))\n finally:\n image.close()\n pool.close()\n time.sleep(10) # give Ceph room catch up with I/O", "title": "" }, { "docid": "aada284d58e7c73a649ed12e2e03b2f4", "score": "0.4497815", "text": "def test_spawn_fail_cleanup_2(self):\n 
vdi_recs_start = self._list_vdis()\n stubs.stubout_create_vm(self.stubs)\n self.assertRaises(xenapi_fake.Failure,\n self._test_spawn, 1, 2, 3)\n # No additional VDI should be found.\n vdi_recs_end = self._list_vdis()\n self._check_vdis(vdi_recs_start, vdi_recs_end)", "title": "" }, { "docid": "40ba70e956ee667be9eb578066f24533", "score": "0.44962916", "text": "def cleanup(self):\n super(Test200SmartFullIdevice058, self).cleanup()", "title": "" }, { "docid": "ccf44988a1b8ca9ec95815c525de3a54", "score": "0.44886544", "text": "def clean(self):\n logger.debug('Deleting all runtimes')\n\n runtimes = self.list_runtimes()\n\n for runtime in runtimes:\n runtime_name, runtime_memory = runtime\n self.delete_runtime(runtime_name, runtime_memory)", "title": "" }, { "docid": "a5c5b87aae8ab8d6e2d5b06975a6ce86", "score": "0.44859275", "text": "def refresh_drivers():\n global _REGISTERED_DRIVERS\n _REGISTERED_DRIVERS = {}\n global _DISCOVERED\n _DISCOVERED = True\n _discover_local_drivers()\n _discover_entry_point_chemistry_drivers()\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(\"Found: drivers {} \".format(local_drivers()))", "title": "" }, { "docid": "10474aba506c100ddeb07bf77231fb50", "score": "0.44854534", "text": "def reset(self):\n self.objects = []\n self.labels = []\n #self._objects = []\n #self._labels = []\n self._stack_info = []\n\n # This stream belongs to a program, which may have any code previously\n # added to this stream cached. So, invalidate the program's cache.\n self.prgm._cached = False\n\n if self._debug:\n import inspect\n self._stack_info.append(_extract_stack_info(inspect.stack()))\n\n return", "title": "" }, { "docid": "8af786ed938229a425c91e5aae0d6bb2", "score": "0.44846046", "text": "def restartDaemons(self):\n for d in self.getDaemonNames():\n self.About.doDaemonAction(d, 'restart')", "title": "" }, { "docid": "978a9d1f8f5024d654384a65e210543a", "score": "0.44838193", "text": "def _remove_all_local_instance(self, context):\n local_instances = self._get_all_local_instances(context)\n for local_instance in local_instances:\n self._remove_local_instance(context, local_instance)", "title": "" }, { "docid": "6d3bddf8d350e2300eede145aecbf05b", "score": "0.44821456", "text": "def start_containers(self, containers):\n self.containers = DockerContainers(\n os.path.dirname(__file__),\n containers\n )\n self.containers.build()\n self.containers.start()", "title": "" }, { "docid": "6c9ed27235afc0f0adf56e473e53e2b5", "score": "0.44783548", "text": "def _run_discovery(self):\n discovered_services = {}\n processes = self._get_processes_map()\n\n netstat_info = self.get_netstat()\n\n # Process PID present in netstat output before other PID, because\n # two process may listen on same port (e.g. 
multiple Apache process)\n # but netstat only see one of them.\n for pid in itertools.chain(netstat_info.keys(), processes.keys()):\n process = processes.get(pid)\n if process is None:\n continue\n\n service_info = get_service_info(process['cmdline'])\n if service_info is not None:\n service_info = service_info.copy()\n service_info['exe_path'] = process.get('exe') or ''\n instance = process['instance']\n service_name = service_info['service']\n if (service_name, instance) in discovered_services:\n # Service already found\n continue\n if instance in self.docker_containers_ignored.values():\n continue\n logging.debug(\n 'Discovered service %s on %s',\n service_name, instance\n )\n\n service_info['active'] = True\n\n if instance is None:\n ports = netstat_info.get(pid, {})\n else:\n ports = self.get_docker_ports(instance)\n docker_inspect = self.docker_containers[instance]\n labels = docker_inspect.get('Config', {}).get('Labels', {})\n if labels is None:\n labels = {}\n service_info['stack'] = labels.get('bleemeo.stack', None)\n service_info['container_id'] = docker_inspect.get('Id')\n\n self._discovery_fill_address_and_ports(\n service_info,\n instance,\n ports,\n )\n\n # some service may need additionnal information, like password\n if service_name == 'mysql':\n self._discover_mysql(instance, service_info)\n if service_name == 'postgresql':\n self._discover_pgsql(instance, service_info)\n\n if service_info.get('interpreter') == 'java':\n self._guess_jmx_config(service_name, service_info, process)\n\n discovered_services[(service_name, instance)] = service_info\n\n logging.debug(\n 'Discovery found %s running services', len(discovered_services)\n )\n return discovered_services", "title": "" }, { "docid": "601a9d671f18c72360a04c762013df1c", "score": "0.44777825", "text": "def cleanup():\n for factory in factories.itervalues():\n factory.cleanup()", "title": "" }, { "docid": "da66821fd3c90525c685910231f0ebeb", "score": "0.44722384", "text": "def container_clean(name=\"3bot\", configdir=None):\n _configure(configdir=configdir)\n docker = container_get(name=name)\n docker.clean()", "title": "" }, { "docid": "42df0028758ad3cf727e2af869d582d9", "score": "0.44717044", "text": "def terminate(self):\n deferreds = []\n\n for container in self._containers.copy():\n deferreds.append(container.remote_destroy())\n\n if deferreds:\n deferredList = DeferredList(deferreds)\n deferredList.addCallback(self._cleanPackageDir)\n return deferredList\n else:\n self._cleanPackageDir()", "title": "" } ]
c41d47381d7582440c496fa8c8c5fcdb
applies the given netmask to the networks, True if the results are the same
[ { "docid": "d17a9c442dee1d7a39c64f9b1bc71315", "score": "0.6128977", "text": "def in_subnet(network1, network2, netmask):\n subnet1 = apply_netmask(network1, netmask)\n subnet2 = apply_netmask(network2, netmask)\n return subnet1 == subnet2", "title": "" } ]
[ { "docid": "61e94f906aa1799ab53499bafb573297", "score": "0.61975306", "text": "def net_contains(net, cand):\n\treturn cand.network_address in net and cand.broadcast_address in net", "title": "" }, { "docid": "403ee80e33bce6a7f6c613285760624c", "score": "0.6157785", "text": "def apply_netmask(network, netmask):\n nw = network.split('.')\n nm = netmask.split('.')\n netmasked = []\n for part in range(len(nw)):\n netmasked.append(str(int(nw[part]) & int(nm[part])))\n return '.'.join(netmasked)", "title": "" }, { "docid": "4905ab627844f2a6dfb953cb0ce6043d", "score": "0.5871916", "text": "def update_antispoofing(self, networks=None):\n if not networks and len(self.data.get('antispoofing_ne_ref')):\n self.data.update(antispoofing_ne_ref=[])\n return True\n _networks = element_resolver(networks)\n if set(_networks) ^ set(self.data.get('antispoofing_ne_ref')):\n self.data.update(antispoofing_ne_ref=_networks)\n return True\n return False", "title": "" }, { "docid": "a28fee16391748ca483bb182651f1fad", "score": "0.5825316", "text": "def _apply_mask(self):\n with torch.no_grad():\n for name, param in self.net.named_parameters():\n # Check if is pruneable layer\n if name in self._mask.keys():\n # Then apply mask\n param.data = param.data*self._mask[name]", "title": "" }, { "docid": "ac3e767ef578483d51918a1f92b9c8be", "score": "0.5817978", "text": "def is_ips_in_same_subnet(ips, netmask):\n subnets = [IP(\"{0}/{1}\".format(ip, netmask), make_net=True) for ip in ips]\n return len(set(subnets)) == 1", "title": "" }, { "docid": "ad4ea629905c9bcfcf7f3a8cc87b0417", "score": "0.5715061", "text": "def apply_mask(self, mask, op='and', *, inplace=False):\n if inplace:\n ret = self\n else:\n ret = self.copy()\n\n if not (issubclass(type(mask), RoadImage)) or not (mask.binary) or not (self.binary):\n raise TypeError('RoadImage.apply_mask: Image and mask must be binary RoadImages.')\n\n flatmask = mask.flatten()\n if flatmask.shape[3] != self.shape[3]:\n if flatmask.shape[3] != 1:\n raise ValueError('RoadImage.apply_mask: mask must have 1 channel or %d channels.' 
% self.shape[3])\n flatmask = np.repeat(flatmask, self.shape[3], axis=3)\n\n # zeroes = np.zeros(shape=(1,1,1,1))\n zeroes = np.zeros_like(ret[0])\n\n # The simplest implementation is to use make_collection, and combine_masks...\n # We avoid the warnings and copy the data.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n for ix, img in enumerate(self):\n ret[ix, (flatmask[0] == zeroes)] = 0\n\n return ret", "title": "" }, { "docid": "803a0b70bc3fb288a4601759b941d1a6", "score": "0.56919676", "text": "def mask_exists(self, new_mask):\n for m in self.mask:\n if np.array_equal(m, new_mask):\n return True\n return False", "title": "" }, { "docid": "1f18e8f43f5f41809e96d6c81d4250a5", "score": "0.56658494", "text": "def update_masks(self, masks: Dict[str, Dict[str, torch.Tensor]]):\n for module_name, target_masks in masks.items():\n if module_name not in self._module_wrappers:\n _logger.warning(f'Wrapper {module_name} is not register in this compressor, the masks will be ignored.')\n wrapper = self._module_wrappers[module_name]\n for target_name, target_mask in target_masks.items():\n target_space = wrapper.pruning_target_spaces.get(target_name, None)\n if target_space is None:\n _logger.warning(f'Target space `{target_name}` is not in wrapper `{wrapper.name}`, the mask of it will be ignored.')\n continue\n if target_mask is None:\n target_space.mask = None\n else:\n assert target_name in wrapper.pruning_target_spaces, \\\n f'{module_name}.{target_name} is not a pruning target, can not update mask for it.'\n # NOTE: try to auto get the device of the current module\n try:\n device = next(wrapper.parameters()).device\n except StopIteration:\n try:\n device = next(wrapper.buffers()).device\n except StopIteration:\n if target_space.mask is not None:\n device = target_space.mask.device\n else:\n # NOTE: this will have risk in model parallel\n device = next(self.bound_model.parameters()).device\n target_space.mask = target_mask.to(device)", "title": "" }, { "docid": "7e1281704968e72957c5f714cbd2f8d9", "score": "0.56101185", "text": "def _test_net(self, net, ops_list):\n ops_output = self.assertNetContainOps(net, ops_list)\n workspace.RunNetOnce(net)\n return ops_output", "title": "" }, { "docid": "de75e3cbac7e66d134031e8e5a13e2e5", "score": "0.5582417", "text": "def replace_layer_with_mask_conv(self):\r\n\r\n if self.settings.net_type in [\"preresnet\", \"resnet\"]:\r\n self.replace_layer_with_mask_conv_resnet()", "title": "" }, { "docid": "8a00e2862d96e85281fe75cde4e537c4", "score": "0.5541342", "text": "def __find_network_for_addr(addr, networks):\n logger.debug('find_network_for_addr: addr [%s]' % addr)\n net_netmask = []\n for net, net_obj in networks.items():\n subnet = str(net_obj.ip_addr) + '/' + str(net_obj.mask)\n logger.debug(\n \"Net: %s, subnet: %s\" % (net, subnet))\n if ipaddress.ip_address(unicode(addr)) in ipaddress.ip_network(unicode(subnet)):\n # return net\n logger.debug('ip %s is in subnet %s' % (addr, subnet))\n net_netmask.append(net)\n\n if len(net_netmask):\n logger.debug('network for addr %s is %s' % (addr, net_netmask))\n return net_netmask\n else:\n return False", "title": "" }, { "docid": "2606a2b803c5262896307187a2551b16", "score": "0.5533715", "text": "def __call__(self, boxes_1, boxes_2, boxes_1_masks=None, boxes_2_masks=None):\n boxes_1_rank = len(boxes_1.shape)\n boxes_2_rank = len(boxes_2.shape)\n if boxes_1_rank < 2 or boxes_1_rank > 3:\n raise ValueError(\n '`groudtruth_boxes` must be rank 2 or 3, got {}'.format(boxes_1_rank))\n if boxes_2_rank 
< 2 or boxes_2_rank > 3:\n raise ValueError(\n '`anchors` must be rank 2 or 3, got {}'.format(boxes_2_rank))\n if boxes_1_rank < boxes_2_rank:\n raise ValueError('`groundtruth_boxes` is unbatched while `anchors` is '\n 'batched is not a valid use case, got groundtruth_box '\n 'rank {}, and anchors rank {}'.format(\n boxes_1_rank, boxes_2_rank))\n\n result = iou(boxes_1, boxes_2)\n if boxes_1_masks is None and boxes_2_masks is None:\n return result\n background_mask = None\n mask_val_t = tf.cast(self.mask_val, result.dtype) * tf.ones_like(result)\n perm = [1, 0] if boxes_2_rank == 2 else [0, 2, 1]\n if boxes_1_masks is not None and boxes_2_masks is not None:\n background_mask = tf.logical_or(boxes_1_masks,\n tf.transpose(boxes_2_masks, perm))\n elif boxes_1_masks is not None:\n background_mask = boxes_1_masks\n else:\n background_mask = tf.logical_or(\n tf.zeros(tf.shape(boxes_2)[:-1], dtype=tf.bool),\n tf.transpose(boxes_2_masks, perm))\n return tf.where(background_mask, mask_val_t, result)", "title": "" }, { "docid": "2bcf32582531c81fa4a047378a1235fa", "score": "0.5496462", "text": "def checkMaskOutput(mask):\n return 255 in mask", "title": "" }, { "docid": "ed7c3464155dd26be12ec31b195f34fa", "score": "0.5470181", "text": "def find_network(address, mask):\n network = []\n for i in range(8):\n network.append(int(address[i], 16) & mask[i])\n\n return network", "title": "" }, { "docid": "6d03cfe89c9f86e02d164a6c56cbc4df", "score": "0.53981686", "text": "def is_same_network(cls, ipaddress, ipnetwork):\n return IPAddress(ipaddress) in IPNetwork(ipnetwork)", "title": "" }, { "docid": "328d243a1771db6b8766ac99db47ddd4", "score": "0.5343032", "text": "def ip_in_netmask(ip_dotted, netmask):\n\n start, end = ctools.getNetmask(*netmask)\n if start > end:\n start, end = end, start\n ip = struct.unpack(\">I\", socket.inet_aton(ip_dotted))[0]\n if start <= ip <= end:\n return True\n return False", "title": "" }, { "docid": "005a23821abb172bc25c133dd024cb3a", "score": "0.5336305", "text": "def test_map_networks(self):\n ova_networks = ['network1', 'network2']\n user_networks = ['neta', 'netb']\n vcenter_networks = {'neta': vmware.vim.Network(moId='asdf'), 'netb': vmware.vim.Network(moId='asdf')}\n\n output = vmware.map_networks(ova_networks, user_networks, vcenter_networks)\n\n self.assertTrue(isinstance(output, list))", "title": "" }, { "docid": "cffd43b722ba17d9334190fe2ab978f1", "score": "0.5335234", "text": "def matchmask(self, other):\n if len(other.data) != len(self.data):\n return False\n for w1, w2 in zip(self.data, other.data):\n if isinstance(w1, Gap) and isinstance(w2, Gap):\n #good, match\n pass\n elif w1 != w2:\n return False\n return True", "title": "" }, { "docid": "10cd24bc06bf23c4fc2f8fbd31ce4fa8", "score": "0.53164744", "text": "def _logical_unite_masks(self, maskA, maskB): \n if maskA == None:\n return maskB\n elif maskB == None:\n return maskA\n print 'Joining masks %s and %s' % (maskA.name, maskB.name)\n logicOR = vtk.vtkImageLogic()\n logicOR.SetOperationToOr()\n logicOR.SetInput1(maskA.data)\n logicOR.SetInput2(maskB.data)\n logicOR.Update()\n\n result = self._threshold_image(logicOR.GetOutput(), 1, 255)\n return Mask('Merged','',result)", "title": "" }, { "docid": "ae39b769818d7812f4f2c4789a8d5dbc", "score": "0.5313802", "text": "def _check_masks(word_mask, c_mask):\n check_sum = word_mask + c_mask\n assert(np.all(0 <= check_sum) and np.all(check_sum <= 1))", "title": "" }, { "docid": "42e490d3b9f8c6e306744123047f3e9f", "score": "0.5309034", "text": "def _generate_masks(self, 
train_num, val_num, test_num):\n train_mask = torch.zeros(self.num_nodes, device=self._device)\n train_mask[:train_num] = 1\n\n val_mask = torch.zeros(self.num_nodes, device=self._device)\n val_mask[train_num:train_num + val_num] = 1\n\n # Mask all non-test docs\n test_mask = torch.zeros(self.num_nodes, device=self._device)\n test_mask[val_num + train_num:val_num + train_num + test_num] = 1\n\n return train_mask.bool(), val_mask.bool(), test_mask.bool()", "title": "" }, { "docid": "eff3566de4f5177838c10d3a3fa835d0", "score": "0.52867216", "text": "def propagate_masks_and_winnow(self):\n\n # Propagate the masks\n self._propagate_masks()\n\n modified_op_list = self._mask_propagator.get_ops_with_non_default_ip_op_masks()\n for name in modified_op_list:\n logger.info(\"Modified Op: %s\", name)\n\n modified_modules_dict = self._module_reducer.reduce_modules(modified_op_list)\n\n if modified_modules_dict:\n ordered_module_list = self._create_modified_modules_list(modified_modules_dict)\n else:\n ordered_module_list = None\n logger.info(\"No modules were winnowed. Original model is returned.\")\n\n return self._model, ordered_module_list", "title": "" }, { "docid": "bb7c8348204338a1c1e20355f0c17caa", "score": "0.5279693", "text": "def test_network_isomorphs(network_1, network_2) -> bool:\n if not network_1 or not network_2:\n return False\n else:\n return nx.is_isomorphic(network_1, network_2)", "title": "" }, { "docid": "533f06f4bc70e4c2fca996c014b02e18", "score": "0.52738243", "text": "def node_has_mask(node):\n return \"maskChannelMask\" in node.knobs()", "title": "" }, { "docid": "600e8d73a344aa32cc7b1799f9d222e9", "score": "0.52688515", "text": "def netmask(self, netmask) :\n\t\ttry :\n\t\t\tself._netmask = netmask\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "392d0d519b7e39b01f57f9276529b161", "score": "0.52592343", "text": "def mask_recon_loss(pred_mask_logits, instances, recon_net, targets, box_ths=0.8, mask_ths=0.9, iter=0):\r\n cls_agnostic_mask = pred_mask_logits.size(1) == 1\r\n # total_num_masks = pred_mask_logits.size(0)\r\n mask_side_len = pred_mask_logits.size(2)\r\n assert pred_mask_logits.size(2) == pred_mask_logits.size(3), \"Mask prediction must be square!\"\r\n targets_classes_lst = []\r\n pred_classes_lst = []\r\n index = [0]\r\n\r\n input_gt = []\r\n input_pred = []\r\n for instances_per_image in instances:\r\n arange = torch.arange(len(instances_per_image))\r\n iou_box = pairwise_iou(instances_per_image.proposal_boxes, instances_per_image.gt_boxes)[arange, arange]\r\n\r\n gt_amodal_per_image = instances_per_image.gt_masks.crop_and_resize(\r\n instances_per_image.proposal_boxes.tensor, mask_side_len\r\n ).to(device=pred_mask_logits.device)\r\n\r\n pred_masks_per_image = pred_mask_logits[index[-1]: index[-1] + len(instances_per_image)] > 0\r\n pred_masks_per_image = pred_masks_per_image[arange, instances_per_image.gt_classes]\r\n\r\n iou_mask = torch.sum((gt_amodal_per_image * pred_masks_per_image) > 0, dim=(1, 2)).float() / \\\r\n torch.sum((gt_amodal_per_image + pred_masks_per_image) > 0, dim=(1, 2)).float()\r\n filter_inds = (iou_box > box_ths) * (iou_mask > mask_ths).nonzero()\r\n\r\n gt_classes = instances_per_image.gt_classes\r\n if cls_agnostic_mask:\r\n input_pred.append((pred_mask_logits[index[-1]: index[-1] + len(instances_per_image)][filter_inds[:, 0]][:, 0] > 0).detach())\r\n else:\r\n indices = torch.arange(gt_classes.size(0))\r\n pred_mask_logits_per_instance = pred_mask_logits[index[-1]: index[-1] + len(instances_per_image)]\r\n 
pred_mask_logits_per_instance = pred_mask_logits_per_instance[indices, gt_classes]\r\n input_pred.append((pred_mask_logits_per_instance[filter_inds[:, 0]] > 0).detach())\r\n pred_classes_lst.append(gt_classes[filter_inds[:, 0]])\r\n\r\n index.append(len(instances_per_image))\r\n\r\n for target_per_image in targets:\r\n try:\r\n input_gt.append(target_per_image.gt_masks.crop_and_resize(\r\n target_per_image.gt_boxes.tensor, mask_side_len).to(device=pred_mask_logits.device))\r\n except:\r\n import ipdb\r\n ipdb.set_trace()\r\n\r\n gt_classes = target_per_image.gt_classes\r\n targets_classes_lst.append(gt_classes)\r\n\r\n inputs = cat(input_gt + input_pred, dim=0).unsqueeze(1).float().detach()\r\n\r\n recon_masks_logits, _ = recon_net(inputs)\r\n mask_recon_loss = F.mse_loss(recon_masks_logits, inputs.to(dtype=torch.float32))\r\n # if iter % 2000 == 1:\r\n # try:\r\n # vis.images(cat(input_gt).unsqueeze(1), win_name=\"gt_mask_input_{}_{}\".format(recon_net.name, iter))\r\n # vis.images((recon_masks_logits[:cat(input_gt).size(0)]).float(),\r\n # win_name=\"gt_mask_output_{}_{}\".format(recon_net.name, iter))\r\n # except:\r\n # pass\r\n # # vis.images(cat(input_gt).unsqueeze(1), win_name=\"gt_mask_input_to_recon_{}\".format(iter))\r\n # # vis.images((recon_masks_logits[:cat(input_gt).size(0)]).float(), win_name=\"gt_mask_output_from_recon_{}\".format(iter))\r\n # if cat(input_gt).size(0) != recon_masks_logits.size(0):\r\n # try:\r\n # vis.images(cat(input_pred).unsqueeze(1), win_name=\"pred_mask_input_{}_{}\".format(recon_net.name, iter))\r\n # vis.images((recon_masks_logits[cat(input_gt).size(0):]).float(),\r\n # win_name=\"pred_mask_output_{}_{}\".format(recon_net.name, iter))\r\n # except:\r\n # pass\r\n\r\n return mask_recon_loss", "title": "" }, { "docid": "da5672770f463fd0e049874ed7610973", "score": "0.5250366", "text": "def is_valid_mask(self, mask):\n if not self.is_valid_ip_address(mask):\n return False\n\n # Make sure the chunks match the possible values\n octets = [int(octet) for octet in mask.split(\".\")]\n\n # The last octet cannot be 255\n if octets[-1] == 255:\n return False\n for octet in octets:\n if octet not in [0, 128, 192, 224, 240, 248, 252, 254, 255]:\n return False\n\n return True", "title": "" }, { "docid": "823492d247d380010cb6afb9a8a8c8e7", "score": "0.52466613", "text": "def apply_mask(data=None, weight_map=None, mask=None, method='dot_product', save_output=False, output_dir='.'):\n\n if mask is not None:\n if type(mask) is not nib.nifti1.Nifti1Image:\n raise ValueError(\"Mask is not a nibabel instance\")\n else:\n mask = nib.load(os.path.join(get_resource_path(),'MNI152_T1_2mm_brain_mask.nii.gz'))\n\n if type(data) is not nib.nifti1.Nifti1Image:\n if type(data) is str:\n if os.path.isfile(data):\n data = nib.load(data)\n elif type(data) is list:\n data = nib.funcs.concat_images(data)\n else:\n raise ValueError(\"Data is not a nibabel instance, list of files, or a valid file name.\")\n\n nifti_masker = NiftiMasker(mask_img=mask)\n data_masked = nifti_masker.fit_transform(data)\n if len(data_masked.shape) > 2:\n data_masked = data_masked.squeeze()\n\n if type(weight_map) is not nib.nifti1.Nifti1Image:\n if type(weight_map) is str:\n if os.path.isfile(weight_map):\n data = nib.load(weight_map)\n elif type(weight_map) is list:\n weight_map = nib.funcs.concat_images(weight_map)\n else:\n raise ValueError(\"Weight_map is not a nibabel instance, list of files, or a valid file name.\")\n\n weight_map_masked = nifti_masker.fit_transform(weight_map)\n if 
len(weight_map_masked.shape) > 2:\n weight_map_masked = weight_map_masked.squeeze()\n\n # Calculate pattern expression\n pexp = pd.DataFrame()\n for w in range(0, weight_map_masked.shape[0]):\n if method == 'dot_product':\n pexp = pexp.append(pd.Series(np.dot(data_masked,np.transpose(weight_map_masked[w,:]))), ignore_index=True)\n elif method == 'correlation':\n pexp = pexp.append(pd.Series(pearson(data_masked,weight_map_masked[w,:])), ignore_index=True)\n pexp = pexp.T\n\n if save_output:\n pexp.to_csv(os.path.join(output_dir,\"Pattern_Expression_\" + method + \".csv\"))\n # np.savetxt(os.path.join(output_dir,\"Pattern_Expression_\" + method + \".csv\"), pexp, delimiter=\",\")\n\n return pexp", "title": "" }, { "docid": "58a02bb995b645cfe5d54cf304a04f89", "score": "0.5234759", "text": "def update_mask(model, mask, prune_rate, prune_strategy):\n\n if prune_strategy[\"name\"] == \"local\":\n apply_mask(model, mask)\n for name, param in model.named_parameters():\n if \"weight\" not in name:\n continue\n # get magnitudes of weight matrices. ignores bias.\n weight_magnitudes = param.flatten().cpu().detach().numpy().astype(np.float64)\n weight_magnitudes = np.random.normal(scale=1e-45, size=weight_magnitudes.shape)\n weight_magnitudes = np.abs(weight_magnitudes)\n\n # gets the kth weight\n num_weights = len(weight_magnitudes)\n k = int(num_weights*prune_rate)\n kth_weight = np.partition(weight_magnitudes, k)[k]\n\n # updating mask\n mask[name] = (param.abs() > kth_weight).cpu()\n num_equal = (param.abs() == kth_weight).sum()\n if num_equal > 100:\n raise Exception(f\"{num_equal} parameters have the same magnitude {kth_weight} - use iter prune strategy\")\n elif num_equal > 1:\n print(f\"warning: {num_equal} parameters have the same magnitude {kth_weight}\")\n elif prune_strategy[\"name\"] == \"global\":\n # get magnitudes of weight matrices. ignores bias.\n apply_mask(model, mask)\n layer_weights = [(name, param) for name, param in model.named_parameters() if \"weight\" in name]\n weight_magnitudes = [param.flatten().cpu().detach().numpy().astype(np.float64) for name, param in layer_weights]\n weight_magnitudes = np.concatenate(weight_magnitudes)\n weight_magnitudes = np.abs(weight_magnitudes + np.random.normal(scale=1e-39, size=weight_magnitudes.shape))\n\n # gets the kth weight\n num_weights = len(weight_magnitudes)\n k = int(num_weights*prune_rate)\n kth_weight = np.partition(weight_magnitudes, k)[k]\n\n # updating mask\n num_equal = 0\n for name, parameter in model.named_parameters():\n if \"weight\" in name:\n mask[name] = (parameter.abs() > kth_weight).cpu()\n num_equal += (parameter.abs() == kth_weight).sum()\n if num_equal > 100:\n raise Exception(f\"{num_equal} parameters have the same magnitude {kth_weight} - use iter prune strategy\")\n elif num_equal > 1:\n print(f\"warning: {num_equal} parameters have the same magnitude {kth_weight}\")\n elif prune_strategy[\"name\"] == \"early_bird\":\n # get magnitudes of weight matrices. 
ignores bias.\n apply_mask(model, mask)\n\n bn_layers = []\n for bn_layer_name, w in model.named_children():\n if isinstance(w, torch.nn.BatchNorm2d):\n bn_layers.append((f\"{bn_layer_name}.weight\", w.weight))\n\n weight_magnitudes = [param.flatten().cpu().detach().numpy().astype(np.float64) for name, param in bn_layers]\n weight_magnitudes = np.concatenate(weight_magnitudes)\n weight_magnitudes = np.abs(weight_magnitudes + np.random.normal(scale=1e-39, size=weight_magnitudes.shape))\n\n # gets the kth weight\n num_weights = len(weight_magnitudes)\n k = int(num_weights*prune_rate)\n kth_weight = np.partition(weight_magnitudes, k)[k]\n\n # updating mask\n num_equal = 0\n for bn_layer_name, w in model.named_children():\n if isinstance(w, torch.nn.BatchNorm2d):\n mask[f\"{bn_layer_name}.weight\"] = (w.weight.abs() > kth_weight).cpu()\n num_equal += (w.weight.abs() == kth_weight).sum()\n\n if num_equal > 100:\n raise Exception(f\"{num_equal} parameters have the same magnitude {kth_weight} - use iter prune strategy\")\n elif num_equal > 1:\n print(f\"warning: {num_equal} parameters have the same magnitude {kth_weight}\")\n else:\n raise Exception(f\"prune strategy {prune_strategy} not found\")", "title": "" }, { "docid": "d7fe167ca1b426750161de2100a116d6", "score": "0.5224177", "text": "def mask_and(ev, collection, masks):\n # Start with array of True\n decision = ( ak.ones_like(ev.MET.pt)==1 )\n\n coll = getattr(ev, collection)\n\n # Flip to true if any is passed\n for t in masks:\n try:\n decision = decision & getattr(coll, t)\n except KeyError:\n continue\n return decision", "title": "" }, { "docid": "6f3ca301a0afd9856dae7083eb7b7de3", "score": "0.51992476", "text": "def apply_masks(self, *masks):\n # Ensure masks are recorded in a format to enable the modifications below.\n self.homogenise_masks()\n for dataset in self:\n dataset.apply_masks(*masks)\n return self", "title": "" }, { "docid": "6f724c77f764b5c66e131da1266f7cc7", "score": "0.5193068", "text": "def replace_layer_with_mask_conv_resnet(self):\r\n\r\n for module in self.pruned_model.modules():\r\n if isinstance(module, (PreBasicBlock, BasicBlock, Bottleneck)):\r\n # replace conv2\r\n temp_conv = MaskConv2d(\r\n in_channels=module.conv2.in_channels,\r\n out_channels=module.conv2.out_channels,\r\n kernel_size=module.conv2.kernel_size,\r\n stride=module.conv2.stride,\r\n padding=module.conv2.padding,\r\n bias=(module.conv2.bias is not None))\r\n\r\n temp_conv.weight.data.copy_(module.conv2.weight.data)\r\n if module.conv2.bias is not None:\r\n temp_conv.bias.data.copy_(module.conv2.bias.data)\r\n module.conv2 = temp_conv\r\n\r\n if isinstance(module, Bottleneck):\r\n # replace conv3\r\n temp_conv = MaskConv2d(\r\n in_channels=module.conv3.in_channels,\r\n out_channels=module.conv3.out_channels,\r\n kernel_size=module.conv3.kernel_size,\r\n stride=module.conv3.stride,\r\n padding=module.conv3.padding,\r\n bias=(module.conv3.bias is not None))\r\n\r\n temp_conv.weight.data.copy_(module.conv3.weight.data)\r\n if module.conv3.bias is not None:\r\n temp_conv.bias.data.copy_(module.conv3.bias.data)\r\n module.conv3 = temp_conv", "title": "" }, { "docid": "905d257b152f92acfc3f970b3dfc9bdd", "score": "0.51890427", "text": "def update_mask(self, new_nodes):\n arr = (torch.arange(0, self.num_nodes).unsqueeze(0).unsqueeze(1)\n .expand_as(self.mask).type(self.dtypeLong))\n new_nodes = new_nodes.unsqueeze(2).expand_as(self.mask)\n update_mask = 1 - torch.eq(arr, new_nodes).type(self.dtypeFloat)\n self.mask = self.mask * update_mask\n if 
self.probs_type == 'logits':\n # Convert 0s in mask to inf\n self.mask[self.mask == 0] = 1e20", "title": "" }, { "docid": "60b3c2c7b45a4ae45cb38dadb6bdb544", "score": "0.5178625", "text": "def _get_triplet_mask(labels: torch.Tensor) -> torch.BoolTensor:\n\n # Check that i, j and k are distinct\n indices = torch.logical_not(torch.eye(labels.size(0)).bool()).to(labels.device)\n i_not_equal_j = indices.unsqueeze(2)\n i_not_equal_k = indices.unsqueeze(1)\n j_not_equal_k = indices.unsqueeze(0)\n\n distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k\n\n\n label_equal = labels.unsqueeze(0) == labels.unsqueeze(1)\n i_equal_j = label_equal.unsqueeze(2)\n i_equal_k = label_equal.unsqueeze(1)\n\n valid_labels = ~i_equal_k & i_equal_j\n\n return valid_labels & distinct_indices", "title": "" }, { "docid": "b5949b573bd38262b834cca5099f1d01", "score": "0.5173026", "text": "def rpn_mask_loss_graph(target_masks, pred_masks):\r\n \r\n pred_masks = tf.squeeze(pred_masks, -1)\r\n target_masks = tf.squeeze(target_masks, -1) \r\n target_masks = tf.cast(target_masks, tf.float32) \r\n \r\n # Compute binary cross entropy. If no positive ROIs, then return 0.\r\n # shape: [batch, roi, num_classes]\r\n loss = K.switch(tf.size(target_masks) > 0,\r\n K.binary_crossentropy(target=target_masks, output=pred_masks),\r\n tf.constant(0.0))\r\n loss = K.mean(loss)\r\n \r\n return loss", "title": "" }, { "docid": "fe416090dd649734d6634665f6b8c646", "score": "0.5169601", "text": "def test_multiple_onboarded_networks(self, mock_sess):\n self.blink.networks = {\n \"0000\": {\"onboarded\": False, \"name\": \"foo\"},\n \"5678\": {\"onboarded\": True, \"name\": \"bar\"},\n \"1234\": {\"onboarded\": True, \"name\": \"test\"},\n }\n self.blink.get_ids()\n self.assertTrue(\"0000\" not in self.blink.network_ids)\n self.assertTrue(\"5678\" in self.blink.network_ids)\n self.assertTrue(\"1234\" in self.blink.network_ids)", "title": "" }, { "docid": "dbf48746b977afe436d2f97d868f4638", "score": "0.5161471", "text": "def elementwise_mask_propagation(cls, input_masks: List[SymbolicMask]) -> SymbolicMask:\n producers = SymbolicMaskProducer.merge_producers(input_masks)\n for input_mask in input_masks[1:]:\n if not input_masks[0].shape == input_mask.shape:\n return AmbiguousSymbolicMask(producers)\n\n return SymbolicMask(input_masks[0].shape[0], producers)", "title": "" }, { "docid": "c112d9cdfc4deb71b9e174ca9637595b", "score": "0.5154406", "text": "def apply_mask(self, mask):\n\n mask = xr.DataArray(mask, dims=(\"height\", \"width\"))\n self.data[\"images\"] = self.data[\"images\"].where(mask)", "title": "" }, { "docid": "215b66b3ee617603f01dd9954ab54aa6", "score": "0.5147337", "text": "def apply_mask(the_mask, the_matrix):\n return np.multiply(the_matrix, the_mask)", "title": "" }, { "docid": "62423dbd0b6938f7a11a38e1997dea3d", "score": "0.5138337", "text": "def ipv4_validate_mask(mask):\n tuples = mask.split('.')\n if len(tuples) != 4:\n return False\n else:\n mask_value = 0\n for i in xrange(0, 4):\n mask_value += int(tuples[i]) << 24 - 8 * i\n\n allbits = bin(mask_value)\n ones = allbits.count('1')\n correct_value = 0\n for i in xrange(32 - ones, 32):\n correct_value |= 1 << i\n\n if bin(correct_value) == allbits:\n return True\n return False", "title": "" }, { "docid": "d540f99cbec3a1501cd8fd3e70102ed1", "score": "0.5132176", "text": "def build_mask_graph(rois, feature_maps, pool_size, part_num,\n is_training, bn_decay=None, weight_decay=0.0):\n feature_map = feature_maps['deconv_features_layer4'] # [batch, vox,vox,vox,48]\n 
batch_size = feature_map.get_shape()[0]\n rois_num = rois.get_shape()[1]\n # ROI Pooling\n pooled_fea = roi_pooling(rois, feature_map, pool_size) # [batch,num_rois,15,15,15,n_ch]\n # Conv Layers\n input_batch = tf.reshape(pooled_fea,\n [-1,pool_size,pool_size,pool_size,feature_map.get_shape()[-1]])\n en1 = tf_util.conv3d(input_batch, 32, kernel_size=[3,3,3],\n padding='SAME', stride=[1,1,1], bn=True,\n is_training=is_training, scope='mask_conv1',\n bn_decay=bn_decay, weight_decay=weight_decay)\n en2 = tf_util.conv3d(en1, 32, kernel_size=[3,3,3],\n padding='SAME', stride=[1,1,1], bn=True,\n is_training=is_training, scope='mask_conv2',\n bn_decay=bn_decay, weight_decay=weight_decay)\n en3 = tf_util.conv3d(en2, 32, kernel_size=[3,3,3],\n padding='SAME', stride=[1,1,1], bn=True,\n is_training=is_training, scope='mask_conv3',\n bn_decay=bn_decay, weight_decay=weight_decay)\n en4 = tf_util.conv3d(en3, 32, kernel_size=[3,3,3],\n padding='SAME', stride=[1,1,1], bn=True,\n is_training=is_training, scope='mask_conv4',\n bn_decay=bn_decay, weight_decay=weight_decay)\n\n net = tf_util.conv3d(en4, part_num, kernel_size=[1,1,1],\n padding='SAME', stride=[1,1,1], bn=False,\n activation_fn=tf.nn.sigmoid, is_training=is_training,\n scope='mask')\n \"\"\" Masks [batch,roi_count,height,width,depth,num_classes] \"\"\"\n mask = tf.reshape(net,\n [batch_size, rois_num,\n net.get_shape()[1], net.get_shape()[2], net.get_shape()[3],\n net.get_shape()[-1]])\n return mask", "title": "" }, { "docid": "64f6dfca7d88b3898e37eac9fa0e2067", "score": "0.5126282", "text": "def netmask(self):\n return self._to_prefix(((1 << self.mask) - 1) << (32 - self.mask), 32)", "title": "" }, { "docid": "f9da7237ccd9a643caaa8b63d22ba949", "score": "0.5101369", "text": "def test_multiple_networks(self, mock_sess):\n self.blink.networks = {\n \"0000\": {\"onboarded\": False, \"name\": \"foo\"},\n \"5678\": {\"onboarded\": True, \"name\": \"bar\"},\n \"1234\": {\"onboarded\": False, \"name\": \"test\"},\n }\n self.blink.get_ids()\n self.assertTrue(\"5678\" in self.blink.network_ids)", "title": "" }, { "docid": "f18c4a5cfd3506b4e1a394f06485613b", "score": "0.5097685", "text": "def check_correct(self, check_connections: bool = True) -> None:\n network_operations.check_correct(self.nodes_set, check_connections)", "title": "" }, { "docid": "c22526a6aa41bc45e11ef03f1f42f348", "score": "0.5095648", "text": "def calc_broadcast_addr(addr = \"192.168.0.1\", mask = \"255.255.255.0\"):\n #TODO Make a own function to add enough 0 in binary, maybe also add the b' so the '&' can work propertly. \n addr = decimal_to_binary(addr)\n mask = decimal_to_binary(mask)\n print(addr, mask, \"testing 1\")\n binsum = int(addr) & int(mask)\n print(binsum, \"testing 2\")\n binsum = str(binsum)\n\n #Make this its own funct? 
Same as the 14 ish last lines in cidr_to_subnet_mask function...\n binsum2 = binsum.replace(\",\", \"\")\n chars = 31\n for i in range(chars):\n if len(binsum2) <= 31:\n if i == \"1\":\n continue\n else:\n bit = str(\"0\")\n binsum2 += bit\n binsum_split = [binsum[0:8], binsum[8:16], binsum[16:24], binsum[24:32]]\n print(binsum_split, \"testing 3\")\n binsum_split2 = []\n for i in binsum_split:\n binsum_split2.append(int(i,2))\n return \".\".join(map(str, binsum_split2))", "title": "" }, { "docid": "5fb4328c39c6f468f97669f3b51e41c8", "score": "0.5092132", "text": "def forward(\n self,\n data_flows: List[Tuple[int, Tensor, List[Adj]]],\n masks: Tensor,\n evaluate: bool = False,\n rand_net_idxs: Optional[np.ndarray] = None,\n ):\n if rand_net_idxs is not None:\n raise NotImplementedError(\"Network sampling is not used with model parallelism.\")\n\n idxs = list(range(self.n_modalities))\n net_scales, interp_masks = self.interp(masks, idxs, evaluate, device=\"cuda:0\")\n\n # Define encoder logic.\n out_pre_cat_layers = [] # Final layers before concatenation, not currently used\n\n batch_size = data_flows[0][0]\n x_store_modality = torch.zeros(\n (batch_size, self.integration_size), device=\"cuda:0\"\n ) # Tensor to store results from each modality.\n\n # Iterate over input networks\n for i, data_flow in enumerate(data_flows):\n if self.shared_encoder:\n net_idx = 0\n else:\n net_idx = idxs[i]\n device = f\"cuda:{self.net_to_cuda_mapper[net_idx]}\"\n\n x = self.encoders[net_idx](data_flow, device).to(\"cuda:0\")\n x = net_scales[:, i] * interp_masks[:, i].reshape((-1, 1)) * x\n x_store_modality += x\n\n # Embedding\n emb = self.emb(x_store_modality)\n\n # Dot product (network reconstruction)\n dot = torch.mm(emb, torch.t(emb))\n\n # Classification (if standards are provided)\n if self.cls_heads:\n classes = [head(emb) for head in self.cls_heads]\n else:\n classes = None\n\n return dot, emb, out_pre_cat_layers, net_scales, classes", "title": "" }, { "docid": "b933be7d123c8dda6ce2df2beef9c581", "score": "0.5091047", "text": "def mask_postprocess(self, mask: torch.Tensor) -> torch.Tensor:\n return", "title": "" }, { "docid": "45c3e7890de2f395855d005f5661e578", "score": "0.5087083", "text": "def test_copy_weights_resnet_module(m1, m2):\n m1_state_dict = m1.state_dict()\n m2_state_dict = m2.state_dict()\n weight_copy_flag = 1\n\n # Get m1 and m2 layer names\n m1_layer_names, m2_layer_names = [], []\n for name, param in m1_state_dict.items():\n m1_layer_names.append(name)\n for name, param in m2_state_dict.items():\n m2_layer_names.append(name)\n\n # Check if copy was succesful\n for ind in range(len(m1_layer_names)):\n if m1_layer_names[ind][:6] == 'resnet':\n if not torch.all(torch.eq(m1_state_dict[m1_layer_names[ind]].data, m2_state_dict[m2_layer_names[ind]].data)):\n weight_copy_flag = 0\n print ('Something is incorrect for layer {} and {}'.format(m1_layer_names[ind], m2_layer_names[ind]))\n\n if weight_copy_flag == 1:\n print ('All is well')", "title": "" }, { "docid": "ee148b921826474a0307721b047e8e87", "score": "0.5056179", "text": "def matches(self, other):\n\t\tassert(isinstance(other, IP))\n\t\treturn self.first[:self._mask] == other.getbits()[:self._mask]", "title": "" }, { "docid": "25624bca988cd7826a68c643512ecce6", "score": "0.50449616", "text": "def direct_check(weights, biases, perm, input_cond_hook):\n\n z3_vars = [ z3.Real('v_%d'%i) for i in range(len(perm)) ]\n z3_vars_perm = [ z3.Real('p_v_%d'%i) for i in range(len(perm)) ]\n\n solver = z3.Solver()\n\n 
solver.add(encode_perm_props.encode_perm(z3_vars, z3_vars_perm, perm))\n solver.add( z3.Not( encode_dnn.encode_network(weights, biases, z3_vars) == \n encode_dnn.encode_network(weights, biases, z3_vars_perm)) )\n input_cond_hook(z3_vars, solver)\n \n if solver.check() == z3.unsat:\n return (True, [])\n else:\n mdl = solver.model()\n return (False, ([ mdl.eval(z3.Real('v_%d'%i)) for i in range(len(perm)) ], \n [ mdl.eval(z3.Real('p_v_%d'%i)) for i in range(len(perm)) ]))", "title": "" }, { "docid": "17658420da7f11310d5d839786bf1e75", "score": "0.50425917", "text": "def ComparePredictionSeveralNetworks( im_id ):\n\n\t# plot true activation first:\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')\n\tplotting.plot_glass_brain( 'trial_name.nii.gz', display_mode='z', threshold=1, title='True')\n\tbimage_true = myData[im_id]['image'].reshape((20,20))\n\tmy_arr_mask = np.ma.masked_where(bimage_true < 0.05, bimage_true)\n\tcmap = plt.cm.YlOrRd\n\tcmap.set_bad(color='white')\n\tlimits = 100 # 90 \n\tplt.imshow( my_arr_mask , extent=(-1* limits, limits, -1*limits, limits), cmap=cmap, vmin=0)#, vmax=my_arr.max())\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Figures/networkComparison')\n\tplt.savefig('activation_id' + str(im_id) + '_true.png')\n\n\t# plot l1 network activation next:\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')\n\tplotting.plot_glass_brain( 'trial_name.nii.gz', display_mode='z', threshold=1, title='L1')\n\twvec = Variable( torch.from_numpy( test_loader.dataset[im_id]['wordVector'] ).float())\n\twvec = wvec.resize(1,200)\n\tbimage = net( wvec ).data.numpy().reshape((20,20))\n\tmy_arr_mask = np.ma.masked_where(bimage < 0.05, bimage)\n\tcmap = plt.cm.YlOrRd\n\tcmap.set_bad(color='white')\n\tlimits = 100 # 90 \n\tplt.imshow( my_arr_mask , extent=(-1* limits, limits, -1*limits, limits), cmap=cmap, vmin=0)#, vmax=my_arr.max())\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Figures/networkComparison')\n\tplt.savefig('activation_id' + str(im_id) + '_l1.png')\n\n\t# the L2 network\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')\n\tplotting.plot_glass_brain( 'trial_name.nii.gz', display_mode='z', threshold=1, title='L2')\n\twvec = Variable( torch.from_numpy( test_loader.dataset[im_id]['wordVector'] ).float())\n\twvec = wvec.resize(1,200)\n\tbimage = net_l2( wvec ).data.numpy().reshape((20,20))\n\tmy_arr_mask = np.ma.masked_where(bimage < 0.05, bimage)\n\tcmap = plt.cm.YlOrRd\n\tcmap.set_bad(color='white')\n\tlimits = 100 # 90 \n\tplt.imshow( my_arr_mask , extent=(-1* limits, limits, -1*limits, limits), cmap=cmap, vmin=0)#, vmax=my_arr.max())\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Figures/networkComparison')\n\tplt.savefig('activation_id' + str(im_id) + '_l2.png')\n\n\t# finally,the BCE network\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Data')\n\tplotting.plot_glass_brain( 'trial_name.nii.gz', display_mode='z', threshold=1, title='BCE')\n\twvec = Variable( torch.from_numpy( test_loader.dataset[im_id]['wordVector'] ).float())\n\twvec = wvec.resize(1,200)\n\tbimage = net_ce( wvec ).data.numpy().reshape((20,20))\n\tmy_arr_mask = np.ma.masked_where(bimage < 0.05, bimage)\n\tcmap = plt.cm.YlOrRd\n\tcmap.set_bad(color='white')\n\tlimits = 100 # 90 \n\tplt.imshow( my_arr_mask , extent=(-1* limits, limits, -1*limits, limits), cmap=cmap, vmin=0)#, 
vmax=my_arr.max())\n\tos.chdir('/Users/ricardo/Documents/Projects/neurosynth_dnn/Figures/networkComparison')\n\tplt.savefig('activation_id' + str(im_id) + '_BCE.png')\n\n\n\tos.system('convert +append activation_id'+str(im_id)+'_true.png activation_id'+str(im_id)+'_l1.png activation_id'+str(im_id)+'_l2.png activation_id'+str(im_id)+'_BCE.png res_id'+str(im_id)+'.png')\n\t# remove older files\n\tos.system('rm *_true.png')\n\tos.system('rm *_l1.png')\n\tos.system('rm *_l2.png')\n\tos.system('rm *_BCE.png')\n\tplt.close('all')", "title": "" }, { "docid": "367e3dd58df11b5ff77b9588710c1f0e", "score": "0.5041081", "text": "def searchIp(x, network):\n \n binary_address = int(ip.ip_address(x))\n binary_mask = int(network.netmask)\n binary_network_addr = int(network.network_address)\n return (binary_address & binary_mask == binary_network_addr)", "title": "" }, { "docid": "bb538b321ef7113c8ff5843ec44e0d0a", "score": "0.503731", "text": "def results_mask(self):\n return (self.target_mask * self.predictor_matrix_mask \n * self.model_mask)", "title": "" }, { "docid": "79822de77307bf6bd1e2f2499e1f6c81", "score": "0.50247276", "text": "def test_get_address_in_network_netmask(self, _interfaces, _ifaddresses):\n _interfaces.return_value = DUMMY_ADDRESSES.keys()\n _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__\n self._test_get_address_in_network('fd2d:dec4:cf59:3c16::1',\n 'fd2d:dec4:cf59:3c16::/64',\n fatal=False)", "title": "" }, { "docid": "4c8a1bc871a71a10dd97d1f83d627df7", "score": "0.50226295", "text": "def apply(self, weight, mask, task: int) -> None:\n pass", "title": "" }, { "docid": "da2a37f0d81bb9a9f1225d8dc9723cc8", "score": "0.50186217", "text": "def test_combine_masks(self, mock_delegate):\n backend = Mock()\n df1 = np.array([[1, 2, 3], [4, 5, 6]])\n df2 = np.array([[-1, -2, -3], [-4, -5, -6]])\n df3 = np.array([[10, 20, 30], [40, 50, 60]])\n df4 = np.array([[-10, -20, -30], [-40, -50, -60]])\n df5 = np.array([[100, 200, 300], [400, 500, 600]])\n df6 = np.array([[-100, -200, -300], [-400, -500, -600]])\n masks = [[df1, df2], [df3, df4], [df5, df6]]\n transposed_masks = [[df1, df3, df5], [df2, df4, df6]]\n backend.combine_masks = Mock()\n state = MultiDecorator(backend)\n state.combine_masks(masks)\n mock_delegate.assert_called_with('combine_masks', transposed_masks)", "title": "" }, { "docid": "68bc2c2888935213dae9c06a90b2647f", "score": "0.50121933", "text": "def in_other_network(network_object):\n for netif in network_object.netifs(sort=True):\n if hasattr(netif, \"othernet\"):\n if netif.othernet.objid != network_object.objid:\n return True\n return False", "title": "" }, { "docid": "f5253feabdfe61e6648b0576de9a2e0c", "score": "0.5005744", "text": "def test(net, data):\n\tcount = 0\n\tfor datum in data:\n\t\tinputs = datum[0]\n\t\toutput = round(datum[1][0], rounding_place)\n\t\tactual = round(net.feedforward(inputs)[0], rounding_place)\n\t\tif(round(actual, rounding_place) == output):\n\t\t\tcount += 1\n\n\treturn count", "title": "" }, { "docid": "2bdd3c5e694832096826433f4f872104", "score": "0.49945563", "text": "def _supports_masking(remask_kernel: bool):\n def supports_masking(layer):\n\n @utils.wraps(layer)\n def layer_with_masking(*args, **kwargs):\n layer_fns = layer(*args, **kwargs)\n init_fn, apply_fn, kernel_fn = layer_fns[:3]\n\n if len(layer_fns) == 3:\n # No mask propagation function supplied - use identity.\n _mask_fn = lambda mask, input_shape: mask\n elif len(layer_fns) == 4:\n # Custom mask propagation function supplied.\n _mask_fn = layer_fns[3]\n else:\n raise 
ValueError(f'Expected 3 (`init_fn`, `apply_fn`, `kernel_fn`) or 4'\n f' (..., `mask_fn`) layer functions, '\n f'got {len(layer_fns)}.')\n\n @utils.wraps(_mask_fn)\n def mask_fn(mask, input_shape):\n if mask is None:\n return None\n return _mask_fn(mask, input_shape)\n\n def apply_fn_with_masking(params, inputs, mask_constant=None, **kwargs):\n inputs = utils.get_masked_array(inputs, mask_constant)\n inputs, mask = inputs.masked_value, inputs.mask\n outputs = apply_fn(params, inputs, mask=mask, **kwargs)\n outputs_mask = mask_fn(mask,\n inputs.shape if isinstance(inputs, np.ndarray)\n else [i.shape for i in inputs])\n if outputs_mask is None:\n return outputs\n return utils.MaskedArray(outputs, outputs_mask)\n\n def kernel_fn_with_masking(kernels, **user_reqs):\n if isinstance(kernels, Kernel):\n mask1 = mask_fn(kernels.mask1, kernels.shape1)\n mask2 = mask_fn(kernels.mask2, kernels.shape2)\n elif isinstance(kernels, list):\n mask1 = mask_fn([k.mask1 for k in kernels],\n [k.shape1 for k in kernels])\n mask2 = mask_fn([k.mask2 for k in kernels],\n [k.shape2 for k in kernels])\n else:\n raise TypeError(type(Kernel), Kernel)\n\n kernels = kernel_fn(kernels, **user_reqs)\n\n if remask_kernel:\n kernels = kernels.mask(mask1, mask2)\n else:\n kernels = kernels.replace(mask1=mask1, mask2=mask2)\n return kernels\n\n if hasattr(kernel_fn, _INPUT_REQ):\n setattr(kernel_fn_with_masking,\n _INPUT_REQ,\n getattr(kernel_fn, _INPUT_REQ))\n\n return init_fn, apply_fn_with_masking, kernel_fn_with_masking\n\n return layer_with_masking\n\n return supports_masking", "title": "" }, { "docid": "24e91fa821761cc1c92864758cce6f62", "score": "0.49884927", "text": "def checkOverlappingClasses(masks):\n checkMask = np.zeros_like(masks[0])\n for i in range(len(masks)):\n checkMask += masks[i]\n maxValue = np.max(mask.ravel())\n if maxValue <= 1:\n overlapping = False\n else:\n overlapping = True\n return overlapping", "title": "" }, { "docid": "57728806ee6cba0fb5c593115bccc7bc", "score": "0.49817103", "text": "def mask_or(ev, collection, masks):\n # Start with array of False\n decision = ( ak.ones_like(ev.MET.pt)==0 )\n\n coll = getattr(ev, collection)\n\n # Flip to true if any is passed\n for t in masks:\n try:\n decision = decision | getattr(coll, t)\n except KeyError:\n continue\n return decision", "title": "" }, { "docid": "eef08556fc476118ed62789ccf204afd", "score": "0.49797404", "text": "def test_immunize_network_vaccinate(self):\n G = nx.barabasi_albert_graph(100, 5)\n network = contagion.ContactNetwork(\n G,\n fraction_infected = 0.5,\n fraction_recovered = 0.35)\n Im = copy.deepcopy(network.In)\n np.random.shuffle(Im)\n network.immunize_network(Im, efficacy = 0.7)\n self.assertEqual(np.sum(network.Im), np.sum(Im))", "title": "" }, { "docid": "10e40ff55b4720dbb95233dbaf5d3414", "score": "0.4970395", "text": "def panet_build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, 
training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x1 = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4_fc\")(x)\n x1 = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_conv4bn')(x1, training=train_bn)\n x1 = KL.Activation('relu')(x1)\n\n x1 = KL.TimeDistributed(KL.Conv2D(256, (3, 3), strides=(2, 2), padding=\"same\"),\n name=\"mrcnn_mask_conv5_fc\")(x1)\n x1 = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_conv5bn')(x1, training=train_bn)\n x1 = KL.Activation('relu')(x1)\n\n # x1 = KL.TimeDistributed(KL.Dense(256*4*4,activation=\"sigmoid\"),\n # name=\"mrcnn_mask_fc\")(x1)\n x1 = KL.TimeDistributed(KL.Flatten())(x1)\n x1 = KL.TimeDistributed(KL.Dense(28 * 28 * num_classes), name='mrcnn_mask_fc_logits')(x1)\n\n x1 = KL.Activation(\"softmax\", name=\"mrcnn_class_fc\")(x1)\n\n s = K.int_shape(x1)\n x1 = KL.Reshape((s[1], 28, 28, num_classes), name=\"mrcnn_mask_fc_reshape\")(x1)\n # x1 = KL.TimeDistributed(KL.Reshape((14,14)),name=\"mrcnn_mask_fc_reshape\")(x1)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"softmax\"),\n name=\"mrcnn_mask\")(x)\n x = KL.Add(name=\"mrcnn_mask_add\")([x, x1])\n x = KL.Activation('tanh', name=\"mrcnn_masksoftmax\")(x)\n\n return x", "title": "" }, { "docid": "a19455243a2ff96d52329b0340c0ea01", "score": "0.4970046", "text": "def im_detect_mask(model, im_scale, boxes):\n M = cfg.MRCNN.RESOLUTION\n if boxes.shape[0] == 0:\n pred_masks = np.zeros((0, M, M), np.float32)\n return pred_masks\n\n inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois_for_test(inputs, 'mask_rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.mask_net.Proto().name)\n\n # Fetch masks\n pred_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_probs')\n ).squeeze()\n\n if cfg.MRCNN.CLS_SPECIFIC_MASK:\n pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M])\n else:\n pred_masks = pred_masks.reshape([-1, 1, M, M])\n\n return pred_masks", "title": "" }, { "docid": "bb4db19f88c5226ecff5976343066c69", "score": "0.49656373", "text": "def generate_masks(model, path, save_path): \n # trainData = np.load('/var/tmp/mi714/NEW/npy_dataset/data.npy')\n # trainMask = np.load('/var/tmp/mi714/NEW/npy_dataset/dataMask.npy')\n\n # valData = np.load('/var/tmp/mi714/NEW/npy_dataset/dataval.npy')\n # valMask = np.load('/var/tmp/mi714/NEW/npy_dataset/dataMaskval.npy')\n\n testData = np.load('/var/tmp/mi714/NEW/npy_dataset/datatest.npy')\n testMask = np.load('/var/tmp/mi714/NEW/npy_dataset/dataMasktest.npy')\n\n images = [file for file in listdir(path)]\n for image in images:\n # Get image\n img_id = splitext(image)[0]\n x = Image.open(path + \"/\" + image).convert('L')\n x = np.asarray(x, dtype=np.float32)\n # Add channel axis and batch axis\n x = x[np.newaxis,...] 
\n x = x[...,np.newaxis]\n x = np.asarray(x)\n # Make the prediction and treshold it at 0.5 so that pixels predicted as\n # 1 will be shown as white while the rest is all black. Then saves.\n img = model.predict(x)\n img = img[0,:,:,:]\n _, img = cv2.threshold(img, 0.5, 255, cv2.THRESH_BINARY)\n img = np.asarray(img, dtype=np.float32)\n img = Image.fromarray(img)\n img = img.convert(\"L\")\n img.save(save_path + \"/\" + image)", "title": "" }, { "docid": "fcf590a1edb51976a77cb8c4725a3981", "score": "0.49613646", "text": "def get_ips_same_net(self, ipaddr: str) -> List[Any]:\n\n out = list()\n for device in self.devices:\n if isinstance(device, Device):\n for ip_item in device.get_ips():\n if ipaddr in ipcalc.Network('{}/{}'.format(ip_item.ipaddr, ip_item.mask)):\n out.append(ip_item)\n return out", "title": "" }, { "docid": "33fe7b33874580b2932a18a9c771dc1b", "score": "0.4957194", "text": "def check_network_overlap(new_network, configured_networks):\n if any(new_network.ip in subnet for subnet in\n configured_networks):\n raise ValidateFail(\n \"Subnet %s overlaps with another configured subnet\" % new_network)", "title": "" }, { "docid": "b3a838ad5718cd84f6022d335c3b28d8", "score": "0.4936395", "text": "def count_wrong_boolean(mn_optimals, mn_optimals_mask, outputs):\n\n wrong_count = 0\n for optimal, mask, output in zip(\n mn_optimals, mn_optimals_mask, outputs):\n \n if mask:\n continue\n if optimal != output:\n wrong_count += 1\n return wrong_count", "title": "" }, { "docid": "3a2e0523bb8689f40211a0fc554d5f26", "score": "0.4921227", "text": "def _node_match(query_net: nx.MultiGraph, target_net: nx.MultiGraph, query_node: int, target_node: int) -> bool:\n match: bool = False\n query = query_net.nodes[query_node]['kappa']\n target = target_net.nodes[target_node]['kappa']\n for site in query.get_agent_ports():\n s_name = site.get_port_name()\n s_stat = '{' + site.get_port_int_state() + '}'\n s_bond = '[' + site.get_port_bond_state() + ']' if site.get_port_bond_state() in ['.', '_', '#'] else '[_]'\n relaxed_port = KappaPort(s_name + s_bond + s_stat)\n if not any([relaxed_port in t_site for t_site in target.get_agent_ports()]):\n return match\n match = True\n return match", "title": "" }, { "docid": "104d78deefa8a568bac7208561daaf9a", "score": "0.4912635", "text": "def is_any_addr_match_flow(flow_prefix_string, flow_mask_string, prefix):\r\n\tfor ipv4addr in prefix:\r\n\t\tif is_flow_bytes_match(flow_prefix_string, flow_mask_string, str(ipv4addr)):\r\n\t\t\treturn True\r\n\telse:\r\n\t\treturn False", "title": "" }, { "docid": "39e138ef97857e7f6b7339089a960531", "score": "0.4893796", "text": "def check_networks(self):\n\n seen_nets = self._list_networks()\n seen_names = [n['name'] for n in seen_nets]\n seen_ids = [n['id'] for n in seen_nets]\n self.assertIn(self.net['network']['name'], seen_names)\n self.assertIn(self.net['network']['id'], seen_ids)\n\n if self.subnet:\n seen_subnets = self._list_subnets()\n seen_net_ids = [n['network_id'] for n in seen_subnets]\n seen_subnet_ids = [n['id'] for n in seen_subnets]\n self.assertIn(self.net['network']['id'], seen_net_ids)\n self.assertIn(self.subnet['subnet']['id'], seen_subnet_ids)\n\n if self.router:\n seen_routers = self._list_routers()\n seen_router_ids = [n['id'] for n in seen_routers]\n seen_router_names = [n['name'] for n in seen_routers]\n self.assertIn(self.router['name'],\n seen_router_names)\n self.assertIn(self.router['id'],\n seen_router_ids)", "title": "" }, { "docid": "39e138ef97857e7f6b7339089a960531", "score": "0.4893796", 
"text": "def check_networks(self):\n\n seen_nets = self._list_networks()\n seen_names = [n['name'] for n in seen_nets]\n seen_ids = [n['id'] for n in seen_nets]\n self.assertIn(self.net['network']['name'], seen_names)\n self.assertIn(self.net['network']['id'], seen_ids)\n\n if self.subnet:\n seen_subnets = self._list_subnets()\n seen_net_ids = [n['network_id'] for n in seen_subnets]\n seen_subnet_ids = [n['id'] for n in seen_subnets]\n self.assertIn(self.net['network']['id'], seen_net_ids)\n self.assertIn(self.subnet['subnet']['id'], seen_subnet_ids)\n\n if self.router:\n seen_routers = self._list_routers()\n seen_router_ids = [n['id'] for n in seen_routers]\n seen_router_names = [n['name'] for n in seen_routers]\n self.assertIn(self.router['name'],\n seen_router_names)\n self.assertIn(self.router['id'],\n seen_router_ids)", "title": "" }, { "docid": "d83361aa0d59a0e13817150d6f4f65b9", "score": "0.48916832", "text": "def put_gray_mask(imgs, mask):\n\n reset = False\n if imgs.dim() == 3:\n imgs = torch.unsqueeze(imgs, dim=0)\n mask = torch.unsqueeze(mask, dim=0)\n reset = True\n\n imgs_masked = imgs * (1 - mask) + 0.5 * mask\n\n if reset:\n return imgs_masked[0]\n else:\n return imgs_masked", "title": "" }, { "docid": "831f4e8b6c1434428bb02d934f62ab67", "score": "0.48909193", "text": "def _make_masks(ilens, olens):\n in_masks = get_mask_from_lengths(ilens) # (B, T_in)\n out_masks = get_mask_from_lengths(olens) # (B, T_out)\n return out_masks.unsqueeze(-1) & in_masks.unsqueeze(-2) # (B, T_out, T_in)", "title": "" }, { "docid": "58890f0fec0b2264e06d9bb8a7dcf9e8", "score": "0.4890798", "text": "def apply_mask_tensor(data, mask):\n return data * mask + 0.0", "title": "" }, { "docid": "6493b63694ba9c87246c3b67ec4c6d25", "score": "0.4887195", "text": "def equivalent(self, other):\n return self.base_address == other.base_address and self.cidrmask == other.cidrmask", "title": "" }, { "docid": "4e0d3fcbc0317e9e6bc6731d8f24f600", "score": "0.48866284", "text": "def test_map_address_in_network(self):\n in_net = \"127.42.254.23\"\n\n network = self.network.map_ipv4_address_to_network_address(in_net)\n self.assertEqual(network, self.network.network)", "title": "" }, { "docid": "1acfa258e6cec9061aad843681e2b21b", "score": "0.48853683", "text": "def v4_netmask_to_cidr(mask):\n powers = {255: 8, 254: 7, 252: 6, 248: 5, 240: 4, 224: 3, 192: 2, 128: 1, 0: 0}\n bit_count = 0\n elems = [elem for elem in mask.split(\".\")]\n if len(elems) != 4:\n raise ValueError(\"Invalid old-style netmask [%s]\" % mask)\n elem = None\n try:\n for elem in elems:\n bit_count += powers[int(elem)]\n except (KeyError, ValueError):\n raise ValueError(\"Invalid element [%s] in old-style netmask [%s]\" % (elem, mask))\n return bit_count", "title": "" }, { "docid": "fa64fb5180197e5717ba22949a6786da", "score": "0.4877671", "text": "def _get_anchor_positive_triplet_mask(labels: torch.Tensor, device: torch.device) -> torch.BoolTensor:\n \n # Check that i and j are distinct\n indices_equal = torch.eye(labels.size(0)).bool().to(device)\n indices_not_equal = ~indices_equal\n\n # Check if labels[i] == labels[j]\n # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)\n labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)\n\n return labels_equal & indices_not_equal", "title": "" }, { "docid": "264ec9df25cdc3843bfb0253885874b3", "score": "0.48776126", "text": "def is_power_gound_net(self, netname_list):\n if isinstance(netname_list, str):\n netname_list = [netname_list]\n power_nets_names = 
list(self.power_nets.keys())\n for netname in netname_list:\n if netname in power_nets_names:\n return True\n return False", "title": "" }, { "docid": "5505c40e91ce8d6a32b94812d21e2a61", "score": "0.48762465", "text": "def _update_target_networks(self):\n\n if self.update_target_type == \"soft\":\n self._soft_update(self.actor_active, self.actor_target)\n self._soft_update(self.critic_active, self.critic_target)\n elif self.update_target_type == \"hard\":\n self._hard_update(self.actor_active, self.actor_target)\n self._hard_update(self.critic_active, self.critic_target)", "title": "" }, { "docid": "74e315fb6ff3d958a32f0ebca0034376", "score": "0.4865949", "text": "def merge_masks(masks, weights, background_marker=-1):\n if len(masks) != len(weights):\n raise Exception('length of weights and masks must be equal')\n\n merged = np.ones(masks[0].shape) * background_marker\n\n for i in range(len(masks) - 1, -1, -1):\n merged[masks[i]] = weights[i]\n\n return merged", "title": "" }, { "docid": "2d76f2dc47ec13ec77bba19537e476d2", "score": "0.4863148", "text": "def combine_masks(self, op, *, perchannel=True):\n flat = self.flatten()\n if not (self.binary):\n raise ValueError('RoadImage.combine_masks: Image collection must be binary.')\n if perchannel:\n nbch = 1\n else:\n nbch = flat.shape[3]\n nbimg = flat.shape[0]\n if op == 'and':\n op = nbimg * nbch\n elif op == 'or':\n op = 1\n elif type(op) is str:\n raise ValueError(\"RoadImage.combine_masks: op must be 'or', 'and', or a number.\")\n elif type(op) is float:\n if op > 1. or op < 0.:\n raise ValueError(\"RoadImage.combine_masks: expressing a percentage, float op must be between 0. and 1.\")\n op = int(round(op * nbimg * nbch))\n if op == 0:\n op = 1 # Force minimum 1, otherwise pixels at zero after the sum are undecided in gradient mode\n warnings.warn(\"RoadImage.combine_masks: percentage for success is too low\", RuntimeWarning)\n\n if op < 1 or op > nbimg * nbch:\n raise ValueError(\"RoadImage.combine_masks: op must be an integer between 1 and the number of images.\")\n\n if perchannel:\n ret = np.zeros_like(flat[0:1, :, :, :])\n raw = flat.sum(axis=0, keepdims=True)\n else:\n ret = np.zeros_like(flat[0:1, :, :, 0:1])\n ret.colorspace = 'GRAY'\n raw = flat.sum(axis=(0, 3), keepdims=True)\n\n ret[(raw >= op)] = 1\n if self.gradient:\n ret[(raw <= -op)] = -1\n\n ret.binary = True\n ret.gradient = self.gradient\n ret.warped = self.warped\n\n # Return a collection of the same ndim, but all collection dimensions collapsed to shape=1.\n sh = RoadImage.__match_shape__(ret.shape[:-1], self.shape)\n sh = sh[:-1] + (ret.shape[-1],)\n\n return ret.reshape(sh)", "title": "" }, { "docid": "94bf5377da90d3d51e1c331501fe58ee", "score": "0.48596844", "text": "def _permitted_address(obj: dict, permitted: list) -> bool:\n test_network = ipaddress.ip_network(obj[\"entry\"][\"ip-netmask\"])\n for network in permitted:\n if test_network.subnet_of(ipaddress.ip_network(network)):\n return True\n\n return False", "title": "" }, { "docid": "a1863413354b97dab11dda2df6893284", "score": "0.48480445", "text": "def areFirewallsEquivalent(f1, f2):\n xs = createVariables()\n a1 = rulesToIndicatorFn(f1, xs)\n a2 = rulesToIndicatorFn(f2, xs)\n (r, _) = areEquivalent(a1, a2)\n return r", "title": "" }, { "docid": "255b4ca89e4874fb8b387c8ecbc20c2f", "score": "0.48342127", "text": "def create_tripple_masks_for_training(path, dims=(1024,1024), dtype='.bmp',\r\n is_save_newly_calced_masks=True):\r\n list_mask_files = ['mask_cornea',\r\n 'mask_ovd', \r\n 'mask_background'] \r\n 
trip_masks = []\r\n # Check if the single masks already exist and load them, else create them from the combined mask\r\n crn_file = os.path.join(path, str(list_mask_files[0] + dtype))\r\n ovd_file = os.path.join(path, str(list_mask_files[1] + dtype))\r\n bg_file = os.path.join(path, str(list_mask_files[2] + dtype))\r\n ### IF three single DONT masks already EXIST -> Calculate\r\n if not os.path.isfile(crn_file) or not os.path.isfile(ovd_file) or not os.path.isfile(bg_file) :\r\n # Load line-segmented mask\r\n if os.path.isfile(os.path.join(path, 'mask.bmp')) :\r\n raw_mask = Backend.load_single_image(os.path.join(path, 'mask.bmp'), dims)\r\n masks = create_three_masks_from_tripple_mask(raw_mask)\r\n elif os.path.isfile(os.path.join(path, 'mask.png')) :\r\n raw_mask = Backend.load_single_image(os.path.join(path, 'mask.png'), dims)\r\n # create and add all three masks in order\r\n masks, _ = create_output_channel_masks(raw_mask)\r\n trip_masks.append(masks)\r\n masks = np.asarray(masks)\r\n else :\r\n print(f\"No valid mask file found in {path}\")\r\n # save masks if flag is True\r\n if is_save_newly_calced_masks :\r\n Backend.save_single_grey_img(masks[0,:,:], ovd_file)\r\n Backend.save_single_grey_img(masks[1,:,:], crn_file)\r\n Backend.save_single_grey_img(masks[2,:,:], bg_file)\r\n print(f\"\\nSaved generated single masks for training in {path}\")\r\n ### IF they EXIST -> load them\r\n else :\r\n cornea = np.asarray(Image.open(os.path.join(path, str(list_mask_files[0] + dtype))).resize(dims))\r\n ovd = np.asarray(Image.open(os.path.join(path, str(list_mask_files[1] + dtype))).resize(dims))\r\n background = np.asarray(Image.open(os.path.join(path, str(list_mask_files[2] + dtype))).resize(dims))\r\n new_masks = np.dstack((cornea[:,:,0], ovd[:,:,0], background[:,:,0]))\r\n trip_masks.append(new_masks)\r\n return np.asarray(trip_masks, dtype=np.uint8)", "title": "" }, { "docid": "cfcdd871c438277c380370c045933658", "score": "0.48337534", "text": "def applyToMask(self, mask):\n traceMask = self.getTrace().mask\n mask.Factory(mask, traceMask.getBBox(), PARENT)[:] |= traceMask", "title": "" }, { "docid": "df3a1e21b6a1cf1c60315a9e1acdb138", "score": "0.48318604", "text": "def apply_mask_change_shape(fMRI_datas, mask):\n resampled_mask = resample_to_img(mask, fMRI_datas[0], interpolation=\"nearest\")\n resampled_mask = nb.Nifti1Image(np.array(resampled_mask.get_fdata() > 0, dtype=np.int8),\n resampled_mask.affine)\n \n # Multiply the functional image with the mask\n roi_fMRI_datas = []\n for target_data in fMRI_datas:\n roi_img = nilearn.masking.apply_mask(target_data, resampled_mask)\n roi_fMRI_datas.append(roi_img)\n \n return roi_fMRI_datas, resampled_mask", "title": "" }, { "docid": "4ea4fac96927b7f8c2a8583e02c79186", "score": "0.48291343", "text": "def _maskIsValid(currentMask: list[int]):\n return all([val == 255 for val in currentMask])", "title": "" }, { "docid": "bf6e92b5a5d6da44dbecaa025c84b3e9", "score": "0.48254877", "text": "def _aa_subnet_mask(self):\n self.is_option = True\n self.is_statement = False\n self.has_validator = True\n self._single_ip_validator()", "title": "" }, { "docid": "db49f74fc882bc465ce60c98fb8cba05", "score": "0.48235232", "text": "def check_networks(self):\n LOG.debug(\"check_networks: Start\")\n seen_nets = self._list_networks()\n seen_names = [n['name'] for n in seen_nets]\n seen_ids = [n['id'] for n in seen_nets]\n self.assertIn(self.network.name, seen_names)\n self.assertIn(self.network.id, seen_ids)\n\n seen_subnets = self._list_subnets()\n seen_net_ids = 
[n['network_id'] for n in seen_subnets]\n seen_subnet_ids = [n['id'] for n in seen_subnets]\n self.assertIn(self.network.id, seen_net_ids)\n self.assertIn(self.subnet.id, seen_subnet_ids)\n\n seen_routers = self._list_routers()\n seen_router_ids = [n['id'] for n in seen_routers]\n seen_router_names = [n['name'] for n in seen_routers]\n self.assertIn(self.router.name,\n seen_router_names)\n self.assertIn(self.router.id,\n seen_router_ids)\n LOG.debug(\"check_networks: End\")", "title": "" }, { "docid": "fdbbbfaedb04d41266627d62cb2dca6a", "score": "0.4820018", "text": "def test_network_connectivity(network) -> bool:\n if not network:\n return False\n else:\n return nx.is_connected(network)", "title": "" }, { "docid": "bdc1e789bd59371378b25566f7d09cc4", "score": "0.48170146", "text": "def define_node_masks(N, train_n, val_n):\n idx_train = range(train_n)\n idx_val = range(train_n, train_n + val_n)\n idx_test = range(train_n + val_n, N)\n train_mask = torch.BoolTensor(_sample_mask(idx_train, N))\n val_mask = torch.BoolTensor(_sample_mask(idx_val, N))\n test_mask = torch.BoolTensor(_sample_mask(idx_test, N))\n return train_mask, val_mask, test_mask, idx_train, idx_val, idx_test", "title": "" }, { "docid": "151f222f0363daac061ff8f6b1930864", "score": "0.48143077", "text": "def mask2cidr(mask):\n try:\n return sum([bin(int(x)).count(\"1\") for x in mask.split(\".\")])\n except Exception:\n return 24", "title": "" }, { "docid": "ee1527131cd4e62486b68ea7c0d6fa51", "score": "0.48143047", "text": "def masked_rdm_brain(rdm_brain, nifti_mask, debug=None):\n rdm_shape = rdm_brain.shape\n rdm_brain_1d = rdm_brain.reshape(-1, rdm_shape[3], rdm_shape[4])\n mask_data_1d = nifti_mask.get_fdata().reshape(-1)\n \n if debug != None:\n # rdm_brain과 nifti mask를 1차원으로 축약해서 mask를 씌워도\n # 같다는 공간이라는 것을 보이기 위함\n test = np.sum(np.sum(rdm_brain_1d, axis=1), axis = 1)\n \n for i in range(0, len(mask_data_1d)):\n if mask_data_1d[i] == True:\n test[i] = np.sum(test[i])\n else:\n test[i] = 0\n return test\n \n masked_data_only = rdm_brain_1d[mask_data_1d > 0, :, :]\n \n return masked_data_only", "title": "" }, { "docid": "f6b762383e3aa883cc6931ea12813c02", "score": "0.48089418", "text": "def test_router_connected_to_correct_networks(self):\n for dst_router in self.dst_routers:\n dst_ports = self.dst_cloud.neutronclient.list_ports(\n retrieve_all=True, **{'device_id': dst_router['id']})['ports']\n for src_router in self.src_routers:\n if src_router['name'] == dst_router['name']:\n src_ports = self.src_cloud.neutronclient.list_ports(\n retrieve_all=True,\n **{'device_id': src_router['id']})['ports']\n self.validate_network_name_in_port_lists(\n src_ports=src_ports, dst_ports=dst_ports)", "title": "" }, { "docid": "b9f1398590260baa3ffba059f04fb0e2", "score": "0.48080128", "text": "def is_network_changed(self):\n\n if self.net_mode_changed:\n self.net_mode_changed = False\n # return True\n\n if self.__cur_interface_name in self.__nic_dict:\n info = self.__nic_dict[self.__cur_interface_name]\n\n if info == '':\n return True\n\n if 'static' in info:\n if self.rbtn_static.get_active():\n for x in info.split('\\n'):\n if 'address' in x:\n if cmp(self.entbuf_ip.get_text().strip(), x.replace('address', '').strip()):\n return True\n if 'netmask' in x:\n if cmp(self.entbuf_mask.get_text().strip(), x.replace('netmask', '').strip()):\n return True\n if 'gateway' in x:\n if cmp(self.entbuf_gateway.get_text().strip(), x.replace('gateway', '').strip()):\n return True\n if cmp(self.entbuf_dns.get_text().strip(), 
self.__dnsinfo.replace('nameserver', '').strip()):\n return True\n else:\n return True\n elif 'dhcp' in info:\n if self.rbtn_static.get_active():\n return True\n\n return False", "title": "" }, { "docid": "373a90c6a87dd3647af5867ed10c3b92", "score": "0.48051548", "text": "def test_with_defaults(self, mask):\n\n img = image.fill(mask)\n expected = np.full((*mask.shape, 4), (0, 0, 0, 0))\n\n if mask.any():\n expected[mask] = (0, 0, 0, 255)\n\n assert (np.asarray(img) == expected).all()", "title": "" }, { "docid": "cd0c7d3ec0f2370698c5df33b304d31c", "score": "0.4803829", "text": "def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results):\n loss_mask = []\n num_imgs = len(mask_pred)\n total_pos = 0\n for idx in range(num_imgs):\n cur_mask_pred = mask_pred[idx]\n cur_gt_masks = gt_masks[idx].float()\n cur_gt_bboxes = gt_bboxes[idx]\n cur_img_meta = img_meta[idx]\n cur_sampling_results = sampling_results[idx]\n\n pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds\n num_pos = pos_assigned_gt_inds.size(0)\n # Since we're producing (near) full image masks,\n # it'd take too much vram to backprop on every single mask.\n # Thus we select only a subset.\n if num_pos > self.max_masks_to_train:\n perm = torch.randperm(num_pos)\n select = perm[:self.max_masks_to_train]\n cur_mask_pred = cur_mask_pred[select]\n pos_assigned_gt_inds = pos_assigned_gt_inds[select]\n num_pos = self.max_masks_to_train\n total_pos += num_pos\n\n gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds]\n\n mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks,\n pos_assigned_gt_inds)\n if num_pos == 0:\n loss = cur_mask_pred.sum() * 0.\n elif mask_targets is None:\n loss = F.binary_cross_entropy(cur_mask_pred,\n torch.zeros_like(cur_mask_pred),\n torch.zeros_like(cur_mask_pred))\n else:\n cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1)\n loss = F.binary_cross_entropy(\n cur_mask_pred, mask_targets,\n reduction='none') * self.loss_mask_weight\n\n h, w = cur_img_meta['img_shape'][:2]\n gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -\n gt_bboxes_for_reweight[:, 0]) / w\n gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -\n gt_bboxes_for_reweight[:, 1]) / h\n loss = loss.mean(dim=(1,\n 2)) / gt_bboxes_width / gt_bboxes_height\n loss = torch.sum(loss)\n loss_mask.append(loss)\n\n if total_pos == 0:\n total_pos += 1 # avoid nan\n loss_mask = [x / total_pos for x in loss_mask]\n\n return dict(loss_mask=loss_mask)", "title": "" }, { "docid": "66ce64931a3411df33c0086867560545", "score": "0.48027405", "text": "def check_networks(ip, allowed_networks):\n if not isinstance(ip, netaddr.IPAddress):\n raise TypeError(\"ip must be a netaddr ip address\")\n\n if not allowed_networks:\n # no valid networks were provided, so we can't make any assertions\n logger.warning(\"No valid network IP ranges were given, skipping\")\n return True\n\n if any(ip in netaddr.IPNetwork(net) for net in allowed_networks):\n return True\n\n return False", "title": "" } ]
3b7c40e01f7562f1c2c98b475a8995a0
Differentiate signals to get velocity and accelerations
[ { "docid": "d9604181fdba608edd1fd29a759bdbfe", "score": "0.5557643", "text": "def get_accel(self, isnumeric=False):\n self.isnumeric = isnumeric\n y = self.y\n ndof = self.ndof\n fs = self.fs\n\n yd = np.empty(y.shape)\n ydd = np.empty(y.shape)\n for i in range(ndof):\n yd[i, :], ydd[i, :] = differentiate(\n y[i, :], fs, isnumeric=isnumeric)\n\n self.yd = yd\n self.ydd = ydd", "title": "" } ]
[ { "docid": "8c76ae0beb9a41d26d305633e98782ae", "score": "0.6204682", "text": "def base_velocity(self):\n raise NotImplementedError('Not yet implemented!')", "title": "" }, { "docid": "621cf629c68f1d2bb317f5ec88170f2c", "score": "0.61361825", "text": "def velocity(self) -> np.ndarray:\n return self._state[3:5]", "title": "" }, { "docid": "d4a12a16577bd1dd21dccc7c4114e577", "score": "0.6079308", "text": "def getVelocity(self):\n return self.v", "title": "" }, { "docid": "ac15aee41298171ad359b863d060986e", "score": "0.60321933", "text": "def input_system():\n vx = a.odometry_data[:, 2:3] # linear velocity_y [m/s]\n vy = a.odometry_data[:, 1:2] # linear velocity_x [m/s]\n v = np.add(vx, vy)\n v = np.true_divide(v, 2) # combined velocity [m/s]\n yawrate = np.reshape(a.odometry_data[:, 3], (-1, 1)) # angular_z [rad/s]\n u = np.reshape([v, yawrate], (-1, 2))\n return u", "title": "" }, { "docid": "7cce7dd1651a7fdd7629078f294331bb", "score": "0.59254676", "text": "def acceleration(v,u,t):\n return ((v-u)/t)", "title": "" }, { "docid": "9e200b6428d79bd337d3bf5f6d491471", "score": "0.58982956", "text": "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Compute constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(VVVRIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()", "title": "" }, { "docid": "d848635661cf9c089aa45d37b98b5aaa", "score": "0.5866066", "text": "def get_acceleration(self,v,el,T_s,T_i):\n\n rad = radiation.Radiation()\n T_atm = rad.getTemp(el)\n p_atm = rad.getPressure(el)\n rho_atm = rad.getDensity(el)\n g = rad.getGravity(el)\n\n\n rho_int = p_atm/(self.Rsp_air*T_i) # Internal air density\n\n Cd = .5 # Drag Coefficient\n F_b = (rho_atm - rho_int)*self.vol*g # Force due to buyoancy\n F_d = Cd*(0.5*rho_atm*math.fabs(v)*v)*self.cs_area# Force due to Drag\n\n if F_d > 0:\n F_d = F_d * self.Upsilon\n vm = (self.massEnv + self.mp) + rho_atm*self.vol + self.vm_coeff*rho_atm*self.vol #Virtual Mass\n accel = ((F_b - F_d - (self.massEnv + self.mp)*g)/vm)\n\n return accel", "title": "" }, { "docid": "a6b89260699ba8f6b1133ae6791cd8c1", "score": "0.58383507", "text": "def get_velocity(self):\n\n vs = []\n pairs = [(-2, -1), (-3, -1), (-3, -1)]\n\n for i1, i2 in pairs:\n f1 = self.files[i1]\n p1 = 
Profile(os.path.join(self.name, f1))\n\n f2 = self.files[i2]\n p2 = Profile(os.path.join(self.name, f2))\n\n # we'll do this by looking at 3 different temperature\n # thresholds and averaging\n T_ref = [2.e9, 3.e9, 4.e9]\n\n for T0 in T_ref:\n x1 = p1.find_x_for_T(T0)\n x2 = p2.find_x_for_T(T0)\n vs.append((x1 - x2)/(p1.time - p2.time))\n\n vs = np.array(vs)\n v = np.mean(vs)\n v_sigma = np.std(vs)\n return v, v_sigma", "title": "" }, { "docid": "c59b029d7f3b0fbbd720f6d50a2cb848", "score": "0.58337855", "text": "def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 
'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel", "title": "" }, { "docid": "2d1a5227f80ef1f7fc99e9ff41e93a4b", "score": "0.58249176", "text": "def motor_velocities(self):\n return np.asarray(self._robot_state.velocity)", "title": "" }, { "docid": "c7a1b695d1b0d9f52d1f599bcc109714", "score": "0.5821854", "text": "def base_acceleration(self):\n raise NotImplementedError('Not yet implemented!')", "title": "" }, { "docid": "dc44bd176cc3124583a791ead43411ca", "score": "0.5792028", "text": "def velocity_field(self):\n return scipy.dstack((self._u_int, self._v_int))", "title": "" }, { "docid": "34fb280faaadd940c939ffbebf6d8109", "score": "0.5742188", "text": "def __init__(self, timestep=1.0 * simtk.unit.femtoseconds):\n\n super(VelocityVerletIntegrator, self).__init__(timestep)\n\n self.addPerDofVariable(\"x1\", 0)\n\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "title": "" }, { "docid": "0542c366e534f3fdf6f6c061012ad1bc", "score": "0.57288516", "text": "def cmd_velocity(self, vn, ve, vd, heading):\n pass", "title": "" }, { "docid": "f3838e42dcfdfef1c1c1fc0b69b745c7", "score": "0.56939924", "text": "def target_velocity(self, time):\n\n x_v = self.w*self.r*cos(self.w*time)\n y_v = -self.w*self.r*sin(self.w*time)\n z_v = 0\n # raise NotImplementedError\n return np.array([x_v,y_v,z_v])", "title": "" }, { "docid": "2a4ef7adfdd80ea2c5762eb34753b4b0", "score": "0.5690559", "text": "def apply_velocity(self, angles, velocity, phase, x):\r\n \r\n # VX\r\n v=velocity[0]*self.parameters[\"vx_amplitude\"]\r\n d=(x*2-1)*v\r\n if phase:\r\n angles[\"l_thigh_joint\"]+=d\r\n angles[\"l_ankle_joint\"]+=d\r\n angles[\"r_thigh_joint\"]+=d\r\n angles[\"r_ankle_joint\"]+=d\r\n else:\r\n angles[\"l_thigh_joint\"]-=d\r\n angles[\"l_ankle_joint\"]-=d\r\n angles[\"r_thigh_joint\"]-=d\r\n angles[\"r_ankle_joint\"]-=d\r\n\r\n # VY\r\n v=velocity[1]*self.parameters[\"vy_amplitude\"]\r\n d=(x)*v\r\n d2=(1-x)*v\r\n if v>=0:\r\n if phase:\r\n angles[\"l_hip_joint\"]-=d\r\n angles[\"l_foot_joint\"]-=d\r\n angles[\"r_hip_joint\"]+=d\r\n angles[\"r_foot_joint\"]+=d\r\n else:\r\n angles[\"l_hip_joint\"]-=d2\r\n angles[\"l_foot_joint\"]-=d2\r\n angles[\"r_hip_joint\"]+=d2\r\n angles[\"r_foot_joint\"]+=d2\r\n else:\r\n if phase:\r\n angles[\"l_hip_joint\"]+=d2\r\n angles[\"l_foot_joint\"]+=d2\r\n angles[\"r_hip_joint\"]-=d2\r\n angles[\"r_foot_joint\"]-=d2\r\n else:\r\n angles[\"l_hip_joint\"]+=d\r\n angles[\"l_foot_joint\"]+=d\r\n angles[\"r_hip_joint\"]-=d\r\n angles[\"r_foot_joint\"]-=d\r\n \r\n ## VT\r\n #v=velocity[2]*self.parameters[\"vt_amplitude\"]\r\n #d=(x)*v\r\n #d2=(1-x)*v\r\n #if v>=0:\r\n #if phase:\r\n #angles[\"j_pelvis_l\"]=-d\r\n #angles[\"j_pelvis_r\"]=d\r\n #else:\r\n #angles[\"j_pelvis_l\"]=-d2\r\n #angles[\"j_pelvis_r\"]=d2\r\n #else:\r\n #if phase:\r\n #angles[\"j_pelvis_l\"]=d2\r\n #angles[\"j_pelvis_r\"]=-d2\r\n #else:\r\n #angles[\"j_pelvis_l\"]=d\r\n #angles[\"j_pelvis_r\"]=-d\r", "title": "" }, { "docid": "d11f9b164dedf0e9d9622602b1620d07", "score": "0.5690368", "text": "def impulse(self,v1,v2):\n dv_peri = self.v_peri - v1\n \n dv_aphe = self.v_peri - v2\n \n return dv_peri, dv_aphe", "title": "" }, { "docid": "5c36e0defa738940cef0a44e2b8a3f5a", "score": "0.5667067", "text": "def _get_next_velocity(self):\n\n 
self._predict_state()\n\n # curr = pos_quat_to_euler(self.curr_quat)\n dest = pos_quat_to_euler(self.dest_quat_predict)\n error = self.calc_error(self.dest_quat_predict)\n # TODO error should be computed for phi, th axis individually\n\n # TODO recommend_velocity to reach desired setpoint at a given velocity\n phi_vel = self.motor_phi.recommend_velocity(dest[0])\n th_vel = self.motor_th .recommend_velocity(dest[1])\n\n if error < 0.05:\n return 0.0, 0.0\n\n # TODO this is lame\n #scale = error * 4\n #phi_vel = scale * phi_vel\n #th_vel = scale * th_vel\n\n return phi_vel, th_vel\n\n # TODO PID (control algo)... or should it be handled closer to motors?\n # TODO Path planning\n # TODO Velocity-accel curve to estimate time required to get to point\n # - Cache its integral and use as lookup to estimate if we can get\n # to point without overshoot", "title": "" }, { "docid": "a579d556a569caf7f51896a76a6bbd75", "score": "0.56113213", "text": "def __init__(self, temperature=298 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n super(AndersenVelocityVerletIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n kT = kB * temperature\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"p_collision\", timestep * collision_rate) # per-particle collision probability per timestep\n self.addPerDofVariable(\"sigma_v\", 0) # velocity distribution stddev for Maxwell-Boltzmann (computed later)\n self.addPerDofVariable(\"collision\", 0) # 1 if collision has occured this timestep, 0 otherwise\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Update velocities from Maxwell-Boltzmann distribution for particles that collide.\n #\n self.addComputePerDof(\"sigma_v\", \"sqrt(kT/m)\")\n self.addComputePerDof(\"collision\", \"step(p_collision-uniform)\") # if collision has occured this timestep, 0 otherwise\n self.addComputePerDof(\"v\", \"(1-collision)*v + collision*sigma_v*gaussian\") # randomize velocities of particles that have collided\n\n #\n # Velocity Verlet step\n #\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "title": "" }, { "docid": "ad43c06398d14da5c727c58715115c8f", "score": "0.560981", "text": "def read_acceleration(self):\n data = self.ag.read_bytes(Register.OUT_X_XL, 6)\n return lsm9ds1.to_vector_left_to_right_hand_rule(data)", "title": "" }, { "docid": "08b2918c72aa7d8692dcc764b547a3c4", "score": "0.56055874", "text": "def get_velocity(self, message):\n #print('**************** vel ')\n self.velocity = message.data\n self.state[0:self.ndegres] = self.velocity[0:self.ndegres]", "title": "" }, { "docid": "721f93bfef7ce722dc19d2fd6651002b", "score": "0.56039613", "text": "def animatevelocity(j):\n i = j/v_intervals\n uanim.set_data(x, u[j])\n massanim.set_data(t[:j], mass[:j])\n vanim.set_data(v_times[:i], velocity[:i])\n ampanim.set_data(t[:j], amplitudes[:j])\n #txt1.set_text(\"j = %s\" %j)\n if j > stop1:\n wave1.set_data(x, u[stop1])\n if j > stop2:\n wave2.set_data(x, u[stop2])\n if j > stop3:\n i = stop3/v_intervals\n wave3.set_data(x, u[stop3])\n uanim.set_data(x, u[stop3])\n massanim.set_data(t[:stop3], mass[:stop3])\n vanim.set_data(v_times[:i], velocity[:i])\n ampanim.set_data(t[:stop3], amplitudes[:stop3])\n # txt1.set_text(\"j = 
%s\" %j)\n return uanim, massanim, vanim, ampanim, wave1, wave2, wave3", "title": "" }, { "docid": "c9a53463860548b1d35aec2e0a749796", "score": "0.5602327", "text": "def get_voltage(self, i_sup, t, *args, **kwargs):\r\n raise NotImplementedError", "title": "" }, { "docid": "52245a19caca97a3d95e5b0c92ac301c", "score": "0.559854", "text": "def calc_accel(vel_data, dt):\n\n ax = np.gradient(vel_data[:, 0], dt)\n ay = np.gradient(vel_data[:, 1], dt)\n\n return np.c_[ax, ay]", "title": "" }, { "docid": "6f1e23d980a0a03f04e52431448542ff", "score": "0.5587482", "text": "def velocity(self):\n return self._vel.to_list()", "title": "" }, { "docid": "1cb48ffa72946df481c4bd862a39c2ac", "score": "0.55823004", "text": "def vel(self, time):\n if (time < self.ti):\n t = 0\n elif (time > self.tf):\n t = self.tf - self.ti\n else:\n t = time - self.ti\n return self.a1 + 2.0 * self.a2 * t + 3.0 * self.a3 * pow(t, 2) + 4.0 * self.a4 * pow(t, 3) + 5.0 * self.a5 * pow(t, 4)", "title": "" }, { "docid": "a8fcc3bcc5c4551110cd2d300a4580fe", "score": "0.5559983", "text": "def target_acceleration(self, time):\n x_a = -self.w**2*self.r*sin(self.w*time)\n y_a = -self.w**2*self.r*cos(self.w*time)\n z_a = 0\n # raise NotImplementedError\n return np.array([x_a,y_a,z_a])", "title": "" }, { "docid": "8bc890bdd00914de37ea46c67909d89a", "score": "0.5552213", "text": "def find_signal_morphology(rr_intervals, fs: float = 4):\n baseline = calculate_time_features(rr_intervals=rr_intervals)['baseline']\n vhr = rr_intervals - baseline\n accel_values = np.sort(vhr[vhr > 15]) # Change for right value\n decel_values = np.sort(vhr[vhr < -15]) # Change for right value\n accel_args = np.zeros(accel_values.shape, dtype=int)\n decel_args = np.zeros(decel_values.shape, dtype=int)\n acceleration_array = []\n deceleration_array = []\n k = 0\n for i, x in enumerate(vhr):\n if x in accel_values:\n accel_args[k] = int(i)\n k += 1\n # Make acceleration array of tuples (start, end)\n if np.sum(accel_values > 0):\n start = accel_args[0]\n end = accel_args[0]\n for i in range(len(accel_args) - 1):\n if (accel_args[i + 1] - accel_args[i] >= 2) or (i + 1 == len(accel_args) - 1):\n acceleration_array.append((start, end))\n start = accel_args[i + 1]\n else:\n end = accel_args[i + 1]\n # Make deceleration array of tuples (start, end)\n k = 0\n for i, x in enumerate(vhr):\n if x in decel_values:\n decel_args[k] = i\n k += 1\n if np.sum(decel_values < 0) > 2:\n start = decel_args[0]\n end = decel_args[0]\n for i in range(len(decel_args) - 1):\n if (decel_args[i + 1] - decel_args[i] >= 2) or (i + 1 == len(decel_args)):\n deceleration_array.append((start, end))\n start = decel_args[i + 1]\n else:\n end = decel_args[i + 1]\n delete_array = np.concatenate((accel_args, decel_args))\n vhr_pure = np.delete(vhr, delete_array)\n AmpStd = np.sqrt(np.mean(np.square(vhr_pure)))\n return baseline, AmpStd, acceleration_array, deceleration_array", "title": "" }, { "docid": "e27fc4789751b8950f4920d229899e3b", "score": "0.5550941", "text": "def get_analytical_velocities(self):\n # create empty numpy array for accelerations\n velocities = np.zeros((3, len(self.times)))\n # tangential velocity is angular velocity multiplied by radius but radius is one\n vt = self.wz\n # decompose tangential velocity in x and y components\n velocities[0, :] = vt * -sin(self.th[:, 2])\n velocities[1, :] = vt * cos(self.th[:, 2])\n # linear velocity along z axis\n velocities[2, :] = self.v0x + self.ax * self.times\n return velocities", "title": "" }, { "docid": 
"e1a541951fc783b64b670608085e313d", "score": "0.5550763", "text": "def update_vehicle_state(self):\n #vel = self.v + self.commands['throttle']/self.m/self.simulation_rate\n\n vel = self.commands['speed']\n steer = self.commands['steering_angle']\n\n if steer > 0.5:\n steer_cmd = 25\n elif steer < -0.5:\n steer_cmd = 185\n else:\n steer_cmd = 100 - 160*steer ##linear\n #steer_cmd = 100 - 640*steer**3 ##cubic\n\n #rospy.logwarn('Velocity command is '+ str(vel))\n # 130 is the lowest vel_cmd that makes the truck move.\n if vel > 12:\n vel_cmd = 161\n elif vel < 0:\n vel_cmd = 0\n else:\n vel_cmd = 3.77*vel + 117\n # rospy.logerr('throttle: ' + str(throttle))\n hw_port.set_command(vel_cmd,steer_cmd,self.vehicle_id)", "title": "" }, { "docid": "6a6fe420e1c860563a45efc058d47257", "score": "0.5538826", "text": "def signal(self, orientation):\n #return np.array([src.emission(orientation) for src in self.virtualsources]).sum(axis=0)\n #signal = 0.0\n #for src in self.virtualsources:\n #signal += src.emission(orientation)\n #return signal\n #print(orientation)\n return sum((src.emission(orientation.copy()) for src in self.virtualsources))", "title": "" }, { "docid": "e0e65929e4ec4538355374b6f24de29d", "score": "0.55347323", "text": "def get_velocity(self):\n return (self._I85_msg_from_device(self.node.sdo[0x606c].phys)) / 10 # rad/s", "title": "" }, { "docid": "413170f5bc4aeb6e9f8973f4c2a9fa47", "score": "0.55333126", "text": "def default_velocity(self) -> int:\r\n ...", "title": "" }, { "docid": "f9a3fd8764e3f88a561d862797963628", "score": "0.55302185", "text": "def update_motor_speeds(self, event):\n \n # Determine the time step for differentiation and integration\n current_time = rospy.get_time()\n dt = current_time - self.old_time\n \n # Get the motor desired speeds from the onboard controller\n motor_control = self.onboard_controller.get_control_input(dt)\n [front_left, front_right, rear_left, rear_right] = motor_control\n \n # Set the motor_cmd with the controller values\n self.vel_prop_msg.motor_cmd = [front_left, front_right, rear_left, rear_right]\n\n # Publish the motor commands for the ardrone plugin\n self.pub_vel_prop.publish(self.vel_prop_msg)\n \n # Set the old time to the current for future time step calculations\n self.old_time = current_time", "title": "" }, { "docid": "350e0eb660d2ccec2b1540aa4d2d06cd", "score": "0.5524558", "text": "def velocities(self, return_np=False):\n if return_np:\n return self.si_values()[3:6]\n return [self.v_r, self.v_t, self.v_p]", "title": "" }, { "docid": "39baeb8cd9ef54a7f4f366c3f903039c", "score": "0.5524539", "text": "def acceleration(self):\n ux,uy = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n vx,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n \n ax = self._obj['u']*ux + self._obj['v']*uy\n ay = self._obj['u']*vx + self._obj['v']*vy\n\n self._obj['w'] = xr.DataArray(np.sqrt(ax**2+ay**2), dims=['x', 'y','t'])\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'{vel_units}^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'{vel_units}^2')\n\n\n return self._obj", "title": "" }, { "docid": "f0f3ccc2a93f92485b797f98bcd236c0", "score": "0.55208963", "text": "def get_velocity(self):\n return self.momentum/self.mass", "title": "" }, { "docid": "ae4471e29ffee7beaff63ad3b3108b44", "score": "0.5515927", "text": "def velocities(self, return_np=False):\n if return_np:\n return 
self.si_values()[3:]\n return [self.v_x, self.v_y, self.v_z]", "title": "" }, { "docid": "c3f8565b56a3b2262adb0da6305f26fb", "score": "0.55081815", "text": "def obj(k_next) : \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec", "title": "" }, { "docid": "23343a80eef8a90b11abf68a874099fe", "score": "0.549042", "text": "def initvelocity():\n uanim.set_data([],[])\n massanim.set_data([],[])\n vanim.set_data([],[])\n ampanim.set_data([],[])\n # txt1.set_text(\" \")\n return uanim, massanim, vanim, ampanim", "title": "" }, { "docid": "c369f34f6cfc0e85086760673db1e300", "score": "0.54800403", "text": "def __init__(self, time, rate, type='sine'):\n self.t_v = np.zeros(time*rate)\n self.heading = np.zeros(self.t_v.shape)\n self.commands = np.zeros(self.t_v.shape)\n self.derivative = np.zeros(self.t_v.shape)", "title": "" }, { "docid": "590e29bbb99b8f755440d84f5c4899c9", "score": "0.54707676", "text": "def get_accel_data(self):\n x = self.read_i2c_word(self.ACCEL_XOUT0)\n y = self.read_i2c_word(self.ACCEL_YOUT0)\n z = self.read_i2c_word(self.ACCEL_ZOUT0)\n\n accel_scale_modifier = None\n accel_range = self.read_accel_range(True)\n\n if accel_range == self.ACCEL_RANGE_2G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n elif accel_range == self.ACCEL_RANGE_4G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G\n elif accel_range == self.ACCEL_RANGE_8G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G\n elif accel_range == self.ACCEL_RANGE_16G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G\n else:\n print(\"Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G\")\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\n x = x / accel_scale_modifier\n y = y / accel_scale_modifier\n z = z / accel_scale_modifier\n\n x = x * self.GRAVITIY_MS2\n y = y * self.GRAVITIY_MS2\n z = z * self.GRAVITIY_MS2\n return [x, y, z]", "title": "" }, { "docid": "2eb6a00ef560b0a8950223ed1e13920d", "score": "0.5451951", "text": "def _velocity_verlet(vel_update, pos_update=update.PositionUpdate()):\n return Symmetric([vel_update, pos_update])", "title": "" }, { "docid": "ad02075d65e8f5e52c72992aecd8ef53", "score": "0.54454106", "text": "def get_motor_states(self, latency=None):\n if latency is None:\n latency = self._pd_latency\n buffer = self._observation_buffer.get_delayed_value(latency)\n angle_vel_t0 = buffer.value_0\n angle_vel_t1 = buffer.value_1\n coeff = buffer.coeff\n\n pos_idx = 0\n motor_angles = angle_vel_t0[pos_idx] * (\n 1 - coeff) + coeff * angle_vel_t1[pos_idx]\n vel_idx = 1\n motor_velocities = angle_vel_t0[vel_idx] * (\n 1 - coeff) + coeff * angle_vel_t1[vel_idx]\n return motor_angles, motor_velocities", "title": "" }, { "docid": "0c782728ad29af1859112364f6833ad8", "score": "0.5437462", "text": "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "title": "" }, { "docid": 
"14772e170cb9a1952d40407d9c9398a2", "score": "0.54357463", "text": "def accelerometer(self):\n accel = [0,0,0]\n accel[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_A) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_A), 16)\n accel[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_A) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_A), 16)\n accel[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_A) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_A), 16)\n\n for i in range(X, Z+1):\n self._accel[i] = accel[i] / math.pow(2, 15) * ACCEL_SCALE\n\n return vector(self._accel)", "title": "" }, { "docid": "fedd48cc2637ecd93bb4b282a69c1ead", "score": "0.5428064", "text": "def get_cmd_velocity(self):\n return self.gripper_io.get_signal_value(\"speed_mps\")", "title": "" }, { "docid": "78aae8aeeb1c7f6a512b63eadfded0ae", "score": "0.5418935", "text": "def acceleration(self):\n return self.__accel", "title": "" }, { "docid": "6e747bcac1cdcdaca50b8e20ee63ae1a", "score": "0.54134345", "text": "def update(self):\r\n # change in position -> velocity\r\n self.position += self.velocity\r\n # change in celocity -> acceleration\r\n self.velocity += self.acceleration\r\n \r\n # if velocity magnitude is higher than the defined limit set the velocity \r\n # magnitude to max speed\r\n if np.linalg.norm(self.velocity) > self.max_speed:\r\n self.velocity = self.velocity / np.linalg.norm(self.velocity) * self.max_speed\r\n \r\n # reset the acceleration\r\n self.acceleration = Vector(*np.zeros(2))", "title": "" }, { "docid": "9af8c116efa37adbd0b6fbce4f89a8f3", "score": "0.54113925", "text": "def accelerate(self):\n\t\tself.velocity += self.direction * self.ACCELERATION", "title": "" }, { "docid": "fc949605bd27c6e12333518183a99b09", "score": "0.5411391", "text": "def velocity(self):\n return self.base_platform.velocity", "title": "" }, { "docid": "08a72fe7cf1aadc1e9baf23fa44fdfa9", "score": "0.5402486", "text": "def interpret(mid, beats, rs, vols, ts_vol=[]):\n mid = tempo_r(mid, beats, rs)\n for t_vol in ts_vol:\n mid.tracks[t_vol[0]] = velocity_r(mid.tracks[t_vol[0]],\n t_vol[1], rs[0])\n mid = volume(mid, vols)\n return mid", "title": "" }, { "docid": "54b2c6b3bb66f15b9c3204b127e742f0", "score": "0.5390275", "text": "def velocity_model(self):\n return self._velocity_model", "title": "" }, { "docid": "d8550877c3ab3049104e692b7ee4ac98", "score": "0.5388591", "text": "def cmd_vel_callback(self, msg):\n # Just store the desired velocity. 
The actual control runs on odometry callbacks\n v_l = msg.linear\n v_a = msg.angular\n self.v_linear_des = numpy.array([v_l.x, v_l.y, v_l.z])\n self.v_angular_des = numpy.array([v_a.x, v_a.y, v_a.z])", "title": "" }, { "docid": "90a59bc073a69f196ebd90ee6099a3f4", "score": "0.5387121", "text": "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "title": "" }, { "docid": "483c6953d7aec35cb0ec3c9991842e36", "score": "0.5381777", "text": "def getVelocity(self):\n\t\tif len(self.prevPositions) < 2:\n\t\t\tself.velocity = 0\n\t\telse:\n\t\t\ttime = self.position[2] - self.prevPositions[len(self.prevPositions)-1][2]\n\t\t\txdist = self.position[0][0] - self.prevPositions[len(self.prevPositions)-1][0][0]\n\t\t\tydist = self.position[0][1] - self.prevPositions[len(self.prevPositions)-1][0][1]\n\t\t\tself.velocity = (xdist,ydist,time.total_seconds())\n\t\treturn self.velocity\n\t\t\t#speed = math.pow(math.pow(1.0*xdist,2) + math.pow(1.0*ydist,2),0.5) / (1.0*time.total_seconds())", "title": "" }, { "docid": "fea6a3f117a4b5305ee8eacb2652b69c", "score": "0.537699", "text": "def updateInterface(self):\n p = self.cxn[self.selectedADR].packet()\n p.magnetv().pscurrent().psvoltage()\n p.time()\n p.temperatures()\n p.get_state_var('CompressorStatus')\n p.get_instrument_state()\n state = yield p.send()\n # change instrument statuses\n for name,status in state['get_instrument_state']:\n if status[0] == False: color = 'red3'\n elif status[1] == False: color = 'orange3'\n elif status[1] == True: color = 'green3'\n else: color = 'gray70'\n self.instrumentStatuses[name].config(bg=color)\n # change compressor button\n if state['get_state_var'] == True:\n self.compressorButton.configure(text='Stop Compressor',\n command=self.stopCompressor,\n state=Tkinter.NORMAL)\n elif state['get_state_var'] == False:\n self.compressorButton.configure(text='Start Compressor',\n command=self.startCompressor,\n state=Tkinter.NORMAL)\n else: self.compressorButton.configure(state=Tkinter.DISABLED)\n # update current, voltage fields\n temps = {}\n stages = ('T_60K','T_3K','T_GGG','T_FAA')\n for i in range(len(stages)):\n temps[stages[i]] = state['temperatures'][i]\n #if temps[stages[i]] == 'nan': temps[stages[i]] = numpy.nan\n if numpy.isnan(state['magnetv']['V']):\n emf = 'ERR'\n else:\n emf = \"{0:.3f}\".format(state['magnetv']['V'])\n if numpy.isnan(state['pscurrent']['A']):\n psI = 'PS OFF'\n else:\n psI = \"{0:.3f}\".format(state['pscurrent']['A'])\n if numpy.isnan(state['psvoltage']['V']):\n psV = 'PS OFF'\n else:\n psV = \"{0:.3f}\".format(state['psvoltage']['V'])\n self.currentBackEMF.set( emf )\n self.currentI.set( psI )\n self.currentV.set( psV )\n # update plot:\n # change data to plot\n self.stage60K.set_xdata(numpy.append(self.stage60K.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stage60K.set_ydata(numpy.append(self.stage60K.get_ydata(),temps['T_60K']['K']))\n self.stage03K.set_xdata(numpy.append(self.stage03K.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stage03K.set_ydata(numpy.append(self.stage03K.get_ydata(),temps['T_3K']['K']))\n self.stageGGG.set_xdata(numpy.append(self.stageGGG.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stageGGG.set_ydata(numpy.append(self.stageGGG.get_ydata(),temps['T_GGG']['K']))\n self.stageFAA.set_xdata(numpy.append(self.stageFAA.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stageFAA.set_ydata(numpy.append(self.stageFAA.get_ydata(),temps['T_FAA']['K']))\n #update plot\n 
self.updatePlot()\n # update legend\n labelOrder = ['T_60K','T_3K','T_GGG','T_FAA']\n lines = [self.stage60K,self.stage03K,self.stageGGG,self.stageFAA]\n labels = [l.strip('T_')+' ['+\"{0:.3f}\".format(temps[l]['K'])+'K]' for l in labelOrder]\n labels = [s.replace('1.#QOK','OoR') for s in labels]\n # legend on top (if not using this, delete \\n in title)\n self.ax.legend(lines,labels,bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=4, mode=\"expand\", borderaxespad=0.)", "title": "" }, { "docid": "2e9887b5f44eb30ee0279d86b3be5133", "score": "0.53748244", "text": "def velocities(self, return_np=False):\n\n if return_np:\n return self.si_values()[3:]\n return [self.v_r, self.v_t, self.v_p]", "title": "" }, { "docid": "47fe426d87355c135cd0d345e638607f", "score": "0.537195", "text": "def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 = U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el", "title": "" }, { "docid": "b6dfab0e9c06699f8cfa576626c39c38", "score": "0.5360369", "text": "def base_acceleration_accelerometer(self):\n raise NotImplementedError('Not yet implemented!')", "title": "" }, { "docid": "9e46b5758ffd245c2be25bb1922242a2", "score": "0.53522694", "text": "def dynamics(self,eta,nu,u_actual,u_control,sampleTime): \n \n # Current velocities\n u_c = self.V_c * math.cos(self.beta_c - eta[5]) # current surge velocity\n v_c = self.V_c * math.sin(self.beta_c - eta[5]) # current sway velocity \n \n nu_c = np.array([u_c,v_c,0,0,0,0],float) # current velocity vector\n nu_r = nu - nu_c # relative velocity vector\n \n U_r = math.sqrt( nu_r[0]**2 + nu_r[1]**2 ) # relative speed\n \n # Rudder command and actual rudder angle\n delta_c = u_control[0]\n delta = u_actual[0]\n \n # Rudder forces and moment (Fossen 2021, Chapter 9.5.1)\n b = 0.7 * self.T # rudder height\n AR = b**2 / self.Lambda # aspect ratio: Lamdba = b**2/AR \n CN = 6.13 * self.Lambda / ( self.Lambda + 2.25 ) # normal coefficient\n t_R = 1 - 0.28 * self.Cb - 0.55\n a_H = 0.4\n x_R = -0.45 * self.L\n x_H = -1.0 * self.L\n\n Xdd = -0.5 * ( 1 - t_R ) * self.rho * U_r**2 * AR * CN\n Yd = -0.25 * ( 1 + a_H ) * self.rho * U_r**2 * AR * CN \n Nd = -0.25 * ( x_R + a_H * x_H ) * self.rho * U_r**2 * AR * CN \n \n # Control forces and moment\n delta_R = -delta # physical rudder angle (rad)\n T = 
self.tau_X # thrust (N)\n t_deduction = 0.1 # thrust deduction number\n tau1 = ( 1 - t_deduction ) * T - Xdd * math.sin( delta_R )**2 \n tau2 = -Yd * math.sin( 2 * delta_R ) \n tau6 = -Nd * math.sin( 2 * delta_R ) \n tau = np.array( [ tau1, tau2, tau6 ],float) \n \n # Linear maneuvering model\n T_surge = self.L # approx. time constant in surge (s)\n xg = 0 # approx. x-coordinate, CG (m) \n \n # 3-DOF ship model\n [M,N] = clarke83(U_r,self.L, self.B, self.T,self.Cb,self.R66,xg,T_surge)\n Minv = np.linalg.inv(M)\n nu3 = np.array( [ nu_r[0], nu_r[1], nu_r[5] ]) \n nu3_dot = np.matmul( Minv, tau - np.matmul(N,nu3) ) \n \n # 6-DOF ship model\n nu_dot = np.array( [ nu3_dot[0],nu3_dot[1],0,0,0,nu3_dot[2] ]) \n\n # Rudder angle saturation\n if ( abs(delta) >= self.deltaMax * math.pi / 180 ):\n delta = np.sign(delta) * self.deltaMax * math.pi / 180\n \n # Rudder dynamics\n delta_dot = (delta_c - delta) / self.T_delta \n\n # Forward Euler integration [k+1]\n nu = nu + sampleTime * nu_dot\n delta = delta + sampleTime * delta_dot\n\n u_actual = np.array([delta],float) \n\n return nu, u_actual", "title": "" }, { "docid": "6386ced08e1e6187154525859ecbde2f", "score": "0.53493994", "text": "def velocity(self):\n return self._velocity", "title": "" }, { "docid": "6386ced08e1e6187154525859ecbde2f", "score": "0.53493994", "text": "def velocity(self):\n return self._velocity", "title": "" }, { "docid": "8fd23d696b3ebe0ffebe11b24fd1002c", "score": "0.5341301", "text": "def velocity(self, X, Y):\n self.u = self.Vinf * np.ones_like(X)\n self.v = np.zeros_like(X)", "title": "" }, { "docid": "56e63fc2b8dca407e9aeac9c24451eb9", "score": "0.53358537", "text": "def get_inputs(self):\n flight_snapshot = self.flight()\n orbit_snapshot = self.orbit()\n\n\n inputs = [flight_snapshot.heading / 360, flight_snapshot.pitch / 90, flight_snapshot.roll / 360, flight_snapshot.speed / 2000,\n flight_snapshot.horizontal_speed / 500, flight_snapshot.vertical_speed / 500, self.throttle(),\n min(self.liquid_fuel(), self.oxidizer())/100, orbit_snapshot.apoapsis_altitude / 100000,\n orbit_snapshot.periapsis_altitude /100000, orbit_snapshot.inclination, orbit_snapshot.eccentricity,\n flight_snapshot.dynamic_pressure / 1000]\n return inputs", "title": "" }, { "docid": "03976af1131a57c24c069612a1d6e417", "score": "0.5333796", "text": "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n 
self.speed_inputs_south=speed_values", "title": "" }, { "docid": "16e9488f03453fbc04d637d17a27817f", "score": "0.53331476", "text": "def velocity(df0, df1):\n velocity = df1 - df0\n return velocity", "title": "" }, { "docid": "2a0bd244e0b0a0f9306635bceac6e540", "score": "0.5325136", "text": "def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)", "title": "" }, { "docid": "bc805ad5aef2d6f887750e3541931082", "score": "0.5324752", "text": "def velocity(self, X, Y):\n self.u = (self.strength / (2 * np.pi) *\n (X - self.xc) / ((X - self.xc)**2 + (Y - self.yc)**2))\n self.v = (self.strength / (2 * np.pi) *\n (Y - self.yc) / ((X - self.xc)**2 + (Y - self.yc)**2))", "title": "" }, { "docid": "69b6b46c17ff605f3489e45b3e1a01f5", "score": "0.53216165", "text": "def _position_and_velocity(self, jd):\n pos, vel = terra(self.latitude.radians, self.longitude.radians,\n self.elevation.au, jd.gast)\n pos = einsum('ij...,j...->i...', jd.MT, pos)\n vel = einsum('ij...,j...->i...', jd.MT, vel)\n return pos, vel", "title": "" }, { "docid": "3168de0b20a9a1ec9646135dc582c8af", "score": "0.53205156", "text": "def target_velocity(self, time):\n pass", "title": "" }, { "docid": "3168de0b20a9a1ec9646135dc582c8af", "score": "0.53205156", "text": "def target_velocity(self, time):\n pass", "title": "" }, { "docid": "43c377cdf111e6f5c399350574b5e48d", "score": "0.53175", "text": "def target_acceleration(self, time):\n #return np.array([0, 0, 0])\n if time <= self.total_time/4:\n return self.path1.target_acceleration(time)\n elif time <= self.total_time/2:\n return self.path2.target_acceleration(time)\n elif time <= self.total_time/4*3:\n return self.path3.target_acceleration(time)\n else:\n return self.path4.target_acceleration(time)", "title": "" }, { "docid": "d72088d6cec4e96a245cf2e68500cbf1", "score": "0.5317101", "text": "def accel(self):\n self._read(False)\n return self._readings.accel", "title": "" }, { "docid": "d51d156df5fbca733b4d967d95960876", "score": "0.53147906", "text": "def main():\n \n def get_x_input():\n \"\"\"\n This gets the initial x position and velocity values\n Param:none\n Return:Tuple with x pos and vel\n \"\"\"\n # Ask for and validate user input for x pos and vel\n while True:\n try:\n posx = float(input(\"Please enter the initial x position in m: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n\n while True:\n try:\n velx = float(input(\"Please enter the initial x velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n #return tuple\n xinput = (posx, velx)\n return xinput\n\n def get_y_input():\n \"\"\"\n This gets the initial y position and velocity values\n Param:none\n Return:Tuple with y pos and vel\n \"\"\" \n # Ask for and validate user input for y pos and vel\n while True:\n try:\n posy = float(input(\"Please enter the initial y position in m: \"))\n\n #start at ground\n if posy < 0:\n print(\"Please enter a positive value.\")\n continue\n\n except ValueError:\n print(\"Invalid input\")\n continue\n else:\n break\n\n while True:\n try:\n vely = float(input(\"Please enter the initial y velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n # Return tuple\n yinput = (posy, vely)\n return yinput\n\n #Inital position and velocity of user input x and y\n posx0, velx0 = get_x_input()\n posy0, vely0 = get_y_input()\n \n #acceleration y acceleration is gravity\n accelx = 0.0\n GRAVITY = -9.8 \n \n #Initial time of 0s, time intervals of 
.01 s\n deltat = .01\n t = 0.0\n \n #lists of all x and y positions in the motion \n x = [posx0]\n y = [posy0]\n \n #limit of time intervals to calculate\n intervals = 4000\n\n for i in range(0, intervals):\n #increment time, add xy positions at that time\n t = t + deltat\n x.append(position(posx0, velx0, t, accelx))\n y.append(position(posy0, vely0, t, GRAVITY))\n \n #if the projectile has hit the ground, break\n if y[i+1] <= 0:\n break\n\n plot_motion(x, y)", "title": "" }, { "docid": "068ee2c9c533dfe5aaa89ac8145dd8a8", "score": "0.531138", "text": "def calculate_com(self):\n vr, vphi, gamma = self.emitter.get_velocities()\n u1, u3, gamma2 = self.emitter.get_rotation_velocities()\n math_v, gamma3 = self.emitter.get_momentum_velocity()\n rho = self.emitter.rho\n\n alpha = 5/2 * self.emitter.get_s() / rho**2\n\n E = self._E(self.chi, self.eta, self.iota, gamma, vphi, gamma2, u1, u3, math_v, gamma3)\n L = self._L(self.chi, self.eta, self.iota, gamma, vphi, gamma2, u1, u3, math_v, gamma3)\n Q = self._Q(self.chi, self.eta, L)\n\n return E, L, Q", "title": "" }, { "docid": "83d10e3f153e426ae73210f23a24169c", "score": "0.53099954", "text": "def get_numerical_derived_accelerations(self):\n # calculate numerical 2° order derivative and return it\n return np.gradient(np.gradient(self.trajectory,axis=1),axis=1)", "title": "" }, { "docid": "e10855e9c89774a134a7772abb3bda9e", "score": "0.5309556", "text": "def parse_events(self, clock: pygame.time.Clock):\n events = pygame.event.get()\n key_pressed = pygame.key.get_pressed()\n for event in events:\n if event.type == pygame.QUIT or key_pressed[K_q] or key_pressed[K_ESCAPE]:\n return False, VehicleControl()\n if event.type == pygame.JOYHATMOTION:\n hori, vert = self.joystick.get_hat(0)\n if vert > 0:\n self.max_throttle = np.clip(self.max_throttle + self.gear_throttle_step, 0, 1)\n elif vert < 0:\n self.max_throttle = np.clip(self.max_throttle - self.gear_throttle_step, 0, 1)\n\n if hori > 0:\n self.steering_offset = np.clip(self.steering_offset + self.gear_steering_step, -1, 1)\n elif hori < 0:\n self.steering_offset = np.clip(self.steering_offset - self.gear_steering_step, -1, 1)\n\n if self.use_joystick:\n self.throttle, self.steering = self._parse_joystick()\n else:\n self.throttle, self.steering = self._parse_vehicle_keys(key_pressed)\n\n return True, VehicleControl(throttle=np.clip(self.throttle, -self.max_throttle, self.max_throttle),\n steering=np.clip(self.steering, -self.max_steering, self.max_steering))", "title": "" }, { "docid": "b8b010a4c208518a6018af8064dbb72e", "score": "0.53073055", "text": "def acceleration(p,s,damp=0, v=0):\n return -p * s - damp*v", "title": "" }, { "docid": "05857342e9478291c51bab4c93b4d5ba", "score": "0.53053457", "text": "def calculate_signals(self):\n raise NotImplementedError(\"Should implement calculate_signals()\")", "title": "" }, { "docid": "afea55aaca9d51d2e041ae35eba44ab9", "score": "0.5302394", "text": "def caculate_signals(self):\n\t\traise NotImplementedError(\"Should implement calculate_signals()\")", "title": "" }, { "docid": "ea5807af7d6837a2cfa8c6d408a1ba47", "score": "0.53001416", "text": "def update(self, gyro, accel, deltaT):\r\n gyro = np.array(gyro)\r\n accel = np.array(accel)\r\n q = self.quaternion\r\n qDot1 = 0.5 * (-q[1] * gyro[0] - q[2] * gyro[1] - q[3] * gyro[2])\r\n qDot2 = 0.5 * ( q[0] * gyro[0] + q[2] * gyro[2] - q[3] * gyro[1])\r\n qDot3 = 0.5 * ( q[0] * gyro[1] - q[1] * gyro[2] + q[3] * gyro[0])\r\n qDot4 = 0.5 * ( q[0] * gyro[2] + q[1] * gyro[1] - q[2] * gyro[0])\r\n\r\n qdot = 
[qDot1, qDot2, qDot3, qDot4]\r\n\r\n # Normalise accelerometer measurement\r\n if norm(accel) is 0:\r\n warnings.warn(\"accelerometer is zero\")\r\n else:\r\n accel /= norm(accel)\r\n\r\n # Auxiliary variables to avoid repeated calculations\r\n _2q0 = 2.0 * q[0]\r\n _2q1 = 2.0 * q[1]\r\n _2q2 = 2.0 * q[2]\r\n _2q3 = 2.0 * q[3]\r\n _4q0 = 4.0 * q[0]\r\n _4q1 = 4.0 * q[1]\r\n _4q2 = 4.0 * q[2]\r\n _8q1 = 8.0 * q[1]\r\n _8q2 = 8.0 * q[2]\r\n q0q0 = q[0] * q[0]\r\n q1q1 = q[1] * q[1]\r\n q2q2 = q[2] * q[2]\r\n q3q3 = q[3] * q[3]\r\n\r\n # Gradient descent algorithm corrective step\r\n s0 = _4q0 * q2q2 + _2q2 * accel[0] + _4q0 * q1q1 - _2q1 * accel[1]\r\n s1 = _4q1 * q3q3 - _2q3 * accel[0] + 4.0 * q0q0 * q[1]- _2q0 * accel[1] - _4q1 + _8q1 * q1q1 + _8q1 * q2q2 + _4q1 * accel[2]\r\n s2 = 4.0 * q0q0 * q[2] + _2q0 * accel[0] + _4q2 * q3q3 - _2q3 * accel[1] - _4q2 + _8q2 * q1q1 + _8q2 * q2q2 + _4q2 * accel[2]\r\n s3 = 4.0 * q1q1 * q[3] - _2q1 * accel[0] + 4.0 * q2q2 * q[3] - _2q2 * accel[1]\r\n\r\n s = np.array([s0, s1, s2, s3])\r\n s /= norm(s)\r\n\r\n # Apply Feedback Step\r\n qdot -= self.beta*s #(q * Quaternion(0, gyroscope[0], gyroscope[1], gyroscope[2])) * 0.5 - self.beta * step.T\r\n\r\n # Integrate to yield quaternion\r\n q += qdot * self.samplePeriod\r\n self.quaternion /= norm(q) # normalise quaternion\r", "title": "" }, { "docid": "27913448b14304c5fc704db7f93e9210", "score": "0.52960736", "text": "def sent_velocity(self,velocity):\n if self.mode == 3: # Profiled Velocity\n self.node.sdo[0x6040].bits[0] = 1\n self.node.sdo[0x6040].bits[1] = 1\n self.node.sdo[0x6040].bits[2] = 1\n self.node.sdo[0x6040].bits[3] = 1\n # self.node.sdo[0x6040].bits[7] = 0\n velocity = 10 * self._I85_msg_to_device(velocity)\n self.node.sdo.download(0x60ff, 0x0, self._decTohex_32(velocity)) # velocity", "title": "" }, { "docid": "86a7b67bdf352fff18c669b4cf2f05ee", "score": "0.529247", "text": "def get_velocity(self, time_points, interval_indexes=None):\n return self.interpolator.eval_first_derivative(time_points, interval_indexes)", "title": "" }, { "docid": "0bb537fe213b40a045f801078b7c91d2", "score": "0.5287083", "text": "def _velocity_verlet_multiple(vel_updates, pos_update=update.PositionUpdate()):\n return Symmetric(vel_updates + [pos_update])", "title": "" }, { "docid": "76a175239e9084e8c01cf4fb9a796eb7", "score": "0.5283823", "text": "def evaluate(self, *args, **kwargs):\n return self.constant_velocity", "title": "" }, { "docid": "0c73ebd7f187398a5de3640e1f06ec62", "score": "0.52681595", "text": "def vx0(self):\n return self.params['vx0']", "title": "" }, { "docid": "a168d586f4656f595b880df47e4ca28c", "score": "0.5258774", "text": "def target_acceleration(self, time):\n pass", "title": "" }, { "docid": "a168d586f4656f595b880df47e4ca28c", "score": "0.5258774", "text": "def target_acceleration(self, time):\n pass", "title": "" }, { "docid": "16185d6284cfb9c9d32bf36b7d2f5233", "score": "0.5258207", "text": "def set_velocities(self):\r\n self.wx = np.copy(Turbine.wzero)\r\n self.wy = np.copy(Turbine.wzero)", "title": "" }, { "docid": "114683c5ba15c4290c807aa22d29c10a", "score": "0.5257647", "text": "def reactive_power(self, params=None):\n if self.inv is None:\n raise der.DERError('DER not initialized')\n\n try:\n if params is not None:\n ena = params.get('Ena')\n if ena is not None:\n if ena is True:\n self.inv.volt_var.ModEna = 1\n else:\n self.inv.volt_var.ModEna = 0\n\n q = params.get('Q')\n if q is not None:\n self.inv.volt_var.ActCrv = 1 # use curve 1\n n_pt = int(self.inv.volt_var.NPt)\n from numpy 
import linspace\n v = linspace(90, 110, n_pt)\n q = [q]*n_pt\n # Meaning of dependent variable: 1=%WMax 2=%VArMax 3=%VArAval.\n curve_params = {'DeptRef': 2, 'RmpTms': 0, 'RmpDecTmm': 0, 'RmpIncTmm': 0,\n 'v': v, 'var': q}\n if params.get('RmpTms') is not None:\n curve_params['RmpTms'] = params.get('RmpTms')\n if params.get('RmpTms') is not None:\n curve_params['RmpDecTmm'] = params.get('RmpTms')\n curve_params['RmpIncTmm'] = params.get('RmpTms')\n self.volt_var_curve(id=self.inv.volt_var.ActCrv, params=curve_params)\n\n win_tms = params.get('WinTms')\n if win_tms is not None:\n self.inv.volt_var.WinTms = win_tms\n rmp_tms = params.get('RmpTms')\n if rmp_tms is not None:\n self.inv.volt_var.RmpTms = rmp_tms\n rvrt_tms = params.get('RvrtTms')\n if rvrt_tms is not None:\n self.inv.volt_var.RvrtTms = rvrt_tms\n\n self.inv.volt_var.write()\n\n else:\n params = {}\n self.inv.volt_var.read()\n if self.inv.volt_var.ModEna == 0:\n params['Ena'] = False\n else:\n params['Ena'] = True\n params['WinTms'] = self.inv.volt_var.WinTms\n params['RmpTms'] = self.inv.volt_var.RmpTms\n params['RvrtTms'] = self.inv.volt_var.RvrtTms\n if self.inv.volt_var.ActCrv != 0:\n params['curve'] = self.volt_var_curve(id=self.inv.volt_var.ActCrv)\n params['Q'] = self.inv.volt_var_curve.var[0]\n\n except Exception, e:\n raise der.DERError(str(e))\n\n return params", "title": "" }, { "docid": "32bedaef0ecf1657a2241a27794e27de", "score": "0.5256659", "text": "def headingRx(self, inputs):\n result = {}\n ag = bpy.context.scene.objects[self.userid]\n for into in inputs:\n for i in into:\n emitterAgent = self.sim.agents[i]\n # eVel = emitterAgent.globalVelocity\n\n z = mathutils.Matrix.Rotation(-emitterAgent.arz, 4, 'Z')\n y = mathutils.Matrix.Rotation(-emitterAgent.ary, 4, 'Y')\n x = mathutils.Matrix.Rotation(-emitterAgent.arx, 4, 'X')\n\n rotation = x * y * z\n emitHead = Vector((0, 1, 0)) * rotation\n\n target = emitHead - ag.location\n\n z = mathutils.Matrix.Rotation(ag.rotation_euler[2], 4, 'Z')\n y = mathutils.Matrix.Rotation(ag.rotation_euler[1], 4, 'Y')\n x = mathutils.Matrix.Rotation(ag.rotation_euler[0], 4, 'X')\n\n rotation = x * y * z\n relative = target * rotation\n\n changez = math.atan2(relative[0], relative[1]) / math.pi\n changex = math.atan2(relative[2], relative[1]) / math.pi\n\n result[i] = changex\n return result", "title": "" }, { "docid": "d1a5e0268ca59348948f99e79d06b8ec", "score": "0.5254494", "text": "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "title": "" }, { "docid": "35f62b871ff38ed4775fc1123e6557ff", "score": "0.525418", "text": "def eval_accel(self,t,endBehavior='halt') -> Vector:\n res = Trajectory.deriv_state(self,t,endBehavior)\n return res[len(res)//2:]", "title": "" }, { "docid": "bc1fe61dcb49c0a654a52b815befdaa4", "score": "0.52527064", "text": "def target_velocity(self, time):\n \"\"\"\n start_point = self.points[self.cur_start]\n cur_target = self.points[(self.cur_start + 1) % 4]\n total_time = self.total_time / 4\n avg_vel = (cur_target - start_point)/ total_time\n return avg_vel\n \"\"\"\n total_time = self.total_time\n if time <= self.total_time/4:\n return self.path1.target_velocity(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time <= self.total_time/2:\n return self.path2.target_velocity(time - (total_time/4 + 0.5))\n\n 
elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= self.total_time/4*3:\n return self.path3.target_velocity(time - (total_time/2 + 1))\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n\n else:\n return self.path4.target_velocity(time - (total_time/4*3 + 1.5))", "title": "" }, { "docid": "571dcd93adb12dae07c93125cd786322", "score": "0.5252642", "text": "def update(self):\n self.velocity = [math.cos(self.angle), - math.sin(self.angle)]\n self.velocity = [self.speed * i for i in self.velocity]\n\n super().update()", "title": "" }, { "docid": "ae162e9e2c20e8a29c4fc904b7bd2ebc", "score": "0.52519864", "text": "def generate(self): \r\n \r\n self.pfn={} # phase joint functions \r\n self.afn={} # anti phase joint functions\r\n\r\n ## Foot and hip -> Lateral motion\r\n foot_func=SinusoidFunction()\r\n foot_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n foot_func.amplitude= self.parameters[\"foot_amplitude\"]\r\n foot_func.amplitude_offset= self.parameters[\"foot_amplitude_offset\"]\r\n foot_func.phase_offset= self.parameters[\"foot_phase_offset\"]\r\n self.pfn[\"l_foot_joint\"]=foot_func \r\n foot_func_af=foot_func.mirror()\r\n self.afn[\"l_foot_joint\"]=foot_func_af\r\n \r\n hip_func=SinusoidFunction()\r\n hip_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n hip_func.amplitude= self.parameters[\"hip_amplitude\"]\r\n hip_func.amplitude_offset= self.parameters[\"hip_amplitude_offset\"]\r\n hip_func.phase_offset= self.parameters[\"hip_phase_offset\"]\r\n self.pfn[\"l_hip_joint\"]=hip_func\r\n hip_func_af=hip_func.mirror()\r\n self.afn[\"l_hip_joint\"]=hip_func_af\r\n \r\n ## Thigh, ankle and knee -> Frontal motion\r\n thigh_func=SinusoidFunction()\r\n thigh_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n thigh_func.amplitude= self.parameters[\"thigh_amplitude\"]\r\n thigh_func.amplitude_offset= self.parameters[\"thigh_amplitude_offset\"]\r\n thigh_func.phase_offset= self.parameters[\"thigh_phase_offset\"]\r\n self.pfn[\"l_thigh_joint\"]=thigh_func\r\n thigh_func_af=thigh_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_thigh_joint\"]=thigh_func_af\r\n \r\n ankle_func=SinusoidFunction()\r\n ankle_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n ankle_func.amplitude= self.parameters[\"ankle_amplitude\"]\r\n ankle_func.amplitude_offset= self.parameters[\"ankle_amplitude_offset\"]\r\n ankle_func.phase_offset= self.parameters[\"ankle_phase_offset\"]\r\n self.pfn[\"l_ankle_joint\"]=ankle_func\r\n ankle_func_af=ankle_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_ankle_joint\"]=ankle_func_af\r\n \r\n knee_func=SinusoidFunction()\r\n knee_func.angular_frequency= self.parameters[\"step_frequency\"]\r\n knee_func.amplitude= self.parameters[\"knee_amplitude\"]\r\n knee_func.amplitude_offset= self.parameters[\"knee_amplitude_offset\"]\r\n knee_func.phase_offset= self.parameters[\"knee_phase_offset\"]\r\n self.pfn[\"l_knee_joint\"]=knee_func\r\n knee_func_af=knee_func.mirror_keep_amplitude_offset()\r\n self.afn[\"l_knee_joint\"]=knee_func_af\r\n \r\n #f3=SinusoidFunction()\r\n #f3.angular_frequency=self.parameters[\"step_frequency\"]\r\n #f3.amplitude=self.parameters[\"step_amplitude\"]\r\n #f3.amplitude_offset=self.parameters[\"step_amplitude_offset\"]\r\n #self.pfn[\"l_thigh_joint\"]= f3\r\n #f33=f3.clone()\r\n #f33.amplitude_offset = self.parameters[\"ankle_amplitude_offset\"]\r\n #f33.amplitude = self.parameters[\"ankle_amplitude\"]\r\n #self.pfn[\"l_ankle_joint\"]=f33\r\n #f4=f3.mirror()\r\n 
##f4.amplitude_offset -= 0.4\r\n #self.pfn[\"l_knee_joint\"]=f4\r\n \r\n #f5=f3.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_thigh_joint\"]=f5\r\n \r\n #f6=f33.mirror_keep_amplitude_offset()\r\n #self.afn[\"l_ankle_joint\"]=f6\r\n #f7=f5.mirror()\r\n ##f7.amplitude_offset -= 0.4\r\n #self.afn[\"l_knee_joint\"]=f7\r\n \r\n self.generate_right()\r\n \r\n self.show()", "title": "" }, { "docid": "94e3b8b8219ecdb4ef6260def7366363", "score": "0.52420586", "text": "def __init__(self, mass, radius, position, velocity):\n self.mass = mass\n self.radius = radius\n self.position = position\n self.velocity = velocity\n print(self.velocity)\n self.vafter = np.copy(velocity) # temp storage for velocity of next step\n self.delete = False", "title": "" }, { "docid": "88db291051366b259fe91c9451c8b9bb", "score": "0.5241781", "text": "def update(self):\n \n self.accelerometer()\n self.magnetometer()", "title": "" }, { "docid": "f53ca569bf8bf91cd11a399f482a0efd", "score": "0.52409357", "text": "def step(self, dt, bodies):\n k1x = self.v_x\n k1y = self.v_y\n \n k1vx, k1vy = sum([body.compute_acceleration(self.x, self.y) for body \\\n in bodies])\n \n k2x = self.v_x + dt/2 * k1vx\n k2y = self.v_y + dt/2 * k1vy\n \n k2vx, k2vy = sum([body.compute_acceleration(self.x + dt/2 * k1x, \n self.y + dt/2 * k1y) for \\\n body in bodies])\n \n k3x = self.v_x + dt/2 * k2vx\n k3y = self.v_y + dt/2 * k2vy\n \n k3vx, k3vy = sum([body.compute_acceleration(self.x + dt/2 * k2x, \n self.y + dt/2 * k2y) for \\\n body in bodies])\n \n k4x = self.v_x + dt * k3vx\n k4y = self.v_y + dt * k3vy\n \n k4vx, k4vy = sum([body.compute_acceleration(self.x + dt * k3x, \n self.y + dt * k3y) for \\\n body in bodies])\n \n x_n1 = self.x + dt/6 * (k1x + 2*k2x + 2*k3x + k4x)\n y_n1 = self.y + dt/6 * (k1y + 2*k2y + 2*k3y + k4y)\n vx_n1 = self.v_x + dt/6 * (k1vx + 2*k2vx + 2*k3vx + k4vx)\n vy_n1 = self.v_y + dt/6 * (k1vy + 2*k2vy + 2*k3vy + k4vy)\n \n self.x = x_n1\n self.y = y_n1\n self.v_x = vx_n1\n self.v_y = vy_n1", "title": "" } ]
49fd8840ef9368227d2b7e660821b272
+ Truncate/drop all tables and recreate them + Delete all packages in package_files
[ { "docid": "0533af5678f26a8b1eb78b03d099c715", "score": "0.0", "text": "def clear(server_path, force=False):\n # Confirm:\n ans = input(\"Are you sure you want to delete all packages? [yN]\")\n if ans.lower() in ('y', 'yes'):\n pass\n elif ans.lower() in('n', 'no'):\n logger.debug(\"User aborted.\")\n sys.exit(0)\n else:\n logger.debug(\"Unknown response '%s'. Aborting.\" % ans)\n sys.exit(1)\n\n # Read the config file to find our other locations\n # TODO: I hate this...\n sys.path.append(server_path)\n import config\n\n # Delete all the packages\n pkg_path = Path(config.SERVER_PATH) / Path(config.PACKAGE_DIR)\n try:\n shutil.rmtree(str(pkg_path))\n except FileNotFoundError:\n logger.warn(\"Path '%s' does not exist.\" % str(pkg_path))\n\n # Delete/drop the database\n if config.DB_BACKEND == 'sqlite':\n sqlite_path = Path(config.SERVER_PATH) / Path(config.DB_NAME)\n try:\n sqlite_path.unlink()\n except FileNotFoundError:\n logger.warn(\"Path '%s' does not exist.\" % str(pkg_path))\n\n # And receate the directories and database based on the config file.\n logger.info(\"Recreating database and package dir.\")\n _create_directories(server_path, config.PACKAGE_DIR, \"/var/log/pynuget\")\n _create_db(config.DB_BACKEND, config.DB_NAME, server_path)\n\n return False", "title": "" } ]
[ { "docid": "7a68d64a77b898c481a9e4d0c0265a0d", "score": "0.7258292", "text": "def _clear_all_tables(self):\n self.session.close()\n self.meta_data.drop_all()\n self.meta_data.create_all()\n clear_mappers()", "title": "" }, { "docid": "53f3ca5be2e84c984f9ff654fafc18e1", "score": "0.725158", "text": "def clean_tables(self):\n self.database.province.drop()\n self.database.canton.drop()\n self.database.district.drop()\n self.database.electors.drop()", "title": "" }, { "docid": "a0acbcf8c5e4d7eb1a178c6035f6cb24", "score": "0.7097086", "text": "def drop_tables(self):", "title": "" }, { "docid": "2f5caf226b48aee6514bc703754abbbc", "score": "0.7094322", "text": "def cleanup_db(db):\n\n for table in reversed(db.metadata.sorted_tables):\n try:\n db.session.execute(table.delete())\n except SQLAlchemyError:\n print(f\"Failed to delete table {table}\")\n pass", "title": "" }, { "docid": "434a3312096989f381f41d5bf1e40569", "score": "0.7074394", "text": "def _purgeTables(cls):\n for table in cls.TABLES:\n table = getattr(models, table)\n try:\n print 'Deleting %s table ...' % table.__name__\n table.query.delete()\n except Exception:\n print 'Deleting %s table ...' % table.name\n table.delete()\n\n db.session.commit()", "title": "" }, { "docid": "a7c5e7ef887136d4e2c47ede75d543d2", "score": "0.6975551", "text": "def truncate_tables(self):", "title": "" }, { "docid": "40ee387a0f3d5f4cd025b99ce3382698", "score": "0.6975543", "text": "def drop_tables_after_running_tests():\n commands = (\n \"\"\"\n DROP TABLE users\n \"\"\",\n \"\"\"\n DROP TABLE orders \n \"\"\"\n )\n \n cursor = connection.cursor()\n # create table one by one\n for command in commands:\n cursor.execute(command)\n # commit the changes\n connection.commit()", "title": "" }, { "docid": "774c3708daeefca44b63306291235718", "score": "0.69469106", "text": "def reset():\n\n drop = prompt_bool('Drop all tables? 
All data will be lost...')\n if drop:\n db.drop_all()\n db.session.commit()\n\n config = _get_config(None)\n alembic.command.stamp(config, 'base')\n alembic.command.upgrade(config, 'head')", "title": "" }, { "docid": "cb47e15e98830ffdd75b8f54a0e3dda8", "score": "0.68749416", "text": "def clear_cache(self):\n self._execute(models.Name.objects.drop_table())\n self._execute(models.Package.objects.drop_table())\n self._setup_db()\n self.connection.commit()", "title": "" }, { "docid": "48e2b936f98bc9548a2d2415a38a189f", "score": "0.68680894", "text": "def schema_cleanup():\n engine = api.get_engine()\n with engine.begin() as conn:\n inspector = sa.inspect(engine)\n metadata = sa.schema.MetaData()\n tbs = []\n all_fks = []\n\n for table_name in inspector.get_table_names():\n fks = []\n for fk in inspector.get_foreign_keys(table_name):\n if not fk[\"name\"]:\n continue\n fks.append(\n sa.schema.ForeignKeyConstraint((), (), name=fk[\"name\"]))\n table = sa.schema.Table(table_name, metadata, *fks)\n tbs.append(table)\n all_fks.extend(fks)\n\n if engine.name != \"sqlite\":\n for fkc in all_fks:\n conn.execute(sa.schema.DropConstraint(fkc))\n for table in tbs:\n conn.execute(sa.schema.DropTable(table))\n\n if engine.name == \"postgresql\":\n sqla_100 = int(sa.__version__.split(\".\")[0]) >= 1\n\n if sqla_100:\n enums = [e[\"name\"] for e in sa.inspect(conn).get_enums()]\n else:\n enums = conn.dialect._load_enums(conn).keys()\n\n for e in enums:\n conn.execute(\"DROP TYPE %s\" % e)", "title": "" }, { "docid": "c14ea12925e50247640297010e4dcee3", "score": "0.68139476", "text": "def db_drop_and_create_all():\n db.drop_all()\n db.create_all()", "title": "" }, { "docid": "c14ea12925e50247640297010e4dcee3", "score": "0.68139476", "text": "def db_drop_and_create_all():\n db.drop_all()\n db.create_all()", "title": "" }, { "docid": "48ae4db822ee876581ac3e37aa092bf7", "score": "0.6764665", "text": "def reset_db():\n BaseModel.metadata.drop_all(engine)\n BaseModel.metadata.create_all(engine)", "title": "" }, { "docid": "8eb7b926578b552d34e96369baa5dcdf", "score": "0.6763454", "text": "def recreate_db():\n db.drop_all()\n db.create_all()\n db.session.commit()", "title": "" }, { "docid": "8eb7b926578b552d34e96369baa5dcdf", "score": "0.6763454", "text": "def recreate_db():\n db.drop_all()\n db.create_all()\n db.session.commit()", "title": "" }, { "docid": "c2e4c4afca41b22d5fa80dac27e6b7a8", "score": "0.67546177", "text": "def drop_tables():\n with db:\n db.execute_sql('SET FOREIGN_KEY_CHECKS=0;')\n for table in MODELS:\n if table.table_exists():\n log.info('Dropping database table: %s', table.__name__)\n db.drop_tables([table], safe=True)\n\n db.execute_sql('SET FOREIGN_KEY_CHECKS=1;')", "title": "" }, { "docid": "164784cde0c806b4fa86dbce39980e40", "score": "0.67267114", "text": "def _clean_prepopulated_tables(self: typing.Any) -> None:\n logger.info(\n \"*** Removing automatically created entries in the new \" \"database...\"\n )\n for table in [\n \"alembic_version\",\n \"chart\",\n \"connection\",\n \"job\",\n \"known_event_type\",\n \"kube_resource_version\",\n \"kube_worker_uuid\",\n \"serialized_dag\",\n \"slot_pool\",\n \"task_instance\",\n ]:\n logger.info('*** Removing entries from the table \"%s\"...', table)\n output = EnvironmentUtils.execute_command_in_a_pod(\n self.worker_pod_namespace,\n self.worker_pod_name,\n self.worker_container_name,\n \"psql postgres://root:${SQL_PASSWORD}\"\n f\"@{self.sql_proxy}/{self.temporary_database_name} \"\n f\"-p 3306 -t -c 'DELETE FROM {table};'\",\n )\n 
logger.info(output)", "title": "" }, { "docid": "f51c98a963cb1784fd687eed31f22791", "score": "0.6706717", "text": "def drop_schema(self):\n self.cursor.executescript(\"\"\"\n DROP TABLE IF EXISTS gauged_data;\n DROP TABLE IF EXISTS gauged_keys;\n DROP TABLE IF EXISTS gauged_writer_history;\n DROP TABLE IF EXISTS gauged_cache;\n DROP TABLE IF EXISTS gauged_statistics;\n DROP TABLE IF EXISTS gauged_metadata\"\"\")\n self.db.commit()", "title": "" }, { "docid": "0568235104066dbdb470b040af3815e8", "score": "0.66986644", "text": "def truncate_tables(driver: IndexDriverABC, base) -> None:\n with driver.engine.begin() as txn:\n for table in reversed(base.metadata.sorted_tables):\n # do not clear schema versions so each test does not re-trigger migration.\n if table.name not in [\"index_schema_version\", \"alias_schema_version\"]:\n txn.execute(f\"TRUNCATE {table.name} CASCADE;\")", "title": "" }, { "docid": "f6c85141cac56983223e1466ae9c2b3f", "score": "0.66973126", "text": "def reset_db():\n with connection.cursor() as cursor:\n\n if (settings.CLEAR_PUBLIC_SCHEMA_ON_FIXTURIZE.lower() == \"true\"):\n cursor.execute(\"select tablename from pg_tables where schemaname = 'geocity' or schemaname = 'public'\")\n tables = [row[0] for row in cursor.fetchall() if row[0] not in {'spatial_ref_sys'}]\n else: # some user might don't want to clear public schema\n cursor.execute(\"select tablename from pg_tables where schemaname = 'geocity'\")\n tables = [row[0] for row in cursor.fetchall()]\n # Can't use query parameters here as they'll add single quotes which are not\n # supported by postgres\n for table in tables:\n cursor.execute('drop table \"' + table + '\" cascade')\n\n # Call migrate so that post-migrate hooks such as generating a default Site object\n # are run\n management.call_command(\"migrate\", \"--noinput\", stdout=StringIO())", "title": "" }, { "docid": "bb478faff3f8c4c0a72bc33bafbaf6a7", "score": "0.6677677", "text": "def reset_database():\n # Reset database\n db.drop_all()\n\n # Create tables\n db.create_all()", "title": "" }, { "docid": "2b42d147028ab5a6da1ea56aeda93e9f", "score": "0.66726756", "text": "def tearDown(self):\n\t\tdb.session.rollback()\n\t\tdb.drop_all()", "title": "" }, { "docid": "89be54b1693b145208e32f7e4bc0f1e6", "score": "0.6665903", "text": "def clear_schema(self):\n self.cursor.executescript(\"\"\"\n DELETE FROM gauged_data;\n DELETE FROM gauged_keys;\n DELETE FROM gauged_writer_history;\n DELETE FROM gauged_cache;\n DELETE FROM gauged_statistics;\n DELETE FROM sqlite_sequence WHERE name = 'gauged_keys'\"\"\")\n self.db.commit()", "title": "" }, { "docid": "5d5808f531d8b15b25a7237b067618dd", "score": "0.665157", "text": "def drop_tables(cls):\n Base.metadata.drop_all(cls.engine)", "title": "" }, { "docid": "d6049cc6bdd88f95fc7c437fb89c0ec4", "score": "0.6646681", "text": "def clear_data(db):\n for table in reversed(db.metadata.sorted_tables):\n db.session.execute(table.delete())\n db.session.commit()", "title": "" }, { "docid": "919fe1c2cf292deac7c6a7cd29c5ef71", "score": "0.6641903", "text": "def tearDown(self):\r\n self.Base.metadata.drop_all()", "title": "" }, { "docid": "919fe1c2cf292deac7c6a7cd29c5ef71", "score": "0.6641903", "text": "def tearDown(self):\r\n self.Base.metadata.drop_all()", "title": "" }, { "docid": "e8c4cf87777fb51a6de32b40b6bba237", "score": "0.6639143", "text": "def resetdb():\n from app.exts import db\n\n # drop all tables\n db.drop_all()\n\n # install postgis if not already done\n sql = \"SELECT count(extname) FROM pg_extension WHERE extname = 
'postgis'\"\n if (db.engine.execute(sql).rowcount == 0): db.engine.execute(\"CREATE EXTENSION postgis;\")\n\n # create all tables\n db.create_all()", "title": "" }, { "docid": "d4aaa37cd4b51cf54806b820f7396225", "score": "0.6639", "text": "def _drop_tables(self):\n cursor = self.connection.cursor()\n\n cursor.execute(\"drop table if exists Users;\")\n\n self.connection.commit()", "title": "" }, { "docid": "540b7e2babeb353688bc7eb6ff967435", "score": "0.6625557", "text": "def truncate_tables(session=None):\n tables = [Line.__table__, Post.__table__, Point.__table__, Map.__table__]\n for table in tables:\n session.execute(table.delete())", "title": "" }, { "docid": "aee38b803cd24e56d8f9a6974b8e36e2", "score": "0.66014314", "text": "def tearDown(self):\r\n self.db.drop_all()", "title": "" }, { "docid": "98c39c3d35298c7b1a9e4289556a74a4", "score": "0.6593637", "text": "def _do_final_cleanup(conn, logger, is_locked, tables_to_delete):\n if is_locked:\n with conn.cursor() as cursor:\n cursor.execute('SELECT pg_advisory_unlock(%s::BIGINT)', [hash_string_64bit('dirbs-classify')])\n\n with conn.cursor() as cursor:\n remaining_tables_to_delete = copy.copy(tables_to_delete)\n for t in tables_to_delete:\n try:\n cursor.execute(sql.SQL('DROP TABLE IF EXISTS {0} CASCADE').format(sql.Identifier(t)))\n conn.commit()\n remaining_tables_to_delete.remove(t)\n except: # noqa: E722\n for t_not_deleted in remaining_tables_to_delete:\n logger.warning('Failed to drop table {0} due to exception. Please issue '\n \"\\'DROP TABLE IF EXISTS {0}\\' manually!\".format(t_not_deleted))\n raise", "title": "" }, { "docid": "76505fbc3c364f575edf5cf1e6e18a27", "score": "0.6587759", "text": "def drop_schema():\n\n print_warning('Dropping all models...')\n migration_services.drop_all()", "title": "" }, { "docid": "008d955b838d22c7b36987c872f20ea0", "score": "0.6586391", "text": "async def drop_all_tables(self):\n try:\n self.db.drop_tables([self.models.User, self.models.Message, self.models.Channel, self.models.Server,\n self.models.LoveTransaction], safe=True)\n except OperationalError as e:\n self.config.logger.error(e)", "title": "" }, { "docid": "89a98d1d967448968073ba49da271a5c", "score": "0.658625", "text": "def _do_final_cleanup(conn, logger, is_locked, tables_to_delete):\n if is_locked:\n with conn.cursor() as cursor:\n cursor.execute('SELECT pg_advisory_unlock(%s::BIGINT)', [hash_string_64bit('dirbs-classify')])\n\n with conn.cursor() as cursor:\n remaining_tables_to_delete = copy.copy(tables_to_delete)\n for t in tables_to_delete:\n try:\n cursor.execute(sql.SQL('DROP TABLE IF EXISTS {0} CASCADE').format(sql.Identifier(t)))\n conn.commit()\n remaining_tables_to_delete.remove(t)\n except: # noqa: E722\n for t_not_deleted in remaining_tables_to_delete:\n logger.warn('Failed to drop table {0} due to exception. Please issue '\n '\\'DROP TABLE IF EXISTS {0}\\' manually!'.format(t_not_deleted))\n raise", "title": "" }, { "docid": "7c5156299deddca2fc377faecbf520ea", "score": "0.6582826", "text": "def drop_db():\n print 'Drop DB'\n from yamp.app import db, engine\n from yamp.models import Base, __all__ as model_files\n from importlib import import_module\n for model_file in model_files:\n import_module('yamp.models.' + model_file, __name__)\n print '* Model \"%s\" loaded.' % model_file\n Base.metadata.drop_all(bind=engine)\n db.commit()\n print '... 
All tables deleted!'\n raise SystemExit", "title": "" }, { "docid": "8c19f8fbeb2fdcb8afc0c9a6817224ee", "score": "0.6581242", "text": "def clean_tables(self):\n print(\"Deleting all registry data\")\n with connection.cursor() as cursor:\n logger.debug(\"Execute 'TRUNCATE `padronelectoral_elector`' \")\n # Delete in raw for optimization\n cursor.execute('TRUNCATE `padronelectoral_elector`')\n\n # Using cascade aproach to delete other tables\n print(Province.objects.all().delete())", "title": "" }, { "docid": "22ff81a13745e4e37aba8621ecc119e4", "score": "0.65716606", "text": "def cleanUp():\n engine = create_engine('sqlite:///categoryitem.db')\n connection = engine.connect() \n print \"connected\"\n trans = connection.begin() \n try: \n connection.execute(\"drop table if exists item\")\n connection.execute(\"drop table if exists category\")\n trans.commit() \n connection.close()\n print \"tables dropped\"\n except: \n trans.rollback()\n connection.close()\n print \"error while dropping tables ..\"", "title": "" }, { "docid": "8434d6926220509d601a72857db56a8e", "score": "0.65655965", "text": "def clean_database():\n\n\tcommands = [\n\t\t\"\"\"\n\t\tDO $$\n\t\tBEGIN \n\t\t\tIF \n\t\t\t\tEXISTS (SELECT 1 FROM pg_type WHERE typname = 'recurrence_options') \n\t\t\t\tTHEN DROP TYPE recurrence_options CASCADE; \n\t\t\tEND IF;\n\t\tEND$$;\n\t\t\"\"\",\n\t\t\"\"\"\n\t\tDROP SCHEMA public CASCADE;\n\t\t\"\"\",\n\t\t\"\"\"\n\t\tCREATE SCHEMA public;\n\t\t\"\"\",\n\t\t\"\"\"\n\t\tGRANT ALL ON SCHEMA public TO cxp;\n\t\t\"\"\",\n\t\t\"\"\"\n\t\tALTER SCHEMA public OWNER TO cxp\n\t\t\"\"\",\n\t\t\"\"\"\n\t\tGRANT ALL ON SCHEMA public TO public;\n\t\t\"\"\"\n\t]\n\n\tif(execute_commands(commands)):\n\t\treturn True\n\n\treturn False", "title": "" }, { "docid": "690703a805aaf495d0607d5a8a9d1381", "score": "0.6561056", "text": "def reset_db():\n\n webapp.dbsql.drop_all()\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()", "title": "" }, { "docid": "c86078145dcb103e5e9476b9f3e5eea8", "score": "0.65594906", "text": "def drop_sql_tables(self, session):\n temp_metadata = MetaData(bind=session.bind)\n temp_metadata.reflect()\n temp_metadata.drop_all()", "title": "" }, { "docid": "67cdfe4cd55ddccf38f87df43392afc6", "score": "0.6555699", "text": "def drop_tables():\n commands = (\n \"\"\"\n DROP TABLE album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE compositor CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE autor CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE concerto CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE criticamusica CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE utilizador CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE playlist CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE grupomusical CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE artista CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE editora CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE periodoeditora CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE criticaalbum CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_concerto CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE grupomusical_artista CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_utilizador CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_playlist CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_genero CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE musica_ficheiro_album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE genero CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE utilizador_musica_ficheiro CASCADE\n \"\"\")\n\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"dropmusic\", 
user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n # DROP table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "title": "" }, { "docid": "c2012096589bc8a854180956950f571f", "score": "0.65556324", "text": "def tearDown(self):\n\n db.session.rollback()\n db.session.remove()\n db.drop_all()", "title": "" }, { "docid": "335c7c385741dd0d02f472660a8e8d4a", "score": "0.6550352", "text": "def drop_db():\n\n db.drop_all()\n db.engine.execute(\"DROP TABLE IF EXISTS alembic_version;\")\n print(\"TABLES DROPPED\")", "title": "" }, { "docid": "f0d837649abf9c6503840dbd36a4236f", "score": "0.6527915", "text": "def tearDown(self):\n\n db.drop_all()\n db.create_all()", "title": "" }, { "docid": "92208109aebd5f4de87cefcb73d98bf5", "score": "0.65214145", "text": "def dbreset(self): \r\n self.db.droptable()", "title": "" }, { "docid": "0508c8b8c3574972788daa6fedc7fc33", "score": "0.6518474", "text": "def delete_all_tables(self):\n for table in self.table_names:\n self.delete_table(table)\n self.close()", "title": "" }, { "docid": "539f5cf47262a2b4d77195627fcfdd21", "score": "0.6517126", "text": "def drop_all_tables(self, engine):\n try:\n self.metadata.drop_all(engine)\n except Exception:\n pass", "title": "" }, { "docid": "c37325ace30e675aed58f09bc6861214", "score": "0.6484484", "text": "def repack_database(instance: Recorder) -> None:\n assert instance.engine is not None\n dialect_name = instance.engine.dialect.name\n\n # Execute sqlite command to free up space on disk\n if dialect_name == SupportedDialect.SQLITE:\n _LOGGER.debug(\"Vacuuming SQL DB to free space\")\n with instance.engine.connect() as conn:\n conn.execute(text(\"VACUUM\"))\n conn.commit()\n return\n\n # Execute postgresql vacuum command to free up space on disk\n if dialect_name == SupportedDialect.POSTGRESQL:\n _LOGGER.debug(\"Vacuuming SQL DB to free space\")\n with instance.engine.connect().execution_options(\n isolation_level=\"AUTOCOMMIT\"\n ) as conn:\n conn.execute(text(\"VACUUM\"))\n conn.commit()\n return\n\n # Optimize mysql / mariadb tables to free up space on disk\n if dialect_name == SupportedDialect.MYSQL:\n _LOGGER.debug(\"Optimizing SQL DB to free space\")\n with instance.engine.connect() as conn:\n conn.execute(text(f\"OPTIMIZE TABLE {','.join(ALL_TABLES)}\"))\n conn.commit()\n return", "title": "" }, { "docid": "aff75d877b47330561f0fb98e4675854", "score": "0.6454899", "text": "def drop_all_tables(self):\n tables = self.get_tables()\n table_names = \"\"\n if len(tables) > 0 :\n for ind, table in enumerate(tables):\n if ind == 0:\n table_names = str(table.split('.')[1])\n else:\n table_names = table_names + \", \" + str(table.split('.')[1])\n self.__db.query(\"DROP TABLE \" + table_names)\n else:\n print(\"Nothing to delete.\")", "title": "" }, { "docid": "3c2d10f305dfbab0659e546630e71ec2", "score": "0.6420399", "text": "def clean_db(db_name):\n\n with sqlite3.connect(db_name) as conn:\n db_clear(conn, drop=False)\n db_create(conn)", "title": "" }, { "docid": "3436d93b6f9b55a59eb3aed2e8a3c0ae", "score": "0.64160824", "text": "def tearDown(self):\n drop_tables(config['testing'].db)", "title": "" }, { "docid": "5d10a15d702d0b26a3b1228a458af17d", "score": "0.64109564", "text": "def drop_db():\n db.drop_all()", "title": "" }, { "docid": "5d10a15d702d0b26a3b1228a458af17d", 
"score": "0.64109564", "text": "def drop_db():\n db.drop_all()", "title": "" }, { "docid": "5d10a15d702d0b26a3b1228a458af17d", "score": "0.64109564", "text": "def drop_db():\n db.drop_all()", "title": "" }, { "docid": "5d10a15d702d0b26a3b1228a458af17d", "score": "0.64109564", "text": "def drop_db():\n db.drop_all()", "title": "" }, { "docid": "5d10a15d702d0b26a3b1228a458af17d", "score": "0.64109564", "text": "def drop_db():\n db.drop_all()", "title": "" }, { "docid": "a2b1a9763753c70ab1761b949cb7fac4", "score": "0.6409978", "text": "def cleanup_old_databases(cls):\r\n for path in cls.generate_db_paths():\r\n if os.path.exists(path):\r\n try:\r\n os.remove(path)\r\n except EnvironmentError:\r\n logging.warn(\"Share.cleanup_old_databases(): error \"\r\n \"removing %s\" % path)", "title": "" }, { "docid": "036b96dace7cd69872e977f5a92e3ea0", "score": "0.64008874", "text": "def clear_db(self):\n # Iterate over the tables\n table_data = self.get_table_data()\n for table_cfg in table_data[::-1]:\n table_name = table_cfg['name']\n id_col = table_cfg['id_col']\n self.clear_table(table_name, id_col)", "title": "" }, { "docid": "4a84c5ede836a53e687326d702e66a25", "score": "0.63975203", "text": "def tearDown(self):\n self.app.db.drop_all()", "title": "" }, { "docid": "7bfe8c6cf24807c50375faee4fccb850", "score": "0.6396696", "text": "def db_drop_all():\r\n from cosmos.framework.database import db\r\n from flask.ext.script import prompt_bool\r\n if prompt_bool('Are you sure you want to drop all the tables in the database'):\r\n db.drop_all()", "title": "" }, { "docid": "e89d88c12f5a8a934a6160e1ee501670", "score": "0.6386763", "text": "def clear_all_tables():\n Follow.query.delete()\n UserPost.query.delete()\n ForumPost.query.delete()\n ForumQuestion.query.delete()\n Message.query.delete()\n User.query.delete()\n db.session.commit()", "title": "" }, { "docid": "accb63ea7e7d6535becc88c669f4b3f2", "score": "0.6375781", "text": "def rebuild_db():\n from my_calendar import create_app\n app = create_app()\n with app.app_context():\n db.drop_all()\n db.create_all()", "title": "" }, { "docid": "c299ca2284ed0b11ae5cbec70235baf4", "score": "0.6373973", "text": "def __rollback(self):\n for fname in self.__installedFiles:\n if os.path.exists(fname):\n os.remove(fname)\n for dname in self.__installedDirs:\n if os.path.exists(dname):\n shutil.rmtree(dname)", "title": "" }, { "docid": "f8d763d5d1fff3cdcd1253307f94510e", "score": "0.63705283", "text": "def destroy_postgres(connection):\n\n # queries below generate a resultset with rows containing SQL queries\n # which can be executed to drop the db content\n postgres_gen_drop_tables = \"\"\"\nselect 'drop table if exists \"' || tablename || '\" cascade;'\n from pg_tables\n where schemaname = 'public';\n\"\"\"\n postgres_gen_drop_functions = \"\"\"\nselect 'drop function if exists ' || ns.nspname || '.' 
|| proname\n || '(' || oidvectortypes(proargtypes) || ') cascade;'\nfrom pg_proc inner join pg_namespace ns on (pg_proc.pronamespace = ns.oid)\nwhere ns.nspname = 'public' order by proname;\n\"\"\"\n\n postgres_gen_drop_sequences = \"\"\"\nselect 'drop sequence ' || relname || ';'\nfrom pg_class c\ninner join pg_namespace ns on (c.relnamespace = ns.oid)\nwhere ns.nspname = 'public' and c.relkind = 'S';\n\"\"\"\n\n for big_query in postgres_gen_drop_tables, \\\n postgres_gen_drop_functions, \\\n postgres_gen_drop_sequences:\n cursor = connection.cursor()\n cursor.execute(big_query)\n queries = [row[0] for row in cursor.fetchall()]\n for query in queries:\n print(query)\n cursor.execute(query)\n connection.commit()", "title": "" }, { "docid": "9bc8ea15541aefd9562f647f0c932ed9", "score": "0.6361749", "text": "def drop_schema(self):\n\t\tself.__check_connections()\n\t\tself.pg_engine.drop_repack_schema(self.connection, self.args.connection )", "title": "" }, { "docid": "6fabcca7ac365a97591b811f096ec7d2", "score": "0.6357691", "text": "def drop_all_immeditely():\n db.drop_all()", "title": "" }, { "docid": "63d36589329f6b2f439a5041321f2f33", "score": "0.6333223", "text": "def tearDown(self):\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "11b82c3894431b2cadfb0a145ba9c77f", "score": "0.63331723", "text": "def wipe_database():\r\n dbpath = \"/\".join(__file__.split('/')[:-1] + ['samples.db'])\r\n os.system(\"rm -f {0}\".format(dbpath))", "title": "" }, { "docid": "a3de91d6d4d57ddb6b582afd179514c5", "score": "0.6321818", "text": "def database_cleanup():\n\n logger.info(\"Starting database cleanup.\")\n\n session = create_db_session()\n\n series_list = session.query(TV_Series).all()\n\n for series in series_list:\n series_followers = session.query(Follow). \\\n filter_by(tv_series_id=series.id). \\\n filter_by(is_following=True). \\\n all()\n if len(series_followers) == 0:\n print(series.name + \" has 0 followers. Marking for removal from database.\")\n session.query(Follow). \\\n filter_by(tv_series_id=series.id). \\\n delete()\n session.query(TV_Series). \\\n filter_by(id=series.id). 
\\\n delete()\n\n session.commit()\n session.close()\n\n logger.info(\"Finished database cleanup\")", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "99e7f858f3be212ead160866432a7431", "score": "0.6318494", "text": "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "title": "" }, { "docid": "ad3ad4968f17fb72b51f22f708f9bfaf", "score": "0.63054794", "text": "def truncate(table_name=None):\n\n tables = db.engine.execute(\n text(\"\"\"SELECT table_name\n FROM information_schema.tables\n WHERE table_schema='public'\n AND table_type='BASE TABLE'\"\"\")).fetchall()\n\n tables = ', '.join(\n [table[0] for table in tables if table[0] not in ('alembic_version')])\n\n if table_name:\n tables = table_name\n db.engine.execute(text(f'TRUNCATE {tables} CASCADE'))", "title": "" }, { "docid": "640c8fa0cd8ee78a59cb4a4937fd5d94", "score": "0.6301164", "text": "def tearDown(self):\n TEST_DB.drop_tables(MODELS)\n TEST_DB.close()", "title": "" }, { "docid": "ddfab0d45f534b8ad9d61c5c917c6335", "score": "0.6300553", "text": "def destroy_db():\n from app.models import User, Diary # noqa\n Base.metadata.drop_all(bind=engine)", "title": "" }, { "docid": "b927b6ebcf39f800717bcb1c4939fc06", "score": "0.6296245", "text": "def populate_oracle_tables(self):\n self.populate_kernel_names_table()\n self.populate_runtime_stats_table()\n\n self.execute(\"DELETE FROM oracle_params\")\n self.populate_oracle_params_table()\n\n self.populate_scenario_stats_table()\n self.populate_param_stats_table()\n\n io.debug(\"Compacting database ...\")\n self.execute(\"VACUUM\")\n\n io.info(\"Done.\")", "title": "" }, { "docid": "7a0b825befa1f8011357409bb31f7582", "score": "0.62947136", "text": "def clear_database(engine: Union[Engine, Connection], schemas: Iterable[str] = ()) -> None:\n assert check_argument_types()\n if engine.dialect.name == 'sqlite':\n # SQLite does not support dropping constraints and it's faster to just delete the file\n if engine.url.database not in (None, ':memory:') and os.path.isfile(engine.url.database):\n os.remove(engine.url.database)\n else:\n metadatas = []\n for schema in (None,) + tuple(schemas):\n # Reflect the schema to get the list of the tables, views and constraints\n metadata = MetaData()\n metadata.reflect(engine, schema=schema, views=True)\n metadatas.append(metadata)\n\n for metadata in metadatas:\n metadata.drop_all(engine, checkfirst=False)", "title": "" }, { "docid": "78fedf62022cd556b765cda32973285e", "score": "0.6292281", "text": "def 
drop_tables(session):\n for tab in ['session_library', 'song_library', 'user_library']:\n try:\n session.execute(f'DROP TABLE IF EXISTS {tab}')\n except Exception as e:\n print(e)", "title": "" }, { "docid": "ddcbd9be1549e33b9385449588ae6bb3", "score": "0.6271852", "text": "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "title": "" }, { "docid": "5b8ffb338bb6e93bb9c548b95bf12d40", "score": "0.6270927", "text": "def drop_tables(cur, conn):\n sql = sql_queries.Queries()\n for query in sql.drop_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "1eefa8fca1a8cac525b5fc695197a841", "score": "0.6267576", "text": "def clean_tables():\n\n pass", "title": "" }, { "docid": "bb86c34e8c3b497795f072ec5c114bc0", "score": "0.6253654", "text": "def tearDown(self):\n\t\twith self.app.app_context():\n\t\t\t#Drop all tables\n\t\t\tdb.session.remove()\n\t\t\tdb.drop_all()", "title": "" }, { "docid": "e276b6ca0c94518475b21a75f7578a49", "score": "0.62536013", "text": "def drop_tables(session):\n for query in drop_table_queries:\n try:\n session.execute(query)\n except psycopg2.Error as e:\n print(\"Error : Dropping table \" + query)\n print (e)", "title": "" }, { "docid": "fc82ba6079dd0a42ce5acb47007d3e20", "score": "0.6252856", "text": "def reset_db():\n print(\"Start: Reset DB\")\n print(\">>> Deleting All Tables from Database\")\n if os.path.exists(f\"{BACKEND_DIR}/db.sqlite3\"):\n os.remove(f\"{BACKEND_DIR}/db.sqlite3\")\n else:\n print(f\"The file '{BACKEND_DIR}/db.sqlite3' does not yet exist.\")\n\n print(\">>> Deleting Migrations File to Ensure Updated Models\")\n if os.path.isdir(f\"{BACKEND_DIR}/api/migrations\"):\n shutil.rmtree(f\"{BACKEND_DIR}/api/migrations\")\n else:\n print(f\"The '{BACKEND_DIR}/api/migrations' directory does not yet exist.\")\n print(\"Finish: Reset DB\")", "title": "" }, { "docid": "a1c5a5ee872c226362056e109b9b24bd", "score": "0.6252747", "text": "def db_init():\n from services.repository.sql import repo\n repo.create_all_tables()\n repo.truncate_all_tables()\n print(\"Database initialized and cleared.\")", "title": "" }, { "docid": "49ce8030129fd25bec2de3b625ba7947", "score": "0.62475103", "text": "def Destroy(self):\n self.cursor.execute(\"DROP TABLE Bedrooms;\")\n self.cursor.execute(\"DROP TABLE Users;\")\n self.cursor.execute(\"DROP TABLE Pictures;\")\n self.cursor.execute(\"DROP TABLE Devices;\")\n \n self.commitChanges()", "title": "" }, { "docid": "51061d79f52ad2b051c5e0ae07dfb3ad", "score": "0.6246192", "text": "def drop_database():\n drop_service_db()", "title": "" }, { "docid": "8406a56b356c8dbdb3fe7072f3e3de1b", "score": "0.6239832", "text": "def teardown(self):\n with app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "title": "" }, { "docid": "ef02abd1ed745897ce902ba8d526f866", "score": "0.6238546", "text": "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "title": "" }, { "docid": "ef02abd1ed745897ce902ba8d526f866", "score": "0.6238546", "text": "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "title": "" }, { "docid": "ef02abd1ed745897ce902ba8d526f866", "score": "0.6238546", "text": "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "title": "" }, { "docid": "429d8f8aaa5dc00c0284a3c6524878d2", "score": "0.6237741", "text": "def reset_db():\n db_name = 
get_db_name()\n\n if not confirm(\"Do you want to erase the '{}' database\"\n \" and re-create it?\".format(db_name)):\n abort(colors.yellow(\"Aborting at user request.\"))\n\n local('sudo mysql < db/000/downgrade.sql')\n local('sudo mysql < db/000/upgrade.sql')\n local('sudo mysql ctsi_dropper_s < db/001/upgrade.sql')\n local('sudo mysql ctsi_dropper_s < db/002/upgrade.sql')\n local('sudo mysql ctsi_dropper_s < db/002/data.sql')\n local('sudo mysql ctsi_dropper_s < db/003/upgrade.sql')\n local('sudo mysql ctsi_dropper_s < db/004/upgrade.sql')", "title": "" }, { "docid": "fae7be371bce396700e755d4ef42fae3", "score": "0.6233367", "text": "def cleanup(coverage):\n\n drop_schema()\n remove_pytest_cache()\n\n if coverage is True:\n remove_coverage()", "title": "" }, { "docid": "b9e1652e6895431d1db88baaa2cc8672", "score": "0.62250274", "text": "def assure_tables():\n Base.metadata.create_all()", "title": "" }, { "docid": "f44d73755430556063a826bdd378c6a4", "score": "0.62220824", "text": "def _drop_contents(self):\n for database_name in self._mk.connection.database_names():\n db = self._mk.connection[database_name]\n try:\n self._mk.connection.drop_database(database_name)\n except:\n # This is a system database, leave it alone!\n pass", "title": "" } ]
35fa76dfe12a7bcfcb841a04b983c9cf
Parse wifi values from terminal
[ { "docid": "45e8150210b1713afc39966fbe5879ba", "score": "0.6029007", "text": "def wifi_stats(stats_count=3):\n link_sum,level_sum, noise_sum = 0, 0, 0\n for i in range(stats_count):\n args = [\"grep\", \"-i\", \"wlp2s0\",\"/proc/net/wireless\"]\n try:\n link, level, noise = str(terminal_capture(args))[16:35].replace(\".\",\"\").strip().split()\n except:\n link, level, noise = -999, -900, -900\n print(\"Unable to retrieve wifi data. Are you connected?\") \n link_sum += int(link)\n level_sum += int(level)\n noise_sum += int(noise)\n# Take several measurements and calculate the average value for every point. \n link = link_sum / stats_count\n level = level_sum / stats_count\n noise = noise_sum / stats_count\n return [stats_count, link, int(link*(10/7)), level, round((10**(level/10.))*1000000,2), \\\n noise, round((10**((noise)/10.))*1000000,2)]", "title": "" } ]
[ { "docid": "9fb87dbbad13db7c5198dc9f65e4ca76", "score": "0.6035363", "text": "def get_wifi_settings():\n sets = {'ssid': 'SSID', 'passwd': 'PASS'}\n\n if settings.PB_HOSTAPD_FILE is None:\n return sets\n\n with open(settings.PB_HOSTAPD_FILE, 'r') as f:\n for line in f:\n if line.startswith('ssid='):\n sets['ssid'] = line.split('=')[1]\n if line.startswith('wpa_passphrase='):\n sets['passwd'] = line.split('=')[1]\n\n return sets", "title": "" }, { "docid": "0526564740bc9b56f16eee9f9e86010d", "score": "0.5889469", "text": "def wifi_stats(stats_count=3):\n link_sum = 0\n level_sum = 0\n noise_sum = 0\n \n for i in range(stats_count):\n args = [\"grep\", \"-i\", \"wlp2s0\",\"/proc/net/wireless\"]\n try:\n link, level, noise = str(terminal_capture(args))[16:35].replace(\".\",\"\").strip().split()\n except:\n link = -999\n level = -900\n noise = -900\n print(\"Unable to retrieve wifi data. Are you connected?\")\n \n link_sum += int(link)\n level_sum += int(level)\n noise_sum += int(noise)\n \n# Take several measurements and calculate the average value for every point. \n link = link_sum / stats_count\n level = level_sum / stats_count\n noise = noise_sum / stats_count\n return [stats_count, link, int(link*(10/7)), level, round((10**(level/10.))*1000000,2), \\\n noise, round((10**((noise)/10.))*1000000,2)]", "title": "" }, { "docid": "5b1c54e44e0babbbc0a7eb5bbd32ac2a", "score": "0.58864784", "text": "def get_available_wifi_networks():\n try:\n cmd = [\"nmcli\",\n \"-terse\",\n \"-colors\", \"no\",\n \"-fields\", \"ssid,signal\",\n \"-escape\", \"no\",\n \"device\", \"wifi\"]\n\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if (p.returncode != 0) or (err != \"\"):\n raise Exception(err)\n\n network_dict = dict()\n for line in out.splitlines():\n try:\n network, signal = line.split(\":\", 1)\n if (network not in network_dict) or (int(signal) > network_dict[network]):\n network_dict[network] = int(signal)\n except Exception as ex:\n pass\n\n return sorted(network_dict.items(), key=operator.itemgetter(1), reverse=True)\n\n except Exception as ex:\n print(f\"Failed to retrieve available WiFi networds: {ex}\")\n return list()", "title": "" }, { "docid": "d13fbc0ff69d29b8aac8f2c02dc8c535", "score": "0.58374524", "text": "def GetWifiStatus():\n result = 'No WiFi'\n\n regexp = re.compile(r'^([^ ]+)\\s+ESSID:\"(.+)\"$')\n\n try:\n output = subprocess.check_output(['/sbin/iwgetid']).decode('utf-8')\n match = regexp.search(output)\n if match:\n _, essid = match.groups()\n result = essid\n except subprocess.SubprocessError:\n pass\n return result", "title": "" }, { "docid": "abd32f0df8761efb804c5601cd84f80d", "score": "0.5825446", "text": "def return_all_wifi_connections():\n result = subprocess.run(['nmcli', \"d\", \"wifi\", \"list\"], stdout=subprocess.PIPE)\n scan_out_lines = str(result).split(\"\\\\n\")[1:-1]\n scan_out_data = {}\n for each_line in scan_out_lines:\n split_line = [e for e in each_line.split(\" \") if e != \"\"]\n line_data = {\"SSID\": split_line[0], \"RSSI\": int(split_line[2]),\n \"HT\": (split_line[4] == \"Y\"), \"CC\": split_line[5]}\n scan_out_data[split_line[1]] = line_data\n ssid_list = []\n for key in scan_out_data.keys():\n ssid_list.append(scan_out_data.get(key).get('SSID'))\n return ssid_list", "title": "" }, { "docid": "22f7a15ea835e98ef701f97a04333dbc", "score": "0.57598054", "text": "def _current_data( self, text=None ):\n\t\tcmdoutput = toolbox.run( self._command_current_connection )\n\t\tcmdoutput = 
cmdoutput.split( \"\\n\" )\n\t\tdata = [ line.strip() for line in cmdoutput if line.strip().startswith( \"SSID\" ) and line.strip() != \"\" ]\n\t\tif data:\n\t\t\tdata = data[0].split( \":\" )[1].strip()\n\t\t\tif( data != \"\" ):\n\t\t\t\treturn data\n\t\telse:\n\t\t\t#return \"no access point\"\n\t\t\treturn False", "title": "" }, { "docid": "151b4101a06a8a0dd4ab3a91fe0698a1", "score": "0.5525933", "text": "def _parse_access_point(self, cell):\n ap = WirelessNetwork()\n ap.essid = misc.RunRegex(WirelessRegexPatterns.essid, cell)\n try:\n ap.essid = misc.to_unicode(ap.essid)\n except (UnicodeDecodeError, UnicodeEncodeError):\n print 'Unicode problem with current network essid, ignoring!!'\n return None\n if ap.essid in ['<hidden>', \"\"]:\n ap.essid = 'Hidden'\n ap.hidden = True\n else:\n ap.hidden = False\n ap.channel = misc.RunRegex(WirelessRegexPatterns.channel, cell)\n if ap.channel == None:\n freq = misc.RunRegex(WirelessRegexPatterns.freq, cell)\n ap.channel = self._freq_to_channel(freq)\n ap.bssid = misc.RunRegex(WirelessRegexPatterns.ap_mac, cell)\n ap.mode = misc.RunRegex(WirelessRegexPatterns.mode, cell)\n if (WirelessRegexPatterns.strength.match(cell)):\n [(strength, max_strength)] = WirelessRegexPatterns.strength.findall(cell)\n if max_strength:\n ap.quality = 100 * int(strength) // int(max_strength)\n else:\n ap.quality = int(strength)\n elif misc.RunRegex(WirelessRegexPatterns.altstrength,cell):\n ap.quality = misc.RunRegex(WirelessRegexPatterns.altstrength, cell)\n else:\n ap.quality = -1\n if misc.RunRegex(WirelessRegexPatterns.signaldbm, cell):\n ap.strength = misc.RunRegex(WirelessRegexPatterns.signaldbm, cell)\n return ap", "title": "" }, { "docid": "b832f44afeec00f052f3dccb95a9322f", "score": "0.55144656", "text": "def nm_cli_output():\n nm_dmenu.rescan_wifi_nw()\n return nm_dmenu.get_nmcli_out()", "title": "" }, { "docid": "d57a9b8b446999e9a531dd62252a9c43", "score": "0.5428774", "text": "def parse_wifi_config(config_file_path):\n wifi_config = None\n\n try:\n with open(config_file_path) as config_file:\n config_lines = [line.strip() for line in config_file.readlines()]\n\n wifi_config = dict()\n\n for line in config_lines:\n line = line.strip()\n\n # Parse the line as an assignment.\n name, value = parse_assignment(line)\n if name is not None:\n # Store names in lower case; we are ignoring the case of the variables.\n wifi_config[name.lower()] = value\n\n except Exception as ex:\n return {}\n\n return wifi_config", "title": "" }, { "docid": "73c962ecf5a298c4b36cb4ae0f45aa76", "score": "0.53803885", "text": "def parse_network_statistics():\n raw_stats = subprocess.check_output('ifconfig -s', shell=True).decode('utf-8').split(\"\\n\")\n labels = raw_stats[0]\n ifconfig = {}\n for raw_network_line in raw_stats[1:]:\n ifconfig.update(_parse_raw_network_line(raw_network_line, labels))\n return ifconfig", "title": "" }, { "docid": "75967480f53520eacb578e84ad3dc4dc", "score": "0.5335339", "text": "def marvell8864VHTGet(self, host, port=23, user=None, password=None):\n if host is None:\n return {}\n try:\n tn = telnetlib.Telnet(host=host, port=port)\n except (IOError):\n return {}\n tn.read_until(\"MarvellAP login: \", 3)\n tn.write(\"{0}\\r\\n\".format(user))\n tn.read_until(\"Password: \", 3)\n tn.write(\"{0}\\r\\n\".format(password))\n tn.read_until(\"~ $ \", 3)\n tn.write(\"iwpriv wdev1ap0 version\\r\\n\")\n raw_sw_version = tn.read_until(\"~ \", 3)\n tn.write(\"exit\\r\\n\")\n tn.close()\n try:\n sw_version = raw_sw_version.split('\\r\\n')[1]\n except (IndexError, 
AttributeError, TypeError):\n return {}\n return {'sw_version' : sw_version}", "title": "" }, { "docid": "c9ef0404be6588deffd178c0269a9a0b", "score": "0.5310551", "text": "def ping(ip, ping_count=3):\n args = [\"ping\",\"-c\", str(ping_count), str(ip)]\n output = terminal_capture(args)\n ping_values = [-999,-999,-999,-999,-999,-999,-999,-999]\n \n# if errors occure return -999 value for all ping values\n if \"Name or service not known\" in str(output[1]):\n print(\"Invalid IP address.\")\n return ping_values # -999\n elif \"Network is unreachable\" in str(output[1]):\n print(\"Network is unreachable.\")\n return ping_values # -999\n elif \"Destination Host Unreachable\" in str(output[0]):\n print(\"Destination Host Unreachable.\")\n return ping_values # -999\n else:\n# extract useful data from the terminal output\n output = str(output[0]).split(\"--- \" + str(ip) + \" ping statistics ---\")[1].replace(\"'\",\"\").replace(\"ms\",\"\").replace(\"%\",\"\").strip(\"\\\\n\").split(\"\\\\n\")\n if len(output) < 2:\n print(\"Ping output has not the right format.\")\n return ping_values # -999\n \n ping_values = []\n# extract package data \n for word in output[0].split(\",\"):\n first, second = word.strip().split(\" \",1)\n if first.isnumeric():\n ping_values.append(int(first))\n else:\n ping_values.append(int(second))\n\n# extract statistics\n words = output[1].strip(\" \").split(\"=\")\n for i, word in enumerate(words[1].split(\"/\")): \n try:\n ping_values.append(float(word))\n except ValueError:\n ping_values.append(-999) \n return ping_values # Right values", "title": "" }, { "docid": "da7f75e841ad461a87afd19356679aaf", "score": "0.53087217", "text": "def get_ip_config_data(adress, user, pdw):\n client = connect_to_server_ssh(adress, user, pdw)\n if_config = {}\n stdin, stdout_if_config_tx, stderr = client.exec_command(\n \"ifconfig | grep TX | grep bytes \")\n stdin, stdout_if_config_rx, stderr = client.exec_command(\n \"ifconfig | grep RX | grep bytes \")\n line_stdout_if_config_tx = stdout_if_config_tx.readlines()\n line_stdout_if_config_rx = stdout_if_config_rx.readlines()\n\n if_config_rx_packet = int(''.join(line_stdout_if_config_rx).split(\" \")[10])\n if_config_rx_bytes = int(''.join(line_stdout_if_config_rx).split(\" \")[13])\n\n if_config_tx_packet = int(''.join(line_stdout_if_config_tx).split(\" \")[10])\n if_config_tx_bytes = int(''.join(line_stdout_if_config_tx).split(\" \")[13])\n\n if_config[adress] = {\n \"tx_packet\": if_config_tx_packet,\n \"rx_packet\": if_config_rx_packet,\n \"rx_bytes\": if_config_rx_bytes,\n \"tx_bytes\": if_config_tx_bytes\n }\n\n print(stdin, stderr)\n\n return if_config", "title": "" }, { "docid": "aa7345499cda4836670beefe782d6094", "score": "0.52916944", "text": "def url_parse(url):\n query_params = ure.search(\"\\?(.*?) 
HTTP\", url.decode('utf-8')).group(1)\n parameters = {}\n ampersandsplit = query_params.split(\"&\")\n for element in ampersandsplit:\n equalsplit = element.split(\"=\")\n if equalsplit[1] != \"\":\n parameters[equalsplit[0]] = equalsplit[1]\n\n codelist = {\"%20\" : \" \",\n \"%21\" : \"!\",\n \"%23\" : \"#\",\n \"%24\" : \"$\",\n \"%2F\" : \"/\"}\n\n for key, value in parameters.items():\n for code in codelist.keys():\n value = value.replace(code, codelist[code]).rstrip(\"/\")\n parameters[key] = value.replace(code, codelist[code])\n if \"password\" in parameters and \"ssid\" in parameters:\n return parameters[\"ssid\"], parameters[\"password\"]\n else:\n return None, None", "title": "" }, { "docid": "238c13d3bf487cc02e8d43714a6221d1", "score": "0.52874315", "text": "def wifi(arg):\n return \"Enabled\" if droid.checkWifiState().result else \"Disabled\"", "title": "" }, { "docid": "d0076e8419f66f08b43f889048735be2", "score": "0.52669245", "text": "def get_current_ssid():\n\n ssid = None\n output = wpa_cli(\"status\")\n for line in output:\n if line.startswith('wpa_state'):\n state = line.split('=')[1].strip()\n break\n if state == 'COMPLETED':\n for line in output:\n if line.startswith('ssid'):\n ssid = line.split('=')[1].strip()\n break\n return ssid", "title": "" }, { "docid": "fe869951789a125da635d99d1e35228c", "score": "0.5249715", "text": "def parse_network_info(elem):\n # TODO\n return {}", "title": "" }, { "docid": "7b41518c3de60b61059757a2455e1868", "score": "0.5247649", "text": "def hosted_network_info():\n # while True:\n show, state = get_state()\n show_security = show_security_hosted_network()\n data_clients = \"\"\n # print(show_security)\n # i=0\n # for line in show:\n # print(i,line)\n # i+=1\n\n HOSTED_NETWORK_INFO[\"modo\"] = show[2].split(\":\")[1].strip().replace('\"', \"\")\n HOSTED_NETWORK_INFO[\"ssid\"] = show[3].split(\":\")[1].strip().replace('\"', \"\")\n HOSTED_NETWORK_INFO[\"key\"] = show_security[5].split(\":\")[1].strip()\n HOSTED_NETWORK_INFO[\"max_clientes\"] = show[4].split(\":\")[1].strip().replace('\"', \"\")\n HOSTED_NETWORK_INFO[\"autenticacion\"] = show[5].split(\":\")[1].strip().replace('\"', \"\")\n HOSTED_NETWORK_INFO[\"cifrado\"] = show[6].split(\":\")[1].strip().replace('\"', \"\")\n\n HOSTED_NETWORK_INFO[\"state\"] = state\n\n HOSTED_NETWORK_INFO[\"cant_clients\"] = 0\n # si la red no esta iniciada no tiene estos datos.\n try:\n HOSTED_NETWORK_INFO[\"bssid\"] = show[10].split(\":\")[1].strip().replace('\"', \"\")\n HOSTED_NETWORK_INFO[\"radio\"] = show[11].split(\":\")[1].strip().replace('\"', \"\")\n HOSTED_NETWORK_INFO[\"canal\"] = show[12].split(\":\")[1].strip().replace('\"', \"\")\n HOSTED_NETWORK_INFO[\"cant_clients\"] = show[13].split(\":\")[1].strip()\n i = 14\n while i < 34:#34 pq 20 es el max client permit\n try:\n spl = show[i].split(\" \")#esta linea tiene la froma mac: status\n client_mac = spl[0]\n client_status = spl[-1] # status es autenticado o no\n data_clients += \"%s\\t%s\\n\" % (client_mac, client_status)\n i += 1\n except IndexError:\n break\n # print('data clients', data_clients)\n except IndexError:\n pass\n\n if not data_clients:\n data_clients = \"No hay clientes conectados\"\n HOSTED_NETWORK_INFO[\"data_clients\"] = data_clients", "title": "" }, { "docid": "3374118be4508c66eb293e6e1e743616", "score": "0.52132976", "text": "def broadcomVHTGet(self, host, port=23, user=None, password=None):\n #This can't actually work. UCC has no knowledge of this AP's IP address\n #only the broadcom CA does. 
Luckily capi commands work for it.\n if host is None:\n return {}\n try:\n tn = telnetlib.Telnet(host)\n except (IOError):\n return {}\n tn.read_until(\"# \", 3)\n tn.write(\"wl ver\\r\\n\")\n version = tn.read_until(\"# \", 3)\n tn.write(\"exit\\r\\n\")\n tn.close()\n sw_version = version.split('\\r\\n')[2]\n return {'sw_version' : sw_version}", "title": "" }, { "docid": "0fc0eb605eac5e3b69141c3bfbfd7ffe", "score": "0.5206469", "text": "def mos_wifi(port):\n print(\"Connecting to WiFi\")\n result = sp.Popen([MOSPATH + ' wifi '+ SSID + PASS + port],\n shell=True,\n stdout=sp.PIPE,\n stderr=sp.PIPE,\n cwd=REPOPATH)\n output, error = result.communicate()\n wifi_out = output.decode('UTF-8')\n wifi_err = error.decode('UTF-8')\n\n if VERBOSE == 1:\n term_warning(wifi_out)\n term_warning(wifi_err)\n\n if \"Saving and rebooting...\" in wifi_err:\n term_good(\"Device connected to WiFi.\")\n elif \"Error:\" in wifi_err:\n term_warning(\"Error:\" + wifi_err)\n exit()\n else:\n term_warning(\"Error: expected message not found.\")\n exit()", "title": "" }, { "docid": "4fcba68272ccd349afae100f602c5375", "score": "0.51952374", "text": "def get_wifi_ssid(self) -> GoProResp:", "title": "" }, { "docid": "2568f6fc7efd0f1a72f56ddac82f0a1d", "score": "0.5171087", "text": "def get_wifi_password(self) -> GoProResp:", "title": "" }, { "docid": "303429a5b4956999005f624b71e0e4d7", "score": "0.5166444", "text": "def ping(ip, ping_count=3):\n args = [\"ping\",\"-c\", str(ping_count), str(ip)]\n output = terminal_capture(args)\n ping_values = [-999,-999,-999,-999,-999,-999,-999,-999]\n# if errors occure return -999 value for all ping values\n if \"Name or service not known\" in str(output[1]):\n print(\"Invalid IP address.\")\n return ping_values # -999\n elif \"Network is unreachable\" in str(output[1]):\n print(\"Network is unreachable.\")\n return ping_values # -999\n elif \"Destination Host Unreachable\" in str(output[0]):\n print(\"Destination Host Unreachable.\")\n return ping_values # -999\n else:\n# extract useful data from the terminal output\n output = str(output[0]).split(\"--- \" + str(ip) + \" ping statistics ---\")[1].replace(\"'\",\"\").replace(\"ms\",\"\").replace(\"%\",\"\").strip(\"\\\\n\").split(\"\\\\n\")\n if len(output) < 2:\n print(\"Ping output has not the right format.\")\n return ping_values # -999\n ping_values = []\n# extract package data \n for word in output[0].split(\",\"):\n first, second = word.strip().split(\" \",1)\n if first.isnumeric():\n ping_values.append(int(first))\n else:\n ping_values.append(int(second))\n# extract statistics\n words = output[1].strip(\" \").split(\"=\")\n for i, word in enumerate(words[1].split(\"/\")): \n try:\n ping_values.append(float(word))\n except ValueError:\n ping_values.append(-999) \n return ping_values # Right values", "title": "" }, { "docid": "152367aedde358ebbdd6329c4a505ad4", "score": "0.51031375", "text": "def show_int_wifi(self, interface: str):\n\n command = \"show int \" + interface\n template = \"show_wifi.textfsm\"\n\n command_response = self.send_command(command)\n int_wifi = self.fsm_parse(command_response, template)\n return int_wifi", "title": "" }, { "docid": "21512b045f434a3339481f82067a7fcf", "score": "0.50800204", "text": "def parse_ovs_vsctl_show(buf, bridge):\n\n # Bridge \"br-phy1\"\n # Port \"eth1\"\n # Interface \"eth1\"\n # type: dpdk\n # options: {dpdk-devargs=\"0000:18:00.1\", n_rxq=\"1\"}\n buf = buf.strip().split(\"\\n\")\n result = []\n find = False\n for line in buf:\n line = line.strip()\n if \"Bridge\" in line:\n 
if bridge in line:\n find = True\n port = \"\"\n else:\n find = False\n if find:\n if line.startswith(\"Port\"):\n port = line[4:].strip()\n if port.startswith(\"\\\"\"):\n port = port[1:len(port) - 1]\n if port:\n if \"dpdk\" in line:\n result.append(port)\n\n return result", "title": "" }, { "docid": "d0aecb60e704e9f67065d75003dfccad", "score": "0.5079244", "text": "def _parse_ip_stats_link_show(raw_result):\n\n show_re = (\n r'.+?RX:.*?\\n'\n r'\\s*(?P<rx_bytes>\\d+)\\s+(?P<rx_packets>\\d+)\\s+(?P<rx_errors>\\d+)\\s+'\n r'(?P<rx_dropped>\\d+)\\s+(?P<rx_overrun>\\d+)\\s+(?P<rx_mcast>\\d+)'\n r'.+?TX:.*?\\n'\n r'\\s*(?P<tx_bytes>\\d+)\\s+(?P<tx_packets>\\d+)\\s+(?P<tx_errors>\\d+)\\s+'\n r'(?P<tx_dropped>\\d+)\\s+(?P<tx_carrier>\\d+)\\s+(?P<tx_collisions>\\d+)'\n )\n\n re_result = match(show_re, raw_result, DOTALL)\n result = None\n\n if (re_result):\n result = re_result.groupdict()\n for key, value in result.items():\n if value is not None:\n if value.isdigit():\n result[key] = int(value)\n\n return result", "title": "" }, { "docid": "3b333f64f929cc0e396536e22e819a4f", "score": "0.50639915", "text": "def _parse(self, line):\n pieces = line.split(\":\")\n if pieces[1].isnumeric():\n device = int(pieces[1])\n if device > 0 :\n state = pieces[3]\n sensor = next((sensor for sensor in self._sensors if sensor.bplid == device), None)\n if not sensor :\n _LOGGER.warning(\"Device not understood :\"+str(device)+\", ignoring state update :\"+state)\n return\n if state == 'CMD_ON' :\n sensor.set_state(True)\n elif state == 'CMD_OFF':\n sensor.set_state(False)\n elif state == 'EV_CURTAIN_OPEN' :\n sensor.open_cover()\n elif state =='EV_CURTAIN_CLOSE' :\n sensor.close_cover()\n elif state == 'EV_CURTAIN_STOP' :\n sensor.stop_cover()\n else :\n _LOGGER.warning(\"Command not understand command \"+line+\" for device :\"+str(device))", "title": "" }, { "docid": "5a3f924fedefe360993358831d44a2da", "score": "0.50468826", "text": "def getInfo():\n \n # Ensure pwrstat is present. If not, abort.\n status, out = commands.getstatusoutput('pwrstat -status')\n if status:\n msg = \"status err Unable to find PowerPanel software.\\n\"\n msg += \"Please install the required software and try again.\\n\"\n sys.stderr.write(msg)\n sys.exit(status)\n\n # Build our results into key: value pairs.\n results = {}\n # All values are separated by a line of periods.\n # Get the items on either side.\n for line in out.split('\\n'):\n if '.' 
in line:\n option = line.split('.')[0].strip()\n value = line.split('.')[-1].strip()\n # Rename a few options to make them more clear.\n # Also, in making numbers ints, we lose the Volt/Watt label.\n if option == 'Load':\n option = 'Load Wattage'\n # Might as well pull out the percentage while we are here.\n results['Watt Percentage'] = int(line.split()[-2].split('(')[1])\n elif option == 'Rating Power':\n option = 'Rating Wattage'\n elif option == 'Battery Capacity':\n option = 'Battery Percentage'\n elif option == 'Remaining Runtime':\n option = 'Minutes Remaining'\n # A period after \"Min\" requires a different split\n value = int(line.split('.')[-2].strip().split()[0])\n\n # Pull the options we want as integers.\n if option in ( 'Rating Wattage', 'Battery Percentage',\n 'Utility Voltage', 'Output Voltage', \n 'Rating Voltage', 'Load Wattage' ):\n value = int(value.split()[0])\n\n # Add our new key\n results[option] = value\n\n # Send the results\n return results", "title": "" }, { "docid": "fc3ec32eec55c4d7a0782b5701b351a1", "score": "0.5011323", "text": "def parseNetworkConfig(robotName: str) -> Tuple[str, str, str]:\n\n\thostname = \"\"\n\tip_lan = \"\"\n\tip_wlan = \"\"\n\t# Config file for network:\n\tnetworkConfigFile = bhumanBase.robotsDir + robotName + \"/network.cfg\"\n\tprint(\"-> Networkconfigfile: ...\", networkConfigFile)\n\t# check if config file exists:\n\tif not(os.path.isfile(networkConfigFile)):\n\t\texit(\"Network configuration is not found for:\", robotName)\n\t# Read net-configuration:\n\twith open(networkConfigFile, newline='') as csvfile:\n\t\treader = csv.reader(csvfile, delimiter='=', quotechar='\"')\n\t\tfor row in reader:\n\t\t\toption = row[0].replace(\" \", \"\")\n\t\t\tvalue = row[1].replace('\"', \"\").replace(\" \", \"\").replace(\";\", \"\")\n\t\t\tif (option == \"name\" and value != \"\"):\n\t\t\t\thostname = value\n\t\t\telif (option == \"lan\" and value != \"\"):\n\t\t\t\tip_lan = value\n\t\t\telif (option == \"wlan\" and value != \"\"):\n\t\t\t\tip_wlan = value\n\t\t\telse:\n\t\t\t\texit(\"Unable to parse network config file: \", networkConfigFile)\n\tprint(\"-> New network configuration is: <hostname> <lan> <wlan> ...\", hostname, ip_lan, ip_wlan)\n\treturn(hostname, ip_lan, ip_wlan)", "title": "" }, { "docid": "650ac0cf1499adfc81fe68f0ed61bd0f", "score": "0.5010367", "text": "def ralinkAc_AP_VHT_get(self, host, port=23, user=None, password=None):\n if host is None:\n return {}\n try:\n tn = telnetlib.Telnet(host=host, port=port)\n except (IOError):\n return {}\n tn.read_until('# ', 3)\n tn.write('dmesg -c > /dev/null;iwpriv ra0 show driverinfo; dmesg\\r\\n')\n raw_ret = tn.read_until('# ', 3)\n tn.write('exit\\r\\n')\n tn.close()\n try:\n sw_version = raw_ret.split('\\r\\n')[1]\n except (IndexError, AttributeError, TypeError):\n return {}\n return {'sw_version' : sw_version}", "title": "" }, { "docid": "9246a78ce417658ea34bc5d11f2af67d", "score": "0.49944082", "text": "def handle(text, mic, profile):\n ipadd = commands.getoutput(\"/sbin/ifconfig\").split(\"\\n\")[1].split()[1][5:]\n mic.say(\"I am located at\")\n for c in list(ipadd):\n mic.say(c)", "title": "" }, { "docid": "e4776290945910ae0add857b868257c7", "score": "0.49765283", "text": "def eight023ParseSTP(self):\n print(\"----------------802.3 header----------------\")\n if self.data[6:10] == \"0000\":\n print(\"Protocol Identifier: Spanning Tree Protocol\")\n else:\n print(\"Protocol Identifier: Unrecognized\")\n print(\"Root Bridge System ID Extension: \", 
self.hexToDec(self.data[18:20]))\n self.data = self.data[20:]\n print(\"Root Bridge System ID: \", self.macFinder())\n print(\"Root Path Cost: \", self.hexToDec(self.data[:8]))\n print(\"Bridge Priority: \", self.hexToDec(self.data[8:12]))\n self.data = self.data[12:]\n print(\"System ID: \", self.macFinder())\n print(\"Port Identifier: 0x\" + self.data[:4])\n print(\"Message Age: \", self.hexToDec(self.data[4:8]))\n print(\"Max Age: \", self.hexToDec(self.data[8:12]))\n print(\"Hello Time: \", self.hexToDec(self.data[12:16]))\n print(\"Forward Delay: \", self.hexToDec(self.data[16:20]))", "title": "" }, { "docid": "24d9335159ba8ff9a2d8cb0453236e3d", "score": "0.49623153", "text": "def getIPSmgtInfo():\n # IPS mgt VLAN\n while True:\n try:\n ipsVlan = int(input('Enter the VLAN ID of IPS mgmt: '))\n if ipsVlan <= 0 or 4096 < ipsVlan:\n print('ERROR: DATA INVALID\\n')\n else:\n print('OK')\n break\n except (ValueError, IndexError):\n print('ERROR: DATA INVALID\\n')\n # IPS mgt depth\n while True:\n try:\n depth = input('\\nEnter the depth code of the segment [0101]: ').strip() or '0101'\n if not re.match(r\"^0\\d{3}$\",depth):\n print('ERROR: DATA INVALID\\n')\n else:\n break\n except ValueError:\n print('ERROR: DATA INVALID\\n')\n # IPS mgt IP\n ipsmgtIPaddr = getInterfaceIP('management interface')\n return [ipsVlan,depth,ipsmgtIPaddr]", "title": "" }, { "docid": "d2785931e614e48f4870e7b4333fe99f", "score": "0.4958791", "text": "def get_ports_attrs(self, cli_stp, instance_sep):\n ports = {} # instance -> port -> attributes\n for I in cli_stp.split(instance_sep)[1:]:\n instance_id, _ = I.split(\"\\n\", 1)\n instance_id = int(instance_id)\n ports[instance_id] = {}\n for R in parse_table(\n # Skip empty first line on 3750\n I.replace(\"---\\n\\n\", \"---\\n\")\n ):\n interface = self.profile.convert_interface_name(R[0])\n # Found in WS-C2950T-24 12.1(9)EA1\n # This device do not display port state and capabilities\n if self.rx_prio.search(R[1]):\n ports[instance_id][interface] = {\n \"point_to_point\": True, # @todo: detect P2P properly\n \"edge\": False,\n \"role\": \"unknown\",\n \"state\": {\n \"dis\": \"disabled\",\n \"blk\": \"discarding\",\n \"bkn\": \"broken\",\n \"lrn\": \"learning\",\n \"??\": \"learning\",\n \"fwd\": \"forwarding\",\n \"lis\": \"listen\",\n \"lbk\": \"loopback\",\n }[\n R[3].lower()\n ], # @todo: refine states\n }\n else:\n settings = R[-1]\n ports[instance_id][interface] = {\n \"point_to_point\": True, # @todo: detect P2P properly\n \"edge\": \"edge\" in settings.lower(),\n \"role\": {\n \"dis\": \"disabled\",\n \"altn\": \"alternate\",\n \"back\": \"backup\",\n \"root\": \"root\",\n \"desg\": \"designated\",\n \"mstr\": \"master\",\n \"????\": \"nonstp\",\n \"_\": \"unknown\",\n }[\n R[1].lower()\n ], # @todo: refine roles\n \"state\": {\n \"dis\": \"disabled\",\n \"blk\": \"discarding\",\n \"bkn\": \"broken\",\n \"lrn\": \"learning\",\n \"??\": \"learning\",\n \"fwd\": \"forwarding\",\n \"lis\": \"listen\",\n \"lbk\": \"loopback\",\n }[\n R[2].lower()\n ], # @todo: refine states\n }\n return ports", "title": "" }, { "docid": "ceaac2c2461f99259a0afbf3f9cdb92b", "score": "0.49535736", "text": "def _parse_vw_output(text):\n data = {}\n for line in text.splitlines():\n if line.startswith('average loss'):\n data['average_loss'] = float(line.split('=')[1])\n break\n\n return data", "title": "" }, { "docid": "6fc26ab524575443bb77371c0182da0b", "score": "0.49501356", "text": "def get_traffic(ip_add, iface):\n try:\n client = SSH(ip_add)\n packets = \"cat /proc/net/dev | 
grep \" + iface + \"| awk '{print $1, $9}'\"\n packets = client.sendCommand(packets)\n\n data = packets.strip().split(':', 1)[-1]\n \n\n if not data[0].isdigit():\n packets = \"cat /proc/net/dev | grep \" + iface + \"| awk '{print $2, $10}'\"\n packets = client.sendCommand(packets)\n data = packets.strip().split(':', 1)[-1]\n\n data = data.split()\n\n traffic_in = int(data[0])\n traffic_out = int(data[1])\n\n all_traffic = {'traffic_in': traffic_in, 'traffic_out': traffic_out}\n data = all_traffic\n\n except Exception as err:\n data = str(err)\n\n return data", "title": "" }, { "docid": "39c7b89a63c5328654736461f367dfd1", "score": "0.49427265", "text": "def wifi_connect(self):\n\n import network\n import wifi_details\n\n self.wlan = network.WLAN(network.STA_IF)\n self.wlan.active(True)\n\n get_wifi_details = wifi_details.get_wifi_details\n if not self.wlan.isconnected():\n name, psk = get_wifi_details()\n self.wlan.connect(name, psk)\n\n while not self.wlan.isconnected():\n pass\n\n # Get and print IP Address\n self.ip_address = self.wlan.ifconfig()[0]\n print(\"IP Address: {}\".format(self.ip_address))", "title": "" }, { "docid": "1c0fce339232f9f45dbfeff726c18a3d", "score": "0.4935761", "text": "def parse_ovs_appctl_bond_show(buf):\n\n #---- bond0 ----\n #bond_mode: active-backup\n #bond may use recirculation: no, Recirc-ID : -1\n #bond-hash-basis: 0\n #updelay: 0 ms\n #downdelay: 0 ms\n #lacp_status: configured\n #lacp_fallback_ab: false\n #active slave mac: 00:00:00:00:00:00(none)\n #\n #slave enp134s0f0: disabled\n # may_enable: false\n #\n #slave enp134s0f1: disabled\n # may_enable: false\n buf = buf.strip().split(\"\\n\")\n states = {}\n for idx, line in enumerate(buf):\n line = line.strip()\n if line.startswith(\"slave\"):\n state = line.split(\":\")\n interface = state[0][6:]\n if \"disabled\" in state[1]:\n states[interface] = LINK_DOWN\n elif \"enabled\" in state[1]:\n states[interface] = LINK_UP\n else:\n states[interface] = UNKNOWN_STATE\n\n return states", "title": "" }, { "docid": "b1aa32f0a89c6f0d43743e501e2221d5", "score": "0.4934951", "text": "def hello2():\n hostname = cli('show hostname').rstrip(' \\n')\n for l in cli('show system uptime').split('\\n'):\n if re.search('System uptime',l):\n uptime = l.split(':')[1].lstrip()\n for l in cli('show hardware').split('\\n'):\n if re.search('Switch type',l):\n hw_type = l.split(':')[1].lstrip()\n elif re.search('system:',l):\n sw_vers = l.split(':')[1].lstrip()\n print \"Hello! 
My name is %s.\\nI'm a %s\\n running %s.\\nI've been up for %s\" % (hostname,hw_type,sw_vers,uptime)", "title": "" }, { "docid": "b52bc9ce25f1b96f2479ae5c67821422", "score": "0.49273324", "text": "def extract_pin_and_net(token):\n # replace .,() with blank\n for c in ('.', ',', '(', ')'):\n token = token.replace(c, ' ')\n\n token = token.strip().split()\n pin, net = token[0], token[1]\n return pin, net", "title": "" }, { "docid": "b9dab237cdfb5ffa6916e084dc4af747", "score": "0.4917583", "text": "def parseLLDP(self, text):\n \n added=False\n try:\n s=text.split(\"\\n\")\n \n name=ip=fr=to=capa=plat='Unknown'\n \n for t in s:\n if re.search('System Name: (.*)',t):\n name=re.search('System Name: \"(.*)\"',t).group(1).strip()\n elif re.search('.*Management Address\\s+: (.*)',t):\n ip=re.search('.*Management Address.*: (.*)',t).group(1).strip()\n elif re.search('LLDP (.*) detected.*',t):\n fr=re.search('LLDP (.*) detected.*',t).group(1).strip()\n elif re.search('Port ID\\s+: (.*)',t):\n to=re.search('.*: \"(.*)\"',t).group(1).strip()\n elif re.search('.*System Capabilities : (.*)',t):\n capa=re.search('.*System Capabilities : \"(.*)\"',t).group(1).strip()\n elif re.search('.*System Description: (.*)',t):\n plat=re.search('.*System Description: (.*)',t).group(1).strip()\n index=s.index(t)+1\n while not re.search('.*Port Description: (.*)',s[index]):\n plat+=s[index].strip()\n index+=1\n plat=plat[1:len(plat)-2]\n \n if ip in elems:\n element=elems[ip]\n if element.type=='Unknown':\n element.type=capa\n if element.platform=='Unknown':\n element.platform=plat\n if element.name=='Unknown':\n element.name=name \n else:\n if 'Cisco' in plat:\n element=CiscoElement(capa,name,plat,ip)\n elif 'Extreme' in plat or 'EXOS' in plat:\n element=ExtremeElement(capa,name,plat,ip)\n else:\n element=Element(capa,name,plat,ip)\n elems[ip]=element\n \n l=Link(fr,to,element)\n \n if l not in self.links:\n added=True\n self.addLink(l)\n \n if(ip not in visited and ip not in toVisit):\n toVisit.append(ip)\n except:\n print('found new element but not enough information to be added')\n finally:\n return added", "title": "" }, { "docid": "a65cb5b217edc38b8655704966ef8c07", "score": "0.49081433", "text": "def get_netrc(service):\n lines = service.info().split('\\n')\n line = ''\n for line in lines:\n if line.strip().startswith('machine'):\n break\n else:\n return None\n matches = re.findall('<([^>]+)>', line)\n for item in matches:\n value = input(\"[%s] %s :\" % (service.name(), item))\n line = line.replace('<' + item + '>', value)\n return line", "title": "" }, { "docid": "e34b69b5c8d5763bbde199f5b7c40fa5", "score": "0.48973396", "text": "def _parse_ip_addr_show(raw_result):\n # does link exist?\n show_re = (\n r'\"(?P<dev>\\S+)\"\\s+does not exist'\n )\n re_result = search(show_re, raw_result)\n result = None\n\n if not (re_result):\n # match top two lines for serveral 'always there' variables\n show_re = (\n r'\\s*(?P<os_index>\\d+):\\s+(?P<dev>\\w+):\\s+<(?P<falgs_str>.*)?>.*?'\n r'mtu\\s+(?P<mtu>\\d+).+?state\\s+(?P<state>\\w+).*'\n r'\\s*link/(?P<link_type>\\w+)\\s+(?P<mac_address>\\S+)'\n )\n\n re_result = search(show_re, raw_result, DOTALL)\n result = re_result.groupdict()\n\n # seek inet if its there\n show_re = (\n r'((inet )\\s*(?P<inet>[^/]+)/(?P<inet_mask>\\d{1,2}))'\n )\n re_result = search(show_re, raw_result)\n if (re_result):\n result.update(re_result.groupdict())\n\n # seek inet6 if its there\n show_re = (\n r'((?<=inet6 )(?P<inet6>[^/]+)/(?P<inet6_mask>\\d{1,2}))'\n )\n re_result = search(show_re, 
raw_result)\n if (re_result):\n result.update(re_result.groupdict())\n\n # cleanup dictionary before returning\n for key, value in result.items():\n if value is not None:\n if value.isdigit():\n result[key] = int(value)\n\n return result", "title": "" }, { "docid": "0e0eaa1554104889424b090aa3624dc0", "score": "0.48830637", "text": "def parseBitRateCounters(line):\n if line.startswith(\"Port\"):\n portName = re.search(r'INTA\\d+|EXT\\d+|\\d+', line).group()\n record = line.split() # cuts the line into indiv. strings\n inkbps = record[2] # gets the InKbsp value (string)\n InBits = inkbps[:-4] # Removes 'Kbps'\n outkbps = record[3] # gets the OutKbps value (string)\n OutBits = outkbps[:-4] # Removes 'Kbps'\n result = [portName, InBits, OutBits]\n return result", "title": "" }, { "docid": "2ce1cab9605e8a3684138d8e28cd3c85", "score": "0.48629963", "text": "def get_wireless_config(wifi_id=None):\n net_info = []\n conn_ids = [wifi_id] if wifi_id else CONNECTION_IDS\n for conn_id in conn_ids:\n net_state = get_uci_state('network.wwan')\n wifi_state = get_uci_state('wireless')\n available = bool(wifi_state.get(RADIO_UCI[conn_id]))\n connected = False\n info = {}\n if available:\n connected = net_state.get('network.wwan.connected') == '1'\n info['ssid'] = wifi_state.get('wireless.wifibridge.ssid') or STATE_NOT_CONFIGURED \n info['encryption'] = wifi_state.get('wireless.wifibridge.encryption', STATE_NONE)\n info['mode'] = wifi_state.get('wireless.wifibridge.mode') or STATE_NOT_CONFIGURED\n info['hidden'] = wifi_state.get('wireless.wifibridge.hidden', '0')\n info['channel'] = wifi_state.get('wireless.radio1.channel') or STATE_AUTO\n info['hwmode'] = wifi_state.get('wireless.radio1.hwmode')\n info['network'] = {}\n net_info.append(dict(\n id=conn_id,\n name=CONNECTION_NAMES[conn_id],\n available=available,\n connected=connected,\n info=info\n ))\n return net_info", "title": "" }, { "docid": "89afea4b55182042f53db50eb99fa2aa", "score": "0.48438844", "text": "def get_networks(iface, retry=10):\r\n while retry > 0:\r\n if \"OK\" in run_program(\"wpa_cli -i %s scan\" % iface):\r\n networks = []\r\n r = run_program(\"wpa_cli -i %s scan_result\" % iface).strip()\r\n if \"bssid\" in r and len(r.split(\"\\n\")) > 1:\r\n for line in r.split(\"\\n\")[1:]:\r\n b, fr, s, f = line.split()[:4]\r\n ss = \" \".join(line.split()[4:]) # Hmm, dirty\r\n networks.append({\"bssid\": b, \"freq\": fr, \"sig\": s, \"ssid\": ss, \"flag\": f})\r\n return networks\r\n retry -= 1\r\n logging.debug(\"Couldn't retrieve networks, retrying\")\r\n time.sleep(0.5)\r\n logging.error(\"Failed to list networks\")", "title": "" }, { "docid": "2e23c7d4eeeafddb9ac31368326f2f6f", "score": "0.48291436", "text": "def _ubntbox(self):\n output = self.run('ubntbox mca-status')\n info = {}\n # loop over output\n for line in output.split('\\r\\n'):\n parts = line.split('=')\n # main device info\n if len(parts) > 2:\n subparts = line.split(',')\n for subpart in subparts:\n key, value = subpart.split('=')\n info[key] = value\n # all other stuff\n elif len(parts) == 2:\n info[parts[0]] = parts[1]\n else:\n pass\n # return dictionary\n return info", "title": "" }, { "docid": "99aa7415eaf6b378a10cfb8f08a96a30", "score": "0.48229906", "text": "def set_wifi_settings(ssid, pass1, pass2):\n\n # TODO: check ssid and pass for valid chars\n\n if pass1 != pass2:\n return\n\n lines = []\n with open(settings.PB_HOSTAPD_FILE, 'r') as f:\n for line in f:\n if line.startswith('ssid='):\n line = 'ssid='+ssid\n if line.startswith('wpa_passphrase='):\n line = 
'wpa_passphrase='+pass1\n lines.append(line)\n\n with open(settings.PB_HOSTAPD_FILE+'.new', 'w') as f:\n for line in lines:\n f.write(line+'\\n')\n\n shutil.copy(settings.PB_HOSTAPD_FILE+'.new', settings.PB_HOSTAPD_FILE)", "title": "" }, { "docid": "306def6e654a09147a4835eb1eb4d733", "score": "0.48211536", "text": "def get_state_temp(one_record):\n entries = one_record.split(',')\n bluetooth = entries[6]\n wifi = entries[7]\n dev_temp = int(entries[5])\n return (bluetooth, wifi, dev_temp)", "title": "" }, { "docid": "306def6e654a09147a4835eb1eb4d733", "score": "0.48211536", "text": "def get_state_temp(one_record):\n entries = one_record.split(',')\n bluetooth = entries[6]\n wifi = entries[7]\n dev_temp = int(entries[5])\n return (bluetooth, wifi, dev_temp)", "title": "" }, { "docid": "b9e9a6a4bf31be4d84a29153c5862c62", "score": "0.47929308", "text": "def get_wnics():\r\n r = run_program(\"iwconfig\")\r\n ifaces = []\r\n for line in r.split(\"\\n\"):\r\n if \"IEEE\" in line:\r\n ifaces.append(line.split()[0])\r\n return ifaces", "title": "" }, { "docid": "13b5ce658d1db9ebf8588d62e572329c", "score": "0.47892392", "text": "def parseLLDP(self,text):\n pass", "title": "" }, { "docid": "0b64b1a5a9ef04e102b00b749534afab", "score": "0.47772563", "text": "def __init__(self, ap_name):\n self.ap_name = ap_name\n self.ssids = []\n self.targetip = None\n self.targetport = None\n self.targetuser = None\n self.targetpass = None\n self.mac = None\n self.stub = None\n self.json = None", "title": "" }, { "docid": "3953eaa81f3928135a6d73544e29a1be", "score": "0.47681653", "text": "def get_current_state():\n\n output = wpa_cli(\"status\")\n for line in output:\n if line.startswith('wpa_state'):\n state = line.split('=')[1].strip()\n break\n\n return state", "title": "" }, { "docid": "c8c9afdfead2d197f1188ac2267375ed", "score": "0.47584337", "text": "def parse(raw: str) -> dict[str, dict[str, str]]:\n\n aps: dict[str, dict[str, str]] = {}\n for line in raw.split(\"\\n\"):\n line = line.strip()\n\n cell = cellRe.search(line)\n if cell:\n bssid = cell.group(2)\n aps[bssid] = {}\n continue\n\n for dat in dataRe:\n r = dat.search(line)\n if r:\n aps[bssid].update(r.groupdict())\n\n return aps", "title": "" }, { "docid": "5155b682ec9110f63bda23bc52cb21e6", "score": "0.47546983", "text": "def determine_mtu(self):\n cmd = \"ifconfig \"+str(self.name)\n out = subprocess.check_output(cmd, universal_newlines=True, shell=True)\n words = out.split(\" \")\n\n for word in words:\n if word == 'mtu' or word[0:3] == 'MTU':\n if word == 'mtu':\n return int(words[words.index(word)+1])\n else:\n return int(word[4:])\n print(\"MTU was not determined successfully.\")\n return 0", "title": "" }, { "docid": "5c43a034534a3cb89d60f233e58b532e", "score": "0.47486952", "text": "def parse_device_info(elem):\n # TODO\n return {}", "title": "" }, { "docid": "fc40308c97d6f326c1e8ac57e46e9ec4", "score": "0.4734646", "text": "def connect_to_wlan(ssid, password):\n result = subprocess.run(['nmcli', \"d\", \"wifi\", \"connect\", ssid, \"password\", password], stdout=subprocess.PIPE)\n return evaluate_result(result)", "title": "" }, { "docid": "17c7f7674ec3b431b9926a224d663615", "score": "0.47306088", "text": "def ready_to_apptimize(self):\n\n gateway_mac = str(check_output(settings.BSSID_CMD, shell=True)).strip()\n print gateway_mac\n print settings.TETHERING_MACS\n if gateway_mac.lower() in settings.TETHERING_MACS:\n return True\n else:\n return False", "title": "" }, { "docid": "7137701d0eb0897f83e87ae2899bc6c3", "score": "0.47267154", "text": 
"def to_python(cls, value):\n\n try:\n return SSID(value)\n except ValueError:\n msg = '%r is not a valid SSID.' % value\n raise ValidationError(msg)", "title": "" }, { "docid": "d59d5f0650b3c3fdb4d7ca0cf8a42540", "score": "0.47235563", "text": "def get_packet_info(self, pcap_file, filter='wlan.fc.type_subtype==3 && wlan.tag.number==55'):\n print(\"pcap file path: %s\" % pcap_file)\n try:\n if pcap_file is not None:\n cap = self.read_pcap(pcap_file=pcap_file, apply_filter=filter)\n packet_count = 0\n data = []\n for pkt in cap:\n data.append(str(pkt))\n packet_count += 1\n print(\"Total Packets: \", packet_count)\n print(data)\n data = \"\\n\".join(data)\n if packet_count != 0:\n return data\n else:\n return data\n except ValueError:\n raise \"pcap file is required\"", "title": "" }, { "docid": "aec088e32b3c30b0e98e837bfc925640", "score": "0.47033197", "text": "def get_wlan_conf(self):\n return self._api_read(\"list/wlanconf\")", "title": "" }, { "docid": "5e33380ade79849e36922b0cf6703bda", "score": "0.47030532", "text": "def show_ssid(self):\n command = \"show ssid\"\n template = \"show_ssid.textfsm\"\n\n command_response = self.send_command(command)\n ssid = self.fsm_parse(command_response, template)\n return ssid", "title": "" }, { "docid": "b7a726d267fb93cc5bf29a274b443da6", "score": "0.46977624", "text": "def connect_wifi(ssid, password):\n cmd = [\"nmcli\",\n \"device\"]\n\n # Add SSID to connect to.\n cmd.extend([\"wifi\", \"connect\", ssid])\n\n # Add password if specified.\n if password != \"\":\n cmd.extend([\"password\", password])\n\n try:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n returncode = p.returncode\n\n except Exception as ex:\n out = \"\"\n err = f\"Error while executing nmcli: {ex}\"\n returncode = -1\n\n finally:\n out_response = clean_nmcli_response(out)\n err_response = clean_nmcli_response(err)\n response = out_response + err_response\n\n success = (returncode == 0)\n result = list()\n\n for line in response:\n if line == \"\":\n continue\n result.append(line)\n if \"error\" in line.lower():\n success = False\n\n if success:\n result.insert(0, str(f\"Successfully connected to \\\"{ssid}\\\":\"))\n else:\n result.insert(0, str(f\"({returncode}) Failed to connect to \\\"{ssid}\\\":\"))\n\n return success, result", "title": "" }, { "docid": "35c398a3ec3ea7c1ca3c16aaa01bcfc3", "score": "0.46914297", "text": "def parseLLDP(self,text):\n \n added=False\n try:\n s=text.split(\"\\n\")\n \n name=ip=fr=to=capa=plat='Unknown'\n \n for t in s:\n if re.search('System Name: (.*)',t):\n name=re.search('System Name: (.*)',t).group(1).strip()\n elif re.search('.*IP: (.*)',t):\n ip=re.search('.*IP: (.*)',t).group(1).strip()\n elif re.search('Local Intf: (.*)',t):\n fr=re.search('Local Intf: (.*)',t).group(1).strip()\n elif re.search('Port id: (.*)',t):\n to=re.search('.*: (.*)',t).group(1).strip()\n elif re.search('.*System Capabilities: (.*)',t):\n capa=re.search('.*System Capabilities: (.*)',t).group(1).strip()\n elif re.search('.*System Description: (.*)',t):\n plat=s[s.index(t)+1].strip()\n\n \n if ip in elems and isinstance(elems[ip],(ExtremeElement,CiscoElement)):\n element=elems[ip]\n if element.type=='Unknown':\n element.type=capa\n if element.platform=='Unknown':\n element.platform=plat\n if element.name=='Unknown':\n element.name=name \n else:\n if 'Cisco' in plat:\n element=CiscoElement(capa,name,plat,ip)\n elif 'Extreme' in plat or 'EXOS' in plat:\n element=ExtremeElement(capa,name,plat,ip)\n else:\n 
element=Element(capa,name,plat,ip)\n elems[ip]=element\n \n l=Link(fr,to,element)\n \n if l not in self.links:\n added=True\n self.addLink(l)\n \n if(ip not in visited and ip not in toVisit):\n toVisit.append(ip)\n except:\n print('found new element but not enough information to be added\\n')\n finally:\n return added", "title": "" }, { "docid": "41ca9b9b21b992f885e5c5b06992193f", "score": "0.4690414", "text": "def auction_scan_parse():\r\n return", "title": "" }, { "docid": "4a79593ca8c3bdb8740596c67e5b188f", "score": "0.46807718", "text": "def parseArp(self,text):\n text=re.compile('\\s\\s+').split(text)\n ip=text[1]\n mac=text[2]\n \n element=None\n \n if ip in elems:\n element=elems[ip]\n else:\n element=Element(\"Unknown\",\"Unknown\",\"Unknown\",ip)\n elems[ip]=element\n \n if(element.mac==''):\n element.addMac(mac)\n \n if mac not in elemsByMac:\n elemsByMac[mac]=element", "title": "" }, { "docid": "5f07453996b35793b51b6be75df51592", "score": "0.46734285", "text": "def parseArp(self,text):\n\n text=re.compile('\\s\\s+').split(text)\n ip=text[1]\n mac=text[3]\n \n element=None\n \n if ip in elems:\n element=elems[ip]\n else:\n element=Element(\"Unknown\",\"Unknown\",\"Unknown\",ip)\n elems[ip]=element\n \n element.addMac(mac)\n \n elemsByMac[mac]=element", "title": "" }, { "docid": "ee41a60916ed3c1a5ab8f0cd1d041086", "score": "0.46661946", "text": "def test_parse_network_adapter_config(adapter):\n\n networks = simple_network_cfg(adapter)\n\n assert isinstance(networks, list)\n\n assert len(networks) == 1, 'Multiple networks parsed; 1 expected'\n\n assert len(networks[0]) == 3, 'Unexpected elements in tuple'\n\n assert networks[0][0] == 'network1', 'Expected \\'network1\\''\n\n assert networks[0][1] == 'subnet1', 'Expected \\'subnet1\\''\n\n assert networks[0][2] == 'external', 'Expected \\'external\\''", "title": "" }, { "docid": "eca337a761728739324b1f9b18475ca6", "score": "0.4665924", "text": "def parsePBP(self):", "title": "" }, { "docid": "acccb071f147b96982119613b8bf46f7", "score": "0.46637747", "text": "def wifi_connect(ssid, pwd):\n\n sta_if = network.WLAN(network.STA_IF)\n ap_if = network.WLAN(network.AP_IF)\n if ap_if.active():\n ap_if.active(False)\n if not sta_if.isconnected():\n print('connecting to network...')\n sta_if.active(True)\n sta_if.connect(ssid, pwd)\n while not sta_if.isconnected():\n pass\n return 'IP address: %s' % sta_if.ifconfig()[0]", "title": "" }, { "docid": "1c5e6a8efc5c445966fb15bb2bad1802", "score": "0.46615183", "text": "def record_start_mp(ipaddress):\n status = \"\"\n #for deckname, ipaddress in hyperdecks.items():\n tn = Telnet(ipaddress, TCP_PORT)\n tn.write(b'record' + b'\\r\\n')\n tn.write(b'quit' + b'\\r\\n')\n status += tn.read_all().decode('ascii')\n print(status)", "title": "" }, { "docid": "9a24d652d0426e91c7cceeda37921a49", "score": "0.4660265", "text": "def parse(log):\n\n # bash equivalent:\n # recv=$((-$(cat \"$log\" | grep \"eth0\" | awk '{print $2}' | tr '\\n' '+')0))\n # sent=$((-$(cat \"$log\" | grep \"eth0\" | awk '{print $10}' | tr '\\n' '+')0))\n recv = 0\n sent = 0\n\n for line in open(log).readlines():\n # lines appear as initial followed by final, so this does the correct computation\n if \"eth0\" in line:\n recv = float(line.split()[1]) - recv\n sent = float(line.split()[9]) - sent\n\n return (recv/BYTE_PER_GB, sent/BYTE_PER_GB)", "title": "" }, { "docid": "92c3ed55d936b792dca2c338ce30f641", "score": "0.46488062", "text": "def get_alfred_data(datatype):\n try:\n info = subprocess.getoutput('bash 
../network/get_alfred_info.bash ' + str(datatype))\n info = info.strip('').split('\\n')\n return info\n except subprocess.SubprocessError as e:\n return 'There is a problem with A.L.F.R.E.D daemon:' + str(e)", "title": "" }, { "docid": "139785af9bea10017927dc3e05bd6074", "score": "0.4644806", "text": "def _parse_device(self):\n COMMA = self._scanner.symbol_types.COMMA\n SEMICOLON = self._scanner.symbol_types.SEMICOLON\n OPENPAREN = self._scanner.symbol_types.OPENPAREN\n CLOSEPAREN = self._scanner.symbol_types.CLOSEPAREN\n NUMBER = self._scanner.symbol_types.NUMBER\n\n ret = True\n\n [device_name_status, device_id] = self._parse_device_name()\n # setting default parameter value\n parameter = None\n ret = ret and device_name_status\n\n self._current_sym = self._scanner.get_symbol()\n\n if self._current_sym.symtype == OPENPAREN:\n # get number and closeparen\n self._current_sym = self._scanner.get_symbol()\n if self._current_sym.symtype != NUMBER:\n # ERROR - supposed to have a parameter\n self.display_error(\n self.NO_PARAMETER,\n self.stopping_symbols[\"BETWEEN\"])\n return [False, None]\n parameter = self._current_sym.symid\n self._current_sym = self._scanner.get_symbol()\n if self._current_sym.symtype != CLOSEPAREN:\n # Error\n self.display_error(\n self.NO_CLOSE_BRACKET,\n self.stopping_symbols[\"BETWEEN\"])\n return [False, None]\n self._current_sym = self._scanner.get_symbol()\n\n # comma/semicolon checked in device_def\n return [ret, {device_id: parameter}]", "title": "" }, { "docid": "94427bc35e5e88b3acec1670b9d2857d", "score": "0.46368104", "text": "def parse(txt, device_list, command_list):\n\n command = None\n for c in command_list:\n if txt.startswith(c):\n command = c\n txt = txt[len(c)+1:]\n break\n if command is None:\n raise UnknownCommandError()\n\n device = None\n for d in device_list:\n if txt.lower().startswith(d.name.lower()):\n device = d\n txt = txt[len(d.name)+1:]\n break\n if device is None:\n raise UnknownDeviceError()\n\n return command, device, txt", "title": "" }, { "docid": "74b798c783aa58290e0647ea4857eb2f", "score": "0.46345127", "text": "def parse(self, name):\n # Uncomment only for debugging purpose\n return parse_udevadm_output(self.get_text(name), 64)[\"device_list\"]", "title": "" }, { "docid": "0c61a579dd6481553cad6f75b8a0328f", "score": "0.46281683", "text": "def parse_from(data):\r\n try:\r\n packet = struct.unpack(b\"!BBBbiI4B2I2I2I2I\", data[:48])\r\n LI = int(('0'*8+bin(packet[0])[2:])[-8:][:2], 2)\r\n VN = int(('0'*8+bin(packet[0])[2:])[-8:][2:5], 2)\r\n MODE = int(('0'*8+bin(packet[0])[2:])[-8:][5:], 2)\r\n STRATUM = packet[1]\r\n Poll = packet[2]\r\n Precision = packet[3]\r\n Root_Delay =packet[4]\r\n Root_Dispersion = packet[5]\r\n Reference_Identifier = packet[6:10]\r\n Reference_Timestamp = packet[10:12]\r\n Originate_Timestamp = packet[12:14]\r\n Receive_Timestamp = packet[14:16]\r\n Transmit_Timestamp = packet[16:18]\r\n return (LI, VN, MODE, STRATUM, Poll, Precision, Root_Delay, Root_Dispersion, Reference_Identifier,\r\n Reference_Timestamp, Originate_Timestamp, Receive_Timestamp, Transmit_Timestamp)\r\n except Exception:\r\n print(\"Cant parse a package, is it the right one?\")", "title": "" }, { "docid": "5b41917e6998d084868f7a5d72633403", "score": "0.46256104", "text": "def get_tx_power(device, ap_name):\n tx_power = \"\"\n try:\n radio_summary = device.parse('show ap dot11 5ghz summary')\n if radio_summary.get('ap_name').get(ap_name):\n tx_power = radio_summary.get('ap_name').get(ap_name).get('tx_pwr')\n\n except SchemaEmptyParserError 
as e:\n log.error(\"Failed to get tx power from 'show ap dot11 5ghz summary': Error: {e}\".format(e=str(e)))\n return \"\"\n return tx_power", "title": "" }, { "docid": "1b5024312a19ebc18cdc41ecf22eda9e", "score": "0.46173915", "text": "def parse_packet(packet):\n \n\n data = str(packet[S.TCP])\n start = data.find(\"username=\")\n if(start==-1):\n return None\n end = data.find(\"&password=\")\n if(end == -1):\n return None\n username = data[start:end]\n password = data[end:]\n \n return (username[9:],password[10:])", "title": "" }, { "docid": "a3449b08d2b387f36bb6237f58803d89", "score": "0.46140504", "text": "def recopila(ip):\r\n\tcdp_list=[]\r\n\tdata_path = 'C:/Users/Ileam Montoya/Dropbox/PYTHON/Python Scripts/CLARO/Seguimiento Macs/Levantamiento/data/{} - {}.txt'\r\n\tif ping(ip):\r\n\t\tuser_pass= '/Users/Ileam Montoya/Dropbox/Claro/Automatization/password_claro_actual/authentication.txt'\r\n\t\twith open(user_pass) as file:\r\n\t\t\tusername = file.readline().strip('\\n')\r\n\t\t\tpassword = file.readline()\r\n\t\trouter = {\r\n\t\t 'ip': ip,\r\n\t\t 'username': username,\r\n\t\t 'password': password,\r\n\t\t 'device_type': 'autodetect'\r\n\t\t\t}\r\n\t\tconn = try_connection(router, ip)\r\n\t\tif conn:\r\n\t\t\ttry:\r\n\t\t\t\tprompt = conn.find_prompt()\r\n\t\t\t\t#Se verifica el prompt y en caso de contener > es un equipo Huawei\r\n\t\t\t\tif '>' in prompt:\r\n\t\t\t\t\t#En caso de que se haya conectado un equipo huawei en modo cisco\r\n\t\t\t\t\tif router['device_type'] == 'cisco_ios':\r\n\t\t\t\t\t\tconn.disconnect()\r\n\t\t\t\t\t\trouter['device_type'] = 'huawei'\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tconn = ConnectHandler(**router)\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tprint('Error en Huawei')\r\n\t\t\t\t\tprint('HUAWEI '+prompt)\r\n\t\t\t\t\thostname=re.sub('[<>]', '', prompt)\r\n\t\t\t\t\toutput = conn.send_command('display mac-address')\r\n\t\t\t\t\t#Regex captura la mac e interfaz de salida en un Huawei, ejemplo:\r\n\t\t\t\t\t#1409-dcf4-92fb 1666 - - GE6/0/0 dynamic 4/- \r\n\t\t\t\t\tmac_ints=re.findall('(....-....-....) \\d+ +- - +(.+) +dynamic',output)\r\n\t\t\t\t\twriting=open(data_path.format(hostname,ip),'w')\r\n\t\t\t\t\tfor i, j in mac_ints:\r\n\t\t\t\t\t\t#Por ser Huawei, se cambie el GE por Gi excepto cuando la interfaz es 100GE\r\n\t\t\t\t\t\t#En las macs se cambia el - por .\r\n\t\t\t\t\t\tk=j.replace('GE','Gi') if '100' not in j else j\r\n\t\t\t\t\t\twriting.write('{}\\t{}\\n'.format(i.replace('-','.'),k))\r\n\t\t\t\t\t\tif k not in cdp_list:\r\n\t\t\t\t\t\t\tcdp_list.append(k)\r\n\t\t\t\t\tfind_cdp_huawei(cdp_list,writing,conn,hostname)\r\n\t\t\t\t\twriting.close()\r\n\t\t\t\t#Se verifica el prompt y en caso de contener : es un equipo XR\r\n\t\t\t\telif ':' in prompt:\r\n\t\t\t\t\tprint('XR '+prompt)\r\n\t\t\t\t\thostname=re.findall('RP/0/RSP0/CPU0:(.+)#',prompt)[0]\r\n\t\t\t\t\t#El levantamiento de macs en XR depende del dominio del equipo y la vlan, se debe modificar el comando antes de correr el script\r\n\t\t\t\t\tprint('EJECUTANDO COMANDO SOBRE EQUIPO XR - VERIFICAR COMANDO DE DOMINIO Y VLAN')\r\n\t\t\t\t\toutput = conn.send_command('show l2vpn forwarding bridge-domain L2TRUNKS:VLAN3579 mac-address location 0/0/CPU0 ')\r\n\t\t\t\t\tmac_ints=re.findall('(....\\.....\\.....) 
dynamic (.+) +0/0/CPU0',output)\r\n\t\t\t\t\twriting=open(data_path.format(hostname,ip),'w')\r\n\t\t\t\t\tfor i, j in mac_ints:\r\n\t\t\t\t\t\tneighbor_int = j.split('.')[0]\r\n\t\t\t\t\t\twriting.write('{}\\t{}\\n'.format(i,neighbor_int))\r\n\t\t\t\t\t\tif neighbor_int not in cdp_list:\r\n\t\t\t\t\t\t\tcdp_list.append(neighbor_int)\r\n\t\t\t\t\tfind_cdp_xr_ios(cdp_list,writing,conn,hostname)\r\n\t\t\t\t\twriting.close()\r\n\t\t\t\t#Si no es Huawei ni XR se califica como IOS\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint('IOS '+prompt)\r\n\t\t\t\t\thostname=re.findall('(.+)#',prompt)[0]\r\n\t\t\t\t\tversion = conn.send_command('show version')\r\n\t\t\t\t\toutput = conn.send_command('show mac-address-table')\r\n\t\t\t\t\t#En caso de que el equipo rechace el comando show mac-address-table se lanza el siguiente:\r\n\t\t\t\t\t#show mac address-table\r\n\t\t\t\t\tif 'Invalid input' in output:\r\n\t\t\t\t\t\tprint('DIFFERENT MAC COMMAND FOR IP {}'.format(ip))\r\n\t\t\t\t\t\toutput = conn.send_command('show mac address-table')\r\n\t\t\t\t\t#Dentro de IOS cada version lanza lineas un poco diferentes en su tabla mac\r\n\t\t\t\t\t#En esta seccion se separan las versiones y se realizan las capturas necesarias\r\n\t\t\t\t\tif 'ASR-920' in version:\r\n\t\t\t\t\t\tmac_ints=re.findall('(....\\.....\\.....) DYNAMIC (.+) +',output)\r\n\t\t\t\t\t\twriting=open(data_path.format(hostname,ip),'w')\r\n\t\t\t\t\t\tfor i, j in mac_ints:\r\n\t\t\t\t\t\t\tneighbor_int = j.split('.')[0]\r\n\t\t\t\t\t\t\tif 'Po' in neighbor_int:\r\n\t\t\t\t\t\t\t\tneighbor_int = 'Po'+re.findall('\\d+$',neighbor_int)[0]\r\n\t\t\t\t\t\t\twriting.write('{}\\t{}\\n'.format(i,neighbor_int))\r\n\t\t\t\t\t\t\tif neighbor_int not in cdp_list:\r\n\t\t\t\t\t\t\t\tcdp_list.append(neighbor_int)\r\n\t\t\t\t\t\tfind_cdp_xr_ios(cdp_list,writing,conn,hostname)\r\n\t\t\t\t\t\twriting.close()\r\n\t\t\t\t\telif 'ME-3' in version or 'ME-C3' in version:\r\n\t\t\t\t\t\tmac_ints=re.findall('(....\\.....\\.....) 
DYNAMIC (.+)\\n',output)\r\n\t\t\t\t\t\twriting=open(data_path.format(hostname,ip),'w')\r\n\t\t\t\t\t\tfor i, j in mac_ints:\r\n\t\t\t\t\t\t\twriting.write('{}\\t{}\\n'.format(i,j))\r\n\t\t\t\t\t\t\tif j not in cdp_list:\r\n\t\t\t\t\t\t\t\tcdp_list.append(j)\r\n\t\t\t\t\t\tfind_cdp_xr_ios(cdp_list,writing,conn,hostname)\r\n\t\t\t\t\t\twriting.close()\r\n\t\t\t\t\telif ' CISCO76' in version:\r\n\t\t\t\t\t\tmac_ints=re.findall('(....\\.....\\.....).+\\d+ +(.+)\\n',output)\r\n\t\t\t\t\t\twriting=open(data_path.format(hostname,ip),'w')\r\n\t\t\t\t\t\tfor i, j in mac_ints:\r\n\t\t\t\t\t\t\twriting.write('{}\\t{}\\n'.format(i,j))\r\n\t\t\t\t\t\t\tif j not in cdp_list:\r\n\t\t\t\t\t\t\t\tcdp_list.append(j)\r\n\t\t\t\t\t\tfind_cdp_xr_ios(cdp_list,writing,conn,hostname)\r\n\t\t\t\t\t\twriting.close()\r\n\t#Se capturan errores en el levantamiento, conexion o ping\r\n\t\t\texcept Exception as e:\r\n\t\t\t\twriting=open(data_path.format('ERROR LEVANTAMIENTO',ip),'w')\r\n\t\t\t\twriting.close()\r\n\t\t\t\tprint('ERROR DURANTE LEVANTAMIENTO DE EQUIPO {}'.format(ip))\r\n\t\t\t\twriting=open(data_path.format('LOG','ERRORES'),'a')\r\n\t\t\t\twriting.write('ERROR DURANTE LEVANTAMIENTO DE EQUIPO {}\\n'.format(ip))\r\n\t\t\t\twriting.write(str(e)+'\\n\\n\\n')\r\n\t\t\t\twriting.close()\t\r\n\t\t\tconn.disconnect()\r\n\t\telse:\r\n\t\t\twriting=open(data_path.format('ERROR CONEXION',ip),'w')\r\n\t\t\twriting.close()\r\n\t\t\tprint('NO HAY CONEXION AL EQUIPO DE IP {}'.format(ip))\r\n\t\t\twriting=open(data_path.format('LOG','ERRORES'),'a')\r\n\t\t\twriting.write('NO HAY CONEXION AL EQUIPO DE IP {}\\n\\n\\n'.format(ip))\r\n\t\t\twriting.close()\t\r\n\r\n\telse:\r\n\t\twriting=open(data_path.format('ERROR PING',ip),'w')\r\n\t\twriting.close()\r\n\t\tprint('ERROR PING - {}'.format(ip))\r\n\t\twriting=open(data_path.format('LOG','ERRORES'),'a')\r\n\t\twriting.write('ERROR PING - {}\\n\\n\\n'.format(ip))\r\n\t\twriting.close()", "title": "" }, { "docid": "fa8dc13476e543c3e1ae699fe1ba1e62", "score": "0.4610714", "text": "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "title": "" }, { "docid": "2dfb5b5295a4f45138f55cbd4b7b4320", "score": "0.46066785", "text": "def parse(cmd_result):\n\n float_pattern = re.compile(\n r'^(\\+|\\-)[0-9]\\.[0-9]{5,6}e(\\-|\\+)[0-9][0-9]')\n # cmd_result = cmd_result.decode('utf-8').replace('\\r\\n', '')\n cmd_result = cmd_result.replace('\\r\\n', '')\n split_input = cmd_result.split(',')\n result = []\n\n for data in split_input:\n if data == 'N' or data.startswith('----'):\n result.append(None)\n\n elif re.match(float_pattern, data):\n result.append(float(data))\n\n elif data == 'ON':\n result.append(True)\n\n elif data == 'OFF':\n result.append(False)\n\n else:\n try:\n result.append(int(data))\n\n except ValueError:\n result.append(data)\n\n if len(result) == 1:\n return result[0]\n\n return tuple(result)", "title": "" }, { "docid": "8bcd3225725083ea82c98f28311a9739", "score": "0.46052647", "text": "def get_ip():\n\tresult = subprocess.run(['ifconfig', 'eth0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n\tmyip = get_pattern(\"inet addr:\", result.stdout, ' ')\n\tprint(\"\\nMy ip is {}\".format(myip))\n\n\tmask = get_pattern(\"Mask:\", result.stdout, '\\n')\n\tsubnet = '.'.join(myip.split('.')[0:3] + ['0']) + \"/\" + str(mask_to_slash(mask))\n\tprint(\"Scanning subnet : %s\" % subnet)\n\n\tIPs = 
subprocess.run(['fping', '-aqg', subnet], stdout=subprocess.PIPE)\n\tactive_IPs = IPs.stdout.decode('utf8').strip().split()\n\treturn active_IPs", "title": "" }, { "docid": "1756886a587fe90d0678c90dbc3abbe2", "score": "0.45998585", "text": "def get_roomba_ip():\n tmp_file = '/tmp/net.txt'\n\n if os.path.exists(tmp_file):\n os.remove(tmp_file)\n\n print \"Scanning network...\"\n subprocess.call(['nmap', '-oG', tmp_file, '-sP', '192.168.1.*'], stdout=subprocess.PIPE)\n with open(tmp_file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if \"Roomba\" in line:\n parts = line.split()\n return parts[1]\n\n print \"#\" * 80\n print \"Unable to find roomba IP. Try again :) Scan results:\"\n print \"#\" * 80\n print \"\".join(lines)\n print \"#\" * 80", "title": "" }, { "docid": "f3d0781779a2829ee5594c10ec940977", "score": "0.45985922", "text": "def set_wifi(network_interface, status):\n if status in ['up', 'down']:\n \n call(['sudo', 'ip', 'link', 'set', 'dev', network_interface, status])", "title": "" }, { "docid": "d8b8c4eb18b5c96a5f67c0d33af8e3aa", "score": "0.45978376", "text": "def change_wifi_mode(mode, ssid='', passwd='', wep=0):\n logging.debug('Got switch request mode-{0} ssid-{1} pass-{2} wep-{3} '.format(mode, ssid, passwd, wep))\n if mode == 0:\n dhcpcd_conf = '''# A sample configuration for dhcpcd.\n# See dhcpcd.conf(5) for details.\n\n# Allow users of this group to interact with dhcpcd via the control socket.\n#controlgroup wheel\n\n# Inform the DHCP server of our hostname for DDNS.\nhostname\n\n# Use the hardware address of the interface for the Client ID.\nclientid\n# or\n# Use the same DUID + IAID as set in DHCPv6 for DHCPv4 ClientID as per RFC4361.\n#duid\n\n# Persist interface configuration when dhcpcd exits.\npersistent\n\n# Rapid commit support.\n# Safe to enable by default because it requires the equivalent option set\n# on the server to actually work.\noption rapid_commit\n\n# A list of options to request from the DHCP server.\noption domain_name_servers, domain_name, domain_search, host_name\noption classless_static_routes\n# Most distributions have NTP support.\noption ntp_servers\n# Respect the network MTU.\n# Some interface drivers reset when changing the MTU so disabled by default.\n#option interface_mtu\n\n# A ServerID is required by RFC2131.\nrequire dhcp_server_identifier\n\n# Generate Stable Private IPv6 Addresses instead of hardware based ones\nslaac private\n\n# A hook script is provided to lookup the hostname if not set by the DHCP\n# server, but it should not be run by default.\nnohook lookup-hostname'''\n\n with open('/etc/dhcpcd.conf', 'w') as fp:\n fp.write(dhcpcd_conf)\n\n if ssid:\n # Update the saved wifi credentials\n with open('Hotspot/SavedWifiCredentials.json', 'r') as fp:\n saved_cred = json.load(fp)\n\n saved_cred[ssid] = {'passwd': passwd, 'wep': wep}\n\n with open('Hotspot/SavedWifiCredentials.json', 'w') as fp:\n json.dump(saved_cred, fp)\n\n if passwd:\n if wep:\n interfaces_conf = \"\"\"# interfaces(5) file used by ifup(8) and ifdown(8)\n\n# Please note that this file is written to be used with dhcpcd\n# For static IP, consult /etc/dhcpcd.conf and 'man dhcpcd.conf'\n\n# Include files from /etc/network/interfaces.d:\nsource-directory /etc/network/interfaces.d\nauto lo\n\niface lo inet loopback\niface eth0 inet dhcp\n\nauto wlan0\nallow-hotplug wlan0\niface wlan0 inet dhcp\n wireless-essid {0}\n wireless-key {1}\n\n\"\"\".format(ssid, passwd)\n # If wep not marked, treat as wpa\n else:\n interfaces_conf = \"\"\"# interfaces(5) file used 
by ifup(8) and ifdown(8)\n\n# Please note that this file is written to be used with dhcpcd\n# For static IP, consult /etc/dhcpcd.conf and 'man dhcpcd.conf'\n\n# Include files from /etc/network/interfaces.d:\nsource-directory /etc/network/interfaces.d\nauto lo\n\niface lo inet loopback\niface eth0 inet dhcp\n\nauto wlan0\nallow-hotplug wlan0\niface wlan0 inet dhcp\n wpa-ssid \"{0}\"\n wpa-psk \"{1}\"\n wpa-conf /etc/wpa_supplicant/wpa_supplicant.conf\n\"\"\".format(ssid, passwd)\n\n with open('/etc/network/interfaces', 'w') as fp:\n fp.write(interfaces_conf)\n logging.debug('Switching to client mode')\n for task in client_mode_tasks:\n try:\n logging.debug(' '.join(task[1:]) + ' - Exit Status : ' + str(subprocess.check_call(task)))\n\n except Exception as e:\n logging.error(str(e))\n\n return None\n\n # If no passwd given, treat as open network\n else:\n interfaces_conf = \"\"\"# interfaces(5) file used by ifup(8) and ifdown(8)\n# Please note that this file is written to be used with dhcpcd\n# For static IP, consult /etc/dhcpcd.conf and 'man dhcpcd.conf'\n\n# Include files from /etc/network/interfaces.d:\nsource-directory /etc/network/interfaces.d\nauto lo\n\niface lo inet loopback\niface eth0 inet dhcp\n\nauto wlan0\nallow-hotplug wlan0\niface wlan0 inet dhcp\n wireless-essid {0}\n wireless-mode managed\"\"\".format(ssid)\n\n with open('/etc/network/interfaces', 'w') as fp:\n fp.write(interfaces_conf)\n\n logging.debug('Switching to client mode')\n for task in client_mode_tasks:\n try:\n logging.debug(' '.join(task[1:]) + ' - Exit Status : ' + str(subprocess.check_call(task)))\n\n except Exception as e:\n logging.error(str(e))\n\n return None\n\n # If no ssid given, automatically choose a saved one\n else:\n logging.debug('Switching to client mode')\n if not client_mode():\n # Switch to client mode to start a wifi scan\n for task in client_mode_tasks:\n try:\n logging.debug(' '.join(task[1:]) + ' - Exit Status : ' + str(subprocess.check_call(task)))\n\n except Exception as e:\n logging.error(str(e))\n sleep(1)\n\n # scan until there is a wifi AP found\n logging.debug('Wifi Scan Started \\n')\n retries = 0\n scan = None\n while retries < 20:\n # Search for AP's and store inside a file\n try:\n scan = Cell.all(wifi_interface)\n\n except Exception as e:\n logging.error(str(e))\n\n if scan:\n break\n\n retries += 1\n sleep(1)\n\n if scan:\n # Sort in desc order acc to the quality and iterate over list to see if a cell is in saved wifi cred\n scan = sorted(scan, key=lambda cell: cell.quality, reverse=True)\n\n logging.debug('Wifi Scan Completed - ' + str(scan))\n\n try:\n with open('Hotspot/SavedWifiCredentials.json', 'r') as fp:\n wifi_credentials = json.load(fp)\n\n except FileNotFoundError as e:\n logging.error('No saved wifi credentails were found')\n return None\n\n ssid = None\n for cell in scan:\n if cell.ssid in wifi_credentials.keys():\n ssid = cell.ssid\n break\n\n # ssid = sorted(scan, key=lambda cell: cell.quality)[-1].ssid\n if ssid:\n change_wifi_mode(0, ssid, wifi_credentials[ssid]['passwd'], wifi_credentials[ssid]['wep'])\n\n else:\n logging.debug('Couldn\\'t match any scan with saved wifi credentials')\n\n return None\n\n else:\n logging.debug('Wifi Scan unsuccessful')\n\n elif mode == 1:\n dhcpcd_conf = '''# A sample configuration for dhcpcd.\n# See dhcpcd.conf(5) for details.\n\n# Allow users of this group to interact with dhcpcd via the control socket.\n#controlgroup wheel\n\n# Inform the DHCP server of our hostname for DDNS.\nhostname\n\n# Use the hardware address of the 
interface for the Client ID.\nclientid\n# or\n# Use the same DUID + IAID as set in DHCPv6 for DHCPv4 ClientID as per RFC4361.\n#duid\n\n# Persist interface configuration when dhcpcd exits.\npersistent\n\n# Rapid commit support.\n# Safe to enable by default because it requires the equivalent option set\n# on the server to actually work.\noption rapid_commit\n\n# A list of options to request from the DHCP server.\noption domain_name_servers, domain_name, domain_search, host_name\noption classless_static_routes\n# Most distributions have NTP support.\noption ntp_servers\n# Respect the network MTU.\n# Some interface drivers reset when changing the MTU so disabled by default.\n#option interface_mtu\nfoo\n# A ServerID is required by RFC2131.\nrequire dhcp_server_identifier\n\n# Generate Stable Private IPv6 Addresses instead of hardware based ones\nslaac private\n\n# A hook script is provided to lookup the hostname if not set by the DHCP\n# server, but it should not be run by default.\nnohook lookup-hostname\n\ndenyinterfaces wlan0'''\n\n with open('/etc/dhcpcd.conf', 'w') as fp:\n fp.write(dhcpcd_conf)\n\n interfaces_conf = \"\"\"# interfaces(5) file used by ifup(8) and ifdown(8)\n\n# Please note that this file is written to be used with dhcpcd\n# For static IP, consult /etc/dhcpcd.conf and 'man dhcpcd.conf'\n\n# Include files from /etc/network/interfaces.d:\nsource-directory /etc/network/interfaces.d\n\nauto lo\n\niface lo inet loopback\niface eth0 inet dhcp\n\nallow-hotplug wlan0\niface wlan0 inet static\n address 10.0.0.1\n netmask 255.255.255.0\n network 10.0.0.0\n\"\"\"\n with open('/etc/network/interfaces', 'w') as fp:\n fp.write(interfaces_conf)\n\n logging.debug('Switching to AP mode')\n\n for task in AP_mode_tasks:\n try:\n logging.debug(' '.join(task[1:]) + ' - Exit Status : ' + str(subprocess.check_call(task)))\n\n except Exception as e:\n logging.error(str(e))\n\n return None", "title": "" }, { "docid": "ffb651c771f7ae9b89f3cba8b4d4eca3", "score": "0.45907593", "text": "def scan(iface: str) -> str:\n cmd = [\"iwlist\", iface, \"scan\"]\n ret = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, timeout=15)\n\n return ret.stdout", "title": "" }, { "docid": "4623a06516676c37468d165e05ad67bc", "score": "0.45892197", "text": "def ssh_get_vlan(text):\n\n\tvlan_dict = {}\n\tfor line in text.split('\\n'):\n\t\t#Needs to skip all lines till first line of VLAN shows\n\t\tcolumns = line.split()\n\t\tif len(columns) < 3 or \"active\" != columns[2] or columns[0] in {'1', '1001', '1002', '1003', '1004', '1005'}:\n\t\t\tcontinue\n\t\tvlan_dict[columns[1]] = int(columns[0])\n\treturn vlan_dict", "title": "" }, { "docid": "e81e66b7f13815883270f94e0daabf8d", "score": "0.45857686", "text": "def arpParse(self):\n print(\"-----------------ARP header-----------------\")\n # Hardware Dictionary to identify the hardware category\n hardwareDict = {1: \"Ethernet\", 6: \"IEEE 802 Networks\", 7: \"ARCNET\", 15: \"Frame reply\", 16: \"ATM\", 17: \"HDLC\",\n 18: \"Fibre Channel\", 19: \"ATM\", 20: \"Serial Line\"}\n print(\"Hardware Type: \", hardwareDict[self.hexToDec(self.data[:4])])\n if self.data[4:8] == \"0800\":\n print(\"Protocol Type: IPv4\")\n else:\n print(\"Protocol Type: Unrecognized\")\n print(\"Hardware Size: \", self.hexToDec(self.data[8:10]))\n print(\"Protocol Size: \", self.hexToDec(self.data[10:12]))\n if self.hexToDec(self.data[12:16]) == 1:\n print(\"Opcode: Request\")\n else:\n print(\"Opcode: Reply\")\n self.data = self.data[16:]\n print(\"Sender MAC address: \", self.macFinder())\n 
print(\"Sender IP address: \", self.decodeIP())\n print(\"Target MAC address: \", self.macFinder())\n print(\"Target IP address: \", self.decodeIP())", "title": "" }, { "docid": "00199c67c375f4f113862213be007c76", "score": "0.45759505", "text": "def get_wifi_status(set_to):\n # Wake up our cellphone\n serial, d = get_serial_and_device()\n # turn on screen\n d.screen.on()\n # press back key\n d.press.back()\n # press home key\n d.press.home()\n wifi_status = int(check_output(['adb', 'shell', 'settings', 'get', 'global', 'wifi_on']))\n if set_to == wifi_status:\n print \"wifi already set to the desired state\"\n return True\n print \"changing wifi status\"\n return False", "title": "" }, { "docid": "c4a460a8d78683c3b0b7163b4e047332", "score": "0.4575237", "text": "def current_mac(cfgfile):\n cur = open(cfgfile)\n cur = cur.readlines()\n for i in cur:\n if 'HWADDR' in i:\n return i.strip()[7:]", "title": "" }, { "docid": "ef38a5d5082e03a17a9700c69917dbf2", "score": "0.45701388", "text": "def main():\n\n args = parse_args()\n settings = load_settings(args.settings_file)\n\n if 'ip' not in settings:\n try:\n settings['ip'] = discover_ip_address()\n except:\n print(\"Unable to discover ip address of TV\", file=sys.stderr)\n sys.exit(1)\n \n if args.find_mac_address:\n try:\n settings['mac-address'] = find_mac_address(settings['ip'])\n except:\n print(\"Unable to find MAC address in arp cache. Ensure the TV is \"\n \"on and that arp is installed and on the path.\",\n file=sys.stderr)\n sys.exit(1)\n elif args.wake:\n if 'mac-address' not in settings:\n print(\"Unable to wake TV: no MAC address saved. \"\n \"Run lgtv --find-mac-address with the TV on\",\n file=sys.stderr)\n sys.exit(1)\n wakeonlan.send_magic_packet(settings['mac-address'])\n else: \n ws_uri = 'ws://' + settings['ip'] + ':3000'\n client_key = settings.get('client-key')\n\n (response, client_key) = send(ws_uri, args.ssap_uri, args.payload, client_key)\n\n print(response)\n\n settings['client-key'] = client_key\n \n save_settings(args.settings_file, settings)", "title": "" }, { "docid": "5facc6e29fecd62555734c9c36e18848", "score": "0.45667624", "text": "def get_mac():\n #ip addr show wlp1s0 | grep \"link/ether\\b\" | awk '{print $2}' | cut -d/ -f1\n call(['sudo', 'ip', 'link', network_interface, 'show'])\n pass", "title": "" }, { "docid": "f913190f353b1a7115f7bf3087e6523a", "score": "0.45625877", "text": "def _GetNetworkDeviceProperties(self, device_name: str) -> Dict[str, str]:\n # ethtool can exist under /usr/sbin or needs to be installed (debian9)\n if self.HasPackage('ethtool'):\n self.InstallPackages('ethtool')\n try:\n stdout, _ = self.RemoteCommand(\n f'PATH=\"${{PATH}}\":/usr/sbin ethtool -i {device_name}'\n )\n except errors.VirtualMachine.RemoteCommandError:\n logging.info('ethtool not installed', exc_info=True)\n return {}\n properties = {}\n for line in stdout.splitlines():\n m = self._ETHTOOL_RE.match(line)\n if m:\n properties[m['key']] = m['value']\n return properties", "title": "" }, { "docid": "c7d164d4d091fcedfdeaa0a162a4990c", "score": "0.45609337", "text": "def show_profile(essid=None, brief=False):\n\n output_list = []\n\n if essid is None or brief:\n stdout = wpa_cli(\"list_networks\")\n\n for line in stdout:\n if essid is None or \"\\t%s\\t\" % essid in line:\n output_list.append(line.strip())\n\n else: # essid was specified\n network_number = find_profile(essid)\n if network_number is None:\n raise WifiError('show_profile: ESSID \"%s\" was not found' % essid)\n\n for field in (\"ssid\", \"scan_ssid\", 
\"key_mgmt\", \"pairwise\", \"group\",\n \"psk\", \"eap\", \"identity\", \"password\", \"ca_cert\", \"client_cert\",\n \"private_key\", \"private_key_passwd\", \"phase1\", \"ca_cert2\",\n \"client_cert2\", \"private_key2\", \"private_key2_passwd\", \"wep_key0\",\n \"wep_tx_keyidx\", \"proto\", ):\n stdout = wpa_cli (\"get_network %d %s\" % (network_number, field),\n raise_failures=False)\n\n answer = stdout[1]\n\n if answer == \"FAIL\\n\": continue\n\n output_list.append(\"%20s = %s\" % (field, answer))\n\n return output_list", "title": "" }, { "docid": "1487f0b82fea15b8fe8c5d6fad1b6e7e", "score": "0.45516992", "text": "def get_phone_status():\n send_command('AT+CPAS')\n return get_output()[1].split(' ')[1]", "title": "" }, { "docid": "c5fbf5a3a1c0d20f3f9ca43c428ec323", "score": "0.45464537", "text": "def parse_status_update(self, data):\n ret = {}\n _type = data[3]\n if _type == b'\\x1b':\n # Power Clamp\n # Unknown\n pass\n\n elif _type == b'\\x1c':\n # Power Switch\n # Unknown\n pass\n\n elif _type == b'\\x1d':\n # Key Fob\n ret['temperature'] = float(struct.unpack(\"<h\", data[8:10])[0]) / 100.0 * 1.8 + 32\n ret['counter'] = struct.unpack('<I', data[4:8])[0]\n\n elif _type == b'\\x1e' or _type == b'\\x1f':\n # Door Sensor\n ret['temperature'] = float(struct.unpack(\"<h\", data[8:10])[0]) / 100.0 * 1.8 + 32\n if ord(data[-1]) & 0x01 == 1:\n ret['trigger_state'] = 1 # Open\n else:\n ret['trigger_state'] = 0 # Closed\n\n if ord(data[-1]) & 0x02 == 0:\n ret['tamper_state'] = 1 # Open\n else:\n ret['tamper_state'] = 0 # Closed\n\n else:\n self._logger.error('Unrecognised Device Status %r %r', _type, data)\n\n return ret", "title": "" }, { "docid": "3731d96c06010aceafb967c610924cdb", "score": "0.45334867", "text": "def get_netstat(ip_add):\n try:\n client = SSH(ip_add)\n ss = \"ss -tnp | grep ESTAB | awk '{print $4, $5}'| sed 's/::ffff://g' \\\n | awk -F: '{print $1, $2}' | awk 'NF > 0' | sort -n | uniq -c\"\n\n ss = client.sendCommand(ss)\n data = ss.strip().split('\\n')\n \n data = [i.split(None, 4) for i in data]\n\n except Exception as err:\n data = str(err)\n\n return data", "title": "" }, { "docid": "20c82add2afbea1eaa0bd459bbf5a37c", "score": "0.4532632", "text": "def make_qr_code_data(self) -> str:\n\n wifi_config = \"WIFI:\"\n if self.ssid:\n wifi_config += \"S:%s;\" % _escape_mecard_special_chars(self.ssid)\n if self.authentication:\n wifi_config += \"T:%s;\" % WifiConfig.AUTHENTICATION_CHOICES[self.authentication][1]\n if self.password:\n wifi_config += \"P:%s;\" % _escape_mecard_special_chars(self.password)\n if self.hidden:\n wifi_config += \"H:%s;\" % str(self.hidden).lower()\n wifi_config += \";\"\n return wifi_config", "title": "" } ]
c9ca1502783cde9e7254c22151a34af7
This method is the workhorse of the browser. It handles screen drawing and the keyboard.
[ { "docid": "adee66d3b690a632aad4bc601d68f8b2", "score": "0.0", "text": "def _dodisplay(self, scr):\n self.scr = scr\n curses.halfdelay(1)\n footery = 2\n\n keys = []\n for cmd in (\"quit\", \"help\"):\n key = self.keymap.findkey(cmd, None)\n if key is not None:\n keys.append(\"%s=%s\" % (self.keylabel(key), cmd))\n helpmsg = \" | %s\" % \" \".join(keys)\n\n scr.clear()\n msg = \"Fetching first batch of objects...\"\n (self.scrsizey, self.scrsizex) = scr.getmaxyx()\n scr.addstr(self.scrsizey//2, (self.scrsizex-len(msg))//2, msg)\n scr.refresh()\n\n lastc = -1\n\n self.levels = []\n # enter the first level\n self.enter(self.input, *self.attrs)\n\n self._calcheaderlines(None)\n\n while True:\n level = self.levels[-1]\n (self.scrsizey, self.scrsizex) = scr.getmaxyx()\n level.mainsizey = self.scrsizey-1-self._headerlines-footery\n\n # Paint object header\n for i in xrange(self._firstheaderline, self._firstheaderline+self._headerlines):\n lv = self.levels[i]\n posx = 0\n posy = i-self._firstheaderline\n endx = self.scrsizex\n if i: # not the first level\n msg = \" (%d/%d\" % (self.levels[i-1].cury, len(self.levels[i-1].items))\n if not self.levels[i-1].exhausted:\n msg += \"+\"\n msg += \") \"\n endx -= len(msg)+1\n posx += self.addstr(posy, posx, 0, endx, \" ibrowse #%d: \" % i, self.style_objheadertext)\n for (style, text) in lv.header:\n posx += self.addstr(posy, posx, 0, endx, text, self.style_objheaderobject)\n if posx >= endx:\n break\n if i:\n posx += self.addstr(posy, posx, 0, self.scrsizex, msg, self.style_objheadernumber)\n posx += self.addchr(posy, posx, 0, self.scrsizex, \" \", self.scrsizex-posx, self.style_objheadernumber)\n\n if not level.items:\n self.addchr(self._headerlines, 0, 0, self.scrsizex, \" \", self.scrsizex, self.style_colheader)\n self.addstr(self._headerlines+1, 0, 0, self.scrsizex, \" <empty>\", astyle.style_error)\n scr.clrtobot()\n else:\n # Paint column headers\n scr.move(self._headerlines, 0)\n scr.addstr(\" %*s \" % (level.numbersizex, \"#\"), self.getstyle(self.style_colheader))\n scr.addstr(self.headersepchar, self.getstyle(self.style_colheadersep))\n begx = level.numbersizex+3\n posx = begx-level.datastartx\n for attr in level.displayattrs:\n attrname = attr.name()\n cwidth = level.colwidths[attr]\n header = attrname.ljust(cwidth)\n if attr is level.displayattr[1]:\n style = self.style_colheaderhere\n else:\n style = self.style_colheader\n posx += self.addstr(self._headerlines, posx, begx, self.scrsizex, header, style)\n posx += self.addstr(self._headerlines, posx, begx, self.scrsizex, self.headersepchar, self.style_colheadersep)\n if posx >= self.scrsizex:\n break\n else:\n scr.addstr(\" \"*(self.scrsizex-posx), self.getstyle(self.style_colheader))\n\n # Paint rows\n posy = self._headerlines+1+level.datastarty\n for i in xrange(level.datastarty, min(level.datastarty+level.mainsizey, len(level.items))):\n cache = level.items[i]\n if i == level.cury:\n style = self.style_numberhere\n else:\n style = self.style_number\n\n posy = self._headerlines+1+i-level.datastarty\n posx = begx-level.datastartx\n\n scr.move(posy, 0)\n scr.addstr(\" %*d%s\" % (level.numbersizex, i, \" !\"[cache.marked]), self.getstyle(style))\n scr.addstr(self.headersepchar, self.getstyle(self.style_sep))\n\n for attrname in level.displayattrs:\n cwidth = level.colwidths[attrname]\n try:\n (align, length, parts) = level.displayrows[i-level.datastarty][attrname]\n except KeyError:\n align = 2\n style = astyle.style_nodata\n if i == level.cury:\n style = self.getstylehere(style)\n padstyle = 
self.style_datapad\n sepstyle = self.style_sep\n if i == level.cury:\n padstyle = self.getstylehere(padstyle)\n sepstyle = self.getstylehere(sepstyle)\n if align == 2:\n posx += self.addchr(posy, posx, begx, self.scrsizex, self.nodatachar, cwidth, style)\n else:\n if align == 1:\n posx += self.addchr(posy, posx, begx, self.scrsizex, self.datapadchar, cwidth-length, padstyle)\n elif align == 0:\n pad1 = (cwidth-length)//2\n pad2 = cwidth-length-len(pad1)\n posx += self.addchr(posy, posx, begx, self.scrsizex, self.datapadchar, pad1, padstyle)\n for (style, text) in parts:\n if i == level.cury:\n style = self.getstylehere(style)\n posx += self.addstr(posy, posx, begx, self.scrsizex, text, style)\n if posx >= self.scrsizex:\n break\n if align == -1:\n posx += self.addchr(posy, posx, begx, self.scrsizex, self.datapadchar, cwidth-length, padstyle)\n elif align == 0:\n posx += self.addchr(posy, posx, begx, self.scrsizex, self.datapadchar, pad2, padstyle)\n posx += self.addstr(posy, posx, begx, self.scrsizex, self.datasepchar, sepstyle)\n else:\n scr.clrtoeol()\n\n # Add blank row headers for the rest of the screen\n for posy in xrange(posy+1, self.scrsizey-2):\n scr.addstr(posy, 0, \" \" * (level.numbersizex+2), self.getstyle(self.style_colheader))\n scr.clrtoeol()\n\n posy = self.scrsizey-footery\n # Display footer\n scr.addstr(posy, 0, \" \"*self.scrsizex, self.getstyle(self.style_footer))\n\n if level.exhausted:\n flag = \"\"\n else:\n flag = \"+\"\n\n endx = self.scrsizex-len(helpmsg)-1\n scr.addstr(posy, endx, helpmsg, self.getstyle(self.style_footer))\n\n posx = 0\n msg = \" %d%s objects (%d marked): \" % (len(level.items), flag, level.marked)\n posx += self.addstr(posy, posx, 0, endx, msg, self.style_footer)\n try:\n item = level.items[level.cury].item\n except IndexError: # empty\n pass\n else:\n for (nostyle, text) in ipipe.xrepr(item, \"footer\"):\n if not isinstance(nostyle, int):\n posx += self.addstr(posy, posx, 0, endx, text, self.style_footer)\n if posx >= endx:\n break\n\n attrstyle = [(astyle.style_default, \"no attribute\")]\n attr = level.displayattr[1]\n if attr is not ipipe.noitem and not isinstance(attr, ipipe.SelfDescriptor):\n posx += self.addstr(posy, posx, 0, endx, \" | \", self.style_footer)\n posx += self.addstr(posy, posx, 0, endx, attr.name(), self.style_footer)\n posx += self.addstr(posy, posx, 0, endx, \": \", self.style_footer)\n try:\n value = attr.value(item)\n except (SystemExit, KeyboardInterrupt):\n raise\n except Exception, exc:\n value = exc\n if value is not ipipe.noitem:\n attrstyle = ipipe.xrepr(value, \"footer\")\n for (nostyle, text) in attrstyle:\n if not isinstance(nostyle, int):\n posx += self.addstr(posy, posx, 0, endx, text, self.style_footer)\n if posx >= endx:\n break\n\n try:\n # Display input prompt\n if self.mode in self.prompts:\n history = self.prompts[self.mode]\n posx = 0\n posy = self.scrsizey-1\n posx += self.addstr(posy, posx, 0, endx, history.prompt, astyle.style_default)\n posx += self.addstr(posy, posx, 0, endx, \" [\", astyle.style_default)\n if history.cury==-1:\n text = \"new\"\n else:\n text = str(history.cury+1)\n posx += self.addstr(posy, posx, 0, endx, text, astyle.style_type_number)\n if history.history:\n posx += self.addstr(posy, posx, 0, endx, \"/\", astyle.style_default)\n posx += self.addstr(posy, posx, 0, endx, str(len(history.history)), astyle.style_type_number)\n posx += self.addstr(posy, posx, 0, endx, \"]: \", astyle.style_default)\n inputstartx = posx\n posx += self.addstr(posy, posx, 0, endx, history.input, 
astyle.style_default)\n # Display report\n else:\n if self._report is not None:\n if isinstance(self._report, Exception):\n style = self.getstyle(astyle.style_error)\n if self._report.__class__.__module__ == \"exceptions\":\n msg = \"%s: %s\" % \\\n (self._report.__class__.__name__, self._report)\n else:\n msg = \"%s.%s: %s\" % \\\n (self._report.__class__.__module__,\n self._report.__class__.__name__, self._report)\n else:\n style = self.getstyle(self.style_report)\n msg = self._report\n scr.addstr(self.scrsizey-1, 0, msg[:self.scrsizex], style)\n self._report = None\n else:\n scr.move(self.scrsizey-1, 0)\n except curses.error:\n # Protect against errors from writing to the last line\n pass\n scr.clrtoeol()\n\n # Position cursor\n if self.mode in self.prompts:\n history = self.prompts[self.mode]\n scr.move(self.scrsizey-1, inputstartx+history.curx)\n else:\n scr.move(\n 1+self._headerlines+level.cury-level.datastarty,\n level.numbersizex+3+level.curx-level.datastartx\n )\n scr.refresh()\n\n # Check keyboard\n while True:\n c = scr.getch()\n if self.resized:\n size = fcntl.ioctl(0, tty.TIOCGWINSZ, \"12345678\")\n size = struct.unpack(\"4H\", size)\n oldsize = scr.getmaxyx()\n scr.erase()\n curses.resize_term(size[0], size[1])\n newsize = scr.getmaxyx()\n scr.erase()\n for l in self.levels:\n l.mainsizey += newsize[0]-oldsize[0]\n l.moveto(l.curx, l.cury, refresh=True)\n scr.refresh()\n self.resized = False\n break # Redisplay\n if self.mode in self.prompts:\n if self.prompts[self.mode].handlekey(self, c):\n break # Redisplay\n else:\n # if no key is pressed slow down and beep again\n if c == -1:\n self.stepx = 1.\n self.stepy = 1.\n self._dobeep = True\n else:\n # if a different key was pressed slow down and beep too\n if c != lastc:\n lastc = c\n self.stepx = 1.\n self.stepy = 1.\n self._dobeep = True\n cmdname = self.keymap.get(c, None)\n if cmdname is None:\n self.report(\n UnassignedKeyError(\"Unassigned key %s\" %\n self.keylabel(c)))\n else:\n cmdfunc = getattr(self, \"cmd_%s\" % cmdname, None)\n if cmdfunc is None:\n self.report(\n UnknownCommandError(\"Unknown command %r\" %\n (cmdname,)))\n elif cmdfunc():\n returnvalue = self.returnvalue\n self.returnvalue = None\n return returnvalue\n self.stepx = self.nextstepx(self.stepx)\n self.stepy = self.nextstepy(self.stepy)\n curses.flushinp() # get rid of type ahead\n break # Redisplay\n self.scr = None", "title": "" } ]
[ { "docid": "0d8778a822da0bea2b74406879196b37", "score": "0.73315173", "text": "def draw(self, screen):\n pass", "title": "" }, { "docid": "6984ab10ce449f006f1e8504d628c9cc", "score": "0.6826859", "text": "def draw(self):\n self.canvas_draw()\n self.screen.blit(self.canvas, (0, 0))", "title": "" }, { "docid": "c829e6764a0c011e85c0f254fa6a1832", "score": "0.6716292", "text": "def _draw(self, stdscr):\n\n self._stdscr = stdscr\n key_pressed = 0\n\n # Clear and refresh the screen for a blank canvas\n stdscr.clear()\n stdscr.refresh()\n curses.mousemask(curses.ALL_MOUSE_EVENTS)\n # stdscr.nodelay(False)\n #stdscr.keypad(True)\n\n # Initialization functions. Generates colors and renderer\n self._initialize_colors()\n self._initialize_widget_renderer()\n \n # If user specified a refresh timeout, apply it here\n if self._refresh_timeout > 0:\n self._stdscr.timeout(self._refresh_timeout)\n\n # If user sets non-default border characters, update them here\n if self._border_characters is not None:\n self._renderer._set_border_renderer_chars(self._border_characters)\n\n # Loop where key_pressed is the last character pressed. Wait for exit key while no popup or focus mode\n while key_pressed != self._exit_key or self._in_focused_mode or self._popup is not None:\n\n try:\n # If we call stop, we want to break out of the main draw loop\n if self._stopped:\n break\n\n # Initialization and size adjustment\n stdscr.erase()\n\n # find height width, adjust if status/title bar added. We decrement the height by 4 to account for status/title bar and padding\n if self._simulated_terminal is None:\n height, width = stdscr.getmaxyx()\n else:\n height = self._simulated_terminal[0]\n width = self._simulated_terminal[1]\n\n height = height - 4\n\n # If the user defined an update function to fire on each draw call,\n # Run it here. This can of course be also handled user-side\n # through a separate thread.\n if self._on_draw_update_func is not None:\n self._on_draw_update_func()\n\n # This is what allows the CUI to be responsive. 
Adjust grid size based on current terminal size\n # Resize the grid and the widgets if there was a resize operation\n if key_pressed == curses.KEY_RESIZE:\n self._logger.info('Resizing CUI to new dimensions {} by {}'.format(height, width))\n try:\n self._refresh_height_width(height, width)\n except py_cui.errors.PyCUIOutOfBoundsError as e:\n self._logger.info('Resized terminal too small')\n self._display_window_warning(stdscr, str(e))\n\n # Here we handle mouse click events globally, or pass them to the UI element to handle\n elif key_pressed == curses.KEY_MOUSE:\n self._logger.info('Detected mouse click')\n _, x, y, _, _ = curses.getmouse()\n in_element = self.get_element_at_position(x, y)\n\n # In first case, we click inside already selected widget, pass click for processing\n if in_element is not None and in_element.is_selected():\n in_element._handle_mouse_press(x, y)\n # Otherwise, if not a popup, select the clicked on widget\n elif in_element is not None and not isinstance(in_element, py_cui.popups.Popup):\n self.move_focus(in_element)\n in_element._handle_mouse_press(x, y)\n\n # If we have a post_loading_callback, fire it here\n if self._post_loading_callback is not None and not self._loading:\n self._logger.info('Firing post-loading callback function {}'.format(self._post_loading_callback.__name__))\n self._post_loading_callback()\n self._post_loading_callback = None\n\n # Handle widget cycling\n if key_pressed == self._forward_cycle_key:\n self._cycle_widgets()\n elif key_pressed == self._reverse_cycle_key:\n self._cycle_widgets(reverse=True)\n\n # Handle keypresses\n self._handle_key_presses(key_pressed)\n\n try:\n # Draw status/title bar, and all widgets. Selected widget will be bolded.\n self._draw_status_bars(stdscr, height, width)\n self._draw_widgets()\n # draw the popup if required\n if self._popup is not None:\n self._popup._draw()\n except curses.error as e:\n self._logger.error('Curses error while drawing TUI')\n self._display_window_warning(stdscr, str(e))\n except py_cui.errors.PyCUIOutOfBoundsError as e:\n self._logger.error('Resized terminal too small')\n self._display_window_warning(stdscr, str(e))\n\n # Refresh the screen\n stdscr.refresh()\n\n # Wait for next input\n if self._loading or self._post_loading_callback is not None:\n # When loading, refresh screen every quarter second\n time.sleep(0.25)\n # Need to reset key_pressed, because otherwise the previously pressed key will be used.\n key_pressed = 0\n elif self._stopped:\n key_pressed = self._exit_key\n else:\n self._logger.info('Waiting for next keypress')\n key_pressed = stdscr.getch()\n\n except KeyboardInterrupt:\n self._logger.info('Detect Keyboard Interrupt, Exiting...')\n self._stopped = True\n\n\n stdscr.erase()\n stdscr.refresh()\n curses.endwin()\n if self._on_stop is not None:\n self._logger.info('Firing onstop function {}'.format(self._on_stop.__name__))\n self._on_stop()", "title": "" }, { "docid": "0b59f4d1cdcb3d2cfb44c4c5866ac872", "score": "0.6667962", "text": "def main(self) -> None:\n while True:\n self.render()\n self.kinput(self.inkey())", "title": "" }, { "docid": "f652eb1f8f04e52fbed8883590429dab", "score": "0.6603405", "text": "def draw(self):\n self._draw_background()\n\n self._draw_back_button()\n self._draw_volume()\n self._draw_switch()\n self._draw_intro()\n\n self._play_sound()\n self._draw_cursor()", "title": "" }, { "docid": "2d48fcbb243ea2e5a265db8a6c4d22d0", "score": "0.65910286", "text": "def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode(App.size)\n 
self.running = True\n self.stepping = True\n\n self.rect = Rect((0, 0), App.size)\n self.draw_options = pymunk.pygame_util.DrawOptions(self.screen)\n self.dt = 1/50\n\n self.shortcuts = {\n K_a: 'Arrow(get_mouse_pos(self.screen), color=BLACK)',\n K_b: 'Rectangle(get_mouse_pos(self.screen), color=GREEN)',\n K_v: 'Rectangle(get_mouse_pos(self.screen), color=BLUE)',\n \n K_c: 'Circle(get_mouse_pos(self.screen), color=RED)',\n K_n: 'self.next_space()',\n \n K_q: 'self.running = False',\n K_ESCAPE: 'self.running = False',\n K_SPACE: 'self.stepping = not self.stepping',\n\n K_1: 'self.draw_options.flags ^= 1',\n K_2: 'self.draw_options.flags ^= 2',\n K_3: 'self.draw_options.flags ^= 4',\n\n K_p: 'self.capture()',\n K_s: 'App.current.space.step(self.dt)',\n K_z: 'App.current.remove_all()',\n K_g: 'App.current.space.gravity = 0, 0',\n }", "title": "" }, { "docid": "0f2fc1c850a2657be283b2f74731cec4", "score": "0.6585104", "text": "def render(self):\n self.screen.fill((0, 0, 0))", "title": "" }, { "docid": "6974fae2f9367fcba6e3b36d214d142a", "score": "0.6557537", "text": "def main_menu(self):\n self.screen.draw()", "title": "" }, { "docid": "f2a378636c00fa0dc40ed59f82708445", "score": "0.6546349", "text": "def draw():\n pass", "title": "" }, { "docid": "e34a84341cc136beb080ca67d4ca066f", "score": "0.6531087", "text": "def draw(self):\n if self._screen:\n self._screen.blit(self._background, (0, 0))", "title": "" }, { "docid": "31c0baada971a89459a01345c7ca0f0a", "score": "0.64453775", "text": "def draw(self):\n super().draw()\n self._screen.blit(self._title, self._title_pos)\n self._screen.blit(self._press_any_key, self._press_any_key_pos)\n self.draw_instructions()", "title": "" }, { "docid": "6ce58af07825059a533742b036c78057", "score": "0.6440217", "text": "def draw(self):\n self.screen.fill(c.BACKGROUND_COLOR)\n self.draw_lines()\n self.draw_blocks()\n pygame.display.update()", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.64248246", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.64248246", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.64248246", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.64248246", "text": "def draw(self):\n pass", "title": "" }, { "docid": "aab025ba212b74433fa00ac1d62d1700", "score": "0.64248246", "text": "def draw(self):\n pass", "title": "" }, { "docid": "4636c66e249f5b0f2ee4e3983b08e9ec", "score": "0.63954973", "text": "def drawFrame(self):\n\n #call on the active screen to draw itself\n self.activeScreen.drawFrame()", "title": "" }, { "docid": "569f2ef5ea0a224078c0a13ff5363dc4", "score": "0.6390531", "text": "def draw(self):\r\n screen.fill(black)\r\n self.draw_text()\r\n pygame.display.flip()", "title": "" }, { "docid": "d96b464ded56692bfc9c1c1b72256cd7", "score": "0.6382346", "text": "def draw_interface(self):\n\n maxy, maxx = self.__scr.getmaxyx()\n\n self.__win_top = curses.newwin(maxy - self.__bottom_height,\n maxx,\n 0,\n 0)\n self.__win_top.keypad(True)\n\n self.__win_bottom = curses.newwin(self.__bottom_height,\n maxx,\n maxy - self.__bottom_height,\n 0)\n self.__win_bottom.keypad(True)\n\n self.__win_top.box()\n self.__win_top.refresh()\n self.__win_bottom.box()\n self.__win_bottom.refresh()", "title": "" }, { "docid": "d79cb84e71200aa0b65c904ebb9180ec", "score": "0.63765246", "text": "def draw_screen(self):\n if not self.screen_size:\n self.screen_size = 
self.screen.get_cols_rows()\n\n canvas = self._topmost_widget.render(self.screen_size, focus=True)\n self.screen.draw_screen(self.screen_size, canvas)", "title": "" }, { "docid": "33993cdb95e33020a085b11a7ec7254d", "score": "0.63749653", "text": "def run(self):\n while True:\n os.system('cls')\n self.draw()\n sleep(.1)\n self.update_all()", "title": "" }, { "docid": "8e3194e2ecc75d49d9d11554894702e3", "score": "0.63628936", "text": "def run(self):\n while 1:\n\n #we clear and prepare the draw_list for the canvas\n self.clear()\n events = pygame.event.get()\n for e in events:\n\n #Set quit state when window is closed\n if e.type == pygame.QUIT :\n self.QUIT = True\n if e.type == KEYDOWN:\n #Set quit state on Esc key press\n if e.key == K_ESCAPE:\n self.QUIT = True\n\n if self.QUIT:\n #Exit pygame gracefully\n pygame.quit()\n sys.exit(0)\n\n #call the mouse handler with current events\n self.mouse_handler(events)\n\n #Process any drawing that needs to be done\n self.draw()\n\n #flip the display\n pygame.display.flip()", "title": "" }, { "docid": "dda8875d9565c20dd0f6a7a29519b0f9", "score": "0.6353887", "text": "def event_loop(self):\n for event in pg.event.get():\n if event.type == pg.KEYDOWN:\n self.keys = pg.key.get_pressed()\n self.toggle_show_fps(event.key)\n if event.key == pg.K_PRINT:\n # Print screen for full render-sized screencaps.\n pg.image.save(self.render_surf, \"screenshot.png\")\n elif event.type == pg.KEYUP:\n self.keys = pg.key.get_pressed()\n elif event.type == pg.VIDEORESIZE:\n self.on_resize(event.size)\n pg.event.clear(pg.VIDEORESIZE)\n self.state_machine.get_event(event, self.scale)", "title": "" }, { "docid": "72e2802347daf766c8f090c46b628ab1", "score": "0.6353208", "text": "def entering_idle(self):\n if self.screen.started:\n self.draw_screen()", "title": "" }, { "docid": "547067896b1bdc76a4b3d1731128a63f", "score": "0.6351838", "text": "def draw(self, screen):\r\n self.round.draw(screen)", "title": "" }, { "docid": "3248b51c518d141ab442ac41542357ea", "score": "0.6339299", "text": "def refresh_screen(self):\n x, y = 0, 0\n\n while not self.comp.halted and not self.comp.input_from_keyboard:\n self.comp.run_until_output()\n\n if self.comp.output == 10: # \"10 starts a new line of output below the current one...\"\n x = 0\n y += 1\n else:\n self.screen[(x, y)] = chr(self.comp.output)\n x += 1\n self.find_screen_edges()\n self.find_robot()", "title": "" }, { "docid": "2656beca9cd8a154d917672ea0a3baa3", "score": "0.6330517", "text": "def update_screen(self):\n if self.previous_screennumber != self.screennumber:\n self.previous_screennumber = self.screennumber\n self.screen.blit(self.background, (0, 0))\n self.draw_text()\n self.update_buttons()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n pygame.display.update(self.update_rects)\n self.update_rects.clear()", "title": "" }, { "docid": "f2f3b80ec1c5c9a105d4cda9874e513e", "score": "0.6326743", "text": "def draw(self):\n self.window.clear()\n \n self.window.view = sf.View.from_center_and_size(self.view_size / 2.,\n self.view_size) \n if self.DEBUG:\n self.draw_FPS()\n\n # Draw the start screen.\n if self.state == 'start':\n text = sf.Text(\"Multitroids\", self.FONT, 200)\n text.scale = sf.Vector2f(0.5, 0.5)\n text.position = sf.Vector2f(self.view_size.x / 2.0 -\n text.rect.width / 2.0,\n self.view_size.y / 2.0 -\n text.rect.height - 30)\n self.window.draw(text)\n text = sf.Text(\"Arrow keys to fly\\nSpacebar to shoot and restart\"+\n \"\\n\\n Press spacebar to begin\",\n 
self.FONT, 60)\n text.scale = sf.Vector2f(0.5, 0.5)\n text.position = sf.Vector2f(self.view_size.x / 2.0 -\n text.rect.width / 2.0,\n self.view_size.y / 2.0 +\n text.rect.height / 2.0 - 30)\n self.window.draw(text)\n text = sf.Text(\"www.mattkeeter.com\",\n self.FONT, 50)\n text.scale = sf.Vector2f(0.5, 0.5)\n text.position = sf.Vector2f(15,\n self.view_size.y -\n text.rect.height * 1.5)\n self.window.draw(text)\n self.window.display()\n return\n # Draw the end screen.\n elif self.state == 'win':\n text = sf.Text(\"YOU WIN\", self.FONT, 160)\n text.scale = sf.Vector2f(0.5, 0.5)\n text.position = sf.Vector2f(self.view_size.x / 2.0 -\n text.rect.width / 2.0,\n self.view_size.y / 2.0 -\n text.rect.height)\n self.window.draw(text)\n text = sf.Text(\"Ships: %d\" % len(self.players), self.FONT, 60)\n text.scale = sf.Vector2f(0.5, 0.5)\n text.position = sf.Vector2f(self.view_size.x / 2.0 -\n text.rect.width / 2.0,\n self.view_size.y / 2.0 +\n text.rect.height / 2.0)\n self.window.draw(text)\n text = sf.Text(\"Press spacebar to restart\", self.FONT, 40)\n text.scale = sf.Vector2f(0.5, 0.5)\n text.position = sf.Vector2f(self.view_size.x / 2.0 -\n text.rect.width / 2.0,\n self.view_size.y / 2.0 +\n text.rect.height * 2.5)\n self.window.draw(text)\n self.window.display()\n return\n \n # Draw the rest of the game.\n for player in self.players: \n player.draw(self.window)\n [bullet.draw(self.window) for bullet in self.bullets]\n \n # Draw each asteroid at 8 positions, so that they wrap around cleanly.\n for offset in [sf.Vector2f(0, 0), sf.Vector2f(self.view_size.x, 0),\n sf.Vector2f(-self.view_size.x, 0),\n sf.Vector2f(0, self.view_size.y),\n sf.Vector2f(0, -self.view_size.y)]:\n [asteroid.draw(self.window, offset) for asteroid in self.asteroids] \n \n self.draw_HUD()\n self.window.display()", "title": "" }, { "docid": "44f86ba5f370c7a31e44a797c391c590", "score": "0.6312598", "text": "def draw_on_paper(self):\n # Create a drawing window\n self.paper[:] = 255\n cv2.namedWindow('Paper')\n cv2.imshow('Paper', self.paper)\n # Selecting draw mode\n while True:\n cv2.imshow('Paper', self.paper)\n self.key = cv2.waitKey(1)\n if self.key == 27 & 0xFF:\n break\n elif self.key == ord('1') & 0xFF:\n self.mode = 1\n elif self.key == ord('2') & 0xFF:\n self.mode = 2\n elif self.key == ord('3') & 0xFF:\n self.mode = 3\n elif self.key == ord('4') & 0xFF:\n self.color_bar()\n if self.fill == 0:\n self.draw_thickness[1] = self.draw_thickness[0]\n elif self.fill == 1:\n self.draw_thickness[1] = -1\n cv2.setMouseCallback('Paper', self.draw_mode)\n self.quit()", "title": "" }, { "docid": "7027f51fad6962208c4f0e9b0a731dd7", "score": "0.63084584", "text": "def _update_screen(self):\n self.WIN.fill(self.settings.bg_color)\n self.draw_map()\n self.player.drawme()\n self.player.rays()\n self.draw_view()\n\n pygame.display.flip()", "title": "" }, { "docid": "30baa7dd5f2fde2e3a9dc2bb884a9d23", "score": "0.62918526", "text": "def start(self) -> None:\n self._running = True\n while self._running:\n for event in py.event.get():\n self.event(event)\n \n self.draw()\n \n # cap at the given frame rate\n self._clock.tick(self._frame_rate)\n py.display.flip()\n\n print(\"Graphics has stopped.\")", "title": "" }, { "docid": "2d7ce8ba25efd43d9772cbc32f80eb90", "score": "0.62824243", "text": "def draw_screen(self, surf):\n # surf.fill(colors.black)\n if (self._render_rgb and self._obs.observation.HasField(\"render_data\") and\n self._obs.observation.render_data.HasField(\"map\")):\n self.draw_rendered_map(surf)\n else:\n 
self.draw_base_map(surf)\n self.draw_units(surf)\n self.draw_selection(surf)\n self.draw_build_target(surf)\n self.draw_overlay(surf)\n self.draw_commands(surf)\n self.draw_panel(surf)", "title": "" }, { "docid": "d540f92d5b633c975a04c66f66145a15", "score": "0.62716126", "text": "def draw(self):\n self._render()", "title": "" }, { "docid": "be473ae37043ea89cc5c9545242228ef", "score": "0.6268918", "text": "def draw(self) -> None:\n pass", "title": "" }, { "docid": "43a624124b79819bb281d9bad0c6f9b8", "score": "0.62554234", "text": "def start():\n while True:\n with canvas(device) as draw:\n # Handle full screen states\n if device_state.get_state() == STATE_SHUTTING_DOWN:\n display_text(draw)\n time.sleep(0.2)\n continue\n if device_state.get_state() == STATE_ACTIVE_ALERT:\n display_alert(draw)\n continue\n # Draw system status icons\n display_wifi(draw)\n display_time(draw)\n display_cellular(draw)\n\n if device_state.get_state() == STATE_MENU:\n display_menu(draw)\n else:\n display_text(draw)\n time.sleep(0.2)", "title": "" }, { "docid": "0004de9d0d283c390615318d1be6f341", "score": "0.6241859", "text": "def draw(self, screen: pygame.Surface) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "417200ca9df36a5a5cb689e194604ddf", "score": "0.6239165", "text": "def draw(self) -> None:\n pg.display.flip()", "title": "" }, { "docid": "f485108e918e52fc295f19746848c078", "score": "0.6223229", "text": "def display_go_screen(self):\r\n self.screen.fill(Colors.WHITE)\r\n self.draw_text(\"The END\", 32, Colors.BLUE, WIDTH // 2, HEIGHT // 2)\r\n pygame.display.update()\r\n self.wait_for_key(end=True)", "title": "" }, { "docid": "410b4ee934dc364fe2da1bdb10b0af90", "score": "0.6218644", "text": "def draw(self):\n\n #################\n # Your code here\n #################\n pass", "title": "" }, { "docid": "6c0e46d4f4a5ba96dbf502916c67755d", "score": "0.6201081", "text": "def draw(self):\n self.screen.blit(self.img, (0, 0))", "title": "" }, { "docid": "4f04523b77c90f22a57e8d53b20fee77", "score": "0.61961883", "text": "def draw(self):\n\t\tself.screen.fill(pygame.Color('black')) #have a black background\n\t\t#draw the music bars to the screen\n\t\tfor bar in self.model.bars:\n\t\t\tpygame.draw.rect(self.screen, pygame.Color(bar.color),bar)\n\t\t#draw the character to the screen\n\t\tpygame.draw.rect(self.screen, pygame.Color('white'), self.model.character)\n\t\tpygame.display.update()", "title": "" }, { "docid": "b37ba13dbf6b8fbd67622cb6e13ee5a6", "score": "0.61925995", "text": "def render(self):\n dirty_rects = self.all_sprites.draw(self.screen)\n pg.display.update(dirty_rects)", "title": "" }, { "docid": "eccc97af325d0d5dee96eea7156117e1", "score": "0.61864895", "text": "def update(self):\n\t\tself.draw_bg()\n\t\tself.screen_image.blit(self.bg_image, (0, 0))\n\t\tpane_coords = SCREEN_DATA_MAP[self.screen_key][COORDS]\n\t\tself.screen_image.blit(self.draw_select_pane(), ( pane_coords[0], pane_coords[1]) )", "title": "" }, { "docid": "26c34a74a62724d7fe32e53096525812", "score": "0.6176666", "text": "def run(self):\r\n self._do_loop()\r\n self._screen.start_screen()", "title": "" }, { "docid": "5ad33ae725205e5bdf8f133782ff134d", "score": "0.6175062", "text": "def draw(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.message_image, self.message_image_rect)", "title": "" }, { "docid": "542581a522cf3ea23124a21bbaa24bdc", "score": "0.6149665", "text": "def draw(self,screen) :\n\t\traise Exception('The draw(screen) method must be overridden in custom AetherModules')", 
"title": "" }, { "docid": "fc6b48a3eb2c03547d51b082fa18ef2f", "score": "0.6142622", "text": "def draw(self) -> None:\n\n tcod.console_set_default_foreground(self.console, tcod.white)\n\n self.map.draw(self.console, self.colors)\n\n tcod.console_blit(self.console, 0, 0, self.w, self.h, 0, 0, 0)", "title": "" }, { "docid": "a889fa2ff674d53eb7025dc7bb3100c0", "score": "0.61371577", "text": "def run(self):\n \n # Begin loop\n while self.running:\n self.handle_input()\n self.update()\n if self.running:\n self.draw()\n# if self.RECORDING:\n# self.image.copy_screen(self.window)\n# self.image.save_to_file(\"frames/\"+str(self.frameNo) + \".png\")\n# self.frameNo += 1\n self.shutdown()", "title": "" }, { "docid": "4b91658181b747d7641f81018ec48403", "score": "0.6122671", "text": "def draw(self):\n App.screen.blit(self.img, self.rect)", "title": "" }, { "docid": "4b91658181b747d7641f81018ec48403", "score": "0.6122671", "text": "def draw(self):\n App.screen.blit(self.img, self.rect)", "title": "" }, { "docid": "5411a60c25ea209bb74da45a01d5e2f2", "score": "0.61212724", "text": "def main ():\n global drawing_canvas;\n global drawing;\n\n # set up the window with appropriate dimensions \n window = Tk();\n window.wm_title( \"Character Drawer\" );\n window.resizable( width=False, height=False );\n window.geometry( '{}x{}'.format( WINDOW_WIDTH, WINDOW_HEIGHT ) );\n window.configure( background='white' );\n\n # --- create the drawing canvas with its grid ---\n\n # initialize the canvas with constant dimensions\n drawing_canvas = Canvas( window, width=DC_SIZE_PX + 1, \n height=DC_SIZE_PX + 1, background='white', highlightbackground='white', \n highlightcolor='white' );\n \n # load the drawing canvas\n drawing_canvas.pack( padx=PADDING, pady=PADDING );\n\n redraw_canvas();\n\n drawing_canvas.bind( \"<Motion>\", move_mouse )\n drawing_canvas.bind( \"<ButtonPress-1>\", press_mouse )\n drawing_canvas.bind( \"<ButtonRelease-1>\", release_mouse )\n\n # ---\n\n # --- add field to: enter text ---\n\n # frame to hold number entering stuff\n char_panel = Frame( window, bg='white');\n\n # create label\n char_drawn_lbl = Label( char_panel, text='Character Drawn:', bg='white' );\n char_drawn_lbl.pack( side=LEFT );\n\n # character entered, trace it to limit it to 1 character \n char_entered = StringVar()\n char_entered.trace( \"w\", lambda name, index, mode, sv=char_entered: \n char_callback( char_entered ) );\n\n # to store character entered; make sure that field only accepts \n # one character\n char_field = Entry( char_panel, textvariable=char_entered, width=1 );\n char_field.pack( side=LEFT );\n\n # display the char panel\n char_panel.pack();\n \n # ---\n\n # --- add buttons to: save image, clear canvas ---\n\n # frame to hold control panel widgets\n control_panel = Frame( window );\n\n # add save button\n save_btn = Button( control_panel, text='Save', command=save_button );\n save_btn.pack( side=LEFT );\n \n # add save button\n clear_btn = Button( control_panel, text='Clear', command=clear_button );\n clear_btn.pack( side=LEFT );\n\n # display the control panel\n control_panel.pack();\n\n # ---\n\n # --- set up the necessary folders ---\n\n # char folder does not already exist\n if not os.path.isdir( 'char_data' ):\n # make the char folder\n os.makedirs( 'char_data' );\n\n # ---\n\n # begin window loop\n window.mainloop();", "title": "" }, { "docid": "2c41c6814efc19ee3d5179676eef4fa4", "score": "0.61212593", "text": "def _mainloop(self):\n\n # ask for redraw by default\n self._redraw = True\n\n # initial state\n 
last_screen = None\n error_counter = 0\n\n # run until there is nothing else to display\n while self._screens:\n # process asynchronous events\n self.process_events()\n\n # if redraw is needed, separate the content on the screen from the\n # stuff we are about to display now\n if self._redraw:\n print(self._spacer)\n\n try:\n # draw the screen if redraw is needed or the screen changed\n # (unlikely to happen separately, but just be sure)\n if self._redraw or last_screen != self._screens[-1][0]:\n # we have fresh screen, reset error counter\n error_counter = 0\n if not self._do_redraw():\n # if no input processing is requested, go for another cycle\n continue\n\n last_screen = self._screens[-1][0]\n\n # get the screen's prompt\n try:\n prompt = last_screen.prompt(self._screens[-1][1])\n except ExitMainLoop:\n raise\n except Exception: # pylint: disable=broad-except\n send_exception(self.queue, sys.exc_info())\n continue\n\n # None means prompt handled the input by itself\n # ask for redraw and continue\n if prompt is None:\n self.redraw()\n continue\n\n # get the input from user\n c = self.raw_input(prompt)\n\n # process the input, if it wasn't processed (valid)\n # increment the error counter\n if not self.input(self._screens[-1][1], c):\n error_counter += 1\n else:\n # input was successfully processed, but no other screen was\n # scheduled, just redraw the screen to display current state\n self.redraw()\n\n # redraw the screen after 5 bad inputs\n if error_counter >= 5:\n self.redraw()\n\n # propagate higher to end all loops\n # not really needed here, but we might need\n # more processing in the future\n except ExitAllMainLoops:\n raise\n\n # end just this loop\n except ExitMainLoop:\n break", "title": "" }, { "docid": "f3314ed847fa508e77f9a1b2aac61244", "score": "0.61198556", "text": "def on_draw(self):\n from pyglet import gl\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n # gl.glMatrixMode(gl.GL_MODELVIEW)\n # gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n\n if self._video_player:\n if self._video_player.source:\n self._draw_video()\n elif self._bg:\n self._draw_background()\n\n if window._handler.state == GAME_STATE.GAME_PAUSED:\n arcade.draw_rectangle_filled(window.width//2, window.height//5, 200, 50, arcade.color.BLACK)\n arcade.draw_text(\"Press 'P' to play\", window.width//2, window.height//5, arcade.color.WHITE, 16, align='center', anchor_x='center', anchor_y='center')\n if self._keyboard:\n self._draw_keyboard()\n\n self._draw_fx()\n\n self._draw_clock()\n self._draw_combo()\n self._draw_score()\n self._draw_total_accuracy()\n self._draw_overall_grade()\n self._draw_accuracy_bar()\n\n self._draw_game_time()\n\n self._draw_fps()\n\n self._draw_pointer()", "title": "" }, { "docid": "f75f42ebbb2c20f610bf09ddf2bd0b06", "score": "0.61182964", "text": "def _run_screen_event_loop(self):\n next_alarm = None\n\n while True:\n self.draw_screen()\n\n if not next_alarm and self.event_loop._alarms:\n next_alarm = heapq.heappop(self.event_loop._alarms)\n\n keys = None\n while not keys:\n if next_alarm:\n sec = max(0, next_alarm[0] - time.time())\n self.screen.set_input_timeouts(sec)\n else:\n self.screen.set_input_timeouts(None)\n keys, raw = self.screen.get_input(True)\n if not keys and next_alarm: \n sec = next_alarm[0] - time.time()\n if sec <= 0:\n break\n\n keys = self.input_filter(keys, raw)\n \n if keys:\n self.process_input(keys)\n \n while next_alarm:\n sec = next_alarm[0] - time.time()\n if sec > 0:\n break\n tm, callback, user_data = next_alarm\n callback(self, 
user_data)\n \n if self._alarms:\n next_alarm = heapq.heappop(self.event_loop._alarms)\n else:\n next_alarm = None\n \n if 'window resize' in keys:\n self.screen_size = None", "title": "" }, { "docid": "8c5ef3deb865e7f8e2192abcc8c2c735", "score": "0.61129075", "text": "def draw_gui(self):\n # TODO: get the gui stuff going.\n pass", "title": "" }, { "docid": "23c4a981d7230c87a6f1bbf174aab9a0", "score": "0.6110656", "text": "def display_menu(self):\n\n self.page = 0\n self.state = \"0\"\n self.cursor_rect.midtop = (self.firstx + self.offset, self.firsty)\n self.run_display = True\n while self.run_display:\n self.event_handler.handle_events()\n self._check_input()\n self.renderer.render_lab_selection_menu(self.lab_selection[self.page])\n self.renderer.draw_cursor(self.cursor_rect.x, self.cursor_rect.y)\n self.renderer.blit_screen()", "title": "" }, { "docid": "5b17430d52420e8457fcc9b04822ce6e", "score": "0.6106061", "text": "def draw(self):\n #log.debug(\"Drawing\")\n self.programs['debugText'].reset()\n self.ctx.clear(color=self.bgColor, depth=1000000)\n #self.programs['test'].run()\n self.programs['sfa'].run()\n #self.programs['debugText'].printf(\"Howdy thar\")\n self.programs['debugText'].run()\n\n #log.debug(\"Draw OK\")", "title": "" }, { "docid": "c8ae2c0bc033db955782525210fb825e", "score": "0.6104869", "text": "def draw(self):\r\n screen.fill(black)\r\n screen.blit(self.image, self.rect)\r\n self.draw_text()\r\n pygame.display.flip()", "title": "" }, { "docid": "48acbf9e10b9d5510f54e5bee7b5a2b5", "score": "0.6103552", "text": "def main(width=480, height=480):\r\n global win\r\n win = Win(title='CANVAS', grow=False)\r\n win.canvas = Canvas(win, width=width, height=height)\r\n win.images = [Image(file=\"smiley%s.png\" % name) for name in '123456']\r\n win.width, win.height = width, height\r\n # ----------------------------------------------------------------------------\r\n draw_rect(); wait(); draw_oval(); wait(); draw_line(); wait()\r\n draw_curve(); wait(); draw_text(); wait(); draw_image(); win.loop()", "title": "" }, { "docid": "1b4646de77017eff2fe1ed4b9d959a3f", "score": "0.6098264", "text": "def draw(self, screen):\n text = \"Welcome to the Game! \\n \\nTo start the game please press the following buttons \\n \\n-To play an Easy Mode Press 1 \\n \\n-To Play a Normal Mode Press 2 \\n \\n-To Play a Hard Mode Press 3\"\n #textsurface = self.font.render(\"This is Start Screen \\nPress the Space Button to start\", False, (0, 0, 0))\n screen.fill((75,166,193))\n utils.blit_text(screen, text, (99,150), self.font)\n return", "title": "" }, { "docid": "babb73b8384d29871580099a4678fc60", "score": "0.6096884", "text": "def draw(self, screen):\n text = \"How to Play the Game!? \\n \\n-To move up - Press the Up or W Button \\n \\n-To move down - Press the Down or S Button \\n \\n-To move left - Press the Left or A Button \\n \\n-To move right - Press the Right or D Button \\n \\nReady to Start? \\nLet's Go! 
Press the Space Button!\"\n screen.fill((75,166,193))\n utils.blit_text(screen, text, (30,30), self.font)", "title": "" }, { "docid": "3c3b55ef55c914ff09b2a34594b9b000", "score": "0.60880387", "text": "def draw(self):\n self.screen.blit(self.image, self.rect)", "title": "" }, { "docid": "23b639144ee15572a32cb8cc75615f7c", "score": "0.6088035", "text": "def draw(self):\n self.win.fill(COLORS[0])\n\n self.top_bar.draw(self.win)\n self.leaderboard.draw(self.win)\n self.board.draw(self.win)\n self.chat.draw(self.win)\n if self.is_drawing:\n self.bottom_bar.draw(self.win)\n else:\n self.person_is_drawing.draw(self.win, self.drawer)\n\n pygame.display.update()", "title": "" }, { "docid": "4777de5e3416a611bb4bb808032bf35e", "score": "0.60839015", "text": "def _mouse_handler(self, app, mouse_event):\n process = self.process\n x = mouse_event.position.x\n y = mouse_event.position.y\n\n # The containing Window translates coordinates to the absolute position\n # of the whole screen, but in this case, we need the relative\n # coordinates of the visible area.\n y -= self.process.screen.line_offset\n\n if not self.has_focus(app):\n # Focus this process when the mouse has been clicked.\n if mouse_event.event_type == MouseEventType.MOUSE_UP:\n # XXX: something like ............................. app.layout.focus(self)\n self.set_focus_cb(app)\n else:\n # Already focussed, send event to application when it requested\n # mouse support.\n if process.screen.sgr_mouse_support_enabled:\n # Xterm SGR mode.\n ev, m = {\n MouseEventType.MOUSE_DOWN: ('0', 'M'),\n MouseEventType.MOUSE_UP: ('0', 'm'),\n MouseEventType.SCROLL_UP: ('64', 'M'),\n MouseEventType.SCROLL_DOWN: ('65', 'M'),\n }.get(mouse_event.event_type)\n\n self.process.write_input(\n '\\x1b[<%s;%s;%s%s' % (ev, x + 1, y + 1, m))\n\n elif process.screen.urxvt_mouse_support_enabled:\n # Urxvt mode.\n ev = {\n MouseEventType.MOUSE_DOWN: 32,\n MouseEventType.MOUSE_UP: 35,\n MouseEventType.SCROLL_UP: 96,\n MouseEventType.SCROLL_DOWN: 97,\n }.get(mouse_event.event_type)\n\n self.process.write_input(\n '\\x1b[%s;%s;%sM' % (ev, x + 1, y + 1))\n\n elif process.screen.mouse_support_enabled:\n # Fall back to old mode.\n if x < 96 and y < 96:\n ev = {\n MouseEventType.MOUSE_DOWN: 32,\n MouseEventType.MOUSE_UP: 35,\n MouseEventType.SCROLL_UP: 96,\n MouseEventType.SCROLL_DOWN: 97,\n }.get(mouse_event.event_type)\n\n self.process.write_input('\\x1b[M%s%s%s' % (\n six.unichr(ev),\n six.unichr(x + 33),\n six.unichr(y + 33)))", "title": "" }, { "docid": "378e919b963150a048c90808986e0c11", "score": "0.6082128", "text": "def draw_internal(self):\n global_vars.screen.blit(self.image, self.position)", "title": "" }, { "docid": "3e32d163beb772c292cab22ab6d5f7a1", "score": "0.60817826", "text": "def run(self) -> None:\n # control the draw update speed\n self._dt_seconds = self._clock.tick(120) / 1000\n\n # check events\n self.check_events()\n\n self.update()\n self.draw()\n\n # update the screen with what we've drawn\n self.show_fps()\n pygame.display.flip()", "title": "" }, { "docid": "dde0fd695453a9058dedb5a7dd5e3d93", "score": "0.6075272", "text": "def draw(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "ca0e5fb5d8ef62c7581374e72d64060b", "score": "0.60726535", "text": "def handle(self):\n turtle.tracer(False)\n self.__init_hands()\n self.__clock_outline(160)\n turtle.tracer(True)\n self.__dynamic_display()\n turtle.mainloop()", "title": "" }, { "docid": "c0ccb559d1ce7173d4fe8bda2bd73892", "score": "0.6067104", "text": "def 
draw(self):\n rect = self.rect\n self.window.bg.blit(self.image, rect)\n self.window.screen.blit(self.image, rect)\n pygame.display.update(rect)", "title": "" }, { "docid": "fab95b1863f50f600479757a62a0c487", "score": "0.60633945", "text": "def update(self):\r\n\t\tself.onEraseBackground()\r\n\t\tself.onDraw()\r\n\t\tpygame.display.update()", "title": "" }, { "docid": "35d9e514e71c4e827785fa2c4fcf0387", "score": "0.6061617", "text": "def draw(self):\n raise NotImplementedError", "title": "" }, { "docid": "9261dd4a1592a2613b8ef587340f7b01", "score": "0.6050963", "text": "def _redraw_screen(self):\r\n self._std_scr.erase()\r\n self._redraw_main_win()\r\n\r\n if self._header.version <= 3:\r\n self._draw_status_line()", "title": "" }, { "docid": "b9a4b9edb08592ce2f3ba834790b64dc", "score": "0.6046588", "text": "def run(self) -> None:\n\n while not tcod.console_is_window_closed():\n self.update()\n self.draw()\n self.flush()", "title": "" }, { "docid": "1715a2b503e772978e743490253b18c3", "score": "0.60441285", "text": "def help_screen():\n pygame.draw.rect(screen, WHITE, (150, 20, 830, 560))\n help_text = [\"Welcome to Paint!\", \"Color: pick one of 117 colors.\", \"Pen: free draw across the canvas.\",\n \"Fill: fill an enclosed area on the canvas.\", \"Eraser: turn selected areas on canvas back into white.\",\n \"Square: draw a blank rectangle on the canvas.\", \"Oval: draw a blank ellipse on the canvas.\",\n \"Clear: reset canvas.\", \"Copy: copy a rectangular area from canvas.\",\n \"Cut: copy a rectangular area from canvas and whiten it\", \"Paste: paste back copied area.\",\n \"Undo: undo last action (pen/eraser/fill/square/Oval/cut/paste/clear/load).\",\n \"Save: save current canvas.\",\n \"Load: load last saved canvas. Note: loading without saving loads blank canvas.\",\n \"Screenshot: save current canvas to your computer.\"]\n\n help_lines = []\n count = 0\n for text in help_text: # Position text lines on the screen\n help_lines.append(font.render(text, 0, BLACK))\n\n screen.blit(help_lines[count], (160, 30 + (30 * count)))\n count += 1\n pygame.display.flip()\n\n while True:\n cursor = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == QUIT:\n return False\n if event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0]:\n if pygame.Rect(900, 2, 80, 18).collidepoint(cursor):\n return True", "title": "" }, { "docid": "445d0bc039e7985ed13bae5e555fb524", "score": "0.604023", "text": "def default_screen():\r\n fixation_cross.draw()\r\n Box().default_draw()", "title": "" }, { "docid": "a913f0f56cf0c8f65c01324d856e0bbe", "score": "0.6037551", "text": "def draw(self, screen, screen_world_rect):\n pass", "title": "" }, { "docid": "0389a1bd86fad33270cb68c817edca18", "score": "0.6033316", "text": "def draw(self):\n # Clear the surface\n if self.background_image:\n self.screen.blit(self.background_image, (0, 0))\n else:\n self.screen.fill(self.background_color)\n # Draw all the widgets\n for widget in self.widgets:\n # if widget.dirty:\n widget.draw(self.screen)", "title": "" }, { "docid": "485b1697b14a622dbf0b64c8fc7f3dc0", "score": "0.60332483", "text": "def main(self):\n self.ui = urwid.raw_display.Screen()\n self.ui.register_palette(self.palette)\n self.build_interface()\n self.ui.run_wrapper(self.run)", "title": "" }, { "docid": "d7401748cbf69b8419b61df2f3e0c194", "score": "0.6033065", "text": "def run(self):\n while App.running:\n for event in pygame.event.get():\n if event.type == QUIT:\n App.running = False\n\n App.screen.fill(Color('gray'))\n 
App.t.draw()\n pygame.display.update()\n\n pygame.quit()", "title": "" }, { "docid": "1b773ae2209bf3e210bcd89174bd239b", "score": "0.60294276", "text": "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n\n pg.display.flip()", "title": "" }, { "docid": "2ae1fa31aa54b6cbdbbcc0dd46b9e215", "score": "0.60270387", "text": "def draw_current_screen(self):\n # TODO: make method able to draw any screen\n for y in range(self.view.height):\n for x in range(self.view.width):\n self.terminal.print(x, y, self.view.current_view[y][x])\n self.terminal.print(52, 0, \"X: {}\".format(self.player.coordinates.x))\n self.terminal.print(52, 1, \"Y: {}\".format(self.player.coordinates.y))\n self.terminal.refresh()\n self.set_state(self.States.TURN)", "title": "" }, { "docid": "69d3ee80a05207580b5b0f192008a737", "score": "0.60269827", "text": "def draw(self,window):\n self.rectangle.draw(window)\n self.drawn=True", "title": "" }, { "docid": "3898cbd50d29d4dfd24d8d8c8311ecc6", "score": "0.60194874", "text": "def handle_input(self)->None:\n clock = pg.time.Clock()\n waiting = True\n running = True\n\n while waiting:\n clock.tick(FPS/2)\n self.draw()\n self.x, self.y = pg.mouse.get_pos()\n\n for event in pg.event.get():\n self.mouseclick = pg.mouse.get_pressed()[0]\n\n if event.type == pg.QUIT:\n waiting = running = False\n if event.type == pg.MOUSEBUTTONUP and self.back_btn_hover:\n waiting = False\n if event.type == pg.MOUSEBUTTONUP and self.intro_hovered:\n self.intro_played = not self.intro_played\n self._pixels_to_volume()\n self._save_intro_state()\n pg.display.update()\n pg.mixer.music.set_volume(get_volume())\n return running", "title": "" }, { "docid": "64997f2230da35cc0cde4e771c8dcfbd", "score": "0.60105413", "text": "def _draw(self):\n if not self._deferDraw:\n try:\n self._toplevel.update()\n except Tkinter.TclError:\n sys.exit()", "title": "" }, { "docid": "b24bab66bd6a8a58602a1e123c47f202", "score": "0.6004367", "text": "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n \n self._find_box()\n if self.settings.current_x_coord < self.settings.screen_width and self.settings.current_y_coord < self.settings.screen_height:\n self._highlight_current_box()\n self._draw_grid()\n self._save_num()\n self._fill_num_grid()\n self._check_collision()\n self._check_completion()\n self._timer()\n print(self.settings.hasError)\n \n\n #Make the most recently drawn screen visible\n pygame.display.flip()", "title": "" }, { "docid": "07d0bb554fba0303951c312acc6225ae", "score": "0.59948164", "text": "def update(self, **kwargs):\n self.esc_key_state = kwargs.get('keyState', False)\n self.__draw_border__()\n self.__draw_buttons__()\n self.__check_mouse__()", "title": "" }, { "docid": "47b8ad3c8c61d87c1782396ec202df56", "score": "0.5990555", "text": "def refresh_screen(self):\n window = self.get_current_window()\n window.fill(WHITE)\n self.fill_grid()\n self.draw_grid_lines(window)\n return None", "title": "" }, { "docid": "4f424ef71bfb42cf48ed8bcb9bee9d11", "score": "0.5984236", "text": "def run(self):\n while App.running:\n for event in pygame.event.get():\n if event.type == QUIT:\n App.running = False\n\n #App.screen.fill(Color('green'))\n #App.t.draw()\n pygame.display.update()\n pygame.quit()", "title": "" }, { "docid": "d551b762e057f65fb600991466912719", "score": "0.59786355", "text": "def embody(self):\n self.buttonify()\n self.draw()", "title": "" }, { "docid": "d551b762e057f65fb600991466912719", "score": "0.59786355", "text": "def embody(self):\n self.buttonify()\n self.draw()", 
"title": "" }, { "docid": "80ee73fbb7c240d8d38f2761dcee017e", "score": "0.59654003", "text": "def write_to_screen(self, app, screen, mouse_handlers, write_position):\n # Set size of the screen.\n self.process.set_size(write_position.width, write_position.height)\n\n vertical_scroll = self.process.screen.line_offset\n\n # Render UserControl.\n temp_screen = self.process.screen.pt_screen\n\n # Write body to screen.\n self._copy_body(app, temp_screen, screen, write_position, vertical_scroll,\n write_position.width)\n\n # Set mouse handlers.\n def mouse_handler(app, mouse_event):\n \"\"\" Wrapper around the mouse_handler of the `UIControl` that turns\n absolute coordinates into relative coordinates. \"\"\"\n position = mouse_event.position\n\n # Call the mouse handler of the UIControl first.\n self._mouse_handler(\n app, MouseEvent(\n position=Point(x=position.x - write_position.xpos,\n y=position.y - write_position.ypos + vertical_scroll),\n event_type=mouse_event.event_type))\n\n mouse_handlers.set_mouse_handler_for_range(\n x_min=write_position.xpos,\n x_max=write_position.xpos + write_position.width,\n y_min=write_position.ypos,\n y_max=write_position.ypos + write_position.height,\n handler=mouse_handler)\n\n # If reverse video is enabled for the whole screen.\n if self.process.screen.has_reverse_video:\n data_buffer = screen.data_buffer\n\n for y in range(write_position.ypos, write_position.ypos + write_position.height):\n row = data_buffer[y]\n\n for x in range(write_position.xpos, write_position.xpos + write_position.width):\n char = row[x]\n token = list(char.token or DEFAULT_TOKEN)\n\n # The token looks like ('C', *attrs). Replace the value of the reverse flag.\n if token and token[0] == 'C':\n token[-1] = not token[-1] # Invert reverse value.\n row[x] = Char(char.char, tuple(token))", "title": "" }, { "docid": "58cd842aee4c6f42914429aa2f63d43b", "score": "0.5962901", "text": "def curses_interface(self):\n screen = curses.initscr() #set new terminal settings\n curses.noecho()\n curses.cbreak()\n curses.curs_set(0)\n screen.nodelay(1)\n screen.keypad(1)\n try:\n h, w = screen.getmaxyx() # Get screen size and do math to determine subwindow dimensions\n maxg_w = w - 10\n maxg_h = h - 10 # Max size of the game window\n mid_w = int(w/2)\n mid_h = int(h/2)\n banner_window = curses.newwin(10, w, 0, 0)\n b_h, b_w = banner_window.getmaxyx()\n if self.args.dimensions:\n p_w, p_h = self.args.dimensions\n g_w = clamp(p_w + 2, 0, maxg_w) # Make a game window the same size as the array, or the max size\n g_h = clamp(p_h + 2, 0, maxg_h) # Add 2 because of the box\n top_l = (mid_w-int(g_w/2), mid_h-int(g_h/2))\n game_window = curses.newwin(g_h, g_w, top_l[0], top_l[1])\n else:\n top_l = (mid_w-int(maxg_w/2), mid_h-int(maxg_h/2)) # If no dimensions are supplied, default to max size\n game_window = curses.newwin(maxg_h, maxg_w, top_l[0], top_l[1])\n p_h, p_w = game_window.getmaxyx() # Default plane dim to the size of the game window\n p_w -= 2 # To account for the box\n p_h -= 2\n if self.args.load:\n plane = self.load(self.args.load) # We can specify a save file to load at start\n else:\n plane = np.random.randint(0, 2, (p_h, p_w), dtype=bool) # Initializing with random noise\n game_running = False\n ms = 100 # Time to wait between updates\n exec_time = None\n recording = False\n # Main loop\n while True:\n events = self.get_events(screen, ms) #get event queue\n if ord(\"q\") in events:\n if len(self.images) > 0: self.output_gif()\n break\n if ord(\"r\") in events: game_running = True\n if ord(\"p\") 
in events: game_running = False\n if ord(\"g\") in events: ms = clamp(ms-10, 30, 5000)\n if ord(\"h\") in events: ms = clamp(ms+10, 30, 5000)\n if ord(\"n\") in events: plane = np.random.randint(0, 2, (p_h, p_w), dtype=bool)\n if ord(\"v\") in events: recording = True\n if game_running:\n exec_time, plane = self.game_update(plane)\n if (recording and game_running):\n self.save_frame(plane)\n # Draw the main menu banner\n banner_window.addstr(0, 0, \"Conway's Game of Life, Presented by Jacob Smith\")\n banner_window.addstr(0, int(w/2), \"Main Menu\")\n if (recording and game_running):\n banner_window.addstr(1, int(w/2), \"Recording, Frames: \" + str(len(self.images)))\n banner_window.addstr(2, 0, \"Game Dimensions: \" + str(p_h) + ',' + str(p_w))\n if exec_time: banner_window.addstr(1, 0, \"Update function execution time: \" + str(exec_time))\n banner_window.addstr(3, 0, \"Press e to enter edit mode, press r to start the game, p to pause, q to quit.\")\n banner_window.refresh()\n\n self.draw_game(game_window, plane)\n\n screen.nodelay(0) #reset terminal settings\n screen.keypad(0)\n curses.echo()\n curses.nocbreak()\n curses.curs_set(1)\n curses.endwin()\n\n except Exception as e:\n screen.nodelay(0) #reset terminal settings\n screen.keypad(0)\n curses.echo()\n curses.nocbreak()\n curses.curs_set(1)\n curses.endwin()\n print(e)\n print(traceback.format_exc())", "title": "" }, { "docid": "ecf0ee3118d3327b75e2ffa4754cd68d", "score": "0.59617203", "text": "def run(self):\n while self.running:\n for event in pygame.event.get():\n if event.type == QUIT:\n self.running = False\n \n elif event.type == KEYDOWN:\n self.do_shortcut(event)\n\n App.current.do_event(event)\n\n for s in App.current.space.shapes:\n if s.body.position.y < -100:\n App.current.space.remove(s)\n\n self.draw()\n\n if self.stepping:\n App.current.space.step(self.dt)\n\n pygame.quit()", "title": "" }, { "docid": "c3c62a8385cbd86ae5bba814116c4d76", "score": "0.5957003", "text": "def on_draw():\n \n global MOUSE_LEFT_CLICKED, focus\n \n # Clearn screen.\n mainWindow.clear()\n \n # Draw Widgets.\n num = 0\n for widget in widgets_list:\n \n if widget.collision(MOUSE_X, MOUSE_Y):\n \n if widget.type == 'Button':\n \n widget.opacity = 155\n cursor = mainWindow.get_system_mouse_cursor(mainWindow.CURSOR_HAND)\n mainWindow.set_mouse_cursor(cursor) \n \n if MOUSE_LEFT_CLICKED == True:\n \n eval(widget.click_action)\n MOUSE_LEFT_CLICKED = False\n \n break\n \n elif widget.type == 'Input':\n \n cursor = mainWindow.get_system_mouse_cursor(mainWindow.CURSOR_TEXT)\n mainWindow.set_mouse_cursor(cursor) \n \n if MOUSE_LEFT_CLICKED == True:\n \n if focus != widget:\n \n focus.caret.visible = False\n focus = widget\n focus.caret.visible = True\n \n else:\n \n widget.caret.on_mouse_press(MOUSE_X, MOUSE_Y, mouse.LEFT, None)\n \n MOUSE_LEFT_CLICKED = False\n \n break\n \n else:\n \n widget.opacity = 255\n \n num += 1\n \n if num == len(widgets_list)-1:\n \n cursor = mainWindow.get_system_mouse_cursor(mainWindow.CURSOR_DEFAULT)\n mainWindow.set_mouse_cursor(cursor) \n \n # Draw batch.\n batch.draw()", "title": "" }, { "docid": "349c9c7fd372605566621e61074daaff", "score": "0.59559655", "text": "def main():\n world = World()\n painter = Painter()\n player = world.player\n\n\n while not libtcod.console_is_window_closed():\n world.update()\n painter.paint(world)\n player.interact(world)\n if player.exit:\n break\n painter.new_canvas()", "title": "" }, { "docid": "9710cafa9e7ff1516b72d858a91c402c", "score": "0.59544224", "text": "def 
__init_body_win(self):\n self.body_win.timeout(100)\n self.body_win.keypad(1)\n y, x = self.body_win.getmaxyx()\n self.body_win.noutrefresh()", "title": "" }, { "docid": "25887a7ac4c68b928c7db94496f0213b", "score": "0.5933972", "text": "def drawSurface(self):", "title": "" }, { "docid": "3770b802afb79c2abd04c9a405aafe7c", "score": "0.5933554", "text": "def draw(self,screen):\r\n # First fill the screen with the background color\r\n pygame.draw.rect(screen,self.background_color,self.rect)\r\n # Draw the edges of the button\r\n pygame.draw.rect(screen,BLACK,self.rect,3)\r\n # Get the width and height of the text surface\r\n width = self.text.get_width()\r\n height = self.text.get_height()\r\n # Calculate the posX and posY\r\n posX = self.rect.centerx - (width / 2)\r\n posY = self.rect.centery - (height / 2)\r\n # Draw the image into the screen\r\n screen.blit(self.text,(posX,posY))", "title": "" }, { "docid": "fec3a119ffc120bc8537340b97d23399", "score": "0.5926865", "text": "def draw(self):\n # Clear the screen and reset the OpenGL state\n glClearColor(0, 0, 0, 1)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glEnable(GL_TEXTURE_2D)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n # Set the level position on the screen\n glPushMatrix()\n glTranslate(-8.5, -2, 0)\n glRotate(15, 1, 0, 0)\n glRotate(20, 0, 1, 0)\n\n # Draw the level\n self._level.draw()\n\n # Draw the characters\n self._player.draw()\n self._robot.draw()\n\n glPopMatrix()\n\n # Draw the interface\n self._linesLabel.draw()\n if not self._debugView:\n self._editor.draw()\n else:\n self._debug.text = (self.dumpProcessor(self._robot.processor, self._debugAddr)\n + \"\\nPress the left and right arrow keys to\\nchange memory pages.\")\n self._debug.draw()\n self._keysLabel.draw()\n self._infoLabel.draw()\n self._varLabel.draw()\n self._statusLabel.draw()", "title": "" }, { "docid": "05bfac4f0cdd291f328a605007a1815d", "score": "0.59228843", "text": "def render(self, screen: pygame.Surface):\r\n pass", "title": "" } ]
a712f0cb9d8eddbf41248121075a19fa
Attach a lot of summaries to a Tensor (for TensorBoard visualization).
[ { "docid": "f55821b85f4b9014bb03f97a13767b5a", "score": "0.5550347", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" } ]
[ { "docid": "384e9aded997888d340ff722a51377a8", "score": "0.77373797", "text": "def add_summaries(self):\n # defines a namespace for the summaries\n with tf.name_scope('summaries'):\n # adds a plot for the loss\n tf.summary.scalar('loss', self.loss)\n #tf.summary.scalar('accuracy', self.accuracy)\n tf.summary.scalar('accuracy', self.acc_batch)\n # groups summaries\n self.summary = tf.summary.merge_all()", "title": "" }, { "docid": "d5837eb8691be4b4228a97d570d6f2ab", "score": "0.74456966", "text": "def add_to_tensorboard(self):\n self.merged = tf.summary.merge_all()\n self.f_writer = tf.summary.FileWriter(self.config['model_dir'], self.sess.graph)", "title": "" }, { "docid": "78a485235f662153f72af9271965b479", "score": "0.70527154", "text": "def _setup_summaries(self):\n with tf.name_scope('summaries'):\n tf.summary.scalar(\"loss\", self._loss)\n \n tf.summary.scalar('weight_mean', tf.reduce_mean(self._w))\n tf.summary.scalar('max_weight', tf.reduce_max(self._w))\n tf.summary.scalar('min_Weight', tf.reduce_min(self._w))\n tf.summary.histogram('weights_histogram', self._w)", "title": "" }, { "docid": "0951cceb95cdf28d80d8ef0f30427a44", "score": "0.6754626", "text": "def __create_tf_summaries__(self):\n pass", "title": "" }, { "docid": "8b38ae9a6d5aa615c86d220d66cfa95a", "score": "0.6746156", "text": "def build_summaries(self):\n with tf.name_scope('summaries'):\n tf.summary.scalar('loss', self.loss)\n tf.summary.histogram('weights_histogram', self.embed)\n\n self.merged_summaries = tf.summary.merge_all()\n self.writer = tf.summary.FileWriter(self.summaries_dir,\n self.session.graph)", "title": "" }, { "docid": "98d023254f3238cc83cd196737c78bed", "score": "0.6583265", "text": "def _create_summaries(self, loss, summary_dict={}, summary_list=[]):\n\n # TODO: Custom histogram with per-class performance\n # See: https://stackoverflow.com/questions/42012906/create-a-custom-tensorflow-histogram-summary\n \n ### Add summaries\n # with tf.name_scope(\"summaries\"):\n # tf.summary.scalar('model_loss', loss) # placeholder summary\n \n for name, tf_placeholder in summary_dict.items():\n # Inspired by:\n # https://stackoverflow.com/a/41031284\n tf.summary.scalar(name, tf_placeholder)\n\n for tf_placeholder in summary_list:\n tf.summary.scalar(tf_placeholder.name, tf_placeholder)\n\n return", "title": "" }, { "docid": "1d592d4193a36f8a57895311f6c3b5c5", "score": "0.6568918", "text": "def add_metrics_summary(metrics):\n\n for key in metrics.keys():\n tf.summary.scalar(key, metrics[key][1])", "title": "" }, { "docid": "d2d47b80852115f8674bcc0f08eb36dc", "score": "0.6532411", "text": "def add_summary_op(self):\n return tf.summary.merge_all()", "title": "" }, { "docid": "9361127b8624bccf7adb1b70b173154d", "score": "0.65304357", "text": "def add_summary(self):\n self.merged = tf.summary.merge_all()\n self.file_writer = tf.summary.FileWriter(self.config.dir_output,\n self.sess.graph)", "title": "" }, { "docid": "f808bdf8f024260693a0dc4173c7be97", "score": "0.64709187", "text": "def add_summary(self):\n self.merged = tf.summary.merge_all()\n self.file_writer = tf.summary.FileWriter(self.args.dir_output,\n self.sess.graph)", "title": "" }, { "docid": "a64d3b4bfa11ba608cbb4f9bb38c324b", "score": "0.64518285", "text": "def log_summary(self, epoch, metrics):\n for name, value in metrics.items():\n self.writer.add_scalar(name, value, epoch)", "title": "" }, { "docid": "ae8e7574c73c013e2d0da6f47a376ec5", "score": "0.64485514", "text": "def _create_summary(self):\n\n with tf.name_scope('summary'):\n\n 
tf.summary.scalar(\"cost\", self.cost)\n tf.summary.histogram(\"histrogram_cost\", self.cost)\n # etc.\n\n # because you have several summaries, we should merge them all\n # into one op to make it easier to manage\n self.summary = tf.summary.merge_all()", "title": "" }, { "docid": "2a373652e58c8fb6db6db3ffa7fefb63", "score": "0.6369975", "text": "def tensorboard_summaries(self, logits, features, labels):\n if not tf.contrib.eager.in_eager_mode():\n weighted_auroc = tf.multiply(*average_auroc(logits, labels, features))\n weighted_auprc = tf.multiply(*average_auprc(logits, labels, features))\n tf.summary.scalar(\"metrics/average_auroc\", weighted_auroc)\n tf.summary.scalar(\"metrics/average_auprc\", weighted_auprc)\n # Logging AUC metrics for individual labels.\n # TODO(alexyku): can we display multple curves on the same plot?\n # tf.summary.scalars(\"metrics/set_auroc\", set_auroc(logits, targets))\n # tf.summary.scalars(\"metrics/set_auprc\", set_auprc(logits, targets))", "title": "" }, { "docid": "9f77d2327327edd37acb51eae0234ac7", "score": "0.6350556", "text": "def setup_summary(self):\r\n episode_total_reward = tf.Variable(0.)\r\n episode_avg_max_q = tf.Variable(0.)\r\n episode_duration = tf.Variable(0.)\r\n\r\n tf.summary.scalar('Total Reward/Episode', episode_total_reward)\r\n tf.summary.scalar('Average Max Prob/Episode', episode_avg_max_q)\r\n tf.summary.scalar('Duration/Episode', episode_duration)\r\n\r\n summary_vars = [episode_total_reward,episode_avg_max_q,episode_duration]\r\n\r\n summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]\r\n update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]\r\n summary_op = tf.summary.merge_all()\r\n return summary_placeholders, update_ops, summary_op", "title": "" }, { "docid": "1227761d30a1effb9550daca5b6692a3", "score": "0.6344058", "text": "def add_summaries_eval(self):\n with tf.name_scope(\"Evaluation\"):\n with self.graph.as_default():\n tf.summary.scalar(\"L1_diff\", self._l1_diff)\n tf.summary.scalar(\"L2_squared_diff\", self._l2sh_diff)\n tf.summary.scalar(\"Proportion_wrong\", self._prop_wrong)\n tf.summary.scalar(\"Proportion_where_any_is_wrong\",\n self._prop_any_wrong)\n tf.summary.scalar(\"Kendall's_tau\",\n self._kendall_tau)", "title": "" }, { "docid": "7759e8b5506ddb93b17b60a4bfaa687e", "score": "0.6343777", "text": "def _setup_summaries(sess, writer, image_input, labels, keep_prob, cross_entropy_loss, prediction_op, iou_mean,\n acc_mean, summary_images, summary_labels, step, classes_num):\n tf.summary.scalar('loss', cross_entropy_loss)\n tf.summary.scalar('iou', iou_mean)\n tf.summary.scalar('acc', acc_mean)\n\n # Merge running summaries\n summary_op = tf.summary.merge_all()\n\n max_imgs = len(summary_images)\n\n # Setup the prediction image summary op\n image_summary_op = tf.summary.image(\n 'image_prediction',\n tf.expand_dims(tf.div(tf.cast(prediction_op, dtype=tf.float32), classes_num), -1),\n max_outputs=max_imgs)\n\n # Execute the input image summary\n image_input_summary = sess.run(\n tf.summary.image('image_input', image_input, max_outputs=max_imgs),\n feed_dict={\n image_input: summary_images,\n labels: summary_labels,\n keep_prob: 1.0\n })\n\n # Writes the input image only once (records the steps if trained in multiple passes)\n writer.add_summary(image_input_summary, global_step=step)\n\n # Setup the hyperparams summary\n hyperparams_summary = sess.run(tf.summary.text('hyperparameters', _config_tensor()))\n\n # Writes the hyperparams only 
once (records the steps if trained in multiple passes)\n writer.add_summary(hyperparams_summary, global_step=step)\n\n return summary_op, image_summary_op", "title": "" }, { "docid": "b20190ca8c99777142adcffa3a1bc179", "score": "0.63034993", "text": "def add_summaries_train(self):\n with tf.name_scope(\"Training\"):\n with self.graph.as_default():\n tf.summary.scalar(\"Total_l2_squared_loss\", self._l2s_diff)", "title": "" }, { "docid": "3f54046de66aad30fe91420d3ec49556", "score": "0.62929976", "text": "def _write_train_summary(self, summary_writer, step, loss, other_metrics_val):\n if summary_writer:\n with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar('train/loss', loss, step=step)\n for metric_name, metric_val in other_metrics_val.items():\n if not isinstance(metric_val, (tuple, list, np.ndarray)):\n tf.contrib.summary.scalar('val/'+metric_name, metric_val, step=step)\n summary_writer.flush()", "title": "" }, { "docid": "26abf16b16829dfd59e039f1a2788535", "score": "0.6272487", "text": "def write_summary(self):\n if self._step % self._parameters['summary_frequency'] == 0 and \\\n self._parameters['tensorboard']:\n\n summary = tf.Summary()\n for key in self._stats.keys():\n if len(self._stats[key]) > 0:\n stat_mean = float(np.mean(self._stats[key]))\n summary.value.add(tag='{}'.format(key), simple_value=stat_mean)\n self._stats[key] = []\n self._summary_writer.add_summary(summary, self._step)\n self._summary_writer.flush()", "title": "" }, { "docid": "d4d4f70fc4f1642b227ac1a421907433", "score": "0.6238147", "text": "def write_summary(self):\n if self.step % self.parameters['summary_frequency'] == 0 and \\\n self.parameters['tensorboard']:\n summary = tf.Summary()\n for key in self.stats.keys():\n if len(self.stats[key]) > 0:\n stat_mean = float(np.mean(self.stats[key]))\n summary.value.add(tag='{}'.format(key), simple_value=stat_mean)\n self.run.log_scalar('{}'.format(key), stat_mean, self.step)\n self.stats[key] = []\n self.summary_writer.add_summary(summary, self.step)\n self.summary_writer.flush()", "title": "" }, { "docid": "8a25507e56a9a13459437117511677dd", "score": "0.61864054", "text": "def summary_routines(self):\r\n # Note that summary_routines are called outside of the self.mode name_scope. 
Hence, self.mode should be\r\n # prepended to the summary name if needed.\r\n tf.summary.scalar(self.mode + \"/loss\", self.loss, collections=[self.mode + \"/model_summary\"])\r\n\r\n if self.is_training:\r\n tf.summary.scalar(self.mode + \"/learning_rate\",\r\n self.learning_rate,\r\n collections=[self.mode + \"/model_summary\"])\r\n\r\n self.summary_update = tf.summary.merge_all(self.mode + \"/model_summary\")", "title": "" }, { "docid": "866285c61da7ce71bfcf44d47a874234", "score": "0.6174107", "text": "def _build_summary_ops(self):\n with tf.variable_scope(self.scope, reuse=self.reuse):\n with tf.name_scope('summaries'):\n # The td_error here is the difference between q_t and q_t_target.\n # Without abs(), the summary of td_error is actually underestimated.\n error_summaries = [tf.summary.scalar\n ('td_error_%i' % i, tf.reduce_mean(tf.abs(self.td_error[i])))\n for i in range(self.num_objectives)]\n self.error_summary = tf.summary.merge(error_summaries)\n self.smiles = tf.placeholder(tf.string, [], 'summary_smiles')\n self.rewards = [\n tf.placeholder(tf.float32, [], 'summary_reward_obj_%i' % i)\n for i in range(self.num_objectives)\n ]\n # Weighted sum of the rewards.\n self.weighted_reward = tf.placeholder(tf.float32, [],\n 'summary_reward_sum')\n smiles_summary = tf.summary.text('SMILES', self.smiles)\n reward_summaries = [\n tf.summary.scalar('reward_obj_%i' % i, self.rewards[i])\n for i in range(self.num_objectives)\n ]\n reward_summaries.append(\n tf.summary.scalar('sum_reward', self.rewards[-1]))\n\n self.episode_summary = tf.summary.merge([smiles_summary] +\n reward_summaries)", "title": "" }, { "docid": "c0a04a15ccb627acf81f5e1a27ab0847", "score": "0.61199373", "text": "def put_summaries(var, prefix_name, suffix_text = ''):\n prefix_title = prefix_name+'/'\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n #tf.scalar_summary(prefix_title+'mean'+suffix_text, mean)\n tf.summary.scalar(prefix_title+'mean'+suffix_text, mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))\n tf.summary.scalar(prefix_title+'stddev'+suffix_text, stddev)\n tf.summary.scalar(prefix_title+'max'+suffix_text, tf.reduce_max(var))\n tf.summary.scalar(prefix_title+'min'+suffix_text, tf.reduce_min(var))\n tf.summary.histogram(prefix_name, var)", "title": "" }, { "docid": "d4e66dd78387b06761165c97d797ad35", "score": "0.61190337", "text": "def _summaries(self):\n tf.summary.scalar(\"Total_Loss\", self.cost)\n tf.summary.scalar(\"XEntropy_Loss_Pi\", self.xentropy_p)\n tf.summary.scalar(\"XEntropy Loss_yi\", self.xentropy_y)\n tf.summary.scalar(\"Weight_Decay_Loss\", self.weight)", "title": "" }, { "docid": "62216bed0518bffe199935808287b0d2", "score": "0.6073017", "text": "def add_summary(summary_writer, global_step, tag, value):\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n summary_writer.add_summary(summary, global_step)", "title": "" }, { "docid": "c2eec1cfd38a1f981a2bb8604d1b1a2e", "score": "0.6066448", "text": "def create_summaries_and_logs():\n # Construct extra metrics for Training and Evaluation\n images = tf.cast(tf.map_fn(lambda f: mean_image_addition(f, params['dataset_mean_values']), features), tf.uint8)\n summary_images = [images, network_graph.y_decoded, network_graph.y_pred_decoded]\n\n extra_summary_ops = [tf.summary.scalar('loss', network_graph.loss),\n tf.summary.scalar('pixel_wise_accuracy', network_graph.metrics['pixel_wise_accuracy'][1]),\n tf.summary.scalar('mean_iou', 
compute_mean_iou(network_graph.metrics['mean_iou'][1])),\n # Concatenate them on width axis\n tf.summary.image('images', tf.concat(axis=2, values=summary_images),\n max_outputs=params['max_num_tensorboard_images'])]\n\n # TFEstimator automatically creates a summary hook during training. So, no need to create one.\n if mode == tf.estimator.ModeKeys.TRAIN:\n extra_summary_ops.append(tf.summary.scalar('learning_rate', network_graph.learning_rate))\n\n # Construct tf.logging tensors\n train_tensors_to_log = {'epoch': network_graph.global_step // params['num_iterations'],\n 'learning_rate': network_graph.learning_rate,\n 'train_px_acc': network_graph.metrics['pixel_wise_accuracy'][1],\n 'train_mean_iou': compute_mean_iou(network_graph.metrics['mean_iou'][1])}\n logging_hook = tf.train.LoggingTensorHook(tensors=train_tensors_to_log,\n every_n_iter=params['log_every'])\n\n return [logging_hook]\n\n summary_output_dir = join(params['experiment_dir'], 'eval')\n\n # Construct tf.logging tensors\n val_tensors_to_log = {'epoch': network_graph.global_step // params['num_iterations'] - 1,\n 'global_step': network_graph.global_step,\n 'val_loss': network_graph.loss,\n 'val_px_acc': network_graph.metrics['pixel_wise_accuracy'][1],\n 'val_mean_iou': compute_mean_iou(network_graph.metrics['mean_iou'][1])}\n logging_hook = tf.train.LoggingTensorHook(tensors=val_tensors_to_log, every_n_iter=params['log_every'])\n\n summary_hook = tf.train.SummarySaverHook(params['tensorboard_update_every'], output_dir=summary_output_dir,\n summary_op=tf.summary.merge(extra_summary_ops))\n\n return [logging_hook, summary_hook]", "title": "" }, { "docid": "d710730e22262096aade2fc0a691ef53", "score": "0.6063921", "text": "def callback(self, locals_, globals_):\r\n self_ = locals_['self']\r\n # Log additional tensor\r\n if not self_.is_tb_set:\r\n with self_.graph.as_default():\r\n tf.summary.scalar('value_target', tf.reduce_mean(self_.value_target))\r\n self_.summary = tf.summary.merge_all()\r\n self_.is_tb_set = True\r\n # Log scalar threshold (here a random variable)\r\n values_to_log = []\r\n for current_attribute_value in self.config['main']['logs']:\r\n value = np.mean(self.env.get_attr(current_attribute_value))\r\n values_to_log.append(tf.Summary.Value(tag=current_attribute_value, simple_value=value, ))\r\n summary = tf.Summary(value=values_to_log)\r\n locals_['writer'].add_summary(summary, self_.num_timesteps)\r\n\r\n return True", "title": "" }, { "docid": "2a80517eb2d225d16ba1c56ff7cf8616", "score": "0.60590285", "text": "def build_summary_writer(self, sess):\n\n # Compute the average Q-value\n avg_qvalues_train = tf.reduce_mean(self.qvalues)\n avg_qvalues_target = tf.reduce_mean(tf.reduce_mean(self.pl_qtargets))\n avg_reward_batch = tf.reduce_mean(tf.reduce_mean(self.pl_rewards))\n\n # Summaries for training\n training_summaries = [\n tf.summary.scalar(\"train/qvalues_train_avg\", avg_qvalues_train),\n tf.summary.scalar(\"train/qvalues_target_avg\", avg_qvalues_target),\n tf.summary.scalar(\"train/avg_reward_batch\", avg_reward_batch),\n tf.summary.scalar(\"train/loss\", self.loss),\n tf.summary.scalar(\"train/loss_average\", self.loss_moving_avg.average(self.loss)),\n tf.summary.scalar(\"train/learning_rate\", self.learning_rate),\n tf.summary.histogram(\"train/delta\", self.delta)\n ]\n training_summaries_merged = tf.summary.merge(training_summaries)\n\n # Environment related summaries\n with tf.variable_scope(\"environment\"):\n self.avg_reward_per_game = tf.Variable(0.0, trainable=False, 
name=\"avg_reward_per_game\")\n self.max_reward_per_game = tf.Variable(0.0, trainable=False, name=\"max_reward_per_game\")\n self.avg_moves_per_game = tf.Variable(0.0, trainable=False, name=\"avg_moves_per_game\")\n self.num_games_played = tf.Variable(0.0, trainable=False, name=\"num_games_played\")\n self.moves = tf.Variable(0.0, trainable=False, name=\"num_moves_played\")\n self.total_reward_replay = tf.Variable(0.0, trainable=False, name=\"reward_in_replay_memory\")\n self.actions_random = tf.Variable(0.0, trainable=False, name=\"num_actions_random\")\n self.actions_greedy = tf.Variable(0.0, trainable=False, name=\"num_actions_greedy\")\n\n environment_summaries = [\n tf.summary.scalar(\"environment/avg_reward_per_game\", self.avg_reward_per_game),\n tf.summary.scalar(\"environment/max_reward_per_game\", self.max_reward_per_game),\n tf.summary.scalar(\"environment/num_games_played\", self.num_games_played),\n tf.summary.scalar(\"environment/moves\", self.moves),\n tf.summary.scalar(\"environment/avg_moves_per_game\", self.avg_moves_per_game),\n tf.summary.scalar(\"environment/reward_in_replay_memory\", self.total_reward_replay),\n tf.summary.scalar(\"actions/num_actions_random\", self.actions_random),\n tf.summary.scalar(\"actions/num_actions_greedy\", self.actions_greedy),\n #tf.summary.image(\"screens\", self.pl_screens, max_outputs=10) # This only works with atari for some reason\n ]\n environment_summaries_merged = tf.summary.merge(environment_summaries)\n\n # Environment related summaries\n with tf.variable_scope(\"evaluation\"):\n self.eval_rewards = tf.Variable(0.0, trainable=False, name=\"total_reward\")\n self.eval_win_rate = tf.Variable(0.0, trainable=False, name=\"win_rate\")\n self.eval_num_rewards = tf.Variable(0.0, trainable=False, name=\"num_rewards\")\n self.eval_max_reward = tf.Variable(0.0, trainable=False, name=\"max_reward\")\n self.eval_num_episodes = tf.Variable(0.0, trainable=False, name=\"num_episodes\")\n self.eval_actions = tf.Variable(np.zeros(self.num_actions), trainable=False, name=\"actions\")\n\n\n evaluation_summaries = [\n tf.summary.scalar(\"evaluation/total_reward\", self.eval_rewards),\n tf.summary.scalar(\"evaluation/win_rate\", self.eval_win_rate),\n tf.summary.scalar(\"evaluation/num_rewards\", self.eval_num_rewards),\n tf.summary.scalar(\"evaluation/max_reward\", self.eval_max_reward),\n tf.summary.scalar(\"evaluation/num_episodes\", self.eval_num_episodes),\n tf.summary.histogram(\"evaluation/actions\", self.eval_actions)\n ]\n\n # Evaluation Summaries for TensorBoard\n self.eval_summary_op = tf.summary.merge(evaluation_summaries)\n\n # Training summaries for TensorBoard\n self.train_summary_op = tf.summary.merge([training_summaries_merged,\n environment_summaries_merged])\n\n train_summary_dir = os.path.join(self.output_dir, \"summaries_\" + self.params.game)\n self.train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)", "title": "" }, { "docid": "22b9d7dffddd55454108ae144ba79812", "score": "0.60527474", "text": "def summarize_metrics(eval_metrics_writer, metrics, epoch):\n for (name, value) in six.iteritems(metrics):\n summary = tf.Summary()\n summary.value.add(tag=name, simple_value=value)\n eval_metrics_writer.add_summary(summary, epoch)\n eval_metrics_writer.flush()", "title": "" }, { "docid": "57298656f118d9520272b62a40994912", "score": "0.6035934", "text": "def _sum_tensor(self, x, name=None):\n if name is None:\n name = x.op.name\n tf.summary.histogram(name + '/histogram', x)\n tf.summary.scalar(name + '/sparsity', 
tf.nn.zero_fraction(x))", "title": "" }, { "docid": "bbe1da1e19b1af233792ad32d6758e2a", "score": "0.60277545", "text": "def log_tensorboard(self):\n if not self.is_master:\n return\n\n for param_name, param in self.student.named_parameters():\n self.tensorboard.add_scalar(\n tag=\"parameter_mean/\" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter\n )\n self.tensorboard.add_scalar(\n tag=\"parameter_std/\" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter\n )\n if param.grad is None:\n continue\n self.tensorboard.add_scalar(\n tag=\"grad_mean/\" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter\n )\n self.tensorboard.add_scalar(\n tag=\"grad_std/\" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter\n )\n\n self.tensorboard.add_scalar(\n tag=\"losses/cum_avg_loss_epoch\",\n scalar_value=self.total_loss_epoch / self.n_iter,\n global_step=self.n_total_iter,\n )\n self.tensorboard.add_scalar(tag=\"losses/loss\", scalar_value=self.last_loss, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(\n tag=\"losses/loss_ce\", scalar_value=self.last_loss_ce, global_step=self.n_total_iter\n )\n if self.alpha_clm > 0.0:\n self.tensorboard.add_scalar(\n tag=\"losses/loss_clm\", scalar_value=self.last_loss_clm, global_step=self.n_total_iter\n )\n if self.alpha_mse > 0.0:\n self.tensorboard.add_scalar(\n tag=\"losses/loss_mse\", scalar_value=self.last_loss_mse, global_step=self.n_total_iter\n )\n self.tensorboard.add_scalar(\n tag=\"learning_rate/lr\", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter\n )\n\n self.tensorboard.add_scalar(\n tag=\"global/memory_usage\",\n scalar_value=psutil.virtual_memory()._asdict()[\"used\"] / 1_000_000,\n global_step=self.n_total_iter,\n )\n self.tensorboard.add_scalar(\n tag=\"global/speed\", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter\n )", "title": "" }, { "docid": "7f43f40553b53326f81e217f4afe333e", "score": "0.5974603", "text": "def tensorboard(self, kind, sed_loss, doa_loss, total_loss, step):\n with self.loss_writer[kind]['sed'].as_default():\n tf.summary.scalar('loss', sed_loss, step)\n with self.loss_writer[kind]['doa'].as_default():\n tf.summary.scalar('loss', doa_loss, step)\n with self.loss_writer[kind]['total'].as_default():\n tf.summary.scalar('loss', total_loss, step)\n with self.loss_writer['lr'].as_default():\n tf.summary.scalar('loss', self.optimizer.lr.numpy(), step)", "title": "" }, { "docid": "6ab4a2153cd5f2c89af798f42fb83233", "score": "0.5974372", "text": "def add_loss_summaries(total_loss):\n # Compute the moving average of all individual losses and the total loss.\n #Checkout the following link on Moving Averages\n #https://www.tensorflow.org/versions/r0.11/api_docs/python/train/moving_averages\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n return loss_averages_op", "title": "" }, { "docid": "cc56f57e0e2e031b6aae71eefc62344f", "score": "0.59668076", "text": "def 
_activation_summary(x):\n tensor_name = x.op.name\n print('load summary for : ', tensor_name)\n tf.summary.histogram(tensor_name + '/activations', x)\n # tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))", "title": "" }, { "docid": "8d78f6f27818e414f07895c828af7ab6", "score": "0.59588784", "text": "def _summaries(self):\n tf.summary.scalar(\"Total Loss\", self.cost)\n tf.summary.scalar(\"Cross Entropy Loss\", self.xentropy)\n tf.summary.scalar(\"Weight Decay Loss\", self.weight)", "title": "" }, { "docid": "6b69510702bdf34f81fbc226e8c11af3", "score": "0.59412855", "text": "def add_mean_metric(name, values, eval_metric_ops):\n if eval_metric_ops is not None:\n v, u = tf.metrics.mean(values)\n # This line is required to make the statistics show up on Tensorboard.\n tf.summary.scalar(name, v)\n eval_metric_ops[name] = (v, u)", "title": "" }, { "docid": "e67a75a56a5cef80d4d9311efdc2a0e5", "score": "0.593097", "text": "def activation_summary(x):\n\t#We first need to remove 'tower_[0-9]/' from names incase this is a multi-GPU training Op, for summary visualization\n\top_name = re.sub('%s_[0-9]*/'%TOWER_NAME, '', x.op.name)\n\t#Next we need to print the summaries into the hidden files\n\t#First Record the histogram summary of the activations\n\ttf.summary.histogram(op_name+'/activations', x)\n\t#Record the summary of the sparsity of the activations\n\ttf.summary.scalar(op_name+'/sparsity', x)", "title": "" }, { "docid": "9c55a38a09f63ce5def081a71416eda2", "score": "0.59265316", "text": "def _add_loss_summaries(total_loss):\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n \n # Attach a scalar summmary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name +' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n \n return loss_averages_op", "title": "" }, { "docid": "67081749a8d578c4c1d93dab2780f48e", "score": "0.5909802", "text": "def add_compression_summaries(self):\n with tf.compat.v1.name_scope(self._spec.name + '_summaries'):\n tf.compat.v2.summary.scalar('last_alpha_update_step',\n self._last_alpha_update_step)\n tf.compat.v2.summary.scalar(self.alpha.op.name + '/alpha', self.alpha)\n tf.compat.v2.summary.scalar(\n self.a_matrix_tfvar.op.name + '/a_matrix_norm',\n tf.norm(tensor=self.a_matrix_tfvar))\n tf.compat.v2.summary.scalar(\n self.b_matrix_tfvar.op.name + '/b_matrix_norm',\n tf.norm(tensor=self.b_matrix_tfvar))", "title": "" }, { "docid": "b409f3bf52474c449a3dc17f7ad40f1b", "score": "0.5904856", "text": "def summary_routines(self):\n # Note that summary_routines are called outside of the self.mode name_scope. 
Hence, self.mode should be\n # prepended to the summary name if needed.\n tf.summary.scalar(self.mode+\"/loss\", self.loss, collections=[self.mode+\"/model_summary\"])\n tf.summary.scalar(self.mode+\"/gradients\", self.gradients_visual, collections=[self.mode+\"/model_summary\"])\n\n if self.fidelity:\n tf.summary.scalar(self.mode + \"/loss_continuity\", self.loss_continuity, collections=[self.mode + \"/model_summary\"])\n tf.summary.scalar(self.mode + \"/loss_fidelity\", self.loss_fidelity, collections=[self.mode + \"/model_summary\"])\n tf.summary.scalar(self.mode + \"/gradients_disc\", self.gradients_visual_disc, collections=[self.mode + \"/model_summary\"])\n\n if self.is_training:\n tf.summary.scalar(self.mode + \"/learning_rate\",\n self.learning_rate,\n collections=[self.mode + \"/model_summary\"])\n\n self.summary_update = tf.summary.merge_all(self.mode+\"/model_summary\")", "title": "" }, { "docid": "e47b44f7212a7fea050803417aebee3f", "score": "0.5904785", "text": "def tf_build_stats_summaries(tensor, name_scope):\n with tf.name_scope(name_scope):\n m_mean = tf.reduce_mean(tensor)\n m_var = tf_reduce_var(tensor)\n m_min = tf.reduce_min(tensor)\n m_max = tf.reduce_max(tensor)\n m_sum = tf.reduce_sum(tensor)\n\n mean_op = tf.summary.scalar('mean', m_mean)\n sd_op = tf.summary.scalar('sd', tf.sqrt(m_var))\n min_op = tf.summary.scalar('min', m_min)\n max_op = tf.summary.scalar('max', m_max)\n sum_op = tf.summary.scalar('sum', m_sum)\n\n stats_summaries = []\n stats_summaries.append(mean_op)\n stats_summaries.append(sd_op)\n stats_summaries.append(min_op)\n stats_summaries.append(max_op)\n stats_summaries.append(sum_op)\n\n return stats_summaries", "title": "" }, { "docid": "16835dd0b737e279138bb4981de1616a", "score": "0.5889432", "text": "def _update_metrics(self, experience, monitor_dict):\n if tf.math.equal(self._global_step % self._summary_log_interval, 0):\n is_action = ~experience.is_boundary()\n\n self._data_action_mean.update_state(\n experience.action, sample_weight=is_action)\n self._data_reward_mean.update_state(\n experience.reward, sample_weight=is_action)\n self._num_trajectories.update_state(experience.is_first())\n\n # Check earlier rather than later if we should record summaries.\n # TF also checks it, but much later. 
Needed to avoid looping through\n # the dict so gave the if a bigger scope\n if tf.summary.should_record_summaries():\n with tf.name_scope('default/'):\n tf.summary.scalar(\n name='data_action_mean',\n data=self._data_action_mean.result(),\n step=self._global_step)\n tf.summary.scalar(\n name='data_reward_mean',\n data=self._data_reward_mean.result(),\n step=self._global_step)\n tf.summary.scalar(\n name='num_trajectories',\n data=self._num_trajectories.result(),\n step=self._global_step)\n\n for name_scope, d in monitor_dict.items():\n with tf.name_scope(name_scope + '/'):\n for key, value in d.items():\n tf.summary.scalar(name=key, data=value, step=self._global_step)\n\n tf.summary.histogram(\n name='reward', data=experience.reward, step=self._global_step)", "title": "" }, { "docid": "5d4dfbb2dd972fc088dd2c7fe1dc61c3", "score": "0.58712786", "text": "def add_compression_summaries(self):\n with tf.name_scope(self._spec.name + '_summaries'):\n logging.info('add_compression_summaries scope name is %s',\n self._spec.name)\n tf.compat.v2.summary.scalar(self.alpha.op.name + '/alpha', self.alpha)\n tf.compat.v2.summary.scalar(\n self.a_matrix_tfvar.op.name + '/a_matrix_norm',\n tf.norm(self.a_matrix_tfvar))\n tf.compat.v2.summary.scalar(\n self.b_matrix_tfvar.op.name + '/d_matrix_norm',\n tf.norm(tf.reshape(self.b_matrix_tfvar, [-1]), ord=1))\n tf.compat.v2.summary.scalar(\n self.c_matrix_tfvar.op.name + '/c_matrix_norm',\n tf.reduce_sum(self.c_matrix_tfvar))", "title": "" }, { "docid": "55f411208c85600eb08150d48f7ba916", "score": "0.587101", "text": "def _train_batch(self, batch):\n feed_dict = {\n self.x: batch['x'],\n self.a: batch['a'],\n self.c: batch['c'],\n self.d: batch['d']\n }\n outputs_list = [self.train_op, self.summary_op, self.global_step]\n session = tf.get_default_session()\n _, summary, step = session.run(outputs_list, feed_dict=feed_dict)\n\n if self.summary_writer:\n self.summary_writer.add_summary(tf.Summary.FromString(summary), step)\n self.summary_writer.flush()", "title": "" }, { "docid": "24822dc05c6bb12ab728d83f2f92e931", "score": "0.58704084", "text": "def summary(summary_writer, metrics, global_step, tag=None):\r\n summary_values = []\r\n for key, value in metrics.items():\r\n summary_values.append(tf.Summary.Value(tag=\"/\".join([tag, key]) if tag else key,\r\n simple_value=value))\r\n summary_writer.add_summary(tf.Summary(value=summary_values), global_step)\r\n summary_writer.flush()", "title": "" }, { "docid": "abfaef33d0e21e2a41d4e9e5da2dca46", "score": "0.5863428", "text": "def write_summaries(self, loss_dict, phase='train'):\n for k, v in loss_dict.items():\n k = k.replace('_', '/') # Group in TensorBoard by phase\n self.summary_writers[phase].add_scalar(k, v, self.global_step)", "title": "" }, { "docid": "989e30aaf0839357715ac9ec1672041e", "score": "0.5862858", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n # with tf.name_scope('stddev'):\n # stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n # tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n # tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "938146e5cf4dc25a90ac3838623c8b84", "score": "0.5859998", "text": "def add_compression_summaries(self):\n with tf.name_scope(self._spec.name + '_summaries'):\n logging.info('add_compression_summaries scope name is %s',\n self._spec.name)\n 
tf.compat.v2.summary.scalar(self.alpha.op.name + '/alpha', self.alpha)\n tf.compat.v2.summary.scalar(\n self.a_matrix_tfvar.op.name + '/a_matrix_norm',\n tf.norm(self.a_matrix_tfvar))\n tf.compat.v2.summary.scalar(\n self.b_matrix_tfvar.op.name + '/b_matrix_norm',\n tf.norm(tf.reshape(self.b_matrix_tfvar, [-1]), ord=1))\n tf.compat.v2.summary.scalar(\n self.c_matrix_tfvar.op.name + '/c_matrix_norm',\n tf.reduce_sum(self.c_matrix_tfvar))", "title": "" }, { "docid": "112d4eb6618e519a4025ce787fba119c", "score": "0.5852878", "text": "def scalar_summary(self, train_loss, test_loss, step):\n\n summary = self.session.run(self.merged, {self.loss: train_loss})\n self.train_writer.add_summary(summary, step) \n self.train_writer.flush()\n\n summary = self.session.run(self.merged, {self.loss: test_loss})\n self.test_writer.add_summary(summary, step) \n self.test_writer.flush()", "title": "" }, { "docid": "3c5bb76b6832b989f4ad674a94ec6b97", "score": "0.58282375", "text": "def _write_eval_summary(self, summary_writer, step, train_acc, val_acc, val_smooth, test_acc,\n other_metrics):\n if summary_writer:\n with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar('train/train_acc', train_acc, step=step)\n tf.contrib.summary.scalar('val/val_acc', val_acc, step=step)\n tf.contrib.summary.scalar('val/val_smooth', val_smooth, step=step)\n tf.contrib.summary.scalar('test/test_acc', test_acc, step=step)\n for metric_name, metric_val in other_metrics.items():\n if not isinstance(metric_val, (tuple, list, np.ndarray)):\n tf.contrib.summary.scalar('test/'+metric_name, metric_val, step=step)\n summary_writer.flush()", "title": "" }, { "docid": "d986e08d02079de4b4d8317bb54cf866", "score": "0.5826271", "text": "def _add_loss_summaries(total_loss):\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n return loss_averages_op", "title": "" }, { "docid": "76144aa73d5f2cdb69aecfe73bd52e0b", "score": "0.58144426", "text": "def _add_loss_summaries(total_loss, scope=None):\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses', scope=scope)\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)\n tf.summary.scalar(loss_name + ' (raw)', l)\n tf.summary.scalar(loss_name, loss_averages.average(l))\n\n return loss_averages_op", "title": "" }, { "docid": "7c4ebee3a3fc2a4a39e70a002e723fc2", "score": "0.58080655", "text": "def _variable_summaries(var):\n if not tf.get_variable_scope().reuse:\n name = var.op.name\n #logging.debug(\"Creating Summary for: 
%s\" % name)\n with tf.name_scope('summaries'):\n tf.summary.scalar(name, var)\n #mean = tf.reduce_mean(var)\n #tf.summary.scalar(name + '/mean', mean)\n #with tf.name_scope('stddev'):\n # stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))\n #tf.summary.scalar(name + '/sttdev', stddev)\n #tf.summary.scalar(name + '/max', tf.reduce_max(var))\n #tf.summary.scalar(name + '/min', tf.reduce_min(var))\n tf.summary.histogram(name, var)", "title": "" }, { "docid": "98d95d8f4167fcf8875efbdabb4b001d", "score": "0.5804085", "text": "def _add_loss_summaries(total_loss):\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n return loss_averages_op", "title": "" }, { "docid": "98d95d8f4167fcf8875efbdabb4b001d", "score": "0.5804085", "text": "def _add_loss_summaries(total_loss):\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n return loss_averages_op", "title": "" }, { "docid": "bd66e44dfe9a47e09112c6a11a9c5415", "score": "0.57744944", "text": "def _add_loss_summaries(total_loss):\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.scalar_summary(l.op.name +' (raw)', l)\n tf.scalar_summary(l.op.name, loss_averages.average(l))\n\n return loss_averages_op", "title": "" }, { "docid": "bd45289baac13a5cf47707d33b6672e9", "score": "0.57704085", "text": "def _variable_summaries(self, v, name):\n with tf.name_scope(name):\n tf.summary.histogram('histogram', v)", "title": "" }, { "docid": "610943a39a26a5d411e5409acdbccba4", "score": "0.57688767", "text": "def create_scalar_summary(self, sess):\n pass", "title": "" }, { "docid": "4a4aa6bb2aeb255f3ea7195fd2fc4083", "score": "0.57631695", "text": "def add_tensorboard(self, tensorboard_dir, timeline_enabled=False):\n if tensorboard_dir is None:\n return\n self.tensorboard_dir = tensorboard_dir\n self.timeline_enabled = timeline_enabled\n\n # Define GraphKeys for TensorBoard\n graphkey_training = tf.GraphKeys()\n graphkey_test = tf.GraphKeys()\n\n # Learning rate\n 
tf.summary.scalar('Learning_rate', self.learning_rate_var, collections=[graphkey_training, graphkey_test])\n\n # Loss\n with tf.name_scope('Mean_loss'):\n mean_loss = tf.divide(self.accumulated_mean_loss, self.mini_batch)\n tf.summary.scalar('Training', mean_loss, collections=[graphkey_training])\n tf.summary.scalar('Test', mean_loss, collections=[graphkey_test])\n\n # Accuracy\n with tf.name_scope('Accuracy_-_Error_Rate'):\n mean_error_rate = tf.divide(self.accumulated_error_rate, self.mini_batch)\n tf.summary.scalar('Training', mean_error_rate, collections=[graphkey_training])\n tf.summary.scalar('Test', mean_error_rate, collections=[graphkey_test])\n\n # Hidden state\n\n\n self.train_summaries_op = tf.summary.merge_all(key=graphkey_training)\n self.test_summaries_op = tf.summary.merge_all(key=graphkey_test)", "title": "" }, { "docid": "400f7fb288ef13daa4e08705950c8d76", "score": "0.57400244", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "6d9c77d723d7e391126c8a08085a1eed", "score": "0.57281667", "text": "def _activation_summary(x):\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on tensorboard.\n tensor_name = x.op.name\n if tensor_name in _activation_summary.summarized:\n return\n _activation_summary.summarized.append(tensor_name)\n # tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))", "title": "" }, { "docid": "bd540e8936f426a568b0e94ea0a24628", "score": "0.5721574", "text": "def tensorboard_logs(self, history):\n if self.config.trainer.tensorboard_enabled:\n loss = history.history[\"loss\"][0]\n summary = tf.Summary(value=[tf.Summary.Value(tag='loss', simple_value=loss)])\n self.file_writer.add_summary(summary, self.epochs)\n\n policy_out_loss = history.history[\"policy_out_loss\"][0]\n summary = tf.Summary(value=[tf.Summary.Value(tag='policy_out_loss', simple_value=policy_out_loss)])\n self.file_writer.add_summary(summary, self.epochs)\n\n value_out_loss = history.history[\"value_out_loss\"][0]\n summary = tf.Summary(value=[tf.Summary.Value(tag='value_out_loss', simple_value=value_out_loss)])\n self.file_writer.add_summary(summary, self.epochs)\n\n summary = tf.Summary(value=[tf.Summary.Value(tag='lr', simple_value=self.lr)])\n self.file_writer.add_summary(summary, self.epochs)", "title": "" }, { "docid": "4bac43c2fba90317de97e4bb45ad8c7d", "score": "0.5704652", "text": "def flush(self):\n if self._enabled:\n tf.nest.map_structure(tf.summary.flush, self._summary_writers)", "title": "" }, { "docid": "3ca2f6f79abbaa27490013ee3cddbac4", "score": "0.57014436", "text": "def save_metrics(self, metrics_dict):\n step = self.get_current_train_step()\n if FLAGS.save_summaries_with_epoch:\n step = int(step / self.datasets.steps_per_epoch * 1000)\n with self.summary_writer.as_default():\n for k, v in metrics_dict.items():\n tf.summary.scalar(k, v, step=step)", "title": "" }, { "docid": "c168ca959237d5ce1118faf6a76daf91", "score": "0.5690031", "text": "def tensor_summary( # pylint: disable=invalid-name\n 
name,\n tensor,\n summary_description=None,\n collections=None):\n # pylint: disable=line-too-long\n # pylint: enable=line-too-long\n\n if summary_description is None:\n summary_description = summary_pb2.SummaryDescription()\n\n description = json_format.MessageToJson(summary_description)\n with ops.name_scope(name, None, [tensor]) as scope:\n val = gen_logging_ops._tensor_summary(\n tensor=tensor,\n description=description,\n name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val", "title": "" }, { "docid": "fb72346fb6125819e340b7563d6ea371", "score": "0.56884325", "text": "def add_loss_summaries(total_loss, loss, regul_losses_collection_key,\n name_prefix=\"\", summaries_collection_key=None,\n exp_moving_avg=0.9, ema_num_updates=None):\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(exp_moving_avg,\n ema_num_updates,\n name='moving_avg')\n other_losses = tf.get_collection(regul_losses_collection_key)\n\n # Attach a scalar summmary to all individual losses and the total loss;\n # do the same for the averaged version of the losses.\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n\n # Only add total loss, if it has more than one loss...\n if len(other_losses) > 0 and total_loss is not None:\n loss_averages_op = loss_averages.apply(\n [total_loss] + [loss] + other_losses)\n summ_name = \"- Loss & var loss/\" + name_prefix\n get_summary(\"scalar\", summ_name, loss_averages.average(total_loss),\n summaries_collection_key)\n get_summary(\"scalar\", summ_name + ' (raw)', total_loss,\n summaries_collection_key)\n elif total_loss is not None:\n loss_averages_op = loss_averages.apply([loss] + other_losses)\n else:\n loss_averages_op = loss_averages.apply([loss])\n\n # For tflearn wrapper visibility\n summ_name = \"- Loss/\" + name_prefix\n get_summary(\"scalar\", summ_name, loss_averages.average(loss),\n summaries_collection_key)\n get_summary(\"scalar\", summ_name + ' (raw)', loss, summaries_collection_key)\n\n for wdl in other_losses:\n # No prefix, we store every variable into their own scope\n summ_name = wdl.op.name\n get_summary(\"scalar\", summ_name, loss_averages.average(wdl),\n summaries_collection_key)\n get_summary(\"scalar\", summ_name + ' (raw)', wdl,\n summaries_collection_key)\n\n return loss_averages_op", "title": "" }, { "docid": "15da0b8b4df266d1ead65e9ed2e884f6", "score": "0.5681126", "text": "def variable_summaries(var): ###################################################################ADDDDDED########\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "c559fe31d236f1d05993081f141ddd53", "score": "0.5651535", "text": "def variable_summaries(var):\n with tf.variable_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.scalar_summary('mean', mean)\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.scalar_summary('stddev', stddev)\n tf.scalar_summary('max', tf.reduce_max(var))\n tf.scalar_summary('min', tf.reduce_min(var))\n tf.histogram_summary('histogram', var)", "title": "" }, { "docid": "5ad576e81a37d3e07b4dd09bbe4ab51c", "score": "0.5643607", "text": "def 
push_tensorboard_eval(self, epoch, dataset_name):\n self.tensorboard_writer.add_scalar(dataset_name + \"/BLEU-1\", self.bleu1[-1], epoch)\n self.tensorboard_writer.add_scalar(dataset_name + \"/BLEU-2\", self.bleu2[-1], epoch)\n self.tensorboard_writer.add_scalar(dataset_name + \"/BLEU-3\", self.bleu3[-1], epoch)\n self.tensorboard_writer.add_scalar(dataset_name + \"/BLEU-4\", self.bleu4[-1], epoch)", "title": "" }, { "docid": "e435212d1a7abd0d94663ce14c72055b", "score": "0.5627793", "text": "def attention_image_summary(attn, image_shapes=None):\n attn = tf.cast(attn, tf.float32)\n num_heads = common_layers.shape_list(attn)[1]\n # [batch, query_length, memory_length, num_heads]\n image = tf.transpose(attn, [0, 2, 3, 1])\n image = tf.pow(image, 0.2) # for high-dynamic-range\n # Each head will correspond to one of RGB.\n # pad the heads to be a multiple of 3\n image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod(-num_heads, 3)]])\n image = split_last_dimension(image, 3)\n image = tf.reduce_max(image, 4)\n if image_shapes is not None:\n if len(image_shapes) == 4:\n q_rows, q_cols, m_rows, m_cols = list(image_shapes)\n image = tf.reshape(image, [-1, q_rows, q_cols, m_rows, m_cols, 3])\n image = tf.transpose(image, [0, 1, 3, 2, 4, 5])\n image = tf.reshape(image, [-1, q_rows * m_rows, q_cols * m_cols, 3])\n else:\n assert len(image_shapes) == 6\n q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels = list(\n image_shapes)\n image = tf.reshape(\n image,\n [-1, q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels, 3])\n image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7])\n image = tf.reshape(\n image,\n [-1, q_rows * m_rows * q_channnels, q_cols * m_cols * m_channels, 3])\n tf.summary.image(\"attention\", image, max_outputs=1)", "title": "" }, { "docid": "41c2b242ce8561080982573243a0546b", "score": "0.5614434", "text": "def profile():\n\n logdir = os.path.join(LOGDIR, \"profile\")\n\n @tf.function\n def f(i):\n return tf.constant(i) + tf.constant(i)\n\n @tf.function\n def g(i):\n return tf.constant(i) * tf.constant(i)\n\n with tf.summary.create_file_writer(logdir).as_default():\n for step in range(3):\n tf.summary.trace_on(profiler=True)\n print(f(step).numpy())\n tf.summary.trace_export(\"prof_f\", step=step, profiler_outdir=logdir)\n\n tf.summary.trace_on(profiler=False)\n print(g(step).numpy())\n tf.summary.trace_export(\"prof_g\", step=step)", "title": "" }, { "docid": "8539f51c6be585fbb3c656c26c17b7de", "score": "0.5611068", "text": "def variable_summaries(var, name):\n with tf.name_scope('summaries'):\n with tf.name_scope(name):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "566e8ae3b791200bdf2250e6ecfdde12", "score": "0.55996406", "text": "def add_activations_summary(activation_ops, name_prefix=\"\", name_suffix=\"\",\n collection_key=None):\n\n summ = []\n for ao in activation_ops:\n ao_name = ao.op.name\n summ_name = format_scope_name(ao_name, name_prefix,\n \"Activations/\" + name_suffix)\n summ_exists = summary_exists(summ_name)\n if summ_exists is not None:\n tf.add_to_collection(collection_key, summ_exists)\n else:\n get_summary(\"histogram\", summ_name, ao, collection_key)\n\n summ_name = format_scope_name(ao_name, name_prefix,\n \"Sparsity/\" + name_suffix)\n 
summ_exists = summary_exists(summ_name)\n if summ_exists is not None:\n tf.add_to_collection(collection_key, summ_exists)\n summ.append(summ_exists)\n else:\n summ.append(get_summary(\"scalar\", summ_name,\n tf.nn.zero_fraction(ao), collection_key))\n return summ", "title": "" }, { "docid": "bd0d0fd0ccb600b4b93037fc90e0cfc9", "score": "0.55986524", "text": "def add_trainable_vars_summary(variables, name_prefix=\"\", name_suffix=\"\",\n collection_key=None):\n\n # Add histograms for trainable variables.\n summ = []\n for var in variables:\n summ_name = format_scope_name(var.op.name, name_prefix, name_suffix)\n summ_exists = summary_exists(summ_name)\n if summ_exists is not None:\n tf.add_to_collection(collection_key, summ_exists)\n summ.append(summ_exists)\n else:\n summ.append(get_summary(\"histogram\", summ_name, var, collection_key))\n return summ", "title": "" }, { "docid": "505b2d135631c09bc249452ba7a0c5cd", "score": "0.55960125", "text": "def __variable_summaries(var):\r\n with tf.name_scope('summaries'):\r\n mean = tf.reduce_mean(var)\r\n tf.summary.scalar('mean', mean)\r\n with tf.name_scope('stddev'):\r\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\r\n tf.summary.scalar('stddev', stddev)\r\n tf.summary.scalar('max', tf.reduce_max(var))\r\n tf.summary.scalar('min', tf.reduce_min(var))\r\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "eecde7b72ddea73fbe1d5cba4bd94bf2", "score": "0.55836385", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "c933973e6d1fff4aec2eada2ccf40969", "score": "0.55834097", "text": "def variable_summaries(var,name):\n with tf.name_scope(name+'/summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "1327616d409e7527a3f11dc388a7db9a", "score": "0.5576775", "text": "def variable_summaries(var, label):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean-' + label, mean)\n tf.summary.tensor_summary(label,var)\n with tf.name_scope('stddev-' + label):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev-' + label, stddev)\n tf.summary.scalar('max-' + label, tf.reduce_max(var))\n tf.summary.scalar('min-' + label, tf.reduce_min(var))\n tf.summary.histogram('histogram-' + label , var)", "title": "" }, { "docid": "0acd1162093ae11ee1b1f3c05e432f30", "score": "0.55684334", "text": "def add_batch_stats(\n self, n_batches, preds, targets, scores, loss, m_input, **context\n ):\n super().add_batch_stats(\n n_batches, preds, targets, scores, loss, m_input, **context\n )\n self.aggregate_topkpreds(scores, context)", "title": "" }, { "docid": "08c47e0196485fccbd6a68541ab51ec6", "score": "0.55627775", "text": "def _add_loss_summaries(self, total_loss):\n # Compute the moving average of all individual losses and the total\n # loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n 
losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.scalar_summary(l.op.name + ' (raw)', l)\n tf.scalar_summary(l.op.name, loss_averages.average(l))\n\n return loss_averages_op", "title": "" }, { "docid": "e01f61b45963be913d78865cf2c6da25", "score": "0.5554702", "text": "def _define_saver_summary(self, summary = True):\n if (self.logdir_train == None) or (self.logdir_test == None):\n raise ValueError('Train/Test directory not assigned')\n else:\n with tf.device(self.cpu): \n self.saver = tf.train.Saver()\n if summary:\n with tf.device(self.cpu):## TODO\n self.train_summary = tf.summary.FileWriter(self.logdir_train, tf.get_default_graph())\n self.test_summary = tf.summary.FileWriter(self.logdir_test)\n # self.weight_summary = tf.summary.FileWriter(self.logdir_train, tf.get_default_graph())", "title": "" }, { "docid": "ff5166b9de30923bac26cc8a30789875", "score": "0.5553847", "text": "def _activation_summary(x):\n\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))", "title": "" }, { "docid": "1c5850e29107c463319dde44d1fcfeac", "score": "0.55536085", "text": "def variable_summaries(self, var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "4050536ee128df3e8f94dddc8f5883e7", "score": "0.5543027", "text": "def variable_summaries(var):\n with tf.variable_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.variable_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "a0de69230c1e55596bfb5df7ff6ab120", "score": "0.5539859", "text": "def variable_summaries(self, var, name):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean/' + name, mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))\n tf.summary.scalar('sttdev/' + name, stddev)\n tf.summary.scalar('max/' + name, tf.reduce_max(var))\n tf.summary.scalar('min/' + name, tf.reduce_min(var))\n tf.summary.histogram(name, var)", "title": "" }, { "docid": "3d5632b1974488e0d1d8b097bbcff5ea", "score": "0.55392635", "text": "def variable_summaries(var):\n with tf.name_scope(\"summaries\"):\n mean = tf.reduce_mean(var)\n tf.summary.scalar(\"mean\", mean)\n\n with tf.name_scope(\"stddev\"):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n\n tf.summary.scalar(\"stddev\", stddev)\n tf.summary.scalar(\"max\", tf.reduce_mean(var))\n tf.summary.scalar(\"min\", tf.reduce_min(var))\n tf.summary.histogram(\"histogram\", var)", "title": "" }, { "docid": "dba60b4a2e49e77e1446d1f592fb49f1", "score": "0.55379325", "text": 
"def scalar(name, tensor, is_tpu=True):\n logging.info('Adding scale summary {}'.format(Pair(name, tensor)))\n if is_tpu:\n tf.add_to_collection('scalar_summaries', Pair(name, tf.reduce_mean(tensor)))\n else:\n tf.summary.scalar(name, tf.reduce_mean(tensor))", "title": "" }, { "docid": "e92f60709a02052a0bd43071b1d6eba6", "score": "0.5537742", "text": "def _variable_summaries(self, var, name):\n with tf.name_scope(name):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "57cc9ec1fd73894dc973c4e92ef77ed7", "score": "0.5535716", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "57cc9ec1fd73894dc973c4e92ef77ed7", "score": "0.5535716", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "57cc9ec1fd73894dc973c4e92ef77ed7", "score": "0.5535716", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "57cc9ec1fd73894dc973c4e92ef77ed7", "score": "0.5535716", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "57cc9ec1fd73894dc973c4e92ef77ed7", "score": "0.5535716", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "57cc9ec1fd73894dc973c4e92ef77ed7", "score": "0.5535716", "text": "def variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', 
tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "918b09cda976ec34704c8b87df341f0d", "score": "0.55356544", "text": "def variable_summaries(self, var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "title": "" }, { "docid": "270f635700815563b61319105181ce42", "score": "0.5530175", "text": "def variable_summaries(var, name):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean/' + name, mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev/' + name, stddev)\n tf.summary.scalar('max/' + name, tf.reduce_max(var))\n tf.summary.scalar('min/' + name, tf.reduce_min(var))\n tf.summary.histogram(name, var)\n pass", "title": "" }, { "docid": "79bee9ea6d275b5d853cb553a5e6ad68", "score": "0.552782", "text": "def _log_tf_eager_mode(params, metrics, full_experiment_dir):\n with summary_file_writer(full_experiment_dir).as_default():\n hp.hparams(params)\n for metric_name, metric_value in metrics.items():\n summary_scalar(metric_name, metric_value.value, step=1)", "title": "" }, { "docid": "58c3c8e32993fd56217c82cc6c018fc0", "score": "0.55274945", "text": "def variable_summaries(var, name):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.scalar_summary('mean/' + name, mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.scalar_summary('stddev/' + name, stddev)\n tf.scalar_summary('max/' + name, tf.reduce_max(var))\n tf.scalar_summary('min/' + name, tf.reduce_min(var))\n tf.histogram_summary(name, var)", "title": "" } ]
e12b204058941f7972fcc012f64c5c74
calibrate load z calibration data from file and return lookup table
[ { "docid": "b05059abdc9e9a381f41838516343bd6", "score": "0.5337697", "text": "def z_calibration(self, SaveAs=False):\n z = np.genfromtxt(calibration_data, names=True, skiprows=1)\n\n #fit = exp2d.Exp2D()\n p, best, err = fit().fit(np.array(z['mm']), np.array(z['value']), y0=0)\n fdata = fit().fitdata(np.arange(15,200), best, interpolate=True)\n #print fit.fitfunc(best, 50.3)\n\n if SaveAs:\n fig = plt.figure(1, (12,10))\n fig.clf()\n ax = fig.add_subplot(111)\n ax.scatter(z['mm'], z['value'], label='samples')\n ax.plot(fdata[0], fdata[1], 'y-', label='fit')\n ax.set_xlabel('height [nm]')\n ax.set_ylabel('intensity [a.u.]')\n plt.savefig(SaveAs)\n return lambda x: fit.fitfunc(best, x)", "title": "" } ]
[ { "docid": "7374324898ccb78687f58e814c21b167", "score": "0.6299177", "text": "def load_calib_data(self, filename):\n\n if os.path.exists(filename):\n data = pickle_load(filename)\n\n for camera in stereo.cameras:\n camera.cam_mat = data['cameras'][camera.id]['cam_mat']\n camera.cam_mat = data['cameras'][camera.id]['dist_coeff']\n\n stereo.rot_mat = data['rot_mat']\n stereo.trans_vec = data['trans_vec']\n stereo.rect_mat = data['rect_mat']\n stereo.proj_mat = data['proj_mat']\n else:\n print('Calibration file not found. Recalibrate all.')\n\n return", "title": "" }, { "docid": "d68af1ac99ea095638aca95124a942e0", "score": "0.6084279", "text": "def load_and_validate_calibration(self):\n try:\n data = numpy.loadtxt(self.RAW_DATA_PATH, delimiter=',')\n mag_x = data[:,0]\n mag_y = data[:,1]\n mag_z = data[:,2]\n acc_x = data[:,3]\n acc_y = data[:,4]\n acc_z = data[:,5]\n\n self.cal['mag_offsets'], self.cal['mag_transform'] = \\\n self.ellipsoid_fit(mag_x, mag_y, mag_z)\n self.cal['acc_offsets'], self.cal['acc_transform'] = \\\n self.ellipsoid_fit(acc_x, acc_y, acc_z)\n self.cal['misalignment'] = [[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]]\n self.cal['gyro_bias'] = [[0], [0], [0]]\n self.cal['gyro_scale'] = [[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]]\n\n rospy.loginfo(\n self.rover + ': IMU raw data file loaded from ' +\n self.RAW_DATA_PATH\n )\n except IOError as e:\n msg = (self.rover +\n ': FATAL ERROR. Extended calibration file not found.')\n rospy.logfatal(msg)\n self.diags_log.publish('<font color=Red>' + msg + '</font>')\n raise\n except ValueError as e:\n msg = (self.rover +\n ': FATAL ERROR. Error reading extended calibration file.')\n rospy.logfatal(msg)\n self.diags_log.publish('<font color=Red>' + msg + '</font>')\n raise\n\n # Calibration matrices are stored as lists and converted to numpy\n # arrays when needed.\n self.acc_offsets = self.cal['acc_offsets']\n self.acc_transform = self.cal['acc_transform']\n self.mag_offsets = self.cal['mag_offsets']\n self.mag_transform = self.cal['mag_transform']\n self.misalignment = self.cal['misalignment']\n self.gyro_bias = self.cal['gyro_bias']\n self.gyro_scale = self.cal['gyro_scale']\n\n # Check variance in errors\n mag_var_err = self.error(mag_x, mag_y, mag_z,\n self.mag_offsets, self.mag_transform)\n acc_var_err = self.error(acc_x, acc_y, acc_z,\n self.acc_offsets, self.acc_transform)\n\n mag_msg = '{}: Magnetometer v[Err]: {:7.6f}'.format(self.rover,\n mag_var_err)\n acc_msg = '{}: Accelerometer v[Err]: {:7.6f}'.format(self.rover,\n acc_var_err)\n self.diags_log.publish(mag_msg)\n rospy.loginfo(mag_msg)\n self.diags_log.publish(acc_msg)\n rospy.loginfo(acc_msg)\n\n if (math.isnan(mag_var_err) or\n abs(mag_var_err) >= IMU.MAG_VAR_TOLERANCE):\n msg = \"{}: The magnetometer fit is too poor to use.\".format(\n self.rover\n )\n rospy.logwarn(msg)\n self.diags_log.publish('<font color=Red>' + msg + '</font>')\n self.needs_calibration = True\n self._set_mode(IMU.MODE_2D)\n\n if (math.isnan(acc_var_err) or\n abs(acc_var_err) >= IMU.ACC_VAR_TOLERANCE):\n msg = \"{}: The accelerometer fit is too poor to use.\".format(\n self.rover\n )\n rospy.logwarn(msg)\n self.diags_log.publish('<font color=Red>' + msg + '</font>')\n self.needs_calibration = True\n self._set_mode(IMU.MODE_2D)\n\n # Check roll and pitch\n self.current_state = IMU.STATE_VALIDATE\n try:\n rospy.wait_for_message(\n self.rover + '/imu/raw',\n SwarmieIMU,\n timeout=5\n )\n except rospy.ROSException:\n # hopefully this doesn't happen\n pass\n\n # wait for 2 seconds for messages to come in 
and populate\n # self.rolls and self.pitches\n rospy.sleep(2)\n\n avg_roll = numpy.average(self.rolls) * 180 / math.pi\n avg_pitch = numpy.average(self.pitches) * 180 / math.pi\n\n self.diags_log.publish('{}: Average roll: {:6.3f} deg'.format(\n self.rover,\n avg_roll)\n )\n self.diags_log.publish('{}: Average pitch: {:6.3f} deg'.format(\n self.rover,\n avg_pitch)\n )\n\n if abs(avg_roll) > IMU.ROLL_PITCH_TOLERANCE:\n msg = '{}: Roll exceeds tolerance threshold of {:.1f} deg.'.format(\n self.rover,\n IMU.ROLL_PITCH_TOLERANCE\n )\n rospy.logwarn(msg)\n self.diags_log.publish('<font color=Red>' + msg + '</font>')\n self.needs_calibration = True\n self._set_mode(IMU.MODE_2D)\n\n if abs(avg_pitch) > IMU.ROLL_PITCH_TOLERANCE:\n msg = '{}: Pitch exceeds tolerance threshold of {:.1f} deg.'.format(\n self.rover,\n IMU.ROLL_PITCH_TOLERANCE\n )\n rospy.logwarn(msg)\n self.diags_log.publish('<font color=Red>' + msg + '</font>')\n self.needs_calibration = True\n self._set_mode(IMU.MODE_2D)\n\n self.finished_validating = True\n self.store_calibration(EmptyRequest())\n self.current_state = IMU.STATE_NORMAL", "title": "" }, { "docid": "84511f5600fc927a652854b8965841fb", "score": "0.59658164", "text": "def load_smartlab(fpath):\n RAS_HEADER_START = \"*RAS_HEADER_START\"\n RAS_HEADER_END = \"*RAS_HEADER_END\"\n RAS_INT_START = \"*RAS_INT_START\"\n RAS_INT_END = \"*RAS_INT_END\"\n HEADER_SPLIT = \"\\\"\"\n DATA_SPLIT = \" \"\n \n data = {}\n internal = {}\n data['name'] = os.path.basename(fpath)[:-4] #remove .ras\n data['header'] = dict()\n data['counts'] = []\n internal['MEAS_COND_AXIS_NAME'] = dict() #Array\n internal['MEAS_COND_AXIS_NAME_INTERNAL'] = dict() #Array\n internal['MEAS_COND_AXIS_OFFSET'] = dict() #Array\n internal['MEAS_COND_AXIS_POSITION'] = dict() #Array\n internal['MEAS_COND_AXIS_UNIT'] = dict() #Once\n \n data['scanaxis'] = \"\"\n internal['scan_axis_internal'] = \"\"\n data['angles'] = []\n data['numscans'] = 0\n internal['points_per_scan'] = 0\n\n with open(fpath, encoding=\"Latin-1\", mode=\"r\") as f:\n scan_start = False\n scan_end = False\n header_start = False\n scan_data = []\n scan_angle = []\n header_initialized = False\n scan_is_3d = False\n\n for line in f:\n if line.strip():\n line = line.strip()\n # print(\"Scan start: \", scan_start)\n if line.startswith(RAS_HEADER_START):\n header_start = True\n # print(line)\n continue\n if line.startswith(RAS_HEADER_END):\n header_start = False\n header_initialized = True\n # print(line)\n continue\n if line.startswith(RAS_INT_START):\n scan_start = True\n continue\n if line.startswith(RAS_INT_END):\n scan_start = False\n pad_points = internal['points_per_scan'] - len(scan_data)\n if pad_points > 0:\n print(\"Data not complete. 
Number of data point missing for this scan: \", pad_points)\n pad_data = [0]*pad_points\n scan_data.extend(pad_data)\n data['angles'] = scan_angle\n data['counts'].append(scan_data)\n data['numscans'] +=1\n scan_data = []\n scan_angle= []\n # continue\n \n if scan_start:\n ls = line.split(DATA_SPLIT)\n # print(ls)\n elif header_start:\n ls = line.split(HEADER_SPLIT)\n else:\n continue\n \n \n if header_start:\n key = ls[0][1:].strip()\n val = ls[1].strip()\n if not header_initialized: #If the header is read for the first time, we need to fill different metadata information (basically all)\n data['header'][key] = val #We collect all metadata in the header - done only Once.\n if \"MEAS_COND_AXIS_NAME-\" in key:\n tmp = key.split(\"-\")\n order = int(tmp[1].strip())\n internal['MEAS_COND_AXIS_NAME'][order] = val\n if \"MEAS_COND_AXIS_NAME_INTERNAL-\" in key:\n tmp = key.split(\"-\")\n order = int(tmp[1].strip())\n internal['MEAS_COND_AXIS_NAME_INTERNAL'][order] = val\n if \"MEAS_COND_AXIS_OFFSET-\" in key:\n tmp = key.split(\"-\")\n order = int(tmp[1].strip())\n try:\n val = float(val)\n except:\n val = 0\n internal['MEAS_COND_AXIS_OFFSET'][order] = val\n if \"MEAS_COND_AXIS_POSITION-\" in key:\n tmp = key.split(\"-\")\n order = int(tmp[1].strip())\n try:\n val = float(val)\n internal['MEAS_COND_AXIS_POSITION'][order] = [val]\n except:\n internal['MEAS_COND_AXIS_POSITION'][order] = val\n if \"MEAS_COND_AXIS_UNIT-\" in key:\n tmp = key.split(\"-\")\n order = int(tmp[1].strip())\n internal['MEAS_COND_AXIS_UNIT'][order] = val\n if \"MEAS_DATA_COUNT\" in key:\n internal['points_per_scan'] = int(float(val))\n if key == \"MEAS_SCAN_AXIS_X\":\n data['scanaxis'] = val\n if key == \"MEAS_SCAN_AXIS_X_INTERNAL\":\n internal['scan_axis_internal'] = val\n if key == \"MEAS_SCAN_START\":\n internal['scan_angle_start'] = float(val)\n if key == \"MEAS_SCAN_STEP\":\n internal['scan_angle_step'] = float(val)\n if key == \"MEAS_SCAN_STOP\":\n internal['scan_angle_stop'] = float(val)\n if key == \"MEAS_SCAN_START_TIME\":\n data['date'], data['time'] = val.split(' ')\n if key == \"MEAS_SCAN_MODE\":\n data['scanmode'] = val\n if key == \"MEAS_SCAN_SPEED\":\n data['scanspeed'] = float(val)\n if key == \"MEAS_3DE_STEP_AXIS_INTERNAL\":\n scan_is_3d = True\n internal['MEAS_3DE_STEP_AXIS_INTERNAL'] = val.strip()\n\n \n else: #Header already initialized, we add new position to the axis, if they are number and not string.\n if \"MEAS_COND_AXIS_POSITION-\" in key:\n tmp = key.split(\"-\")\n order = int(tmp[1].strip())\n try:\n val = float(val)\n internal['MEAS_COND_AXIS_POSITION'][order].append(val)\n except:\n continue\n \n if scan_start:\n a = float(ls[0].strip())\n v = float(ls[1].strip())\n scan_angle.append(a)\n scan_data.append(v)\n # print(\"Angle {:.2f} Intensity: {:.2f}\".format(a,v))\n \n data['counts'] = np.asarray(data['counts'])\n if data['numscans'] == 1:\n data['counts'] = data['counts'][0]\n # data['angles'] = np.linspace(internal['scan_angle_start'], internal['scan_angle_stop'], internal['points_per_scan'])\n\n if scan_is_3d:\n for k, v in internal['MEAS_COND_AXIS_NAME'].items():\n if v == internal['MEAS_3DE_STEP_AXIS_INTERNAL']:\n axis2_idx = k\n data['angles2'] = internal['MEAS_COND_AXIS_POSITION'][axis2_idx]\n\n\n return data", "title": "" }, { "docid": "830dcd84c4e8e81ba8b67803a9b215a9", "score": "0.59506077", "text": "def load_calib_file(self):\n\n try:\n\n with h5py.File(self.calib_file_path, 'r') as hf:\n self.n_harmonics = hf[\"/\"].attrs['n_harm']\n fan = hf.get('fan')\n self.fan_array = 
np.array(fan)\n fbn = hf.get('fbn')\n self.fbn_array = np.array(fbn)\n\n except:\n self.log.error(f\"Problem in reading time from calibration file {self.calib_file_path}\")", "title": "" }, { "docid": "ee8d334801bd2519b36b7a21a1afb6df", "score": "0.5824416", "text": "def read_calib(self, calib_fp, device):\n data = {}\n\n with open(calib_fp, \"r\") as f:\n for line in f.readlines():\n key, value = line.split(\":\", 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n\n # indices 0, 1, 2, 3 = left-gray, right-gray, left-rgb, right-rgb\n # note: left-rgb, right-rgb have same width, height, fx, fy, cx, cy\n\n # width = data['S_rect_02'][0]\n # height = data['S_rect_02'][1]\n sx = self.width / data[\"S_rect_02\"][0]\n sy = self.height / data[\"S_rect_02\"][1]\n\n fx = data[\"P_rect_02\"][0] * sx\n fy = data[\"P_rect_02\"][5] * sy\n\n cx = data[\"P_rect_02\"][2] * sx\n cy = data[\"P_rect_02\"][6] * sy\n\n # 3D-2D Projection:\n # u = (fx*x + cx * z) / z\n # v = (fy*y + cy * y) / z\n # shift on plane: delta_x = (fx * bx) / z\n # delta_y = (fy * by) / z\n # uv = (P * xyz) / z\n # P = [ fx 0 cx]\n # [ 0 fy cy]\n projection_matrix = np.array([[fx, 0.0, cx], [0.0, fy, cy]], dtype=np.float32)\n\n # 2D-3D Re-Projection:\n # x = (u/fx - cx/fx) * z\n # y = (v/fy - cy/fy) * z\n # z = z\n # xyz = (RP * uv1) * z\n # RP = [ 1/fx 0 -cx/fx ]\n # [ 0 1/fy -cy/fy ]\n # [ 0 0 1 ]\n reprojection_matrix = np.array(\n [[1 / fx, 0.0, -cx / fx], [0.0, 1 / fy, -cy / fy], [0.0, 0.0, 1.0]],\n dtype=np.float32,\n )\n\n projection_matrix = torch.from_numpy(projection_matrix).to(device)\n reprojection_matrix = torch.from_numpy(reprojection_matrix).to(device)\n\n return projection_matrix, reprojection_matrix", "title": "" }, { "docid": "4688fc8ee93520695c7fa0baddfa9444", "score": "0.5778891", "text": "def apply_caltable_uvfits(gaincaltable, datastruct, filename_out,cal_amp=False):\n\n if datastruct.dtype != \"EHTIM\":\n raise Exception(\"datastruct must be in EHTIM format in apply_caltable_uvfits!\")\n\n gains0 = pd.read_csv(gaincaltable)\n polygain={}\n mjd_start={}\n polyamp={}\n\n #deterimine which calibration to use when multiple options for multiple periods\n mjd_mean = datastruct.data['time'].mean() - MJD_0\n gains = gains0[(gains0.mjd_start<=mjd_mean)&(gains0.mjd_stop>=mjd_mean)].reset_index(drop=True).copy()\n\n for cou, row in gains.iterrows():\n polygain[row.station] = poly_from_str(str(row.ratio_phas))\n #if mjd0 provided, use it as mjd time reference offset, otherwise use mjd_start\n try: mjd_start[row.station] = row.mjd0\n except AttributeError: mjd_start[row.station] = row.mjd_start\n if cal_amp==True:\n polyamp[row.station] = poly_from_str(str(row.ratio_amp))\n else:\n polyamp[row.station] = poly_from_str('1.0')\n\n #print(gains0)\n #print(polygain)\n # interpolate the calibration table\n rinterp = {}\n linterp = {}\n skipsites = []\n\n #-------------------------------------------\n # sort by baseline\n data = datastruct.data\n idx = np.lexsort((data['t2'], data['t1']))\n bllist = []\n for key, group in it.groupby(data[idx], lambda x: set((x['t1'], x['t2'])) ):\n bllist.append(np.array([obs for obs in group]))\n bllist = np.array(bllist)\n\n # apply the calibration\n\n datatable = []\n coub=0\n for bl_obs in bllist:\n t1 = bl_obs['t1'][0]\n t2 = bl_obs['t2'][0]\n coub=coub+1\n print('Calibrating {}-{} baseline, {}/{}'.format(t1,t2,coub,len(bllist)))\n time_mjd = 
bl_obs['time'] - MJD_0 #dates are in mjd in Datastruct\n\n\n###########################################################################################################################\n#OLD VERSION WHERE LCP IS SHIFTED TO RCP\n# if t1 in skipsites:\n# rscale1 = lscale1 = np.array(1.)\n# else:\n# try:\n# rscale1 = 1./np.sqrt(polyamp[t1](time_mjd))\n# lscale1 = np.sqrt(polyamp[t1](time_mjd))*np.exp(1j*polygain[t1](time_mjd - mjd_start[t1])*np.pi/180.)\n# except KeyError:\n# rscale1 = lscale1 = np.array(1.)\n#\n# if t2 in skipsites:\n# rscale2 = lscale2 = np.array(1.)\n# else:\n# try:\n# rscale2 = 1./np.sqrt(polyamp[t2](time_mjd))\n# lscale2 = np.sqrt(polyamp[t2](time_mjd))*np.exp(1j*polygain[t2](time_mjd - mjd_start[t2])*np.pi/180.)\n# except KeyError:\n# rscale2 = lscale2 = np.array(1.) \n###########################################################################################################################\n\n###########################################################################################################################\n#NEW VERSION WHERE RCP IS SHIFTED TO LCP // MW 2018/NOV/13\n if t1 in skipsites:\n rscale1 = lscale1 = np.array(1.)\n else:\n try:\n rscale1 = 1./np.sqrt(polyamp[t1](time_mjd))*np.exp(-1j*polygain[t1](time_mjd - mjd_start[t1])*np.pi/180.)\n lscale1 = np.sqrt(polyamp[t1](time_mjd))\n except KeyError:\n rscale1 = lscale1 = np.array(1.)\n\n if t2 in skipsites:\n rscale2 = lscale2 = np.array(1.)\n else:\n try:\n rscale2 = 1./np.sqrt(polyamp[t2](time_mjd))*np.exp(-1j*polygain[t2](time_mjd - mjd_start[t2])*np.pi/180.)\n lscale2 = np.sqrt(polyamp[t2](time_mjd))\n except KeyError:\n rscale2 = lscale2 = np.array(1.)\n###########################################################################################################################\n\n\n rrscale = rscale1 * rscale2.conj()\n llscale = lscale1 * lscale2.conj()\n rlscale = rscale1 * lscale2.conj()\n lrscale = lscale1 * rscale2.conj()\n\n bl_obs['rr'] = (bl_obs['rr']) * rrscale\n bl_obs['ll'] = (bl_obs['ll']) * llscale\n bl_obs['rl'] = (bl_obs['rl']) * rlscale\n bl_obs['lr'] = (bl_obs['lr']) * lrscale\n\n bl_obs['rrweight'] = (bl_obs['rrweight']) / (np.abs(rrscale)**2)\n bl_obs['llweight'] = (bl_obs['llweight']) / (np.abs(llscale)**2)\n bl_obs['rlweight'] = (bl_obs['rlweight']) / (np.abs(rlscale)**2)\n bl_obs['lrweight'] = (bl_obs['lrweight']) / (np.abs(lrscale)**2)\n\n if len(datatable):\n datatable = np.hstack((datatable, bl_obs))\n else:\n datatable = bl_obs\n\n # put in uvfits format datastruct\n # telescope arrays\n tarr = datastruct.antenna_info\n tkeys = {tarr[i]['site']: i for i in range(len(tarr))}\n tnames = tarr['site']\n tnums = np.arange(1, len(tarr) + 1)\n xyz = np.array([[tarr[i]['x'],tarr[i]['y'],tarr[i]['z']] for i in np.arange(len(tarr))])\n\n # uvfits format output data table\n bl_list = []\n for i in xrange(len(datatable)):\n entry = datatable[i]\n t1num = entry['t1']\n t2num = entry['t2']\n rl = entry['rl']\n lr = entry['lr']\n if tkeys[entry['t2']] < tkeys[entry['t1']]: # reorder telescopes if necessary\n #print entry['t1'], tkeys[entry['t1']], entry['t2'], tkeys[entry['t2']]\n entry['t1'] = t2num\n entry['t2'] = t1num\n entry['u'] = -entry['u']\n entry['v'] = -entry['v']\n entry['rr'] = np.conj(entry['rr'])\n entry['ll'] = np.conj(entry['ll'])\n entry['rl'] = np.conj(lr)\n entry['lr'] = np.conj(rl)\n datatable[i] = entry\n bl_list.append(np.array((entry['time'],entry['t1'],entry['t2']),dtype=BLTYPE))\n _, unique_idx_anttime, idx_anttime = np.unique(bl_list, return_index=True, 
return_inverse=True)\n _, unique_idx_freq, idx_freq = np.unique(datatable['freq'], return_index=True, return_inverse=True)\n\n # random group params\n u = datatable['u'][unique_idx_anttime]\n v = datatable['v'][unique_idx_anttime]\n t1num = [tkeys[scope] + 1 for scope in datatable['t1'][unique_idx_anttime]]\n t2num = [tkeys[scope] + 1 for scope in datatable['t2'][unique_idx_anttime]]\n bls = 256*np.array(t1num) + np.array(t2num)\n jds = datatable['time'][unique_idx_anttime]\n tints = datatable['tint'][unique_idx_anttime]\n\n # data table\n nap = len(unique_idx_anttime)\n nsubchan = 1\n nstokes = 4\n nchan = datastruct.obs_info.nchan\n\n outdat = np.zeros((nap, 1, 1, nchan, nsubchan, nstokes, 3))\n outdat[:,:,:,:,:,:,2] = -1.0\n\n vistypes = ['rr','ll','rl','lr']\n for i in xrange(len(datatable)):\n row_freq_idx = idx_freq[i]\n row_dat_idx = idx_anttime[i]\n\n for j in range(len(vistypes)):\n outdat[row_dat_idx,0,0,row_freq_idx,0,j,0] = np.real(datatable[i][vistypes[j]])\n outdat[row_dat_idx,0,0,row_freq_idx,0,j,1] = np.imag(datatable[i][vistypes[j]])\n outdat[row_dat_idx,0,0,row_freq_idx,0,j,2] = datatable[i][vistypes[j]+'weight']\n\n # package data for saving\n obsinfo_out = datastruct.obs_info\n antennainfo_out = Antenna_info(tnames, tnums, xyz)\n uvfitsdata_out = Uvfits_data(u,v,bls,jds, tints, outdat)\n datastruct_out = Datastruct(obsinfo_out, antennainfo_out, uvfitsdata_out)\n\n # save final file\n save_uvfits(datastruct_out, filename_out)\n return", "title": "" }, { "docid": "c48f794bd4a90cb19cb935025207fbda", "score": "0.575878", "text": "def loadCoefficients(path):\n with open(path) as f:\n calibration_data = yaml.load(f)\n #Camera matrix and distortion\n cam_mtx = calibration_data.get(\"camera_matrix\")\n distort_mtx = calibration_data.get(\"dist_coeff\")\n cam_mtx = np.asarray(cam_mtx)\n distort_mtx = np.asarray(distort_mtx)\n return cam_mtx, distort_mtx", "title": "" }, { "docid": "6b650508a6b81ec3c28d1c000e84fa09", "score": "0.5713833", "text": "def tsai_calibration(filename):\n\n\t#Extract calibration object points\n\tcpoint = identify_points(filename)\n\tcamera_points = point_dict_to_points(cpoint)\n\tparams = calibrate(camera_points)\n\n\t#Create camera dict for triangulate function\n\tcamera = {}\n\tcamera['R'] = np.array(params[\"rotationMatrix\"])\n\tt = []\n\tfor v in ['tx','ty','tz']:\n\t\tt.append(params[v])\n\tcamera['t'] = np.array(t)\n\tf = params['f']\n\tcamera['K'] = np.array([\n\t\t\t[f,0,0],\n\t\t\t[0,f,0],\n\t\t\t[0,0,1]\n\t\t])\n\treturn camera", "title": "" }, { "docid": "5efc2ef47b9170b335b427eacef0e63b", "score": "0.5702899", "text": "def final_calibration(file_name, Polarization_number='first', Gain=1, velocity_correction='off' ):\t\n gain = 300 # will calculate it later once\t\n data = fits.open(str(file_name))\n header = data_header(file_name)\n N = header['NSPEC']\t\n\t\n # Average of data, either first or second polarization. 
\n average_spectra = average_spectrum(data, N, Polarization_number)\n print ('average_spectra', average_spectra)\n #frequency responce of our AVERAGE data spectrum\n freq = frequency_spectrum(header, average_spectra)\n Temperature = (average_spectra - base_fit_average(average_spectra, freq)) * Gain\n print ('Temperature', Temperature)\n doppler_velocity = doppler_velocity_correction(header, freq, velocity_correction)\n return doppler_velocity, Temperature", "title": "" }, { "docid": "ddc1027844f177353f4f477cf77d713a", "score": "0.5671455", "text": "def _read_coefficients(self):\n self._calibration['dig_T1'] = bbspi.read16_LE(self._cs, BME280_REGISTER_DIG_T1)\n self._calibration['dig_T2'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_T2)\n self._calibration['dig_T3'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_T3)\n\n self._calibration['dig_P1'] = bbspi.read16_LE(self._cs, BME280_REGISTER_DIG_P1)\n self._calibration['dig_P2'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P2)\n self._calibration['dig_P3'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P3)\n self._calibration['dig_P4'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P4)\n self._calibration['dig_P5'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P5)\n self._calibration['dig_P6'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P6)\n self._calibration['dig_P7'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P7)\n self._calibration['dig_P8'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P8)\n self._calibration['dig_P9'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_P9)\n\n self._calibration['dig_H1'] = bbspi.read8(self._cs, BME280_REGISTER_DIG_H1)\n self._calibration['dig_H2'] = bbspi.readS16_LE(self._cs, BME280_REGISTER_DIG_H2)\n self._calibration['dig_H3'] = bbspi.read8(self._cs, BME280_REGISTER_DIG_H3)\n self._calibration['dig_H4'] = (bbspi.read8(self._cs, BME280_REGISTER_DIG_H4) << 4) | (bbspi.read8(self._cs, BME280_REGISTER_DIG_H4+1) & 0xF)\n self._calibration['dig_H5'] = (bbspi.read8(self._cs, BME280_REGISTER_DIG_H5+1) << 4) | (bbspi.read8(self._cs, BME280_REGISTER_DIG_H5) >> 4)\n self._calibration['dig_H6'] = bbspi.read8(self._cs, BME280_REGISTER_DIG_H6)", "title": "" }, { "docid": "1d7d813f9cbf01daff352f323ffb5e6f", "score": "0.5649281", "text": "def load(self, gz_file):\n\n # Make sure it's a .key.gz file\n if gz_file[-7:] != '.key.gz':\n raise Exception, 'File type must be of type .key.gz'\n\n # Unzip file\n os.system('gunzip ' + gz_file)\n file = gz_file[:-3]\n\n # Open file.key and check its header\n with open(file, 'rt') as fp:\n \n line = fp.readline().split()\n num_desc = int(line[0])\n desc_len = int(line[1])\n\n # Read first line to get sizes\n if num_desc:\n # Keypoint locations\n locs_tmp = np.fromstring(fp.readline(), sep=' ')\n locs = np.zeros((num_desc, len(locs_tmp)))\n locs[0,:] = locs_tmp\n\n # Descriptor\n desc = np.zeros((num_desc, desc_len))\n cur_len = 0\n while cur_len < desc_len:\n desc_tmp = np.fromstring(fp.readline(), sep=' ')\n desc[0, cur_len:cur_len+len(desc_tmp)] = desc_tmp\n cur_len += len(desc_tmp)\n else:\n # Error, we couldn't read any descriptors\n locs = np.zeros((0,))\n desc = np.zeros((0,))\n\n\n # Parse file.key\n for i in range(num_desc):\n # First line is row, col, scale, ori\n vector = np.fromstring(fp.readline(), sep = ' ') \n locs[i, :] = vector\n\n # Following lines contain the descriptor\n cur_len = 0\n while cur_len < desc_len:\n vector = np.fromstring(fp.readline(), sep=' ')\n desc[i, cur_len:cur_len+len(vector)] = vector\n cur_len += len(vector)\n 
\n # Normalize each input vector to unit length\n desc[i, :] /= np.linalg.norm(desc[i, :])\n\n # Restore gzipped file\n os.system('gzip ' + file)\n\n return locs, desc", "title": "" }, { "docid": "2cd300ffb68432c4692bd716d399c93a", "score": "0.56086564", "text": "def get_calibration_from_file(self, path):\n # read file storage\n cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)\n # specify the type of data to retrieve otherwise get a FileNode object back instead of a matrix\n self.__camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n self.__dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"Camera Matrix: \", self.__camera_matrix.tolist())\n print(\"Dist Matrix: \", self.__dist_matrix.tolist())\n cv_file.release()", "title": "" }, { "docid": "fcaefc33a9b36f09cf26f0c22844c8af", "score": "0.5606932", "text": "def read_offsets(beamline: dict,\n z: np.arange) -> interpolate.interp1d:\n\n field_files = {}\n F = 0\n offset_correct_x, offset_correct_y = 0, 0\n offset_correct_xp, offset_correct_yp = 0, 0\n\n if not beamline:\n z_data = [i / len(z) for i in range(len(z))]\n F_data = [0 for i in range(len(z))]\n f = interpolate.interp1d(\n z_data, F_data,\n fill_value=(0, 0), bounds_error=False\n )\n F = F + f(z)\n offset_correct_x, offset_correct_y = F, F\n offset_correct_xp, offset_correct_yp = F, F\n else:\n for element in beamline.values():\n if not (element.file_name in field_files):\n field_files[element.file_name] = np.loadtxt(element.file_name)\n M = field_files[element.file_name]\n z_data = M[:, 0] + element.z0\n F_data = M[:, 1]\n\n if not element.length == 0:\n z_data = np.linspace(element.z_start, element.z_stop,\n len(z_data))\n x, xp = element.x, element.xp\n y, yp = element.y, element.yp\n z0 = element.z0\n f_x = interpolate.interp1d(\n z_data,\n [x + (z_data[i] - z0) * xp for i in range(len(z_data))],\n fill_value=(0, 0),\n bounds_error=False\n )\n f_xp = interpolate.interp1d(\n z_data,\n [xp for i in range(len(z_data))],\n fill_value=(0, 0),\n bounds_error=False\n )\n f_y = interpolate.interp1d(\n z_data,\n [y + (z_data[i] - z0) * yp for i in range(len(z_data))],\n fill_value=(0, 0),\n bounds_error=False\n )\n f_yp = interpolate.interp1d(\n z_data,\n [yp for i in range(len(z_data))],\n fill_value=(0, 0),\n bounds_error=False\n )\n else:\n f = interpolate.interp1d(\n z_data,\n 0 * F_data,\n kind='cubic',\n fill_value=(0, 0),\n bounds_error=False\n )\n f_x, f_xp, f_y, f_yp = f, f, f, f\n\n offset_correct_x = offset_correct_x + f_x(z)\n offset_correct_xp = offset_correct_xp + f_xp(z)\n offset_correct_y = offset_correct_y + f_y(z)\n offset_correct_yp = offset_correct_yp + f_yp(z)\n\n offset_correct_x = interpolate.interp1d(\n z,\n offset_correct_x,\n kind='linear',\n fill_value=(0, 0),\n bounds_error=False\n )\n offset_correct_y = interpolate.interp1d(\n z,\n offset_correct_y,\n kind='linear',\n fill_value=(0, 0),\n bounds_error=False\n )\n offset_correct_xp = interpolate.interp1d(\n z,\n offset_correct_xp,\n kind='linear',\n fill_value=(0, 0),\n bounds_error=False\n )\n offset_correct_yp = interpolate.interp1d(\n z,\n offset_correct_yp,\n kind='linear',\n fill_value=(0, 0),\n bounds_error=False\n )\n return (offset_correct_x, offset_correct_xp,\n offset_correct_y, offset_correct_yp)", "title": "" }, { "docid": "c037ad0c647cbaee8fabd438cef1f6c4", "score": "0.5603512", "text": "def getcube(filename, read_idx=[1, None], calibdir='calibrations/20160408/', \n bgsub=True, mask=True, gain=2, noisefac=0, \n R=30,method='optext', refine=False, suppressrn=False, 
fitshift=False, \n flatfield=True, smoothandmask=True, saveresid=False,\n maxcpus=multiprocessing.cpu_count()):\n \n ################################################################\n # Initiate the header with critical data about the observation.\n # Then add basic information about the calibration data used to\n # extract a cube.\n ################################################################\n \n tstart = time.time()\n header = utr.metadata(filename)\n\n try:\n calhead = fits.open(calibdir + '/cal_params.fits')[0].header\n header.append(('comment', ''), end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', '*'*21 + ' Calibration Data ' + '*'*21), end=True)\n header.append(('comment', '*'*60), end=True) \n header.append(('comment', ''), end=True)\n for key in calhead:\n header.append((key, calhead[key], calhead.comments[key]), end=True)\n except:\n print('Unable to append calibration parameters to FITS header.') \n\n ################################################################\n # Read in file and return an instance of the Image class with the\n # up-the-ramp combination of reads. Subtract the thermal\n # background and apply a bad pixel mask.\n ################################################################\n \n maskarr = None\n if mask == True:\n maskarr = fits.open(calibdir + '/mask.fits')[0].data \n \n inImage = utr.calcramp(filename=filename, mask=maskarr,read_idx=read_idx, \n header=header, gain=gain, noisefac=noisefac, \n maxcpus=maxcpus,fitnonlin=True,fitexpdecay=True)\n \n\n ################################################################\n # Read in necessary calibration files and extract the data cube.\n # Optionally fit for a position-dependent offset \n ################################################################\n\n header.append(('comment', ''), end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', '*'*22 + ' Cube Extraction ' + '*'*21), end=True)\n header.append(('comment', '*'*60), end=True) \n header.append(('comment', ''), end=True)\n\n if flatfield:\n lensletflat = fits.open(calibdir + '/lensletflat.fits')[0].data\n else:\n lensletflat = None\n header['flatfld'] = (flatfield, 'Flatfield the detector and lenslet array?')\n\n datacube = None\n\n\n if method == 'optext':\n loc = primitives.PSFLets(load=True, infiledir=calibdir)\n lam_midpts = fits.open(calibdir + '/polychromekeyR%d.fits' % (R))[0].data\n datacube = primitives.optext_spectra(inImage, loc, lam_midpts, header=inImage.header, flat=lensletflat, maxcpus=maxcpus)\n else:\n print('Only optext is not supported for quicklook')\n\n if datacube is None:\n raise ValueError(\"Datacube extraction method \" + method + \" not implemented.\")\n\n\n\n ################################################################\n # Add the original header for reference as the last HDU\n ################################################################\n\n datacube.extrahead = fits.open(filename)[0].header\n print(\"Total time elapsed: %.0f seconds\" % (time.time() - tstart))\n\n return datacube,inImage", "title": "" }, { "docid": "193c51221a4e0470610a140a7cbfb681", "score": "0.5597413", "text": "def parseCalibrationFile(data:str, peak_file:bool=False):\n lines = data.split(\"\\n\")\n i = 0\n out = {}\n while (lines[i].startswith(\"\\t#\")):\n out[lines[i].replace(\"\\t#\",\"\").split(\":\")[0]] = lines[i].split(\":\")[1].strip()\n i += 1\n\n if \"intensity_calibration\" in out.keys():\n if out[\"intensity_calibration\"] == \"\":\n out[\"intensity_calibration\"] = 
None\n else:\n out[\"intensity_calibration\"] = list(map(lambda x : float(x.strip()), out[\"intensity_calibration\"].split(\",\")))\n\n data = []\n for strip in lines[i:]:\n if strip.strip() == \"\":\n data.append(None)\n else:\n data.append(list(map(lambda x : float(x.strip()) if not peak_file else (float(x.split(\":\")[0].strip()), float(x.split(\":\")[1].strip())), strip.split(\" \"))))\n\n out[\"data\"] = data\n return out", "title": "" }, { "docid": "7a34db26e395a2b9eda50e186df25c3f", "score": "0.5588083", "text": "def cal_load_txtfile(cal, fname):\n data = np.loadtxt(fname)\n cal.xbins = data[0, 1:]\n cal.nbins = len(cal.xbins)\n senids = map(int, data[1:, 0])\n cal.indexes = []\n cal.values = np.array([[0]*cal.nbins, ]*cal.nsensors)\n for i, senid in enumerate(senids):\n index = index_of_sensorid(senid)\n cal.indexes.append(index)\n # print('sensor ID {} index {} i {}'.format(senid,index,i))\n cal.values[index] = data[i+1, 1:]\n print('loaded calibration data from file {}'.format(fname))\n print('number of sensors with data {}'.format(len(cal.indexes)))\n return", "title": "" }, { "docid": "81d42a6a4129882e6c0d68b16e5fa84d", "score": "0.55679667", "text": "def loadCalibration(self, path=None):\n with self.lock:\n self.lensCalibration = dict(\n np.load(path or self.output_file, allow_pickle=True)\n )\n if (\n \"mtx\" in self.lensCalibration.keys()\n and self.lensCalibration[\"mtx\"] is not None\n ):\n self.lensCalibration[\"state\"] = STATE_SUCCESS\n else:\n self.lensCalibration[\"state\"] = STATE_FAIL\n if not \"resolution\" in self.lensCalibration.keys():\n self.lensCalibration[\"resolution\"] = LEGACY_STILL_RES\n self.onChange()", "title": "" }, { "docid": "f1dc255f16125a2badc32cb31579c823", "score": "0.5560603", "text": "def load_calib(filename, debug=False):\n with open(filename) as f_calib:\n lines = f_calib.readlines()\n \n P_rect = [] \n for line in lines:\n title = line.strip().split(' ')[0]\n if len(title):\n if title[0] == \"R\":\n R_rect = np.array(line.strip().split(' ')[1:], dtype=np.float32)\n R_rect = np.reshape(R_rect, (3,3))\n elif title[0] == \"P\":\n p_r = np.array(line.strip().split(' ')[1:], dtype=np.float32)\n p_r = np.reshape(p_r, (3,4))\n P_rect.append(p_r)\n elif title[:-1] == \"Tr_velo_to_cam\":\n Tr = np.array(line.strip().split(' ')[1:], dtype=np.float32)\n Tr = np.reshape(Tr, (3,4))\n Tr = np.vstack([Tr,np.array([0,0,0,1])])\n \n return R_rect, P_rect, Tr", "title": "" }, { "docid": "ab1ffa4cd933eb1afddb9d5ef3cf7b6b", "score": "0.5538729", "text": "def __loadvlsr__(infile, gbtfits):\n\t# Load the GBT NH3 FITS image\n\tdata_gbt, hdr_gbt = __nh3_read__(gbtfits)\n\txaxis, yaxis, vaxis = __nh3_load_axes__(hdr_gbt)\n\n\tfirstguess = pylab.np.zeros([4,hdr_gbt['NAXIS2'],hdr_gbt['NAXIS1']])\n\n\t# Open Zoey's result\n\ttemp = open(infile)\n\ttext = temp.readlines()\n\tfor block in pylab.np.arange(len(text)/14):\n\t\tindices = text[block*14].split()\n\t\txno = pylab.np.int(indices[0])\n\t\tyno = pylab.np.int(indices[1])\n\t\tfirstguess[0,yno,xno] = xaxis[xno]\n\t\tfirstguess[1,yno,xno] = yaxis[yno]\n\t\tvhit1 = pylab.np.round(pylab.np.float(text[block*14+1].split()[2]))\n\t\tvhit2 = pylab.np.round(pylab.np.float(text[block*14+6].split()[2]))\n\t\tif vhit1 > 0:\n\t\t\tfirstguess[2,yno,xno] = vaxis[vhit1]\n\t\tif vhit2 > 0:\n\t\t\tfirstguess[3,yno,xno] = vaxis[vhit2]\n\ttemp.close()\n\tdel temp\n\n\treturn firstguess, xaxis, yaxis", "title": "" }, { "docid": "f03e45fd3a22968dfa584c2f0895663b", "score": "0.5506295", "text": "def read(self):\n try:\n f = 
open(\"/home/robot/calibrate.txt\", \"r\")\n self.colorsensor[port].black = int(f.readline())\n self.colorsensor[port].white = int(f.readline())\n f.close()\n except:\n print(\"we can not find the calibration file\")", "title": "" }, { "docid": "186f605c3ea84dcf789935ec11e40bb2", "score": "0.54861134", "text": "def calibration(self):\n\t\treturn self.query('V')", "title": "" }, { "docid": "cc07dfa259914e8c65deca6e3340501d", "score": "0.5481746", "text": "def importComsol(filename):\n\n raw = np.loadtxt(filename, skiprows=9, delimiter=\",\")\n\n x = raw[:, 0]\n y = raw[:, 1]\n z = raw[:, 2]\n Bx = raw[:, 3]\n By = raw[:, 4]\n Bz = raw[:, 5]\n Bnorm = raw[:, 6]\n\n def getRes(x):\n res = np.abs(np.unique(x)[1] - np.unique(x)[0])\n return res\n\n def getShift(x):\n shift = x[np.argmin(np.abs(x))]\n return shift\n\n res = (getRes(x), getRes(y), getRes(z))\n shift = (getShift(x), getShift(y), getShift(z))\n\n xInd = np.array((x - shift[0]) / res[0], dtype=int)\n yInd = np.array((y - shift[1]) / res[1], dtype=int)\n zInd = np.array((z - shift[2]) / res[2], dtype=int)\n\n xInd -= np.min(xInd)\n yInd -= np.min(yInd)\n zInd -= np.min(zInd)\n\n dims = (np.unique(x).shape[0], np.unique(y).shape[0], np.unique(z).shape[0])\n data = np.zeros((dims))\n data[data == 0] = \"NaN\"\n\n for i in range(len(xInd)):\n data[xInd[i], yInd[i], zInd[i]] = Bz[i]\n\n # change ij indexing to xy indexing -> see numpy meshgrid documentation\n data = data # .swapaxes(0,1)\n try:\n info = np.loadtxt(filename, skiprows=7, max_rows=1, dtype=np.str)[1:]\n try:\n print(\n *info,\n \"\\nResolution x: {0} {3}, y: {1} {3}, z: {2} {3}\".format(*res, info[2]),\n )\n except IndexError:\n print(info)\n except TypeError:\n print(\"Update your numpy to have nice output.\")\n\n return data, np.mean(res)", "title": "" }, { "docid": "8d7c561077b0cc2eb1db2b7e842fb0cd", "score": "0.5478196", "text": "def _read(self):\n with open(self.filename, 'r') as f:\n map_str=f.readlines()\n self.header = map_str[:4]\n arr1 = np.array(''.join(map_str[4:]).split(), dtype='float')\n ### Data contains a list of vector lines\n self.data = list(generate_vl(arr1))\n ### Maximum height of all the vector lines\n self.max_height = np.array([v.h for v in self.data]).max()", "title": "" }, { "docid": "518915af32096fa8eefea29e184a5a40", "score": "0.54707724", "text": "def load_aux_data (file_path, minimum_scan_angle, file_object=None) :\n \n # make our return structure\n aux_data_sets = { }\n \n # load the longitude and latitude\n file_object, aux_data_sets[LON_KEY] = load_variable_from_file (ctp_guidebook.LONGITUDE_NAME,\n file_path=file_path, file_object=file_object)\n file_object, aux_data_sets[LAT_KEY] = load_variable_from_file (ctp_guidebook.LATITUDE_NAME,\n file_path=file_path, file_object=file_object)\n \n # load the day/night flag to make day/night mask\n file_object, day_night_flag = load_variable_from_file (ctp_guidebook.DAY_NIGHT_FLAG_NAME,\n file_path=file_path, file_object=file_object)\n\n # build the day and night masks\n aux_data_sets[DAY_MASK_KEY] = (day_night_flag == 1)\n aux_data_sets[NIGHT_MASK_KEY] = (day_night_flag == 2)\n \n return file_object, aux_data_sets", "title": "" }, { "docid": "8d9b0881c5f0d882d6f8052e397ee866", "score": "0.5398856", "text": "def load_throttle_calibration(self):\n\t\tif self.debugging:\n\t\t\trospy.loginfo('Loading calibration')\n\t\t\n\t\t# Check for existance of the throttle calibration file on the ros param server\n\t\tif not rospy.has_param('throttle_calib_file'):\n\t\t\trospy.signal_shutdown('Throttle 
calibration file has not been loaded into the parameter server')\n\t\telse:\n\t\t\tthrottle_calib_file = rospy.get_param('throttle_calib_file')\n\t\t\n\t\t# Load the throttle calibration file into a dict\n\t\twith open(throttle_calib_file) as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.replace('\\'',\"\")\n\t\t\t\t(key, val) = line.split(':')\n\t\t\t\tself.throttle_mappings[float(key)] = float(val)\n\t\t\n\t\t# Sort the dict\n\t\tself.throttle_mappings = collections.OrderedDict(sorted(self.throttle_mappings.items()))", "title": "" }, { "docid": "10e0fcc5297e0ee69742d01dc33921ce", "score": "0.5377507", "text": "def _init_lookup(self):\n if self.lookup is not None:\n return\n\n if self.lookup_file is not None:\n lookup = np.load(self.lookup_file)\n assert lookup.ndim == 3\n assert lookup.dtype == np.bool\n rospy.loginfo(\"Loaded lookup table from lookup_file\")\n\n elif self.pixel_file is not None:\n # load pixels' npy files\n calib = np.load(self.pixel_file)\n lookup = np.zeros((2**(self.nbits),)*3, dtype=np.float32)\n indices = self.lookup_idx(calib)\n\n # lookup[indices] += 1 doesn't work for repeated indices\n np.add.at(lookup, indices, 1)\n\n # normalize\n lookup /= np.max(lookup)\n\n # this makes the pixels brighter - better for debugging\n lookup = lookup ** 0.5\n rospy.loginfo(\"Generated lookup table from pixel_file\")\n\n else:\n raise ValueError(\"Either the lookup_file or pixel_file params must be set\")\n\n # push back into a uint8\n self.lookup = np.clip((255 * lookup).astype(np.uint8), 0, 255)", "title": "" }, { "docid": "f3adc7104ba9df8691436243e231ce7f", "score": "0.53744656", "text": "def load_discales(regs=[], pattern_R0='/default/%s/%s/R0',pattern_Z='/default/%s/%s/Z/gaus0'):\n res = {}\n for det in dets:\n res[det] = {}\n for reg in regs:\n res[det][reg] = {}\n aR0 = a.data[pattern_R0%(det,reg)]; assert aR0\n if True: # False if z peak fit was not run\n aZ = a.data[pattern_Z%(det,reg)]; assert aZ\n res[det][reg]['ksf'] = scales(aR0['ksf'],aR0['chie'],aZ['data_mz'],aZ['data_emz'],aZ['mc_mz'])\n if _DISABLE_CHI:\n res[det][reg]['chif'] = scales(aR0['ksf'],aR0['chie'],aZ['data_mz'],aZ['data_emz'],aZ['mc_mz'])\n else:\n res[det][reg]['chif'] = scales(aR0['chif'],aR0['chie'],aZ['data_mz'],aZ['data_emz'],aZ['mc_mz'])\n res[det][reg]['ksp'] = scales(aR0['ksp'],aR0['chie'],aZ['data_mz'],aZ['data_emz'],aZ['mc_mz'])\n res[det][reg]['chip'] = scales(aR0['chip'],aR0['chie'],aZ['data_mz'],aZ['data_emz'],aZ['mc_mz'])\n else:\n res[det][reg]['ksf'] = scales(aR0['ksf'],aR0['chie'],1.0,0.1,1.01)\n res[det][reg]['chif'] = scales(aR0['chif'],aR0['chie'],1.0,0.1,1.01)\n res[det][reg]['ksp'] = scales(aR0['ksp'],aR0['chie'],1.0,0.1,1.01)\n res[det][reg]['chip'] = scales(aR0['chip'],aR0['chie'],1.0,0.1,1.01)\n return res", "title": "" }, { "docid": "d74738a38fc8870da297eb940c8e1db4", "score": "0.53428614", "text": "def read_lightcurve(asciifile, f):\n\n skip = 0\n if f == 'u':\n columns = (0,1,2)\n skip = 40\n elif f == 'g':\n columns = (3,4,5)\n elif f == 'r':\n columns = (6,7,8)\n elif f == 'i':\n columns = (9,10,11)\n elif f == 'z':\n columns = (12,13,14)\n else:\n pass\n lc = np.genfromtxt(asciifile, dtype=[('day', np.float), ('mag', np.float), ('error', np.float)],\n skip_header = 1, skip_footer = skip, usecols = columns)\n return {f:lc}", "title": "" }, { "docid": "458cf2428c8045dfd739021be3eec109", "score": "0.53404593", "text": "def loadModels(dir=basedir+'TMB03/', file='alpha-models', abundance='', suffix='.dat', \\\n Fe1='Fe5270', Fe2='Fe5335', Fe3='Fe5406', rawTable=False, 
verbose=True):\n\n tab = ap.Table(dir+file+abundance+suffix, type='ascii')\n tab2 = ap.Table(dir+file+abundance+suffix, type='ascii') # 2nd table for removing columns\n tab2.remove_columns(['age', '[Z/H]','[alpha/Fe]'])\n \n # add [MgFe], [MgFe]'\n tab2.add_column('MgFeP', it.calcMgFePrime(tab2.Mgb, tab2[Fe1], tab2[Fe2]))\n tab2.add_column('MgFe', it.calcMgFe(tab2.Mgb, tab2[Fe1], tab2[Fe2]))\n if Fe3 is not None: tab2.add_column('MgFe3', it.calcMgFe(tab2.Mgb, tab2[Fe1], tab2[Fe2], tab2[Fe3]))\n tab2.add_column('meanFe', it.calcMeanFe(tab2[Fe1], tab2[Fe2]))\n if Fe3 is not None: tab2.add_column('mean3Fe', it.calcMeanFe(tab2[Fe1], tab2[Fe2], tab2[Fe3]))\n \n if verbose: print 'Available rows are: ', tab2.keys()\n \n if rawTable:\n newtab=tab\n else:\n Zs = list(set(tab['[Z/H]']))\n As = list(set(tab['[alpha/Fe]']))\n Zs.sort()\n As.sort()\n\n if verbose:\n print \"Found [Z/H] values of \", Zs\n print 'Found [alpha/Fe] values of ', As\n\n newtab={}\n for zi in Zs:\n minitab={}\n for ai in As:\n loc = np.where((tab['[alpha/Fe]']==ai) & (tab['[Z/H]']==zi))[0]\n minitab['A='+str(ai)]=tab2.rows(loc)\n newtab['Z='+str(zi)]=minitab\n if verbose: print 'Length of Z='+str(zi)+', A='+str(ai)+' data is '+str(len(loc))\n\n # add helper keys\n newtab['Zkeys']=np.array(['Z='+str(x) for x in Zs]) # don't just call tab.keys() here as order gets scrambled compared to Zs, As\n newtab['Akeys']=np.array(['A='+str(y) for y in As])\n \n newtab['Zs']=np.array(Zs)\n newtab['As']=np.array(As)\n\n newtab['age']=tab.age[loc]\n\n return newtab", "title": "" }, { "docid": "d0e0a256b9c6835ae23157f8c884f03b", "score": "0.5339301", "text": "def read_calib_file(filepath):\n data = {}\n\n with open(filepath, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n\n return data", "title": "" }, { "docid": "9eccbf1e4bb89291add273c04aec5d77", "score": "0.5336127", "text": "def import_bss():\n cat = np.loadtxt('cross-matching/bss.dat', usecols=range(1,7))\n return cat", "title": "" }, { "docid": "1b69107a4ae29f288746f59ad2c3f25b", "score": "0.532969", "text": "def calc_zpcoef(calfile, fpcoef):\n #read in the values for the calfile--assumes the format for the file\n #r,r_err,z,t,w,img=np.loadtxt(calfile, usecols=(0,1,4,5,6,8), unpack=True, dtype={8,str})\n data=np.loadtxt(calfile, dtype={'names': ('r','r_err', 'x', 'y', 'z', 't', 'w', 'dn', 'image'),'formats':('f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4','S30')})\n #set the start time and convert the time to a date time unit\n time_list=[]\n time_start=None\n\n if data.size==0:\n raise SaltError('%s needs to have at least one entry' % calfile)\n elif data.size==1:\n img_list=[str(data['image'])]\n else:\n img_list=data['image']\n\n for img in img_list:\n if not os.path.isfile(img): \n raise SaltError('%s needs to be available to open' % img)\n t=get_datetime(saltio.openfits(img))\n if time_start is None: time_start=t\n time_list.append((t-time_start).seconds)\n time_arr=np.array(time_list)\n\n #calculate the coefficients\n wf=fpfunc(data['z'], data['r'], time_arr, coef=fpcoef)\n if data.size==1:\n coef=np.array([0, data['w']-wf])\n else:\n coef=np.polyfit(time_arr, data['w']-wf, 1)\n \n return coef, time_start", "title": "" }, { "docid": "96972c938ad8a743b2ea95e6931e50b9", "score": "0.5327936", "text": "def get_calibration_matrix(path):\n object_points = [] # 3d point in real 
world space\n img_points = [] # 2d points in image plane.\n\n images = glob.glob(path)\n total_image_count = len(images)\n\n image_count = 1\n fig = plt.figure()\n for filename in images:\n img = cv2.imread(filename)\n nx, ny = 6, 9\n retval, corners = cv2.findChessboardCorners(img, (nx, ny))\n objp = np.zeros((nx * ny, 3), np.float32)\n objp[:, :2] = np.mgrid[0: nx, 0: ny].T.reshape(-1, 2)\n \n if not retval:\n nx, ny = 5, 9 # Trying with 5 rows\n objp = np.zeros((nx * ny, 3), np.float32)\n objp[:, :2] = np.mgrid[0: nx, 0: ny].T.reshape(-1, 2)\n retval, corners = cv2.findChessboardCorners(img, (nx, ny))\n \n if retval:\n object_points.append(objp)\n img_points.append(corners)\n\n ax = fig.add_subplot(math.ceil(total_image_count / 2), 2, image_count)\n chessboard_with_corners = cv2.drawChessboardCorners(img, (nx, ny), corners, retval)\n chessboard_with_corners = cv2.cvtColor(chessboard_with_corners, cv2.COLOR_BGR2RGB)\n ax.imshow(chessboard_with_corners)\n ax.axis('off')\n image_count += 1\n\n return cv2.calibrateCamera(object_points, img_points, img.shape[0:2], None, None), fig", "title": "" }, { "docid": "be17b6bf0864222c3747f5b76a3c0613", "score": "0.53271466", "text": "def read_raw_calib_file(self, filepath):\n data = {}\n with open(filepath, 'r') as f:\n for line in f:\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which we don't\n # care about.\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data", "title": "" }, { "docid": "f02f276a74c60f43a11dc98e9b49cad2", "score": "0.53076315", "text": "def read_calib_file(filepath):\n data = {}\n with open(filepath, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0:\n continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n\n return data", "title": "" }, { "docid": "62a6cbfbcfdf4a5bb83c2db1b42b00a2", "score": "0.52968794", "text": "def _load(self):\n syms = subprocess.check_output(['head','-1',self.fn]).decode().replace('\\r','').replace('\\n','').split(',')[1:]\n rates = np.genfromtxt(self.fn, delimiter=',',skip_header=1,dtype='str')\n days = []\n utcs = []\n for d in rates[:,0]:\n dt = datetime.datetime.strptime(d, '%Y-%m-%d')\n days.append(dt.strftime('%Y%m%d'))\n utcs.append(int(dt.strftime('%s'))+17*3600) # utc at 17:00 of the day\n\n rates[np.nonzero(rates=='')]='1'\n rates=rates[:,1:].astype(float)\n return np.array(syms), np.array(days), np.array(utcs), rates", "title": "" }, { "docid": "328b018cd7f83d657a36c523c45d455f", "score": "0.5279339", "text": "def read_cfgs(self, filename=\"output.data\"):\n data_pool = []\n with zopen(filename, \"rt\") as f:\n lines = f.read()\n\n block_pattern = re.compile(\"begin\\n(.*?)end\", re.S)\n lattice_pattern = re.compile(\"lattice(.*?)\\n\")\n position_pattern = re.compile(\"atom(.*?)\\n\")\n energy_pattern = re.compile(\"energy(.*?)\\n\")\n\n for block in block_pattern.findall(lines):\n d = {\"outputs\": {}}\n lattice_str = lattice_pattern.findall(block)\n lattice = Lattice(\n np.array([latt.split() for latt in lattice_str], dtype=np.float64) * self.bohr_to_angstrom\n )\n position_str = position_pattern.findall(block)\n positions = pd.DataFrame([pos.split() for pos in position_str])\n positions.columns = [\"x\", \"y\", \"z\", \"specie\", \"charge\", \"atomic_energy\", \"fx\", \"fy\", \"fz\"]\n coords 
= np.array(positions.loc[:, [\"x\", \"y\", \"z\"]], dtype=np.float64)\n coords = coords * self.bohr_to_angstrom\n species = np.array(positions[\"specie\"])\n forces = np.array(positions.loc[:, [\"fx\", \"fy\", \"fz\"]], dtype=np.float64)\n forces = forces / self.eV_to_Ha / self.bohr_to_angstrom\n energy_str = energy_pattern.findall(block)[0]\n energy = float(energy_str.lstrip()) / self.eV_to_Ha\n struct = Structure(lattice=lattice, species=species, coords=coords, coords_are_cartesian=True)\n d[\"structure\"] = struct.as_dict()\n d[\"outputs\"][\"energy\"] = energy\n d[\"outputs\"][\"forces\"] = forces\n d[\"num_atoms\"] = len(struct)\n\n data_pool.append(d)\n _, df = convert_docs(docs=data_pool)\n return data_pool, df", "title": "" }, { "docid": "1a8426e6bf471309218fe0dfcd035922", "score": "0.5271775", "text": "def load_hdf5_files(\n hdf5files,\n calibration_map=None,\n energy_cutoff=100.0/HARTREE_TO_KCAL_PER_MOL,\n use_fitted=False):\n\n # zs = []\n Xs = []\n ys = []\n\n print(\"Loading...\")\n\n num_samples = 0\n\n for hdf5file in hdf5files:\n print(\"Processing\", hdf5file)\n adl = pya.anidataloader(hdf5file)\n for data in adl:\n\n # Extract the data\n P = data['path']\n R = data['coordinates']\n E = data['energies']\n S = data['species']\n smi = data['smiles']\n\n path = P.split(\"/\")[-1]\n\n Z = convert_species_to_atomic_nums(S)\n\n if len(Z) > MAX_ATOM_LIMIT:\n print(\"skippng\", P, 'n_atoms too large:', len(Z), '>', MAX_ATOM_LIMIT)\n continue\n\n minimum_wb97 = np.amin(E)\n\n if use_fitted:\n\n calibration_offset = 0\n\n if calibration_map:\n calibration_offset = calibration_map[path] - minimum_wb97 \n\n for k in range(len(E)):\n if energy_cutoff is not None and E[k] - minimum_wb97 > energy_cutoff:\n continue\n\n\n # BROKEN FOR NOW\n js18pairwiseOffset = correction.jamesPairwiseCorrection_C(R[k], Z)/HARTREE_TO_KCAL_PER_MOL\n y = E[k] - js18pairwiseOffset + calibration_offset\n\n ys.append(y)\n X = featurizer.ANI1(R[k], Z)\n Xs.append(X)\n # BROKEN FOR NOW\n\n else:\n wb97offset = 0\n wb97Xoffset = 0\n mo62xoffset = 0\n\n for z in Z:\n wb97offset += selfIxnNrgWB97[z]\n wb97Xoffset += selfIxnNrgWB97X[z]\n mo62xoffset += selfIxnNrgMO62x[z]\n\n calibration_offset = 0\n\n if calibration_map:\n min_atomization_wb97 = minimum_wb97 - wb97offset\n min_atomization_mo62x = calibration_map[path] - mo62xoffset\n # difference between the wb97_min and the mo62x_min\n calibration_offset = min_atomization_mo62x - min_atomization_wb97\n\n for k in range(len(E)):\n if energy_cutoff is not None and E[k] - minimum_wb97 > energy_cutoff:\n continue\n\n\n # LDJ: using wb97x offset \n y = E[k] - wb97Xoffset + calibration_offset\n ys.append(y)\n\n X = np.concatenate([np.expand_dims(Z, 1), R[k]], axis=1)\n Xs.append(X)\n\n return Xs, ys", "title": "" }, { "docid": "d1449b9bc46b12e9458310f23a0fa73a", "score": "0.5262481", "text": "def read_calib_file(path):\n float_chars = set(\"0123456789.e+- \")\n data = {}\n with open(path, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n value = value.strip()\n data[key] = value\n if float_chars.issuperset(value):\n # try to cast to float array\n try:\n data[key] = np.array(list(map(float, value.split(' '))))\n except ValueError:\n # casting error: data[key] already eq. 
value, so pass\n pass\n\n return data", "title": "" }, { "docid": "45366e05edf6ca90e94fed7fb66b63c1", "score": "0.524578", "text": "def load_calibration_file(cls, calibration_fpath: str) -> pd.DataFrame:\n try:\n calibration = pd.read_csv(\n calibration_fpath, index_col=[\"Primary\", \"Setting\"]\n )\n calibration.columns = calibration.columns.astype(\"int64\")\n calibration.columns.name = \"Wavelength\"\n return calibration\n\n except Exception as e:\n print(cls.load_calibration_file.__doc__)\n raise e", "title": "" }, { "docid": "942c997f8b4095ef2686a6e5fd00e8ee", "score": "0.5235805", "text": "def get_irradiance_calibration(self):\n msg = self.cmd.HEADER\n msg[4] = STS_CMD['get_irrad_calib']\n msg += self.cmd.FOOTER\n self.write(''.join(msg))\n irrad_data = self.read()\n if irrad_data is None:\n return\n irrad_data = ''.join([chr(i) for i in irrad_data])\n self.irradiance_data = [struct.unpack('<f', j)[0] \\\n for j in [irrad_data[i:i+4]\\\n for i in range(0, len(irrad_data), 4)]]", "title": "" }, { "docid": "f2e779a892139bc4a158d45ffe6698a2", "score": "0.52332497", "text": "def load_data(dir='/home/iwsatlas1/peller/work/oscNext/level7_v01.04/140000_i3cols',\n labels=['x', 'y', 'z', 'time', 'azimuth','zenith', 'cascade_energy', 'track_energy'],\n geo='geo_array.npy',\n dtype=np.float32):\n \n hits_idx = np.load(os.path.join(dir, 'SRTTWOfflinePulsesDC/index.npy'))\n hits = np.load(os.path.join(dir, 'SRTTWOfflinePulsesDC/data.npy'))\n mctree_idx = np.load(os.path.join(dir, 'I3MCTree/index.npy'))\n mctree = np.load(os.path.join(dir, 'I3MCTree/data.npy'))\n mcprimary = np.load(os.path.join(dir, 'MCInIcePrimary/data.npy'))\n\n geo = np.load(geo)\n \n # constrcut hits array\n\n # shape N x (x, y, z, t, q)\n single_hits = np.empty(hits.shape + (5,), dtype=dtype)\n string_idx = hits['key']['string'] - 1\n om_idx = hits['key']['om'] - 1\n\n single_hits[:, 0:3] = geo[string_idx, om_idx]\n single_hits[:, 3] = hits['pulse']['time']\n single_hits[:, 4] = hits['pulse']['charge']\n \n total_charge = get_total_charge(hits, hits_idx, dtype=dtype)\n \n # construct params array\n\n neutrino_energy, track_energy, cascade_energy = get_energies(mcprimary, mctree, mctree_idx, dtype=dtype)\n \n params = np.empty(mcprimary.shape + (len(labels), ), dtype=dtype)\n\n for i, label in enumerate(labels):\n if label == 'x': params[:, i] = mcprimary['pos']['x']\n elif label == 'y': params[:, i] = mcprimary['pos']['y']\n elif label == 'z': params[:, i] = mcprimary['pos']['z']\n elif label == 'time': params[:, i] = mcprimary['time']\n elif label == 'azimuth': params[:, i] = mcprimary['dir']['azimuth']\n elif label == 'zenith': params[:, i] = mcprimary['dir']['zenith']\n elif label == 'neutrino_energy': params[:, i] = neutrino_energy\n elif label == 'energy': params[:, i] = track_energy + cascade_energy\n elif label == 'cascade_energy': params[:, i] = cascade_energy\n elif label == 'track_energy': params[:, i] = track_energy\n\n repeats = (hits_idx['stop'] - hits_idx['start']).astype(np.int64)\n repeated_params = np.repeat(params, repeats=repeats, axis=0)\n\n \n return single_hits, repeated_params, total_charge, params, labels", "title": "" }, { "docid": "bbd4c9ecf4411f8e2e277620714b11aa", "score": "0.5230578", "text": "def read_reference_data(self):\n ## Number of atoms\n self.na = -1\n self.ref_eigvals = []\n self.ref_eigvecs = []\n an = 0\n ln = 0\n cn = -1\n for line in open(self.vfnm):\n line = line.split('#')[0] # Strip off comments\n s = line.split()\n if len(s) == 1 and self.na == -1:\n self.na = int(s[0])\n xyz = 
np.zeros((self.na, 3))\n cn = ln + 1\n elif ln == cn:\n pass\n elif an < self.na and len(s) == 4:\n xyz[an, :] = np.array([float(i) for i in s[1:]])\n an += 1\n elif len(s) == 1:\n if float(s[0]) < 0:\n logger.warning('Warning: Setting imaginary frequency = % .3fi to zero.\\n' % abs(float(s[0])))\n self.ref_eigvals.append(0.0)\n else:\n self.ref_eigvals.append(float(s[0]))\n self.ref_eigvecs.append(np.zeros((self.na, 3)))\n an = 0\n elif len(s) == 3:\n self.ref_eigvecs[-1][an, :] = np.array([float(i) for i in s])\n an += 1\n elif len(s) == 0:\n pass\n else:\n logger.info(line + '\\n')\n logger.error(\"This line doesn't comply with our vibration file format!\\n\")\n raise RuntimeError\n ln += 1\n self.ref_eigvals = np.array(self.ref_eigvals)\n self.ref_eigvecs = np.array(self.ref_eigvecs)\n for v2 in self.ref_eigvecs:\n v2 /= np.linalg.norm(v2)\n return", "title": "" }, { "docid": "6a0df2f88076b9e98091866db803cd8b", "score": "0.5226541", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C 0.0000 1.2363 3.3275\n C -1.1758 0.3820 3.3275\n C -0.7267 -1.0002 3.3275\n C 0.7267 -1.0002 3.3275\n C 1.1758 0.3820 3.3275\n C 2.3047 0.7489 2.5939\n C 3.0314 -0.2513 1.8298\n C 2.6002 -1.5785 1.8298\n C 1.4244 -1.9605 2.5939\n C 0.6977 -2.9607 1.8298\n C -0.6977 -2.9607 1.8298\n C -1.4244 -1.9605 2.5939\n C -2.6002 -1.5785 1.8298\n C -3.0314 -0.2513 1.8298\n C -2.3047 0.7489 2.5939\n C -2.3047 1.9852 1.8298\n C -3.0314 1.7490 0.5935\n C -3.4805 0.3668 0.5935\n C -3.4805 -0.3668 -0.5935\n C -3.0314 -1.7490 -0.5935\n C -2.6002 -2.3426 0.5935\n C -1.4244 -3.1968 0.5935\n C -0.7267 -3.4235 -0.5935\n C 0.7267 -3.4235 -0.5935\n C 1.4244 -3.1968 0.5935\n C 2.6002 -2.3426 0.5935\n C 3.0314 -1.7490 -0.5935\n C 3.4805 -0.3668 -0.5935\n C 3.4805 0.3668 0.5935\n C 3.0314 1.7490 0.5935\n C 2.6002 2.3426 -0.5935\n C 2.6002 1.5785 -1.8298\n C 3.0314 0.2513 -1.8298\n C 2.3047 -0.7489 -2.5939\n C 2.3047 -1.9852 -1.8298\n C 1.1758 -2.8054 -1.8298\n C 0.0000 -2.4234 -2.5939\n C -1.1758 -2.8054 -1.8298\n C -2.3047 -1.9852 -1.8298\n C -2.3047 -0.7489 -2.5939\n C -3.0314 0.2513 -1.8298\n C -2.6002 1.5785 -1.8298\n C -2.6002 2.3426 -0.5935\n C -1.4244 3.1968 -0.5935\n C -0.6977 2.9607 -1.8298\n C -1.4244 1.9605 -2.5939\n C -0.7267 1.0002 -3.3275\n C -1.1758 -0.3820 -3.3275\n C 0.0000 -1.2363 -3.3275\n C 1.1758 -0.3820 -3.3275\n C 0.7267 1.0002 -3.3275\n C 1.4244 1.9605 -2.5939\n C 0.6977 2.9607 -1.8298\n C 1.4244 3.1968 -0.5935\n C 0.7267 3.4235 0.5935\n C -0.7267 3.4235 0.5935\n C -1.1758 2.8054 1.8298\n C 0.0000 2.4234 2.5939\n C 1.1758 2.8054 1.8298\n C 2.3047 1.9852 1.8298\n \"\"\")", "title": "" }, { "docid": "891d2950458e3ed760416104e46f0962", "score": "0.5225493", "text": "def read_calib_file(self, filepath):\r\n data = {}\r\n with open(filepath, \"r\") as f:\r\n for line in f.readlines():\r\n line = line.rstrip()\r\n if len(line) == 0:\r\n continue\r\n key, value = line.split(\":\", 1)\r\n # The only non-float values in these files are dates, which\r\n # we don't care about anyway\r\n try:\r\n data[key] = np.array([float(x) for x in value.split()])\r\n except ValueError:\r\n pass\r\n return data", "title": "" }, { "docid": "b6412362d82aba715880f764bb1eef8a", "score": "0.52240264", "text": "def load_data(filename):\n with open(filename,'r') as f:\n data = np.loadtxt(filename).transpose()\n times = data[0]\n a_values = a_rescaling * data[1]\n a_offset = a_values[0]\n a_values -= a_offset\n rho_values = data[2]\n p_values = data[3]\n return times,a_values,rho_values,p_values,a_offset", "title": "" }, { 
"docid": "62289364c72fcddf2ddf6c3de01631cf", "score": "0.5223903", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n N -1.3289 1.0488 -1.5596\n C 0.1286 1.0198 -1.8261\n C 0.3335 0.8585 -3.3268\n O -0.0551 -0.0282 -4.0649\n O 1.0668 1.8338 -3.9108\n C 0.8906 -0.1043 -1.0999\n H 1.9534 -0.0888 -1.4126\n H 0.4975 -1.0987 -1.3971\n C 0.8078 0.0465 0.3677\n C 1.5802 0.8809 1.1516\n N 1.1567 0.7746 2.4944\n H 1.7094 1.0499 3.2650\n C 0.1694 -0.2350 2.5662\n C -0.0897 -0.6721 1.2403\n C -1.0740 -1.6418 1.0106\n H -1.2812 -1.9849 -0.0088\n C -1.7623 -2.1470 2.0948\n H -2.5346 -2.9080 1.9416\n C -1.4948 -1.7069 3.4060\n H -2.0660 -2.1385 4.2348\n C -0.5337 -0.7507 3.6638\n H -0.3249 -0.4086 4.6819\n H 2.3719 1.5631 0.8380\n H -1.4726 1.2086 -0.5841\n H -1.7404 0.1740 -1.8129\n H 0.5299 2.0096 -1.4901\n H 1.1361 1.6737 -4.8470\n \"\"\")", "title": "" }, { "docid": "11eda617f10b7891b672d1929f73119e", "score": "0.52216643", "text": "def calibrate():\n images = glob.glob('/home/robotics/catkin_ws/src/lane_detection/src/calibration_images/image*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((1, checkboard[0]*checkboard[1], 3), np.float32)\n objp[0,:,:2] = np.mgrid[0:checkboard[0], 0:checkboard[1]].T.reshape(-1, 2)\n for fnames in images:\n img = cv2.imread(fnames)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret,corners = cv2.findChessboardCorners(gray, (6,9), cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)\n if ret == True:\n image = cv2.drawChessboardCorners(img, (6,9),corners, ret)\n objpoints.append(objp)\n cv2.cornerSubPix(gray,corners,(3,3),(-1,-1),subpix_criteria)\n imgpoints.append(corners)\n N_OK = len(objpoints)\n objpoints = np.reshape(objpoints, (N_OK, 1, 6*9, 3))\n imgpoints = np.reshape(imgpoints, (N_OK, 1, 6*9, 2))\n rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]\n tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]\n K = np.zeros((3,3), np.float32)\n D = np.zeros((4,1), np.float32)\n flag = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW\n ret, camera_matrix, dist_coeff,rvecs, tvecs = cv2.fisheye.calibrate(objpoints, imgpoints, gray.shape[::-1],K, D, rvecs, tvecs, flag ,(cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6))\n return camera_matrix, dist_coeff", "title": "" }, { "docid": "b8588bdbf2770baebbdbd041f8a30845", "score": "0.5210504", "text": "def read_raw_calib_file(self, filepath):\n # From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py\n data = {}\n\n with open(filepath, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data", "title": "" }, { "docid": "b8588bdbf2770baebbdbd041f8a30845", "score": "0.5210504", "text": "def read_raw_calib_file(self, filepath):\n # From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py\n data = {}\n\n with open(filepath, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data", "title": "" }, { "docid": "8229e97fbf7088a990c8fd1eecc2e80c", "score": "0.5208596", "text": "def load_extrinsics(self):\n print('Loading 
extrinsic calibration parameters ...')\n extrinsics_file_path = os.path.join(\n self.calib_path, 'extrinsics/extrinsics.yaml')\n extrinsics_all = yaml.load(open(extrinsics_file_path, 'r'))\n\n base_to_jai_camera = self._load_transformation(\n extrinsics_all['jai_camera'])\n base_to_kinect = self._load_transformation(\n extrinsics_all['kinect'])\n base_to_fx8 = self._load_transformation(\n extrinsics_all['fx8'])\n base_to_vlp_front = self._load_transformation(\n extrinsics_all['vlp_front'])\n base_to_vlp_rear = self._load_transformation(\n extrinsics_all['vlp_rear'])\n base_to_ublox_gps = self._load_transformation(\n extrinsics_all['ublox_gps'])\n base_to_leica_gps = self._load_transformation(\n extrinsics_all['leica_gps'])\n\n # Combined\n extrinsics_data = {}\n extrinsics_data['base_to_jai_camera'] = base_to_jai_camera\n extrinsics_data['base_to_kinect'] = base_to_kinect\n extrinsics_data['base_to_fx8'] = base_to_fx8\n extrinsics_data['base_to_vlp_front'] = base_to_vlp_front\n extrinsics_data['base_to_vlp_rear'] = base_to_vlp_rear\n extrinsics_data['base_to_ublox_gps'] = base_to_ublox_gps\n extrinsics_data['base_to_leica_gps'] = base_to_leica_gps\n\n self.extrinsics = namedtuple('Extrinsics', extrinsics_data.keys())(\n *extrinsics_data.values())", "title": "" }, { "docid": "8c4652f348263d366e80ecbb8cdc8f16", "score": "0.5202562", "text": "def calibrate(row, fix_illumination=False):\n channels = [None, 'Cy3', 'A594', 'Atto647']\n\n raw, calibrated = row['raw'], row['calibrated']\n raw_data = lasagna.io.read_stack(lasagna.config.paths.full(raw))\n raw_data = np.array([lasagna.config.calibration.fix_dead_pixels(frame) for frame in raw_data])\n if fix_illumination:\n fixed_data = np.array([lasagna.config.calibration.fix_illumination(frame, channel=channel)\n for frame, channel in zip(raw_data, channels)])\n else:\n fixed_data = raw_data\n lasagna.io.save_hyperstack(lasagna.config.paths.full(calibrated), fixed_data,\n display_ranges=display_ranges, luts=luts)", "title": "" }, { "docid": "4cf839466ce109384d652d8d63e3f1c1", "score": "0.5195876", "text": "def load_by_index(fp, alongStrike, downDip, x_coor, y_coor, num_layers):\n\tdis = np.array([], float)\n\tvalues_x, values_y, values_z = np.array([], float), np.array([], float), np.array([], float)\n\tbase = alongStrike*downDip*3*num_layers # number of data in past layers\n\n\tfor i in range(0, len(x_coor)):\n\t\tx = x_coor[i]\n\t\ty = y_coor[i]\n\n\t\t# index = number of data in past layers + postion in current layer\n\t\tindex = base + (downDip*x+y)*3\n\t\toffset = index*8 # offset measured in byte\n\t\ttry:\n\t\t\tdis = np.memmap(fp, np.float64, 'r', offset, (3)) # load three numbers for x/y/z\n\t\texcept ValueError:\n\t\t\tprint \"[ERROR]: unable to load file.\"\n\t\t\tsys.exit()\n\n\t\tvalues_x = np.append(values_x, dis[0])\n\t\tvalues_y = np.append(values_y, dis[1])\n\t\tvalues_z = np.append(values_z, dis[2])\n\treturn values_x, values_y, values_z", "title": "" }, { "docid": "314520f43d272058214ee93b9c74aaf4", "score": "0.51916045", "text": "def get_calibration(self):\n return np.array(lib.ShamrockGetCalibration(self.idx,self.get_number_pixels()))*1E-9", "title": "" }, { "docid": "e95e1f908f0a70eba0b3568c1e126aca", "score": "0.5186755", "text": "def load_camera_calib(filename):\n\n # Get the keys so we can easily set the params object\n try:\n fs = cv.FileStorage(filename, cv.FILE_STORAGE_READ)\n except Exception as e:\n print(repr(e))\n raise e\n\n cam_matrix = fs.getNode('camera_matrix').mat()\n dist_coeffs = 
fs.getNode('distortion_coefficients').mat()\n proj_matrix = fs.getNode('projection_matrix').mat()\n\n return (cam_matrix, dist_coeffs, proj_matrix)", "title": "" }, { "docid": "0898c23cf5e418a0bed72f7da5af04d8", "score": "0.5173898", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C 0.00000 0.00890 -2.60438\n C 0.00000 -1.33644 -2.18334\n C 0.00000 -1.66339 -0.74838\n C 0.00000 -0.71325 0.21050\n C 0.00000 0.71325 -0.21050\n C 0.00000 1.07292 -1.66905\n C 0.00000 0.28619 -3.99627\n C 0.00000 -2.37647 -3.12403\n H 0.00000 -2.72107 -0.52097\n C 0.00000 -1.07292 1.66905\n C 0.00000 1.66339 0.74838\n C 0.00000 2.39320 -2.15246\n C 0.00000 -2.09379 -4.48674\n C 0.00000 -0.77020 -4.91980\n C 0.00000 1.61427 -4.43741\n H 0.00000 -3.41490 -2.80207\n H 0.00000 -2.90372 -5.21131\n H 0.00000 -0.56804 -5.98873\n C 0.00000 -0.00890 2.60438\n C 0.00000 1.33644 2.18334\n C 0.00000 -2.39320 2.15246\n H 0.00000 2.72107 0.52097\n C 0.00000 -0.28619 3.99627\n C 0.00000 2.37647 3.12403\n C 0.00000 2.09379 4.48674\n C 0.00000 0.77020 4.91980\n C 0.00000 -1.61427 4.43741\n H 0.00000 3.41490 2.80207\n H 0.00000 2.90372 5.21131\n H 0.00000 0.56804 5.98873\n C 0.00000 2.66082 -3.52182\n C 0.00000 -2.66082 3.52182\n H 0.00000 3.24627 -1.48248\n H 0.00000 1.84476 -5.50040\n H 0.00000 -3.24627 1.48248\n H 0.00000 -1.84476 5.50040\n H 0.00000 3.68953 -3.87328\n H 0.00000 -3.68953 3.87328\n \"\"\")", "title": "" }, { "docid": "6312c3076907af2f486fceba2af5f1f8", "score": "0.5169767", "text": "def convert_xy_radec(filelist, soft='sextractor'):\n for filename in filelist:\n path, filename2 = os.path.split(filename)\n if path:\n folder = path + '/'\n else:\n folder = ''\n\n magfilewcs = filename + \".magwcs\"\n\n if soft == 'iraf':\n from pyraf import iraf\n\n magfile = filename2 + \".magfiltered\"\n iraf.wcsctran(input=magfile, output=magfilewcs, image=filename, inwcs=\"physical\", outwcs=\"world\")\n data1 = ascii.read(magfile, names=['Xpos', 'Ypos', 'Mag_aper', 'Mag_err_aper' ])\n data2 = ascii.read(magfilewcs, names=['RA', 'DEC', 'Mag_aper', 'Mag_err_aper' ])\n data = Table([data1['Xpos'],data1['Ypos'], data2['RA'], data2['DEC'], data2['Mag_aper'], data2['Mag_err_aper'], [filename]*len(data1)], names=['Xpos', 'Ypos', 'RA', 'DEC', 'Mag_inst', 'Magerr_inst', 'filenames'])\n \n elif soft == 'sextractor':\n sources = ascii.read(folder + 'sourcesdet_%s.cat' % (filename2.split('.fits')[0]), format='sextractor')\n header = fits.getheader(filename)\n w = wcs.WCS(header)\n ra, dec = w.wcs_pix2world(sources['X_IMAGE'], sources['Y_IMAGE'], 1)\n filenames = [filename] * len(ra)\n data = Table([sources['X_IMAGE'], sources['Y_IMAGE'], ra,dec, sources['MAG_AUTO'], sources['MAGERR_AUTO'], filenames], names=['Xpos', 'Ypos', 'RA', 'DEC', 'Mag_isnt', 'Magerr_inst', 'filenames'])\n\n data.write(magfilewcs, format='ascii.commented_header', overwrite=True)\n data4=data['RA', 'DEC']\n data4.write(magfilewcs+'2',format='ascii.commented_header', overwrite=True)", "title": "" }, { "docid": "d089c6afa27a128b0687d543a6be7071", "score": "0.5166698", "text": "def doL0L1Calibration(url,calib,nq=6):\n\n #derive L0 and L1 calibrations for 3 different signal regions\n fIn=ROOT.TFile.Open(url)\n data=fIn.Get('data')\n for ireg in [1,2,3]:\n x=getEnergiesForCalibration(data,ireg)\n xq=np.percentile(x, [i*100./nq for i in xrange(0,nq+1)], axis=0)\n\n #relative calibration versus eta \n resolVsEta = ROOT.TH2F('resolvseta',';|#eta|;#DeltaE/E', nq, array('d',xq[:,1]), 100,-1,1)\n for i in xrange(0,len(x)):\n genEn, genEta, recEn,_ = x[i]\n 
deltaE=recEn/genEn-1.\n resolVsEta.Fill(genEta,deltaE)\n calib['L0'][ireg]=calibrateSpectrum(resolVsEta,'SR%d'%ireg,'H#rightarrow#gamma#gamma (PU=0)','pol2')\n \n #relative calibration versus energy\n resolVsEn = ROOT.TH2F('resolvsen',';Reconstructed energy [GeV];#DeltaE/E', nq, array('d',xq[:,0]), 100,-1,1)\n for i in xrange(0,len(x)):\n genEn,genEta,recEn,_=x[i]\n recEn=recEn/(calib['L0'][ireg].Eval(genEta)+1.0)\n deltaE=recEn/genEn-1.\n resolVsEn.Fill(recEn,deltaE)\n resolVsEta.Fill(genEta,deltaE)\n calib['L1'][ireg]=calibrateSpectrum(resolVsEn,'SR%d'%ireg,'H#rightarrow#gamma#gamma (PU=0)','pol1')\n\n fIn.Close()\n\n return calib", "title": "" }, { "docid": "7f2b03429d860bb41d1d1929d83d0ad7", "score": "0.5160201", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C 2.3195 -0.7239 -1.3037\n C 0.8488 -0.5028 -0.9352\n N 0.0878 -0.2837 -2.1955\n C 0.6222 0.6941 0.0031\n H 1.4438 0.7480 0.7518\n H 0.6699 1.6506 -0.5583\n C -0.6861 0.5736 0.7157\n C -1.7900 1.3156 0.2937\n H -1.7012 1.9898 -0.5666\n C -3.0030 1.2022 0.9647\n H -3.8665 1.7872 0.6311\n C -3.1206 0.3494 2.0568\n H -4.0763 0.2625 2.5841\n C -2.0220 -0.3923 2.4797\n H -2.1126 -1.0636 3.3400\n C -0.8079 -0.2817 1.8127\n H 0.0572 -0.8650 2.1484\n O 2.7408 -1.5525 -2.0839\n O 3.3406 -0.0308 -0.7547\n H 0.5247 -1.4346 -0.3967\n H -0.8782 -0.1687 -1.9701\n H 0.2054 -1.0640 -2.8055\n H 3.0107 0.6082 -0.1296\n \"\"\")", "title": "" }, { "docid": "91317cfb3dc1d9c55d51fd06928de0d4", "score": "0.51582235", "text": "def TableRecalibration(self):\n tmp = []\n \n knownSites = ' '.join(['-knownSites '+i for i in (self.hg19_omni_vcf,self.hg19_hapmap_vcf,self.dbsnp)])\n #knownSites = '-knownSites %s -knownSites %s -knownSites %s' % (self.hg19_omni_vcf,self.hg19_hapmap_vcf,self.dbsnp)\n for fbam in self.products:\n fbase = fbam.rstrip('bam')\n fn_csv_output = fbase+'recal.csv'\n app = 'GenomeAnalysisTK.jar -T CountCovariates -R %s' % self.genome_fasta\n reserved_params = ['-I','-recalFile']\n cmd_params = '-I %s -recalFile %s -cov QualityScoreCovariate %s' % (fbam,fn_csv_output,knownSites)\n user_params = self.params.get('-CountCovariates',None)\n if user_params:\n cmd_params = self.util.update_cmd_params(cmd_params,reserved_params,user_params[0])\n self.commands.append('%s %s' % (app,cmd_params))\n \n fn_bam_output = fbase+'recal.bam'\n app = 'GenomeAnalysisTK.jar -T TableRecalibration -R %s' % self.genome_fasta\n cmd_params = '-I %s -recalFile %s -o %s' % (fbam,fn_csv_output,fn_bam_output) \n reserved_params = ['-I','-recalFile','-o']\n user_params = self.params.get('-TableRecalibration',None)\n if user_params:\n cmd_params = self.util.update_cmd_params(cmd_params,reserved_params,user_params[0]) \n self.commands.append('%s %s' % (app,cmd_params))\n \n if not self.reserve_intermediate:\n self.commands.append('rm -f %s' % fbase+'ba*')\n self.commands.append('rm -f %s' % fn_csv_output)\n tmp.append(fn_bam_output)\n \n self.products = tmp", "title": "" }, { "docid": "b0f2d820a3f6b99427e15f22106f4d1f", "score": "0.5154085", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C -0.00000 -0.70800 1.21622\n C 0.00000 0.70800 1.21622\n C 0.00000 1.40705 -0.00000\n C 0.00000 0.70800 -1.21622\n C -0.00000 -0.70800 -1.21622\n C -0.00000 -1.40705 0.00000\n C -0.00000 -1.40244 2.43645\n C 0.00000 1.40244 2.43645\n H 0.00000 2.49187 -0.00000\n C 0.00000 1.40244 -2.43645\n C -0.00000 -1.40244 -2.43645\n H -0.00000 -2.49187 0.00000\n C -0.00000 0.69871 3.64423\n C -0.00000 -0.69871 3.64423\n H -0.00000 -2.48586 2.45339\n H 0.00000 
2.48586 2.45339\n H -0.00000 1.23796 4.58270\n H -0.00000 -1.23796 4.58270\n C 0.00000 0.69871 -3.64423\n C 0.00000 -0.69871 -3.64423\n H 0.00000 2.48586 -2.45339\n H -0.00000 -2.48586 -2.45339\n H 0.00000 1.23796 -4.58270\n H 0.00000 -1.23796 -4.58270\n \"\"\")", "title": "" }, { "docid": "ec76cad8310292c3babc185c37b872e5", "score": "0.51439095", "text": "def load_geology(self):\n f = open(self.basename + \".g12\")\n method = 'standard' # standard method to read file\n # method = 'numpy' # using numpy should be faster - but it messes up the order... possible to fix?\n if method == 'standard':\n i = 0\n j = 0\n k = 0\n self.block = np.ndarray((self.nx,self.ny,self.nz))\n for line in f.readlines():\n if line == '\\n':\n # next z-slice\n k += 1\n # reset x counter\n i = 0\n continue\n l = [int(l1) for l1 in line.strip().split(\"\\t\")]\n self.block[i,:,self.nz-k-1] = np.array(l)[::-1]\n i += 1\n \n \n elif method == 'standard_old':\n j = 0 \n j_max = 0\n k_max = 0\n i_max = 0\n self.block = np.ndarray((self.nz,self.ny,self.nx))\n for k,line in enumerate(f.readlines()):\n if line == '\\n':\n # next y-slice\n j += 1\n if j > j_max : j_max = j\n continue\n for i,l1 in enumerate(line.strip().split(\"\\t\")):\n if i > i_max: i_max = i\n if k/self.nz > k_max : k_max = k/self.nz\n self.block[j,i,k/self.nz-1] = int(l1)\n print((i_max, j_max, k_max))\n \n \n elif method == 'numpy':\n # old implementation - didn't work, but why?\n self.block = np.loadtxt(f, dtype=\"int\")\n # reshape to proper 3-D shape\n self.block = self.block.reshape((self.nz,self.ny,self.nx))\n self.block = np.swapaxes(self.block, 0, 2)\n # self.block = np.swapaxes(self.block, 0, 1)\n # print np.shape(self.block)", "title": "" }, { "docid": "8a2e3dbd78f29ab6a1e340ebcc4f5bb1", "score": "0.5139174", "text": "def __init__(self, filename):\n assert os.path.exists(filename), 'File \"{}\" not found'.format(filename)\n self.hdus = pyfits.open(filename) \n self.data = self.hdus[1].data\n self.nside = self.hdus[1].header['NSIDE']\n \n # expect to find circle in header\n hdr = self.hdus[0].header\n try:\n dstypes = sorted(filter(lambda n:n.startswith('DSTYP'), hdr.keys())); \n i = [hdr[x] for x in dstypes].index('POS(RA,DEC)'); i\n circ=hdr['DSVAL{}'.format(i)]; \n ra,dec, self.radius = np.array(circ[7:-1].split(','),float)\n except Exception as msg:\n print ('failed to parse file {}: expected header to have a DSVAL with POS(RA,DEC)'.format(filename), msg)\n raise\n self.center = SkyDir(ra, dec)\n self.indexfun = Band(self.nside).index\n self.lookup = dict(zip(self.data.PIX, self.data.CHANNEL1))", "title": "" }, { "docid": "24d7dad4327b3118ed7395a92c558e5d", "score": "0.51365954", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C -0.00003 -2.03084 1.52511\n C -0.00003 -1.86078 0.14855\n C -0.00002 -0.57169 -0.43269\n C -0.00000 0.57171 0.43271\n C -0.00002 -0.38553 -1.85654\n C -0.00000 0.93373 -2.38194\n C 0.00001 2.03085 -1.52509\n C 0.00001 1.86080 -0.14853\n C -0.00004 -1.46617 -2.78011\n H 0.00003 2.76022 0.43953\n C 0.00000 0.38553 1.85656\n C -0.00001 -0.93370 2.38195\n C 0.00002 1.46619 2.78013\n C -0.00001 -1.14805 3.77035\n C -0.00001 1.14805 -3.77033\n H 0.00002 3.03772 -1.92748\n C -0.00004 -1.23043 -4.15704\n C -0.00002 0.06993 -4.65003\n H -0.00005 -2.49567 -2.47109\n H 0.00001 2.15465 -4.17232\n H -0.00005 -2.06535 -4.84624\n H -0.00002 0.24331 -5.71848\n H -0.00004 -3.03770 1.92750\n H -0.00005 -2.76021 -0.43951\n C 0.00002 1.23044 4.15706\n C 0.00000 -0.06990 4.65004\n H 0.00003 2.49568 2.47111\n H 
-0.00003 -2.15464 4.17234\n H 0.00003 2.06536 4.84626\n H 0.00000 -0.24330 5.71850\n \"\"\")", "title": "" }, { "docid": "807a715dc88ae7cc7abb3c5e04aa291a", "score": "0.5124896", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C 2.9711 -1.0371 0.0910\n C 1.8817 0.0283 -0.1809\n C 0.4656 -0.6153 -0.1140\n C -0.6589 0.4628 -0.1520\n C -2.0598 -0.1836 0.0497\n C -3.1420 0.8635 -0.2174\n O 4.2178 -0.4769 0.3869\n O 2.1286 0.7141 -1.3836\n O -2.1849 -0.8442 1.2800\n O -3.9705 1.1899 0.5973\n O 0.2607 -1.4567 -1.2327\n O -0.5054 1.4221 0.8735\n H 3.0584 -1.7427 -0.7599\n H 2.7376 -1.6229 0.9972\n H 1.9527 0.8559 0.5697\n H 0.3839 -1.1993 0.8358\n H -0.6421 0.9854 -1.1414\n H -2.1912 -1.0175 -0.6799\n H -3.1325 1.3167 -1.2220\n H 4.4523 0.0972 -0.3325\n H 1.9415 0.1195 -2.1006\n H -2.2515 -0.1817 1.9578\n H 0.6098 -2.3037 -0.9920\n H 0.3953 1.7276 0.8314\n \"\"\")", "title": "" }, { "docid": "1f787335fb9a9287a21bb23f6a433e1b", "score": "0.512138", "text": "def load_default(self):\n input_str = \"\"\"\\\nalat 3.2 # lattice constant (would be in a more realistic example in the structure file)\nalpha 0.1 # noise amplitude\na_0 3 # equilibrium lattice constant\na_1 0\na_2 1.0 # 2nd order in energy (corresponds to bulk modulus)\na_3 0.0 # 3rd order\na_4 0.0 # 4th order\ncount 10 # number of calls (dummy)\nepsilon 0.2 # energy prefactor of lennard jones\nsigma 2.4 # distance unit of lennard jones\ncutoff 4.0 # cutoff length (relative to sigma)\nwrite_restart True\nread_restart False\n\"\"\"\n self.load_string(input_str)", "title": "" }, { "docid": "cdbe332cfe768e7cf5b64d0da8fb4b86", "score": "0.5118959", "text": "def calibration(self):\n return _pysmu.calibration(self.serial)", "title": "" }, { "docid": "96e97df0cf34ac98aea687cfeefb4060", "score": "0.5117651", "text": "def set_calib_coeffs(self, data):\n self.ac = [None] # So that ac[1] corresponds to ac1 in the datasheet\n self.ac += [toint16(data[2 * i:2 * i + 2]) for i in range(3)]\n self.ac += [int.from_bytes(data[2 * i + 6:2 * i + 8], 'big')\n for i in range(3)]\n self.b = [None]\n self.b += [toint16(data[2 * i + 12:2 * i + 14]) for i in range(2)]\n self.mb, self.mc, self.md = [\n toint16(data[2 * i + 16:2 * i + 18]) for i in range(3)]", "title": "" }, { "docid": "156d99d5ce3b499f15794436c4f1bedd", "score": "0.51144373", "text": "def load(filename):\n\n ftype = determine_filetype(filename)\n cl = {\n FILE_CALIBRATION: mx.calibrate.Calibration,\n FILE_XES: mx.emission.EmissionSpectrum,\n FILE_RIXS: mx.rixs.RIXS,\n FILE_EXPOSURE: mx.exposure.Exposure,\n }.get(ftype, None)\n if cl:\n return cl(filename)\n else:\n raise InvalidFileError", "title": "" }, { "docid": "fe7dc32762a075bb0e900da7773ccd0a", "score": "0.5112343", "text": "def zva_dat_2_ntwks(filename):\n header, comments, d = read_zva_dat(filename)\n col_headers = header.split(',')\n\n # set impedance to 50 Ohm (doesn't matter for now)\n z0 = npy.ones(npy.shape(d)[0])*50\n # read f values, convert to GHz\n f = d[:,0]/1e9\n\n name = os.path.splitext(os.path.basename(filename))[0]\n\n if 're' in header.lower() and 'im' in header.lower():\n # this is a cvs in re/im format\n # -> no conversion required\n s = npy.zeros((len(f),2,2), dtype=complex)\n\n for k, h in enumerate(col_headers):\n if 's11' in h.lower() and 're' in h.lower():\n s[:,0,0] = d[:,k] + 1j*d[:,k+1]\n elif 's21' in h.lower() and 're' in h.lower():\n s[:,1,0] = d[:,k] + 1j*d[:,k+1]\n elif 's12' in h.lower() and 're' in h.lower():\n s[:,0,1] = d[:,k+1] #+ 1j*d[:,k+2]\n elif 's22' in h.lower() and 're' in 
h.lower():\n s[:,1,1] = d[:,k+1] #+ 1j*d[:,k+2]\n\n elif 'db' in header.lower() and \"deg\" not in header.lower():\n # this is a cvs in db format (no deg values)\n # -> conversion required\n s = npy.zeros((len(f),2,2), dtype=complex)\n\n for k, h in enumerate(col_headers):\n # this doesn't always work! (depends on no. of channels, sequence of adding traces etc.\n # -> Needs changing!\n if 's11' in h.lower() and 'db' in h.lower():\n s[:,0,0] = mf.dbdeg_2_reim(d[:,k], d[:,k+2])\n elif 's21' in h.lower() and 'db' in h.lower():\n s[:,1,0] = mf.dbdeg_2_reim(d[:,k], d[:,k+2])\n\n n = Network(f=f,s=s,z0=z0, name = name)\n return n\n\n else:\n warn(\"File does not seem to be formatted properly (dB/deg or re/im)\")", "title": "" }, { "docid": "d5f800171943310b4c0ec99579ddd54c", "score": "0.5111356", "text": "def __init__(self, filename):\n \"\"\"self.lines\"\"\"\n \"\"\"self.elts\"\"\"\n \"\"\"self.data\"\"\"\n \"\"\" \"\"\"\n \"\"\"self.lon\"\"\"\n \"\"\"self.scans\"\"\"\n \"\"\"self.Nchan\"\"\"\n \"\"\"self.chan\"\"\"\n \"\"\"self.velo\"\"\"\n \"\"\"self.Ta[scan] a dictionary\"\"\"\n #\n f = open(filename, 'r')\n self.lines=f.readlines()\n f.close()\n self.lines = list(map(str.strip,self.lines))\n self.elts = list(map(str.split,self.lines))\n for i in range(len(self.lines)):\n if (len(self.elts[i]) > 6) and ((self.elts[i][5]=='galactic') or (self.elts[i][5]=='CygEMN') or (self.elts[i][5]=='Sun') ) :\n self.lon=float(self.elts[i][6])\n self.lat=float(self.elts[i][7])\n\n Nlines=0\n for i in range(len(self.lines)):\n if ((self.elts[i][0][0:2])=='20'):\n self. deltaf=float(self.elts[i][6])\n Nlines=Nlines+1\n self.Nchan = int(self.elts[i][8])\n self.mydate=self.elts[i][0][:8]\n self.myt=self.elts[i][0][9:]\n self.Az=float(self.elts[i][1])\n self.El=float(self.elts[i][2])\n self.freq0=float(self.elts[i][5])\n self.Vlsr=float(self.elts[i][self.Nchan+10])\n\n self.data=np.zeros((Nlines,self.Nchan),float)\n Nlines=0\n self.Ta={}\n for i in range(len(self.lines)):\n if ((self.elts[i][0][0:2]) == '20'):\n for j in range(self.Nchan):\n self.data[Nlines,j] = float(self.elts[i][9+j])\n #self.Ta[\"scan\" + str(Nlines)]=self.data[Nlines]\n self.Ta[Nlines]=self.data[Nlines]\n Nlines=Nlines+1\n self.scans=self.Ta.keys()\n\n self.chan=range(1,self.Nchan+1)\n c=299792.458\n self.deltaV=-(self.deltaf*c/(self.freq0)) #+int(self.Nchan/2.)*self.deltaf)\n self.Nchan0=int(self.Nchan/2.)\n self.velo=np.zeros(self.Nchan,float)\n\n\n #Alex\n frest=1420.4\n self.Voffset=((frest-(self.freq0+self.Nchan0*self.deltaf))/frest)*c\n self.Voffset=0.\n\n for j in range(self.Nchan):\n V0=c*((self.deltaf*self.Nchan0)/self.freq0)\n self.velo[j]=V0+j*self.deltaV\n #self.velo[j]=-self.Nchan0*self.deltaV+j*self.deltaV-self.Vlsr", "title": "" }, { "docid": "d224f27d9f3ebaa4dfc88817b4682ed2", "score": "0.51015204", "text": "def load_calibration_data(self, platetype, filename =''):\n\n if not filename: # If no filename was entered into function\n # Checks OS of system \n if platform.system() == 'Windows':\n if platetype == 24: # Loads calibration data for 24-well plate\n with open('System Settings/calibration24Wells', 'r') as f:\n self.positions24well = pickle.load(f)\n self.positions24well[0][0] = 1048\n self.positions24well[0][1] = 1513\n elif platetype == 96: # Loads calibration data for 96-well plate\n with open('System Settings/calibration96Wells', 'r') as f:\n self.positions96well = pickle.load(f)\n print self.positions96well\n elif platform.system() == 'Darwin':\n if platetype == 24:\n with open('System Settings/calibration24Wells', 
'r') as f:\n self.positions24well = pickle.load(f)\n self.positions24well[0][0] = 1048\n self.positions24well[0][1] = 1513\n elif platetype == 96:\n with open('System Settings/calibration96Wells', 'r') as f:\n self.positions96well = pickle.load(f)\n print self.positions96well\n elif platform.system() == 'Linux':\n if platetype == 24:\n with open('System Settings/calibration24Wells', 'rb') as f:\n self.positions24well = pickle.load(f)\n self.positions24well[0][0] = 1048\n self.positions24well[0][1] = 1513\n elif platetype == 96:\n with open('System Settings/calibration96Wells', 'rb') as f:\n self.positions96well = pickle.load(f)\n print self.positions96well \n else: # If filename was provided as an input\n if platform.system() == 'Windows':\n if platetype == 24:\n with open('System Settings/' + filename, 'r') as f:\n self.positions24well = pickle.load(f)\n elif platetype == 96:\n with open('System Settings/' + filename, 'r') as f:\n self.positions96well = pickle.load(f)\n elif platform.system() == 'Darwin':\n if platetype == 24:\n with open('System Settings/' + filename, 'r') as f:\n self.positions24well = pickle.load(f)\n elif platetype == 96:\n with open('System Settings/' + filename, 'r') as f:\n self.positions96well = pickle.load(f)\n elif platform.system() == 'Linux':\n if platetype == 24:\n with open('System Settings/' + filename, 'rb') as f:\n self.positions24well = pickle.load(f)\n elif platetype == 96:\n with open('System Settings/' + filename, 'rb') as f:\n self.positions96well = pickle.load(f)", "title": "" }, { "docid": "07c2bd65cde4d2c68d0efbdf1b0e2b04", "score": "0.5101028", "text": "def getCalibrationCoefficients(self, index):\n\t\t# get raw calibration data from a specified location\n\t\tcd = self.getCalibrationData(index) + [0, 0, 0, 0]\n\t\tcc = {\"a0\":0, \"b1\":0, \"b2\":0, \"c12\":0, \"c11\":0, \"c22\":0}\n\t\t# cell not alive\n\t\tif max(cd) != 0:\n\t\t\t# undo Two's complement if applicable, pack into proper bit width\n\t\t\tcc[\"a0\"] = _unTwos(((cd[0] << 8) | cd[1]), 16)\n\t\t\tcc[\"b1\"] = _unTwos(((cd[2] << 8) | cd[3]), 16)\n\t\t\tcc[\"b2\"] = _unTwos(((cd[4] << 8) | cd[5]), 16)\n\t\t\tcc[\"c12\"] = _unTwos(((cd[6] << 6) | (cd[7] >> 2)), 14)\n\t\t\tcc[\"c11\"] = _unTwos(((cd[8] << 3) | (cd[9] >> 5)), 11)\n\t\t\tcc[\"c22\"] = _unTwos(((cd[10] << 3) | (cd[11] >> 5)), 11)\n\t\t\t# divide by float(1 << (fractionalBits + zeroPad)) to handle weirdness\n\t\t\tcc[\"a0\"] /= float(1 << 3)\n\t\t\tcc[\"b1\"] /= float(1 << 13)\n\t\t\tcc[\"b2\"] /= float(1 << 14)\n\t\t\tcc[\"c12\"] /= float(1 << 22)\n\t\t\tcc[\"c11\"] /= float(1 << 21)\n\t\t\tcc[\"c22\"] /= float(1 << 25)\n\t\treturn (index, cc)", "title": "" }, { "docid": "e1f96f066b855bf419b1160e4d50f6c9", "score": "0.51003975", "text": "def _ensure_calib_is_populated(self) -> None:\n if self._calib is None:\n self._calib = {}\n for log in self.log_list:\n calib_filename = os.path.join(self.root_dir, log, \"vehicle_calibration_info.json\")\n self._calib[log] = load_calib(calib_filename)", "title": "" }, { "docid": "c7192c6db092930551649dee37674efe", "score": "0.50982094", "text": "def loadInfo():\n \n \"\"\"\n INTRINSIC CAMERA CALIBRATION PARAMETERS\n The coefficients fx and fy are the focal lengths and the coefficients cx and cy are the camera centers\n The coefficients k1, k2, p1, p2 and k3 indicate radial (k's) and tangential (p's) distorsion\n fx [px], fy [px], cx [px], cy [px], k1, k2, p1, p2, k3\n \"\"\"\n mono = 
np.array([904.04572636,907.01811462,645.74398382,512.14951996,-0.3329137,0.10161043,0.00123166,-0.00096204])\n omni0 = np.array([482.047,485.211,373.237,211.02,-0.332506,0.154213,-9.5973e-05,-0.000236179,-0.0416498])\n omni1 = np.array([479.429,482.666,367.111,230.626,-0.334792,0.161382,4.29188e-05,-0.000324466,-0.0476611])\n omni2 = np.array([483.259,486.027,340.948,204.701,-0.334384,0.15543,0.000171604,0.000300507,-0.0439626])\n omni3 = np.array([483.895,486.584,375.161,220.184,-0.337111,0.160611,0.000146382,0.000406074,-0.0464726])\n omni4 = np.array([473.571,477.53,378.17,212.577,-0.333605,0.159377,6.11251e-05,4.90177e-05,-0.0460505])\n omni5 = np.array([473.368,477.558,371.65,204.79,-0.3355,0.162877,4.34759e-05,2.72184e-05,-0.0472616])\n omni6 = np.array([476.784,479.991,381.798,205.64,-0.334747,0.162797,-0.000305541,0.000163014,-0.0517717])\n omni7 = np.array([480.086,483.581,361.268,221.179,-0.348515,0.199388,-0.000381909,8.83314e-05,-0.0801161])\n omni8 = np.array([478.614,481.574,377.363,194.839,-0.333512,0.157163,-8.2852e-06,0.000265461,-0.0447446])\n omni9 = np.array([480.918,484.086,386.897,206.923,-0.33305,0.156207,-5.95668e-05,0.000376887,-0.0438085])\n \n \"\"\"\n ROVER TRANSFORMS\n trans_x [m], trans_y [m], trans_z [m], quat_x, quat_y, quat_z, quat_w\n \"\"\" \n #Omnidirectional camera 0 relative to Omnidirectional sensor\n T_OrefP0 = np.array([0.000,0.004,0.056,0.002,0.001,-0.006,1.000])\n #Omnidirectional camera 1 relative to Omnidirectional sensor\n T_OrefP1 = np.array([-0.001,0.127,0.054,0.005,0.002,-0.002,1.000])\n #Omnidirectional camera 2 relative to Omnidirectional sensor\n T_OrefP2 = np.array([0.060,0.005,0.023,-0.000,0.585,-0.010,0.811])\n #Omnidirectional camera 3 relative to Omnidirectional sensor\n T_OrefP3 = np.array([0.059,0.128,0.020,-0.006,0.586,-0.007,0.810])\n #Omnidirectional camera 4 relative to Omnidirectional sensor\n T_OrefP4 = np.array([0.030,0.013,-0.046,0.006,0.950,-0.002,0.311])\n #Omnidirectional camera 5 relative to Omnidirectional sensor\n T_OrefP5 = np.array([0.032,0.134,-0.047,0.019,0.951,-0.011,0.309])\n #Omnidirectional camera 6 relative to Omnidirectional sensor\n T_OrefP6 = np.array([-0.033,0.009,-0.048,-0.006,0.951,0.002,-0.310])\n #Omnidirectional camera 7 relative to Omnidirectional sensor\n T_OrefP7 = np.array([-0.034,0.131,-0.048,-0.012,0.951,0.001,-0.310])\n #Omnidirectional camera 8 relative to Omnidirectional sensor \n T_OrefP8 = np.array([-0.056,0.005,0.017,0.004,-0.587,0.005,0.809])\n #Omnidirectional camera 9 relative to Omnidirectional sensor \n T_OrefP9 = np.array([-0.057,0.128,0.015,0.002,-0.586,0.008,0.810])\n # Front left wheel relative to Rover, T_RWfl,0.256,0.285,0.033,-,-,-,-\n # Front right wheel relative to Rover, T_RWfr,0.256,-0.285,0.033,-,-,-,-\n # Rear left wheel relative to Rover, T_RWrl,-0.256,0.285,0.033,-,-,-,-\n # Rear right wheel relative to Rover, T_RWrr,-0.256,-0.285,0.033,-,-,-,-\n \n # all omnidirectional camera intrinsics\n allOmni = np.array([omni0, omni1, omni2, omni3, omni4, omni5, omni6, omni7, omni8, omni9]) \n # all omnidirectional camera transforms\n allT_OrefP = np.array([T_OrefP0, T_OrefP1, T_OrefP2, T_OrefP3, T_OrefP4, T_OrefP5, T_OrefP6, T_OrefP7, T_OrefP8, T_OrefP9]) \n\n return allOmni, allT_OrefP", "title": "" }, { "docid": "d6736873ffc8ce327e755cffa5a4bc98", "score": "0.50921553", "text": "def vasp_read_pot(potfile=\"LOCPOT\",zshift=0.0,bulk_flag=0,direct_flag='0'):\n ifile = open(potfile,'r')\n ifile.readline()\n\n latt_scale = float(ifile.readline()) # scale for the lattice vectors\n\n 
# read the lattice vectors \n latt_vec=[]\n for i in range(3):\n line_s = ifile.readline().split()\n x = float(line_s[0])\n y = float(line_s[1])\n z = float(line_s[2])\n latt_vec.append([x,y,z])\n #print latt_vec \n mat=numpy.array(latt_vec)\n if direct_flag=='0':\n\t if mat[2][2]>mat[0][0]:\n\t\tif mat[2][2]>mat[1][1]:\n\t\t\tzdirect=2\n\t\telse:\n\t\t\tzdirect=1\n\t else:\n\t\tif mat[0][0]>mat[1][1]:\n\t\t\tzdirect=0\n\t\telse:\n\t\t\tzdirect=1\n else:\n\t if direct_flag=='x':\n\t \tzdirect=0\n\t elif direct_flag=='y':\n\t \tzdirect=1\n\t else:\n\t \tzdirect=2\n# print mat\n trans_mat=numpy.transpose(mat) \n # print trans_mat\n inv_mat=numpy.linalg.inv(trans_mat)\n # print inv_mat\n # read species\n\n specs = ifile.readline().split()\n\n nat_sp =[]\n line_s = ifile.readline().split()\n nat = 0\n for i in range(len(specs)):\n nat_sp.append(int(line_s[i]))\n nat += nat_sp[i]\n \n cord_type=ifile.readline()\n #add by ZHC\n cord_type=cord_type.lstrip()\n zlist=[]\t\n zmin=1000000.0\n zmax=0.0\n for i in range(nat):\n\tline = ifile.readline().split()\n\tatvec=[]\n\tfor j in range(3):\n\t\tatvec.append(float(line[j]))\n\tatvec=numpy.array(atvec)\n#\tprint atvec\n#\tzpos=float(line[2])\n\tif cord_type[0]=='D' or cord_type[0]=='d': ### all convert to Cartizian to find the center of the slab\n\t\t#print \"NOTE: The coordination is DIRECT!!!\"\t\n\t\tfor j in range(3):\n\t\t\tif j==zdirect:\n\t\t\t\tatvec[j]=atvec[j]+zshift\n\t\t\tatvec[j]=atvec[j]-math.floor(atvec[j])\t\t\n\t\tcart_vec=numpy.dot(trans_mat,atvec)\n\t\tif cart_vec[zdirect]<zmin:\n\t\t\tzmin=cart_vec[zdirect]\n\t\tif cart_vec[zdirect]>zmax:\n\t\t\tzmax=cart_vec[zdirect]\t\n\telse:\n\t\t#print \"NOTE: The coordination is CARTISIAN!!!\"\n\t\tdir_vec=numpy.dot(inv_mat,atvec)\n\t\t#print dir_vec\n\t\tfor j in range(3):\n\t\t\tif j==zdirect:\n\t\t\t\tdir_vec[j]=dir_vec[j]+zshift\n\t\t\tdir_vec[j]=dir_vec[j]-math.floor(dir_vec[j])\n\t\t#print dir_vec\n\t\t\n\t\tcart_vec=numpy.dot(trans_mat,dir_vec)\n\t\t#print cart_vec\n\t\tif cart_vec[zdirect]<zmin:\n\t\t\tzmin=cart_vec[zdirect]\n\t\tif cart_vec[zdirect]>zmax:\n\t\t\tzmax=cart_vec[zdirect]\t\n \t\n center_pos=(zmax+zmin)/2.0\n\n# print \"zmax,zmin\"\n# print zmax,zmin\n# print \"CENTER:\"\n# print center_pos\n dis_vac_up=mat[zdirect][zdirect]-zmax\n dis_vac_down=zmin-0.0\n thick_vac=abs(dis_vac_up+dis_vac_down)\n \n if zmax-zmin>0.8*mat[zdirect][zdirect] and mat[zdirect][zdirect]>12.0 and bulk_flag==0:\n\tfrac=(mat[zdirect][zdirect]-zmax)/float((mat[zdirect][zdirect]))\n\tprint \"PLEASE SET -z tag to ensure the slab does not cross the boundary!!!\"\n\tsys.exit(0)\t\n vac_center=thick_vac/2.0+zmax\n if vac_center>mat[zdirect][zdirect]+zshift*mat[zdirect][zdirect]:\n\tvac_center=vac_center-mat[zdirect][zdirect]\n# print \"vac_center\" \n# print vac_center\n \n ifile.readline()\n\n line_s = ifile.readline().split()\n nx = int(line_s[0])\n ny = int(line_s[1])\n nz = int(line_s[2]) \n ndata = nx*ny*nz\n nlines = ndata/5\n if ndata%5 != 0: nlines += 1\n data = []\n for il in range(nlines):\n line_s = ifile.readline().split()\n for i in range(len(line_s)):\n data.append(float(line_s[i]))\n ifile.close()\n return data,(nx,ny,nz),latt_vec,center_pos,vac_center,zmax,zmin,zdirect,thick_vac", "title": "" }, { "docid": "d3386135ac969cae90046a9a722a4c7f", "score": "0.50908035", "text": "def get_calibration(self, images):\n # Arrays to store object points and image points from all the images.\n obj_points = [] # 3d point in real world space\n img_points = [] # 2d points in image plane.\n self.grids = 
[self.find_chessboard(img) for img in images]\n for grid in self.grids:\n if grid is not None:\n corners = grid.reshape((-1, 2))\n img_points.append(corners)\n obj_points.append(self.pattern_points)\n self.rms, self.camera_mat, self.dist_coef, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, get_size(images[0]))\n self.calculate_reprojection_errors(self.grids)\n return self.camera_mat, self.dist_coef", "title": "" }, { "docid": "e62940af11da8449d19c8d95ed21d47d", "score": "0.5090213", "text": "def get_calibration_data(self):\n data = {}\n\n # 64 bit numbers (8 bytes/number, 16 bytes per point)\n self.write(\"FORM3;\")\n\n # Read for every cal type\n for t in CalType:\n name = t.name\n\n # Set the channel as appropriate\n if t == CalType.CALIS111:\n ch = CHANNELS[SParam.S11]\n elif t == CalType.CALIS221:\n ch = CHANNELS[SParam.S22]\n else:\n ch = CHANNELS[SParam.S11]\n\n self.write(\"{};\".format(ch))\n\n # Is calibration present?\n if bool(int(self.query(name + \"?;\"))):\n data2 = []\n for i in range(CAL_DATA_LENGTH[t]):\n # data2.append(self.query(\"OUTPCALC\"+\"{:02d}\".format(i+1)+\";\"))\n self.write(\"OUTPCALC{:02d};\".format(i + 1))\n\n if not self.dummy:\n header = self.vna.read_bytes(4) # 4-byte header\n # big-endian, 2 bytes\n values = int(struct.unpack(\">h\", header[2:])[0] / 8)\n d = []\n\n for i in range(values):\n d.append(self.vna.read_bytes(8))\n else:\n d = (1, 2, 3)\n data2.append(d)\n data[t] = data2\n\n if self.dummy:\n return {}\n\n return data", "title": "" }, { "docid": "df1db89060310cac4055edca61737b1e", "score": "0.5087069", "text": "def load_velo_scan(file):\n scan = np.fromfile(file, dtype=np.float32) \n return scan.reshape((-1, 4))", "title": "" }, { "docid": "29f3d613096b457461b45130188b0733", "score": "0.50777614", "text": "def applyCalibrationTo(url,calib,title):\n \n pfix=''.join(calib.keys())\n\n\n fIn=ROOT.TFile.Open(url)\n data=fIn.Get('data')\n\n histos={}\n for ireg in xrange(1,4):\n histos['dm%d'%ireg] = ROOT.TH1F('dm%d'%ireg, ';#Delta m_{#gamma#gamma}/m_{#gamma#gamma};PDF',50,-0.1,0.1)\n histos['den%d'%ireg] = ROOT.TH1F('den%d'%ireg,';#Delta E/E;PDF',50,-0.1,0.1)\n for h in histos:\n histos[h].Sumw2()\n histos[h].SetLineColor(1)\n histos[h].SetMarkerColor(1)\n histos[h].SetMarkerStyle(20)\n histos[h].SetDirectory(0)\n\n for i in xrange(0,data.GetEntriesFast()):\n \n data.GetEntry(i)\n\n #generator level photons\n genphotons=[]\n for ia in xrange(1,3):\n genen = getattr(data,'genen%d'%ia)\n geneta = getattr(data,'geneta%d'%ia)\n genphi = getattr(data,'genphi%d'%ia)\n genphotons.append(ROOT.TLorentzVector(0,0,0,0))\n genphotons[-1].SetPtEtaPhiM(genen/ROOT.TMath.CosH(geneta),geneta,genphi,0.)\n genh = genphotons[0]+genphotons[1]\n\n #H->gg fiducial cuts\n if genphotons[0].Pt()<20 or genphotons[1].Pt()<20 : continue\n if genphotons[0].Pt()<40 and genphotons[1].Pt()<40 : continue\n if abs(genphotons[0].Eta())<1.5 or abs(genphotons[1].Eta())<1.5 : continue\n if abs(genphotons[0].Eta())>2.8 or abs(genphotons[1].Eta())>2.8 : continue\n\n #reconstructed photons in different regions\n for ireg in xrange(1,4):\n\n photons=[]\n for ia in xrange(1,3):\n genen = getattr(data,'genen%d'%ia)\n geneta = getattr(data,'geneta%d'%ia)\n genphi = getattr(data,'genphi%d'%ia)\n recen = getattr(data,'en%d_%d'%(ia,ireg))\n #avgnoise = getattr(data,'noise%d_%d'%(ia,ireg))\n avgnoise=getattr(data,'noise%d_3'%ia)*A[ireg-1]/A[2]\n\n if 'L0' in calib:\n recen=recen/(calib['L0'][ireg].Eval(abs(geneta))+1.0)\n if 'L1' in calib:\n 
recen=recen/(calib['L1'][ireg].Eval(recen)+1.0)\n if 'L2' in calib and ireg in calib['L2']:\n recen=recen-calib['L2'][ireg].Eval(avgnoise)\n\n deltaE = recen/genen-1.\n histos['den%d'%ireg].Fill(deltaE)\n photons.append(ROOT.TLorentzVector(0,0,0,0))\n photons[-1].SetPtEtaPhiM(recen/ROOT.TMath.CosH(geneta),geneta,genphi,0.)\n\n h = photons[0]+photons[1]\n deltaM=h.M()/genh.M()-1\n histos['dm%d'%ireg].Fill(deltaM)\n\n fIn.Close()\n\n c=ROOT.TCanvas('c','c',500,500)\n c.SetTopMargin(0.05)\n c.SetBottomMargin(0.1)\n c.SetLeftMargin(0.12)\n c.SetRightMargin(0.03)\n for ireg in xrange(1,4):\n for k in ['dm','den']:\n h=histos['%s%d'%(k,ireg)]\n h.Scale(1./h.Integral())\n h.Draw()\n h.GetYaxis().SetTitleOffset(0.9)\n h.GetYaxis().SetRangeUser(0,h.GetMaximum()*1.2)\n h.Fit('gaus','M+')\n gaus=h.GetListOfFunctions().At(0)\n tex=ROOT.TLatex()\n tex.SetTextFont(42)\n tex.SetTextSize(0.04)\n tex.SetNDC()\n tex.DrawLatex(0.12,0.96,'#bf{CMS} #it{simulation preliminary}')\n tex.DrawLatex(0.15,0.88,'SR%d (%s-calibrated)'%(ireg,pfix))\n tex.DrawLatex(0.15,0.84,'#mu=%3.3f#pm%3.3f'%(gaus.GetParameter(1),gaus.GetParError(1)))\n tex.DrawLatex(0.15,0.80,'#sigma=%3.3f#pm%3.3f'%(gaus.GetParameter(2),gaus.GetParError(2)))\n tex.SetTextAlign(31)\n tex.DrawLatex(0.97,0.96,title)\n c.SaveAs('%s%s.png'%(pfix,h.GetName()))\n\n #save in a local file\n fOut=ROOT.TFile.Open('calib%s.root'%pfix,'RECREATE')\n for h in histos: histos[h].Write()\n fOut.Close()", "title": "" }, { "docid": "2a7c8343c4d75b845934b3335fdba9fd", "score": "0.5071515", "text": "def readTurekReference(refCase):\n name = \"reference/ref_\" + refCase.lower() + \".point\" \n data = np.genfromtxt(name)\n \n refData={}\n refData['time'] = data[:,0]\n refData['Ux'] = data[:,10] \n refData['Uy'] = data[:,11]\n if refCase.lower() == \"fsi1\":\n refData['drag'] = data[:,6] \n refData['lift'] = data[:,7]\n else:\n refData['drag'] = data[:,4] + data[:,6] \n refData['lift'] = data[:,5] + data[:,7]\n return refData", "title": "" }, { "docid": "f36f9350a7c83b9e5717242f9b6e87f8", "score": "0.5053698", "text": "def read_zc(filename):\n header,footer,bestfit,zcdata = [],[],[],[]\n file = open(filename,'r')\n Galaxy = extract_title(filename)[1]\n lines = file.readlines() \n \n nlines = len(lines)\n for i in range(nlines):\n if lines[i].startswith('\\n'): continue\n if lines[i].startswith('Found'):\n header.append(lines[i])\n elif lines[i].startswith('Best'):\n bestfit.append(lines[i+1])\n bestfit.append(lines[i+2])\n elif lines[i].startswith('background'):\n footer.append(lines[i])\n else:\n try:\n zcdata.append(map(float,lines[i].split()))\n except ValueError:\n continue\n zcdata = np.transpose(zcdata)\n data = {'To':zcdata[0],\n 'Tf':zcdata[1],\n 'Mag':zcdata[2],\n 'SFR':zcdata[3],\n 'SFRerr1':zcdata[4],\n 'SFRerr2':zcdata[5],\n 'Zave':zcdata[6],\n 'Zaveerr1':zcdata[7],\n 'Zaveerr2':zcdata[8],\n 'Zspread':zcdata[9],\n 'Zsprederr1':zcdata[10],\n 'Zsprederr2':zcdata[11],\n 'CSFH':zcdata[12],\n 'CSFHerr1':zcdata[13],\n 'CSFHerr2':zcdata[14],\n 'Galaxy':Galaxy,\n 'Bestfits':bestfit,\n 'Header':header,\n 'Footer':footer\n }\n return data", "title": "" }, { "docid": "f126283018edd013b34b9ca2c7b91b94", "score": "0.50518805", "text": "def pipetteLoadCalib(p, liquid, path):\n serial = XmlSerializer(PipetteCalibrationInfo)\n stream = FileStream(path, FileMode.Open, FileAccess.Read)\n try:\n calib = serial.Deserialize(stream)\n p[liquid] = calib\n finally:\n stream.Close()", "title": "" }, { "docid": "5b3a7d8202ac07d323b726c9121d7e13", "score": "0.5033101", "text": "def 
process_file(in_file, out_dir, prefix):\n\n # Read in the text file\n lines = []\n with open(in_file) as f:\n for line in f:\n lines.append(line)\n\n logging.debug(\"Decoding header\")\n # Read in the header info\n header = decode_header(lines[0:11])\n\n ngates = int(header['Number of gates'])\n # nrays = int(header['No. of rays in file']) # Cant do this apparently. Not always correct (wtf)\n len_data = len(lines[17:])\n nrays = len_data / (ngates + 1)\n\n gate_length = float(header['Range gate length (m)'])\n start_time = datetime.strptime(header['Start time'], '%Y%m%d %H:%M:%S.%f')\n scan_type = lookup[header['Scan type']]\n\n logging.info(\"Processing file type: %s\" % scan_type)\n logging.debug(\"Number of rays: %s\" % nrays)\n\n logging.debug(\"Reading data\")\n # Read in the actual data\n az = np.zeros(nrays)\n hour = np.zeros(nrays)\n elev = np.zeros(nrays)\n pitch = np.zeros(nrays)\n roll = np.zeros(nrays)\n rng = np.asarray([(gate + .5) * gate_length for gate in range(ngates)])\n\n vel = np.zeros((ngates, nrays))\n intensity = np.zeros((ngates, nrays))\n beta = np.zeros((ngates, nrays))\n\n try:\n for ray in range(nrays):\n # Get the scan info\n info = lines[ray * (ngates + 1) + 17].split()\n hour[ray] = float(info[0])\n az[ray] = float(info[1])\n elev[ray] = float(info[2])\n pitch[ray] = float(info[3])\n roll[ray] = float(info[4])\n\n for gate in range(ngates):\n data = lines[ray * (ngates + 1) + 17 + gate + 1].split()\n vel[gate, ray] = float(data[1])\n intensity[gate, ray] = float(data[2])\n beta[gate, ray] = float(data[3])\n\n except IndexError:\n logging.warning(\"Something went wrong with the indexing here...\")\n\n logging.debug('Preparing to write netcdf')\n\n # Get the times and dates figured out for the netcdf\n time = []\n epoch = []\n for h in hour:\n dt = datetime(start_time.year, start_time.month, start_time.day) + timedelta(hours=h)\n time.append(dt)\n epoch.append(_to_epoch(dt))\n\n time = np.asarray(time)\n epoch = np.asarray(epoch)\n base_time = _to_epoch(start_time)\n time_offset = epoch - base_time\n\n # Figure out netcdf attrs\n nc_attrs = {'start_time': start_time.strftime('%Y-%m-%dT%H:%M:%S')} # None right now\n\n # Get the filename figured out\n if prefix is None:\n filename = start_time.strftime(\"{type}_%Y%m%d_%H%M%S.nc\".format(type=scan_type))\n else:\n filename = start_time.strftime(\"{prefix}_{type}_%Y%m%d_%H%M%S.nc\".format(type=scan_type, prefix=prefix))\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n filename = os.path.join(out_dir, filename)\n\n # Write out the netcdf\n logging.info(\"Writing netcdf\")\n logging.debug(\"Creating file: {}\".format(filename))\n\n nc = netCDF4.Dataset(filename, \"w\", format=\"NETCDF4\")\n\n # Create the dimensions\n nc.createDimension('time', size=None)\n nc.createDimension('range', size=len(rng))\n\n # Set the netcdf attributes\n logging.debug('Writing attributes')\n nc.setncatts(nc_attrs)\n\n logging.debug('Writing base_time')\n var = nc.createVariable('base_time', 'i8')\n var.setncattr('long_name', 'Time')\n var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')\n var[:] = base_time\n\n logging.debug('Writing time_offset')\n var = nc.createVariable('time_offset', 'i8', dimensions=('time',))\n var.setncattr('long_name', 'Time offset')\n var.setncattr('unis', 'seconds since base_time')\n var[:] = time_offset\n\n logging.debug('Writing epoch')\n var = nc.createVariable('epoch', 'i8', dimensions=('time',))\n var.setncattr('long_name', 'Epoch Time')\n var.setncattr('units', 'seconds since 
1970-01-01 00:00:00 UTC')\n var[:] = epoch\n\n logging.debug('Writing hour')\n var = nc.createVariable('hour', 'f8', dimensions=('time',))\n var.setncattr('long_name', 'Hour of Day')\n var.setncattr('units', 'UTC')\n var[:] = hour\n\n logging.debug('Writing range')\n var = nc.createVariable('range', 'f8', dimensions=('range',))\n var.setncattr('long_name', 'height')\n var.setncattr('units', 'km AGL')\n var[:] = rng\n\n logging.debug('Writing azimuth')\n var = nc.createVariable('azimuth', 'f8', dimensions=('time', 'range'))\n var.setncattr('long_name', 'Azimuth Angle')\n var.setncattr('units', 'degrees')\n var[:] = np.tile(az, (len(rng), 1)).transpose()\n\n logging.debug('Writing elevation')\n var = nc.createVariable('elevation', 'f8', dimensions=('time', 'range'))\n var.setncattr('long_name', 'Elevation angle')\n var.setncattr('units', 'degrees above the horizon')\n var[:] = np.tile(elev, (len(rng), 1)).transpose()\n\n logging.debug('Writing pitch')\n var = nc.createVariable('pitch', 'f8', dimensions=('time', 'range'))\n var.setncattr('long_name', 'Instrument Pitch')\n var.setncattr('units', 'degrees')\n var[:] = np.tile(pitch, (len(rng), 1)).transpose()\n\n logging.debug('Writing roll')\n var = nc.createVariable('roll', 'f8', dimensions=('time', 'range'))\n var.setncattr('long_name', 'Instrument Roll')\n var.setncattr('units', 'degrees')\n var[:] = np.tile(roll, (len(rng), 1)).transpose()\n\n logging.debug('Writing velocity')\n var = nc.createVariable('velocity', 'f8', dimensions=('time', 'range'))\n var.setncattr('long_name', 'Doppler velocity')\n var.setncattr('units', 'm/s')\n var.setncattr('comment', 'Positive values are toward the radar')\n var[:] = vel.transpose()\n\n logging.debug('Writing intensity')\n var = nc.createVariable('intensity', 'f8', dimensions=('time', 'range'))\n var.setncattr('long_name', 'Intensity')\n var.setncattr('units', 'Unitless')\n var.setncattr('comment', 'This is computed as (SNR+1)')\n var[:] = intensity.transpose()\n\n logging.debug('Writing backscatter')\n var = nc.createVariable('backscatter', 'f8', dimensions=('time', 'range'))\n var.setncattr('long_name', 'Attenuated backscatter')\n var.setncattr('units', 'km^(-1) sr^(-1)')\n var[:] = (beta*1e3).transpose()\n\n logging.debug('Closing file')\n nc.close()\n\n logging.info(\"Netcdf successfully created!\")\n\n return filename", "title": "" }, { "docid": "d016e6e2e8e8df8fbdc8ea6a937606ff", "score": "0.5026587", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C 1.5151 -0.7028 -1.6924\n C 0.5628 -1.0444 -0.5451\n C -0.0726 0.2120 0.0615\n H 0.7006 0.9918 0.2065\n H -0.8036 0.6561 -0.6450\n C -0.7302 -0.1134 1.3934\n H -1.5281 -0.8727 1.2577\n H 0.0139 -0.5769 2.0746\n C -1.2879 1.1360 2.0434\n O -0.7237 1.7638 2.9324\n N -2.5317 1.6149 1.6216\n H -2.9733 2.3583 2.1066\n H -3.0661 1.0983 0.9670\n N -0.4933 -1.9991 -0.9575\n O 1.2265 0.3827 -2.4452\n O 2.5137 -1.3131 -2.0286\n H 1.1747 -1.5580 0.2437\n H -0.0807 -2.8370 -1.3026\n H -1.0834 -1.5935 -1.6537\n H 1.9532 0.5532 -3.0378\n \"\"\")", "title": "" }, { "docid": "e303f9b722dabe2f254609bd6943782f", "score": "0.5022817", "text": "def calibrate(self, sensorNameDB6, sensorNameMB):\r\n\r\n #in the command below, the unit of magnitude is mV and should not be supplied.\r\n #everything is working except for the calibration file\r\n #This currently has to be set via the Touchscreen Interface of the device.\r\n #self.setValue(self.devices[\"db7\"] + \":TYPE:NTC:EXCT:TYPE:UNIP:MAG:7:CALB:X96620.dat\")\r\n\r\n calList = []\r\n sensor_db6List = 
[]\r\n sensor_mb1List = []\r\n \r\n while True:\r\n try:\r\n cal = self.getSensorInformation(\"db7\", includeTemperature = True)\r\n sensor_db6 = self.getSensorInformation(\"db6\")\r\n sensor_mb1 = self.getSensorInformation(\"mb1\")\r\n\r\n calList.append(cal)\r\n sensor_db6List.append(sensor_db6)\r\n sensor_mb1List.append(sensor_mb1)\r\n\r\n exportArray = np.hstack((np.array(calList), np.array(sensor_db6List), np.array(sensor_mb1List)))\r\n\r\n print \"Export Array, \", exportArray\r\n print \"Rc: \", cal[2], \"T: \", cal[3], \"R1: \", sensor_db6[2], \"R2: \", sensor_mb1[2]\r\n \r\n headerString = \"\"\"######################################\r\n\r\n Calibration Log\r\n\r\n Calibrated sensor: X_____.dat \r\n (Columns 1 to 4: Voltage, Current, Resitance, Temperature)\r\n\r\n First sensor to calibrate:\"\"\" + sensorNameDB6 + \"\"\"\r\n (Columns 5 to 7: Voltage, Current, Resistance)\r\n\r\n Second sensor to calibrate:\"\"\" + sensorNameMB + \"\"\"\r\n (Columns 8 to 10: Voltage, Current, Resistance)\r\n\r\n######################################\"\"\"\r\n \r\n np.savetxt(\"calibration.txt\", exportArray, fmt=\"%.6e\", header = headerString, comments = \"#\")\r\n\r\n np.savetxt(sensorNameDB6 + \".dat\", exportArray[:,[3,6]], fmt=\"%.6e\", header = \"Temperature (K)\\t Resistance (Ohm)\\nExcitation: Constant Voltage, 7mV\", comments = \"#\")\r\n np.savetxt(sensorNameMB + \".dat\", exportArray[:,[3,9]], fmt=\"%.6e\", header = \"Temperature (K)\\t Resistance (Ohm)\\nExcitation: Constant Voltage, 7mV\", comments = \"#\")\r\n time.sleep(1) \r\n \r\n except KeyboardInterrupt:\r\n print \"Keyboard Interrupt caught. Finishing Calibration.\"\r\n print \"Minimum Temperature achieved: \", np.min(exportArray[:,3])\r\n print \"Maximum Temperature achieved: \", np.max(exportArray[:,3])\r\n break", "title": "" }, { "docid": "44c84e9a3fd760285c955c5217c9496c", "score": "0.50222087", "text": "def _getChebyshevPhotoCalib(self, coefficients, err, xyMax, offset, scaling):\n\n orderPlus1 = self.chebyshevOrder + 1\n pars = np.zeros((orderPlus1, orderPlus1))\n\n bbox = lsst.geom.Box2I(lsst.geom.Point2I(0.0, 0.0),\n lsst.geom.Point2I(*xyMax))\n # Take the zeropoint, apply the absolute relative calibration offset,\n # and whatever flat-field scaling was applied\n pars[:, :] = (coefficients.reshape(orderPlus1, orderPlus1) *\n (offset*units.ABmag).to_value(units.nJy) * scaling)\n\n field = afwMath.ChebyshevBoundedField(bbox, pars)\n calibMean = field.mean()\n\n calibErr = (np.log(10.) 
/ 2.5) * calibMean * err\n\n photoCalib = afwImage.PhotoCalib(field, calibErr)\n\n return photoCalib", "title": "" }, { "docid": "6422d7d5d22cee277736176dd895e937", "score": "0.5017742", "text": "def loading_data(path,version):\n e = 1.602176634e-19 \n\n n0 = collect(\"Nnorm\", path=path, info=False)\n T0 = collect(\"Tnorm\", path=path, info=False)\n Cs0 = collect('Cs0',path=path, info=False)\n wci = collect(\"Omega_ci\", path=path, info=False)\n rhos = collect('rho_s0', path=path, info=False)\n\n n = collect(\"Ne\", path=path, info=False) * n0 \n Pe = collect(\"Pe\", path=path, info=False) * n0 * T0 * e\n\n \n phi = collect(\"phi\", path=path, info=False) \n\n \n t_array = collect(\"t_array\", path=path, info=False)/wci\n dt = (t_array[1] - t_array[0]) \n\n # R0 = collect('R0', path=path, info=False) * rhos\n B0 = collect('Bnorm', path=path, info=False)\n dx = collect('dx', path=path, info=False) * rhos * rhos\n dx=dx[0, 0]\n dz = collect('dz', path=path, info=False)\n dy = collect('dy', path=path, info=False) \n dy =dy[0,0]\n \n \n beta_e= collect('beta_e',path=path,info=False)\n \n Jpar=collect('Jpar',path=path,info=False) * n0 * Cs0\n Vort=collect('Vort',path=path,info=False) * wci\n \n \n if (version==2):\n Pi = collect(\"Pi\", path=path, info=False) * n0 * T0 * e\n # Rzrad = collect(\"Rzrad\",path=path, info=False)\n psi =collect(\"psi\", path=path, info=False)\n psi_zero=collect(\"psi_zero\",path=path,info=False)\n external_field=collect('external_field',path=path,info=False)\n Vi=collect('Vi',path=path,info=False) * Cs0 \n NVi=collect('NVi',path=path,info=False) * n0 * Cs0\n VePsi=collect('VePsi',path=path,info=False) / Cs0 / e *T0 \n\n np.savez(path+'data.npz', n=n, Pe=Pe,Pi=Pi,n0=n0,T0=T0,B0=B0,phi=phi,dt=dt, t_array=t_array,psi=psi,psi_zero=psi_zero,external_field=external_field,beta_e=beta_e,Vi=Vi,NVi=NVi,Jpar=Jpar,dx=dx,dz=dz,dy=dy,Vort=Vort,VePsi=VePsi)\n\n return n, Pe,Pi,n0,T0,B0,phi,dt, t_array,psi,psi_zero,external_field,beta_e,Vi,NVi,Jpar,dx,dz,dy,Vort,VePsi \n else:\n np.savez(path+'data.npz', n=n, Pe=Pe,n0=n0,T0=T0,B0=B0,phi=phi,dt=dt, t_array=t_array,beta_e=beta_e,Jpar=Jpar,dx=dx,dz=dz,dy=dy,Vort=Vort)\n\n return n, Pe,n0,T0,B0,phi,dt, t_array,beta_e,Jpar,dx,dz,dy,Vort", "title": "" }, { "docid": "bae63549d5466eed57d62da303343fd3", "score": "0.5014704", "text": "def load_1D(file_reference, *subpath, **kwargs):\n dimname = process_kwargs([('dimname','')], kwargs)\n v = load_acqu(file_reference, *subpath)\n td2 = int(v['TD'])\n td1 = 1\n td2_zf = int(np.ceil(td2/256.)*256) # round up to 256 points, which is how it's stored\n fp = open_subpath(file_reference, *(subpath+('fid',)),mode='rb')\n data = fp.read()\n if int(v['BYTORDA']) == 1:\n data = np.fromstring(data, dtype=np.dtype('>i4'), count=(len(data)//4))\n else:\n data = np.fromstring(data, dtype=np.dtype('<i4'), count=(len(data)//4))\n data = np.complex128(data)\n data = data[0::2]+1j*data[1::2]\n rg = det_rg(v['RG'])\n data /= rg\n data = bruker_data(data,[td1,td2_zf//2],[dimname,'t2'])\n data = data['t2',0:td2//2] # now, chop out their zero filling\n t2axis = 1./v['SW_h']*r_[1:td2//2+1]\n t1axis = r_[1]\n data.labels([dimname,'t2'],[t1axis,t2axis])\n shiftpoints = int(det_phcorr(v)) # use the canned routine to calculate the second order phase shift\n #print 'shiftpoints = ',shiftpoints\n data.setaxis('t2',lambda x: x-shiftpoints/v['SW_h'])\n logger.debug('yes, I called with %d shiftpoints'%shiftpoints)\n # finally, I will probably need to add in the first order phase shift for the decimation --> just translate 
this\n data.set_prop('title',\n load_title(file_reference,*subpath))\n data.set_prop('acq',\n v)\n with open_subpath(file_reference, *(subpath+('pulseprogram',)),mode='r') as fp:\n ppg = fp.read()\n data.set_prop('pulprog',ppg)\n if type(file_reference) is tuple:\n data.set_prop('filename',\n file_reference[1])\n else:\n data.set_prop('filename',\n file_reference)\n if open_subpath(file_reference,\n *(subpath+('pdata','1','procs')),\n test_only=True):\n data.set_prop('proc',\n load_jcamp(file_reference,\n *(subpath+('pdata','1','procs'))))\n return data", "title": "" }, { "docid": "5f787dae1365fc1bd46340cfc67d2b0f", "score": "0.50117224", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C 1.5608 -0.1292 0.1036\n C 0.4146 -0.8131 -0.6981\n C -0.7992 -1.1375 0.1999\n C -1.1994 0.1158 1.0239\n C 0.0102 0.5609 1.8588\n O 1.0519 0.9487 0.9514\n O -1.5643 1.1600 0.0998\n O -1.8913 -1.6058 -0.5684\n O 0.0596 0.0530 -1.7748\n O 2.4313 0.5364 -0.7567\n H 0.3662 -0.2179 2.5535\n H -0.1758 1.4940 2.4264\n H 2.1824 -0.8044 0.7206\n H 0.7920 -1.7211 -1.2265\n H -0.5852 -2.0009 0.8747\n H -2.1222 -0.0679 1.6157\n H 1.9201 1.0105 -1.4771\n H -0.5937 0.7491 -1.4598\n H -0.8805 1.8820 0.1194\n H -2.1269 -0.9426 -1.2685\n \"\"\")", "title": "" }, { "docid": "b6dab704316ca1e609049a0f08c4a063", "score": "0.50086874", "text": "def __init__(self,filename):\n # psfdata = fits.open(filename, memmap=False)\n with fits.open(filename, memmap=False) as psfdata:\n xcoeff=psfdata[0].data\n hdr=psfdata[0].header\n wmin=hdr['WAVEMIN']\n wmax=hdr['WAVEMAX']\n ycoeff=psfdata[1].data\n \n arm = hdr['CAMERA'].lower()[0]\n npix_x = hdr['NPIX_X']\n npix_y = hdr['NPIX_Y']\n\n if 'XSIGMA' in psfdata:\n self.xsigma_boot=psfdata['XSIGMA'].data\n if 'WSIGMA' in psfdata: #- w sigma legendre expansion coefficients\n self.wcoeff=psfdata['WSIGMA'].data\n \n if arm not in ['b','r','z']:\n raise ValueError(\"arm not in b, r, or z. 
File should be of the form psfboot-r0.fits.\") \n #- Get the coeffiecients\n nspec=xcoeff.shape[0]\n ncoeff=xcoeff.shape[1]\n \n self.npix_x=npix_x\n self.npix_y=npix_y\n self.xcoeff=xcoeff\n self.ycoeff=ycoeff\n self.wmin=wmin\n self.wmax=wmax\n self.nspec=nspec\n self.ncoeff=ncoeff\n #invertion should be done at psf creation time and saved into file\n c,ymin,ymax=self.invert(coeff=self.ycoeff)\n self.icoeff=c\n self.ymin=ymin\n self.ymax=ymax", "title": "" }, { "docid": "5f956ae2fbf9bfafab2c43807f705375", "score": "0.50064707", "text": "def AuxReadInMagFld3D(filePath, sCom):\n \n from array import array\n from srwlib import SRWLMagFld3D\n \n f = open(filePath, 'r')\n f.readline() #1st line: just pass\n global xStart,xStep,xNp,yStart,yStep,yNp,zStart,zStep,zNp\n xStart = float(f.readline().split(sCom, 2)[1]) #2nd line: initial X position [m]; it will not actually be used\n xStep = float(f.readline().split(sCom, 2)[1]) #3rd line: step vs X [m]\n xNp = int(f.readline().split(sCom, 2)[1]) #4th line: number of points vs X\n yStart = float(f.readline().split(sCom, 2)[1]) #5th line: initial Y position [m]; it will not actually be used\n yStep = float(f.readline().split(sCom, 2)[1]) #6th line: step vs Y [m]\n yNp = int(f.readline().split(sCom, 2)[1]) #7th line: number of points vs Y\n zStart = float(f.readline().split(sCom, 2)[1]) #8th line: initial Z position [m]; it will not actually be used\n zStep = float(f.readline().split(sCom, 2)[1]) #9th line: step vs Z [m]\n zNp = int(f.readline().split(sCom, 2)[1]) #10th line: number of points vs Z\n totNp = xNp*yNp*zNp\n locArBx = array('d', [0]*totNp)\n locArBy = array('d', [0]*totNp)\n locArBz = array('d', [0]*totNp)\n for i in range(totNp):\n curLineParts = f.readline().split('\\t')\n locArBx[i] = float(curLineParts[0])\n locArBy[i] = float(curLineParts[1])\n locArBz[i] = float(curLineParts[2])\n f.close()\n xRange = xStep\n if xNp > 1: xRange = (xNp - 1)*xStep\n yRange = yStep\n if yNp > 1: yRange = (yNp - 1)*yStep\n zRange = zStep\n if zNp > 1: zRange = (zNp - 1)*zStep\n \n return SRWLMagFld3D(locArBx, locArBy, locArBz, xNp, yNp, zNp, xRange, yRange, zRange, 1)", "title": "" }, { "docid": "48e8ed1e48231198883d2eda123e9cb6", "score": "0.5005118", "text": "def calibrate(self):\n self.set_offset(0, 0, 0)\n samples = self.get_axes()\n\n x = samples['x']\n y = samples['y']\n z = samples['z']\n \n abs_x = math.fabs(x)\n abs_y = math.fabs(y)\n abs_z = math.fabs(z)\n\n # Find which axe is in the field of gravity and set its expected value to 1g absolute value\n if self._equal(abs_x, 1) and self._equal(abs_y, 0) and self._equal(abs_z, 0):\n cal_x = 1 if x > 0 else -1\n cal_y = 0\n cal_z = 0\n elif self._equal(abs_x, 0) and self._equal(abs_y, 1) and self._equal(abs_z, 0):\n cal_x = 0\n cal_y = 1 if y > 0 else -1\n cal_z = 0\n elif self._equal(abs_x, 0) and self._equal(abs_y, 0) and self._equal(abs_z, 1):\n cal_x = 0\n cal_y = 0\n cal_z = 1 if z > 0 else -1\n else:\n raise ValueError(\"Could not determine ADXL position. 
One axe should be set in field of gravity\")\n\n offset_x = cal_x - x\n offset_y = cal_y - y\n offset_z = cal_z - z\n \n self.set_offset(offset_x, offset_y, offset_z)\n \n return {'x': offset_x,\n 'y': offset_y,\n 'z': offset_z}", "title": "" }, { "docid": "a0ac5f4f10ebb8bad8ab3ff8c4792c06", "score": "0.50045604", "text": "def read_matchpars(filename):\n file = open(filename,'r')\n IMF,dmodmin,dmodmax,dmodstep,Avmin,Avmax,Avstep = file.readline().split()\n logZmin,logZmax,dlogZ= file.readline().split()\n BF,Bad0,Bad1= file.readline().split()\n Ncmds= file.readline().split()\n Mag1step,Colorstep,fake_sm,Colormin,Colormax,Colors=file.readline().split()\n Mag1min, Mag1max, Mag1name = file.readline().split()\n Mag2min, Mag2max, Mag2name = file.readline().split()\n Ntbins= file.readline()\n lines = file.readlines()\n bgline2 = lines.pop()\n bgline1 = lines.pop()\n times = []\n for line in lines:\n times.append(map(float,line.split()))\n \n tbins = np.transpose(times)\n data = {'IMF':float(IMF),\n 'dmodmin': float(dmodmin), \n 'dmodmax': float(dmodmax), \n 'dmodstep': float(dmodstep), \n 'Avmin': float(Avmin), \n 'Avmax': float(Avmax), \n 'Avstep': float(Avstep), \n 'logZmin': float(logZmin), \n 'logZmax': float(logZmax), \n 'dlogZ': float(dlogZ), \n 'BF': float(BF), \n 'Bad0': float(Bad0), \n 'Bad1': float(Bad1), \n 'Mag1step': float(Mag1step), \n 'Colortep': float(Colorstep), \n 'fake_sm': float(fake_sm), \n 'Colormin': float(Colormin), \n 'Colormax': float(Colormax), \n 'Colors': Colors, \n 'Mag1min':float( Mag1min), \n 'Mag1max': float(Mag1max), \n 'Mag1name': Mag1name, \n 'Mag2min': float(Mag2min), \n 'Mag2max': float(Mag2max), \n 'Mag2name': Mag2name, \n 'Ntbins': int(Ntbins), \n 'To': tbins[0], \n 'Tf': tbins[1], \n 'bgline2': bgline2, \n 'bgline1': bgline1 \n }\n return data", "title": "" }, { "docid": "e58f7f367ed17a01bf65c2873d65e8d8", "score": "0.5002472", "text": "def read_abunds(path):\n f = open(path)\n header = f.readlines()[0][:-1]\n f.close()\n ret = {}\n\n dat = np.genfromtxt(path)\n ret['P'] = dat[:, 0]\n ret['T'] = dat[:, 1]\n ret['rho'] = dat[:, 2]\n\n for i in range(int((len(header) - 21) / 22)):\n\n name = header[21 + i * 22:21 + (i + 1) * 22][3:].replace(' ', '')\n\n if name == 'C2H2,acetylene':\n name = 'C2H2'\n\n if i % 2 == 0:\n number = int(header[21 + i * 22:21 + (i + 1) * 22][0:3])\n ret[name] = dat[:, number]\n\n return ret", "title": "" }, { "docid": "d9d37ef8c7c4930db3cff6c899150b8f", "score": "0.49943233", "text": "def load_ref_system():\n return psr.make_system(\"\"\"\n C 1.3108 0.4875 0.6872\n O 0.2819 1.4066 1.1001\n C -1.0116 0.8235 1.3225\n C -1.5332 0.1819 0.0262\n C -0.5213 -0.8406 -0.5377\n O -1.0003 -1.0985 -1.8653\n C 0.9022 -0.2547 -0.6139\n O 1.7405 -1.4107 -0.7519\n O -2.7205 -0.5043 0.4401\n O 2.4071 1.3383 0.4292\n H -0.9645 0.0925 2.1500\n H -1.6208 1.6947 1.6317\n H 1.6189 -0.1867 1.5107\n H 1.0198 0.3991 -1.5065\n H -0.5268 -1.7942 0.0402\n H -1.7958 0.9357 -0.7505\n H 2.0924 2.2500 0.2013\n H 2.6741 -1.1309 -0.8998\n H -0.4411 -1.7904 -2.2902\n H -3.1369 -0.9411 -0.3408\n \"\"\")", "title": "" }, { "docid": "ea2b99233ffa471fcd0668409e2b45c0", "score": "0.49910453", "text": "def get_calibration(inputFileDer, file_name):\n color_of_interest = raw_input(\"What color is of interest?(red, green, or blue): \")\n if ('Red' in color_of_interest) or ('red' in color_of_interest):\n color_of_interest = 0\n elif ('Green' in color_of_interest) or ('green' in color_of_interest):\n color_of_interest = 1\n elif ('Blue' in color_of_interest) or ('blue' in 
color_of_interest):\n color_of_interest = 2\n\n #data = arrayFromFile(inputFileDer+fileNum)\n org_image = jpg_to_array(inputFileDer, file_name)\n\n image_stripped, image_proc, center = strip_color(org_image, color_of_interest, sig=7)\n\n #Show detected rings using the canny algorithm\n #plt.subplot(5, 4, 1)\n plt.figure(1)\n # plt.subplot(2, 1, 1)\n amps = file_name.replace('.jpg', '').replace('_', '.')\n amps = float(amps)\n\n plt.title(str(amps) + ' amps')\n plt.ylabel('Pixel Bin')\n plt.xlabel('Pixel Bin')\n plt.imshow(image_proc, origin='lower')\n plt.gray()\n\n #Show the stripped down image\n plt.imshow(image_stripped, origin='lower', alpha=.5)\n plt.gray()\n\n #Print average of the center in the y axis.\n avrg_y = np.round(np.mean(center, axis=0)[0], decimals=0)\n print('Average y value for center: ' + str(avrg_y))\n uncertanty_y = np.round(np.std(center, axis=0)[0] / (math.sqrt(len(center)) * 2), decimals=1)\n print('Uncertainty in y: ' + str(uncertanty_y))\n\n #Print average of the center in the x axis.\n avrg_x = np.round(np.mean(center, axis=0)[1], decimals=1)\n print('Average x value for center: ' + str(avrg_x))\n uncertanty_x = np.round(np.std(center, axis=0)[1] / (math.sqrt(len(center)) * 2), decimals=1)\n print('Uncertainty in x: ' + str(uncertanty_x))\n\n #y value will give horizontal slice.\n l = plt.axhline(y=avrg_y, color='r')\n l = plt.axvline(x=avrg_x, color='r')\n #plt.show()\n\n #Use edge array to get peak lists for horizontal and vertical cross sections\n edges_left, edges_right = getedge(avrg_x, image_proc[avrg_y])\n edges_botton, edges_top = getedge(avrg_y, image_proc[::, avrg_x])\n\n left_calibration, right_calibration, left_main_peak_list, right_main_peak_list = \\\n create_calibration_pair(image_stripped[avrg_y], image_proc[avrg_y], edges_left, edges_right, uncertanty_x, avrg_x, amps)\n bottom_calibration, top_calibration, bottom_main_peak_list, top_main_peak_list = \\\n create_calibration_pair(image_stripped[::, avrg_x], image_proc[::, avrg_x], edges_botton, edges_top, uncertanty_y, avrg_y, amps)\n\n os.system(\"afplay woohoo.wav\")\n\n plt.show()\n\n return image_stripped, image_proc, org_image, left_calibration, right_calibration, \\\n bottom_calibration, top_calibration, color_of_interest, avrg_y, avrg_x", "title": "" }, { "docid": "e0f954a52bd5ff728ff0969352e6ef6c", "score": "0.4990834", "text": "def test_calibration(self, fname):\n # Load image, draw chessboard and undistort.\n img = cv2.imread(fname)\n img_undist = self.undistort_image(img)\n self.show_undistorted_images(img, img_undist)", "title": "" }, { "docid": "e2c3a00d3463acae8ecd68714140b688", "score": "0.49881336", "text": "def _read_from_file( self, filename ):\n with open( filename, 'r' ) as file_in:\n file_in.readline()\n self._number_of_k_points, self._number_of_bands, self._number_of_ions = [ int( f ) for f in get_numbers_from_string( file_in.readline() ) ]\n self.read_in = file_in.read()\n self.parse_k_points()\n self.parse_bands()\n self.parse_projections()\n self.sanity_check()\n self.read_in = None # clear memory\n if self.calculation[ 'spin_polarised' ]:\n self._data = self.projection_data.reshape( self._spin_channels, self._number_of_k_points, self._number_of_bands, self._number_of_ions+1, self._number_of_projections+1 )[:,:,:,:,1:].swapaxes( 0, 1).swapaxes( 1, 2 )\n else:\n self._data = self.projection_data.reshape( self._number_of_k_points, self._number_of_bands, self._spin_channels, self._number_of_ions+1, self._number_of_projections+1 )[:,:,:,:,1:]", "title": "" }, { "docid": 
"ca32ef1efbace78a641c162247907121", "score": "0.49786255", "text": "def calibration(self, img_list, x, y):\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((x*y, 3), np.float32)\n objp[:, :2] = np.mgrid[0:y, 0:x].T.reshape(-1, 2)\n\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n\n # Step through the list and search for chessboard corners\n for fname in img_list:\n gray = cv2.imread(fname, 0)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n #img = cv2.drawChessboardCorners(gray, (9,6), corners, ret)\n # cv2.imshow('img',img)\n # cv2.waitKey(30)\n\n # cv2.destroyAllWindows()\n img_shape = cv2.imread(img_list[0], 0).shape[::-1]\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(\n objpoints, imgpoints, img_shape, None, None)\n if ret > 1.0:\n print('Warning: retval %f is better to less than 1.0' % ret)\n\n self.mtx = mtx\n self.dist = dist\n self.rvecs = rvecs\n self.tvecs = tvecs\n\n return ret", "title": "" } ]
69d4dc11c310fbf809daa1fe6ad75e3e
Compares two nested models by likelihood ratio test
[ { "docid": "f177e33e18cf8c07f6bdfec041c53fc1", "score": "0.6138221", "text": "def LikelihoodRatioTest(null_model, alt_model):\n\n chisq = -2.0 * null_model.loglikelihood() + 2.0 * alt_model.loglikelihood()\n df = null_model.df - alt_model.df # Null model has more DFs\n res = LikelihoodRatioTestResult(chisq, df, stats.chi2, len(null_model.observations()))\n return res", "title": "" } ]
[ { "docid": "bf8e7021a8b42eba46d0f452fe785a88", "score": "0.6811791", "text": "def mismatch(model1, model2):\n\n return 1.0 - np.abs(np.vdot(model1, model2) / np.vdot(model1, model1))", "title": "" }, { "docid": "783d78e07638ddc84b9c6d56582419be", "score": "0.6533749", "text": "def compare_models(A1, B1, pi1, A2, B2, pi2, T, kernel=spscicomp.hmm.kernel.python):\n obs = kernel.random_sequence(A2, B2, pi2, T)\n logprob1, _, _ = kernel.forward(A1, B1, pi1, obs)\n logprob2, _, _ = kernel.forward(A2, B2, pi2, obs)\n similarity1 = (logprob2 - logprob1) / float(T)\n obs = kernel.random_sequence(A1, B1, pi1, T)\n logprob1, _, _ = kernel.forward(A1, B1, pi1, obs)\n logprob2, _, _ = kernel.forward(A2, B2, pi2, obs)\n similarity2 = (logprob2 - logprob1) / float(T)\n return 0.5 * (similarity1 + similarity2)", "title": "" }, { "docid": "760951d46fd5c47a3442f4a8efd122db", "score": "0.6508991", "text": "def similarity_comparison_np(self, a, b):\n\n a_damage = a.damage.all()[0]\n b_damage = b.damage.all()[0]\n\n likelihood = 0\n\n if a.make.lower() == b.make.lower():\n likelihood = likelihood + (self.comparison_weights[\"make\"] * 100)\n\n if a.model.lower() == b.model.lower():\n likelihood = likelihood + (self.comparison_weights[\"model\"] * 100)\n\n if a.color.lower() == b.color.lower():\n likelihood = likelihood + (self.comparison_weights[\"color\"] * 100)\n\n if a_damage.location == b_damage.location:\n likelihood = likelihood + (self.comparison_weights[\"damage\"] * 100)\n \n if not a_damage and not b_damage:\n likelihood = likelihood + 20\n\n return likelihood", "title": "" }, { "docid": "790dc6820c51b9dc0c288a2a3dc57e40", "score": "0.63138413", "text": "def similarity_comparison(self, a, b):\n\n a_damage = a.damage.all()[0]\n b_damage = b.damage.all()[0]\n\n likelihood = 0\n\n if a.make.lower() == b.make.lower():\n likelihood = likelihood + (self.weights[\"make\"] * 100)\n\n if a.model.lower() == b.model.lower():\n likelihood = likelihood + (self.weights[\"model\"] * 100)\n\n if a.color.lower() == b.color.lower():\n likelihood = likelihood + (self.weights[\"color\"] * 100)\n\n if a_damage.location == b_damage.location:\n likelihood = likelihood + (self.weights[\"damage\"] * 100)\n\n if not a_damage and not b_damage:\n likelihood = likelihood + 20\n\n if a.license_plate.lower() == b.license_plate.lower():\n likelihood = likelihood + (self.weights[\"license_plate\"] * 100)\n\n return likelihood", "title": "" }, { "docid": "c9dc61ffdbe4589843efc09c73045946", "score": "0.615655", "text": "def test_model_equal(model_1, model_2):\n models_differ = 0\n for key_item_1, key_item_2 in zip(model_1.state_dict().items(),\n model_2.state_dict().items()):\n if torch.equal(key_item_1[1], key_item_2[1]):\n pass\n else:\n models_differ += 1\n if (key_item_1[0] == key_item_2[0]):\n print('Mismatch found at', key_item_1[0])\n else:\n raise Exception\n if models_differ == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "e046c85a7aab40e734c5bd4688c1ae01", "score": "0.6047852", "text": "def compare_models_weight(model_1, model_2):\n param_dict_model_1 = get_model_weight(model_1)\n param_dict_model_2 = get_model_weight(model_2)\n assert compare_dict_keys(param_dict_model_1, param_dict_model_2)\n return all([torch.isclose(param_dict_model_1[k], param_dict_model_2[k]).all() for k in param_dict_model_1.keys()])", "title": "" }, { "docid": "7b983c4aa617f5968a33f1107be62857", "score": "0.60120445", "text": "def cmpRatio(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n work1 = 
subInfo1[WORK]\n work2 = subInfo2[WORK]\n return float(val1) / work1 > float(val2) / work2", "title": "" }, { "docid": "ad58c53d39894644a2345ba643d6a039", "score": "0.59816575", "text": "def cmpRatio(subInfo1, subInfo2):\n\n return (subInfo1[0]+0.0)/subInfo1[1] > (subInfo2[0]+0.0)/subInfo2[1]", "title": "" }, { "docid": "5a4197ae9e3794060a4b4cefb4c6a5ad", "score": "0.5969221", "text": "def test_nested2(self):\n model = NestedModel().eval()\n qconfig_dict = {\n 'fc3': default_qconfig,\n 'sub2': default_qconfig\n }\n model = prepare(model, qconfig_dict)\n\n def checkPrepModules(model, before_calib=False):\n if before_calib:\n self.checkObservers(model)\n self.checkNoPrepModules(model)\n self.checkNoPrepModules(model.sub1)\n self.checkNoPrepModules(model.sub1.fc)\n self.checkNoPrepModules(model.sub1.relu)\n self.checkNoPrepModules(model.sub2)\n self.checkHasPrepModules(model.sub2.fc1)\n self.checkHasPrepModules(model.sub2.fc2)\n self.checkHasPrepModules(model.fc3)\n\n checkPrepModules(model, True)\n\n test_only_eval_fn(model, self.calib_data)\n convert(model)\n\n def checkQuantized(model):\n checkPrepModules(model)\n self.checkLinear(model.sub1.fc)\n self.assertEqual(type(model.sub1.relu), torch.nn.ReLU)\n self.checkQuantizedLinear(model.sub2.fc1)\n self.checkQuantizedLinear(model.sub2.fc2)\n self.checkQuantizedLinear(model.fc3)\n test_only_eval_fn(model, self.calib_data)\n\n checkQuantized(model)\n\n # test one line API\n model = quantize(NestedModel().eval(), test_only_eval_fn, self.calib_data, qconfig_dict)\n checkQuantized(model)", "title": "" }, { "docid": "46779e16984b172e28518bd569db4350", "score": "0.59671474", "text": "def compare(Y, first_model, test_effect, sub=None):\n a1 = lm(Y, first_model, sub=sub)\n a2 = lm(Y, first_model + test_effect, sub=sub)\n print \n print a1.anova(title='MODEL 1:')\n print '\\n'\n print a2.anova(title='MODEL 2:')\n # compare\n SS_diff = a1.SS_res - a2.SS_res\n df_diff = test_effect.df\n MS_diff = SS_diff / df_diff\n #if not round(SS_diff, 6) == round(SS_cov_1 - SS_cov_2, 6):\n # txt = \"\\nWARNING: SS_diff: {0} a1.SS_res - a2.SS_res: {1}\"\n # print txt.format(SS_diff, a1.SS_res - a2.SS_res)\n F = MS_diff / a2.MS_res\n p = 1 - sp.stats.distributions.f.cdf(F, df_diff, a2.df_res)\n stars = test.star(p).replace(' ', '')\n difftxt = \"Residual SS reduction: {SS}, df difference: {df}, \" + \\\n \"F = {F}{s}, p = {p}\"\n print '\\n'+difftxt.format(SS=SS_diff, df=df_diff, F=F, s=stars, p=p)", "title": "" }, { "docid": "fd5385f1647378ad48ba21cbcc784ac4", "score": "0.5898964", "text": "def likelihood_ratio(self, other, a):\n logli = self.logli(a)\n other_logli = other.logli(a)\n return F.exp(logli - other_logli)", "title": "" }, { "docid": "7c16ff613d31d57b3c727524d5f476c1", "score": "0.5894124", "text": "def test_second_tree_likelihood(self):\n tau = self.tree.get_tau_matrix()\n second_tree = Tree(TreeTypes.REGULAR)\n second_tree.fit(1, 3, tau, self.tree)\n uni_matrix = np.array([[0.1, 0.2, 0.3, 0.4]])\n\n first_value, new_u = self.tree.get_likelihood(uni_matrix)\n second_value, out_u = second_tree.get_likelihood(new_u)\n\n # assert second_value < 0", "title": "" }, { "docid": "41b7995368a1103ec079a4590005766e", "score": "0.58445466", "text": "def test_score(self):\n\n score1 = self.model.score(self.outcomes, self.modelparams, self.expparams, return_L=False)\n L1 = self.model.likelihood(self.outcomes, self.modelparams, self.expparams)\n score, L = self.model.score(self.outcomes, self.modelparams, self.expparams, return_L=True)\n\n # Ensure some consistency\n 
assert_almost_equal(score1, score, 3)\n assert_almost_equal(L1, L, 3)\n\n # Dimensions must be correct\n assert(score.shape == (\n self.model.n_modelparams,\n self.n_outcomes,\n self.n_models,\n self.n_expparams)\n )", "title": "" }, { "docid": "efcca34589592535db00a70e73440e0e", "score": "0.58393836", "text": "def lrt(models, refit=True):\n models_list = copy.deepcopy(models)\n if not isinstance(models_list, list):\n models_list = [models_list]\n if len(models_list) < 2:\n raise ValueError(\"Must have 2 models to perform comparison\")\n if not all(list(map(lambda m: isinstance(m, Lmer), models_list))):\n raise TypeError(\"All models are not Lmer\")\n\n # refit models if needed\n refitted = False\n if refit:\n for i, m in enumerate(models_list):\n if m._REML:\n refitted = True\n m.fit(REML=False, summarize=False)\n models_list[i] = m\n\n # Get number of coefs for each model\n all_params = []\n for m in models_list:\n all_params.append(_get_params(m))\n all_params = np.array(all_params)\n idx = np.argsort(all_params)\n all_params = all_params[idx]\n models_list = np.array(models_list)[idx]\n out = pd.DataFrame()\n for i, m in enumerate(models_list):\n df = _get_params(m) - (_get_params(models_list[i - 1])) if i > 0 else np.nan\n chisq = (\n ((-2 * models_list[i - 1].logLike) - (-2 * m.logLike)) if i > 0 else np.nan\n )\n pval = _lrt([models_list[index] for index in [i - 1, i]]) if i > 0 else np.nan\n out = pd.concat(\n [\n out,\n pd.DataFrame(\n {\n \"model\": m.formula,\n \"npar\": _get_params(m),\n \"AIC\": m.AIC,\n \"BIC\": m.BIC,\n \"deviance\": -2 * m.logLike,\n \"log-likelihood\": m.logLike,\n \"Chisq\": chisq,\n \"Df\": df,\n \"P-val\": pval,\n },\n index=[0],\n ),\n ],\n ignore_index=True,\n )\n out[\"Sig\"] = out[\"P-val\"].apply(lambda x: _sig_stars(x))\n out = out[\n [\n \"model\",\n \"npar\",\n \"AIC\",\n \"BIC\",\n \"log-likelihood\",\n \"deviance\",\n \"Chisq\",\n \"Df\",\n \"P-val\",\n \"Sig\",\n ]\n ]\n\n if refitted:\n print(\"refitting model(s) with ML (instead of REML)\")\n return out.fillna(\"\")", "title": "" }, { "docid": "0b747ba0fa62e9e49317ccab88095cde", "score": "0.5827247", "text": "def assert_models_same(self, model, model2):\n if model.optimizer:\n assert (\n model.optimizer.get_config() == model2.optimizer.get_config()\n )\n assert len(model.layers) == len(model2.layers) # shallow comparison\n layers = list(iterlayers(model))\n layers2 = list(iterlayers(model2))\n assert len(layers) == len(layers2) # deep comparison", "title": "" }, { "docid": "97ccc5465512eb11a91d2bf670e692c3", "score": "0.58188003", "text": "def _check_model_same(model1: torch.nn.Module, model2: torch.nn.Module, cuda=False) -> float:\n model1.eval()\n model2.eval()\n\n rand_input = torch.rand((8, 3, 224, 224)) # the same input size as ImageNet\n\n if cuda:\n rand_input = rand_input.cuda()\n model1.cuda()\n model2.cuda()\n\n out1, _ = model1(rand_input) # ignore aux output for resnet\n out2, _ = model2(rand_input)\n\n diff = out1 - out2\n max_diff = torch.max(diff.abs().view(-1)).item()\n\n return max_diff", "title": "" }, { "docid": "c5a2b8f7e9bf537bdb6a11ec80cc969a", "score": "0.5753253", "text": "def test_diff_of_same_yaml():\n a = MDF(tdir + \"samples/test-model-a.yml\", handle=\"test\")\n b = MDF(tdir + \"samples/test-model-a.yml\", handle=\"test\")\n actual = diff_models(a.model, b.model)\n expected = {}\n assert actual == expected", "title": "" }, { "docid": "82bbde908971b4b7155a8c134ca79a86", "score": "0.5752676", "text": "def test_nested1(self):\n model = NestedModel().eval()\n 
qconfig_dict = {\n 'fc3': default_qconfig,\n 'sub2.fc1': default_qconfig\n }\n\n def checkPrepModules(model, before_calib=False):\n if before_calib:\n self.checkObservers(model)\n self.checkNoPrepModules(model)\n self.checkNoPrepModules(model.sub1)\n self.checkNoPrepModules(model.sub1.fc)\n self.checkNoPrepModules(model.sub1.relu)\n self.checkNoPrepModules(model.sub2)\n self.checkHasPrepModules(model.sub2.fc1)\n self.checkNoPrepModules(model.sub2.fc2)\n self.checkHasPrepModules(model.fc3)\n\n model = prepare(model, qconfig_dict)\n checkPrepModules(model, True)\n test_only_eval_fn(model, self.calib_data)\n convert(model)\n\n def checkQuantized(model):\n checkPrepModules(model)\n self.checkLinear(model.sub1.fc)\n self.checkQuantizedLinear(model.fc3)\n self.checkQuantizedLinear(model.sub2.fc1)\n self.checkLinear(model.sub2.fc2)\n test_only_eval_fn(model, self.calib_data)\n\n checkQuantized(model)\n\n # test one line API\n model = quantize(NestedModel().eval(), test_only_eval_fn, self.calib_data, qconfig_dict)\n checkQuantized(model)", "title": "" }, { "docid": "18d34bbf3d5b0dc2baa628be725bbedc", "score": "0.5735922", "text": "def cmpRatio(subInfo1, subInfo2):\n return subInfo1[0]/float(subInfo1[1]) > subInfo2[0]/float(subInfo2[1])", "title": "" }, { "docid": "283a13eaff32a5dd7405dd179eb32add", "score": "0.57019323", "text": "def test_second_tree_likelihood(self):\n tau = self.tree.get_tau_matrix()\n second_tree = Tree(TreeTypes.CENTER)\n second_tree.fit(1, 3, tau, self.tree)\n uni_matrix = np.array([[0.1, 0.2, 0.3, 0.4]])\n\n first_value, new_u = self.tree.get_likelihood(uni_matrix)\n second_value, out_u = second_tree.get_likelihood(new_u)\n\n expected = 0.540089320412914\n assert abs(second_value - expected) < 10E-3", "title": "" }, { "docid": "286575d2fa81983630c2a3d32cd5605c", "score": "0.5674125", "text": "def test_second_tree_likelihood(self):\n tau = self.tree.get_tau_matrix()\n\n second_tree = Tree(TreeTypes.DIRECT)\n second_tree.fit(1, 3, tau, self.tree)\n\n uni_matrix = np.array([[0.1, 0.2, 0.3, 0.4]])\n\n first_value, new_u = self.tree.get_likelihood(uni_matrix)\n second_value, out_u = second_tree.get_likelihood(new_u)\n\n expected = 0.7184205492690413\n assert abs(second_value - expected) < 10E-3", "title": "" }, { "docid": "baf820ced10ccf004d24a6d4d7723b13", "score": "0.5655352", "text": "def compare_models_weight_grad(model_1, model_2):\n param_grad_dict_model_1 = get_model_weight_grad(model_1)\n param_grad_dict_model_2 = get_model_weight_grad(model_2)\n compare_dict_keys(param_grad_dict_model_1, param_grad_dict_model_2)\n return all([torch.isclose(param_grad_dict_model_1[k], param_grad_dict_model_2[k]).all() for k in param_grad_dict_model_1.keys()])", "title": "" }, { "docid": "55c6312b334547f6328b93f614c47cd1", "score": "0.5652966", "text": "def models_are_equivalent(model_a: TopLevelOscalModel, model_b: TopLevelOscalModel) -> bool:\n # this will change the second model as a side-effect\n model_b.metadata.last_modified = model_a.metadata.last_modified\n return model_a == model_b", "title": "" }, { "docid": "af4542fc8ad7e055150d440ec47f547f", "score": "0.5650115", "text": "def compare_models(\n dataframes,\n phi1,\n train_func1,\n phi2=None,\n train_func2=None,\n vectorize1=True,\n vectorize2=True,\n stats_test=scipy.stats.wilcoxon,\n trials=10,\n train_size=0.7,\n score_func=utils.safe_macro_f1):\n if phi2 == None:\n phi2 = phi1\n if train_func2 == None:\n train_func2 = train_func1\n experiments1 = [experiment(dataframes,\n phi=phi1,\n train_func=train_func1,\n 
score_func=score_func,\n vectorize=vectorize1,\n verbose=False) for _ in range(trials)]\n experiments2 = [experiment(dataframes,\n phi=phi2,\n train_func=train_func2,\n score_func=score_func,\n vectorize=vectorize2,\n verbose=False) for _ in range(trials)]\n scores1 = np.array([d['scores'][0] for d in experiments1])\n scores2 = np.array([d['scores'][0] for d in experiments2])\n # stats_test returns (test_statistic, p-value). We keep just the p-value:\n pval = stats_test(scores1, scores2)[1]\n # Report:\n print('Model 1 mean: {0:.03f}'.format(scores1.mean()))\n print('Model 2 mean: {0:.03f}'.format(scores2.mean()))\n print('p = {0:.03f}'.format(pval if pval >= 0.001 else 'p < 0.001'))\n # Return the scores for later analysis, and the p value:\n return scores1, scores2, pval", "title": "" }, { "docid": "50f66fcafd254d3dbcad32834a109e07", "score": "0.5622192", "text": "def test_likelihood(self):\n\n L = self.model.likelihood(self.outcomes, self.modelparams, self.expparams)\n\n assert(L.shape == (\n self.n_outcomes,\n self.n_models,\n self.n_expparams)\n )", "title": "" }, { "docid": "9161d8451dec7ae89a3a525b327c8696", "score": "0.55708843", "text": "def test_sub(self):\n np.random.seed(0)\n\n for _ in range(10):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n\n p1.normalize()\n p2.normalize()\n\n expected = np.dot(np.linalg.inv(p2.to_matrix()), p1.to_matrix())\n self.assertAlmostEqual(np.linalg.norm((p1 - p2).to_matrix() - expected), 0.)", "title": "" }, { "docid": "a4d403a8c53f3b3c3c8467768e24a682", "score": "0.5565829", "text": "def check_agents_similarity(agent1, agent2, all_models, mapping=False):\n judgments = create_languages_array([agent1, agent2], all_models,\n mapping)\n # proportion of models where the judgments of the two agents are different\n prop_different = L1_dist(judgments[:, 0], judgments[:, 1])\n return prop_different", "title": "" }, { "docid": "96266a8415cb398b47899d4047bf2114", "score": "0.55384755", "text": "def same_weights(model_a, model_b):\n for params_a, params_b in zip(model_a.parameters(), model_b.parameters()):\n if (params_a.data != params_b.data).sum() > 0:\n return False\n return True", "title": "" }, { "docid": "86647c1b77771dc8ffe48e873a57fb8f", "score": "0.55348057", "text": "def similaritytest(orig, B):\n #ft.begin(\"similaritytest\")\n r = len(intersection(orig.textWords, B.textWords)) / len(orig.textWords)\n #ft.end(\"similaritytest\")\n return r", "title": "" }, { "docid": "c964f68b37dbc8c0aa2702f93ffa8400", "score": "0.5524041", "text": "def log_likelihood_ratio_test(self):\n from lifelines.statistics import _chisq_test_p_value, StatisticalResult\n\n ll_null = self._ll_null\n ll_alt = self.log_likelihood_\n\n test_stat = 2 * ll_alt - 2 * ll_null\n degrees_freedom = self.params_.shape[0] - 2 # delta in number of parameters between models\n p_value = _chisq_test_p_value(test_stat, degrees_freedom=degrees_freedom)\n return StatisticalResult(\n p_value,\n test_stat,\n test_name=\"log-likelihood ratio test\",\n degrees_freedom=degrees_freedom,\n null_distribution=\"chi squared\",\n )", "title": "" }, { "docid": "5be3b65b78283aa4d76978075d6e7691", "score": "0.5504087", "text": "def test_ratio_difference(self):\n ingredients, _ = test_data()\n ratio_1 = create_ratio(ingredients, [1, 30, 100])\n ratio_2 = create_ratio(ingredients, [1, 60, 50])\n difference, differences = percentage_difference(ratio_1, ratio_2)\n self.assertAlmostEquals(0.496, difference, 2)\n 
self.assertAlmostEquals(differences[1][0], 0.81, 2)\n self.assertEquals(differences[1][1], EGG)\n self.assertAlmostEquals(differences[0][0], 0.17, 2)\n self.assertEquals(differences[0][1], FLOUR)", "title": "" }, { "docid": "5717f9991cdc67fd841732dd79d5a61a", "score": "0.5503754", "text": "def test_inner_comp_with_nsbj(elitmodels, elit_to_logic):\n sentence = 'I like when you walk'\n tok, pos, dp, cr = elitmodels(sentence)\n mentions, merges = elit_to_logic(tok, pos, dp)\n\n # assert len(mentions) == 4\n (i_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'i']\n (you_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'you']\n (like_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'like']\n (walk_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'walk']\n\n i_mg = mentions[i_sp]\n assert i_mg.has('user', 'center')\n assert i_mg.has('user', 'focus')\n\n like_mg = mentions[like_sp]\n like_preds = like_mg.predicates(predicate_type='like')\n assert len(like_preds) == 1\n ((s, t, o, i),) = like_preds\n assert o is not None\n assert like_mg.has(i, 'focus')\n assert like_mg.has(i, 'time', 'now')\n assert like_mg.has('like', 'center')\n\n walk_mg = mentions[walk_sp]\n walk_preds = walk_mg.predicates(predicate_type='walk')\n assert len(walk_preds) == 1\n ((s, t, o, i),) = walk_preds\n assert o is None\n assert walk_mg.has(i, 'focus')\n assert walk_mg.has('walk', 'center')\n\n you_mg = mentions[you_sp]\n assert you_mg.has('emora', 'center')\n assert you_mg.has('emora', 'focus')\n\n # assert len(merges) == 3\n assert ((like_sp, 'subject'), (i_sp, 'self')) in merges\n assert ((like_sp, 'object'), (walk_sp, 'self')) in merges\n assert ((walk_sp, 'subject'), (you_sp, 'self')) in merges", "title": "" }, { "docid": "e6b94cfc7a79268945306d591a3f5861", "score": "0.5490593", "text": "def compare_marginals(ad1, ad2, **kwargs):\n ad1.compare_marginals(ad=ad2, **kwargs)", "title": "" }, { "docid": "7f37fff470eaa76ca9d683a086d229e4", "score": "0.5467283", "text": "def compare_nested_dicts(first, second, epsilon=10E-6):\n\n assert first.keys() == second.keys()\n\n for key in first.keys():\n if isinstance(first[key], dict):\n compare_nested_dicts(first[key], second[key])\n\n elif isinstance(first[key], np.ndarray):\n assert (compare_values_epsilon(first[key], second[key])).all()\n\n elif isinstance(first[key], pd.DataFrame):\n assert first[key].equals(second[key])\n\n elif isinstance(first[key], float):\n assert compare_values_epsilon(first[key], second[key])\n\n elif isinstance(first[key], list):\n compare_nested_iterables(first[key], second[key])\n\n else:\n assert first[key] == second[key], \"{} doesn't equal {}\".format(first[key], second[key])", "title": "" }, { "docid": "45f23407d478d03f0fb25ef828b41dc8", "score": "0.545155", "text": "def _operator_test(self, model):\n\n # Ensure the two instances have the same dimension.\n if np.any(self.shape != model.shape):\n msg = (f\"Models could not be broadcast together with shapes \"\n f\"{self.shape} and {model.shape}.\")\n raise ValueError(msg)\n\n # Ensure the two instances have the same case.\n if self.case != model.case:\n msg = (\"Models must be of the same resistivity type but have types\"\n f\" '{self.case_names[self.case]}' and\"\n f\" '{model.case_names[model.case]}'.\")\n raise ValueError(msg)\n\n # Ensure both or none has mu_r:\n if hasattr(self.mu_r, 
'dtype') != hasattr(model.mu_r, 'dtype'):\n msg = (\"Either both or none of the models must have `mu_r` \"\n f\"defined; provided: '{hasattr(self.mu_r, 'dtype')}' \"\n f\"and '{hasattr(model.mu_r, 'dtype')}'.\")\n raise ValueError(msg)\n\n # Ensure both or none has epsilon_r:\n if (hasattr(self.epsilon_r, 'dtype') !=\n hasattr(model.epsilon_r, 'dtype')):\n msg = (\"Either both or none of the models must have `epsilon_r` \"\n f\"defined; provided: '{hasattr(self.epsilon_r, 'dtype')}' \"\n f\"and '{hasattr(model.epsilon_r, 'dtype')}'.\")\n raise ValueError(msg)\n\n return self._vol", "title": "" }, { "docid": "714779751f83bb14acf0efed9e78a306", "score": "0.54304636", "text": "def test_allclose(model_two, versions):\n\n for v in versions[1:]:\n print(v.__name__)\n _test_version(v, model_two, atol=5e-4)\n print(v.__name__, 'align 256')\n _test_version(v, model_two, atol=5e-4, align=256)", "title": "" }, { "docid": "3fa0c99dbdfabe8d3a258763cef4b8d5", "score": "0.5426498", "text": "def compare_simmilar(model, w):\n print(model.most_similar(w))", "title": "" }, { "docid": "f0750e78d96bb633685e989d0027b335", "score": "0.54215086", "text": "def test_compare_1view_vs_3view_model(set_up_models_optimizers_loss):\n set_up = set_up_models_optimizers_loss()\n assert compare_models_weight(set_up['model_1view'], set_up['model_3view']), \"model 1 view and 3 views weight does not match\"", "title": "" }, { "docid": "7d3c4339726ae63ade0a72139aed2e68", "score": "0.5413363", "text": "def compare_vmesh(v1, v2):\n equal = True\n if v1.param['nvrt'] != v2.param['nvrt']:\n equal = False\n print(\"The number of vertical layers are not equal between the two meshes\")\n\n if (np.abs((v1.kbps - v2.kbps) > 0)).any():\n equal = False\n print(\"kbps are not equal between the two meshes\")\n\n if (np.abs((v1.sigma - v2.sigma) > 0)).any():\n equal = False\n print(\"sigma are not equal between the two meshes\")\n\n if equal:\n print(\"the two meshes are equal\")", "title": "" }, { "docid": "6ec6ad5ad3eb33231bbbc68a28b9cae2", "score": "0.54097927", "text": "def get_probability_of_true_models(ml_models, dft_models, str_exps, str_dfts, str_mls, str_vars, method=\"dp4\"):\n\n n_models = len(ml_models)\n n_samples = [len(d[1]) for d in str_exps]\n\n # Get log numerator for the different models\n ml_num = {}\n dft_num = {}\n for mol in str_dfts[0]:\n ml_num[mol] = 0\n dft_num[mol] = 0\n for i in range(n_models):\n # Get likelihoods for the model that we assume is the true one\n if method == \"pdf\":\n ml_num[mol] += ml_models[i].get_log_likelihood(str_exps[i][1], str_mls[i][mol], str_vars[i][mol])\n dft_num[mol] += dft_models[i].get_log_likelihood(str_exps[i][1], str_dfts[i][mol])\n elif method == \"dp4\":\n ml_num[mol] += ml_models[i].get_log_dp4(str_exps[i][1], str_mls[i][mol], str_vars[i][mol])\n dft_num[mol] += dft_models[i].get_log_dp4(str_exps[i][1], str_dfts[i][mol])\n else:\n raise SystemExit(\"wrong method\")\n\n\n # Calculate probability of true model\n dft_denom = np.log(sum(np.exp(num) for num in dft_num.values()))\n ml_denom = np.log(sum(np.exp(num) for num in ml_num.values()))\n for mol1 in str_dfts[0]:\n dft_log_posterior = dft_num[mol1] - dft_denom\n dft_posterior = np.exp(dft_log_posterior)\n ml_log_posterior = ml_num[mol1] - ml_denom\n ml_posterior = np.exp(ml_log_posterior)\n print(mol1, dft_posterior, ml_posterior)", "title": "" }, { "docid": "b21a60bfb31a78cba1ce4a55712d2f51", "score": "0.54001266", "text": "def test_MLEProbDist():", "title": "" }, { "docid": "ef0e9cdff02139869a7ba47382e8377e", "score": 
"0.5396556", "text": "def testWrappers(self):\n l1 = []\n l1.append(lsst.meas.modelfit.MixtureComponent(1))\n l1.append(lsst.meas.modelfit.MixtureComponent(1))\n l1.append(lsst.meas.modelfit.MixtureComponent(1))\n l1[0].weight = 1.0\n l1[0].setMu(numpy.array([1.0], dtype=float))\n l1[0].setSigma(numpy.array([[4.0]], dtype=float))\n l1[1].weight = 0.5\n l1[2].weight = 0.5\n m1 = lsst.meas.modelfit.Mixture(1, l1)\n self.assertEqual(m1[0].weight, 0.5)\n self.assertEqual([0.5, 0.25, 0.25], [c.weight for c in m1])\n self.assertFloatsAlmostEqual(m1[0].getMu(), numpy.array([1.0], dtype=float))\n self.assertFloatsAlmostEqual(m1[0].getSigma(), numpy.array([4.0], dtype=float))\n self.assertFloatsAlmostEqual(m1.evaluate(m1[1], numpy.array([0.0], dtype=float)),\n m1[1].weight*(2.0*numpy.pi)**(-0.5))\n self.assertFloatsAlmostEqual(m1.evaluate(numpy.array([0.0], dtype=float)),\n (m1[0].weight*numpy.exp(-0.125)/2 + m1[1].weight + m1[2].weight)\n * (2.0*numpy.pi)**(-0.5))", "title": "" }, { "docid": "910126c3be210704bf0ad949b3c5e775", "score": "0.53957033", "text": "def test_noise_models_equal(self):\n roerror = [[0.9, 0.1], [0.5, 0.5]]\n error1 = pauli_error([['X', 1]], standard_gates=False)\n error2 = pauli_error([['X', 1]], standard_gates=True)\n\n model1 = NoiseModel()\n model1.add_all_qubit_quantum_error(error1, ['u3'], False)\n model1.add_quantum_error(error1, ['u3'], [2], False)\n model1.add_nonlocal_quantum_error(error1, ['cx'], [0, 1], [3], False)\n model1.add_all_qubit_readout_error(roerror, False)\n model1.add_readout_error(roerror, [0], False)\n\n model2 = NoiseModel()\n model2.add_all_qubit_quantum_error(error2, ['u3'], False)\n model2.add_quantum_error(error2, ['u3'], [2], False)\n model2.add_nonlocal_quantum_error(error2, ['cx'], [0, 1], [3], False)\n model2.add_all_qubit_readout_error(roerror, False)\n model2.add_readout_error(roerror, [0], False)\n self.assertEqual(model1, model2)", "title": "" }, { "docid": "f47630a2f2678446f63e8e691bf487f3", "score": "0.5391149", "text": "def test(self, loader, device):\n correct = 0\n total = 0\n ct = 0\n self.model.eval()\n with torch.no_grad():\n for inputs, labels in loader:\n inputs, labels = inputs.to(device), labels.to(device)\n outputs = self.model(inputs)\n predicted = torch.argmax(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n return correct / total", "title": "" }, { "docid": "0bdee48689469404dd7f63031547b7e3", "score": "0.53830594", "text": "def test():\n source1 = TextModel('source1')\n source1.add_string('It is interesting that she is interested.')\n\n source2 = TextModel('source2')\n source2.add_string('I am very, very excited about this!')\n\n mystery = TextModel('mystery')\n mystery.add_string('Is he interested? 
No, but I am.')\n mystery.classify(source1, source2)\n print(mystery.similarity_scores(source1)) \n print(compare_dictionaries(source1.word_lengths, mystery.word_lengths))", "title": "" }, { "docid": "5e5854ea4812763d5f5723c21dcda314", "score": "0.53565353", "text": "def test_kld():\n # Test raw inputs\n a = [1,1,2]\n b = [1,2,2]\n\n # Analytically computed result\n analytical = 0.23105\n assert analytical-analytical*.1 <= kl_divergence(a,b) <= analytical+analytical*.1\n\n # Test probability inputs\n a = {1:2, 2:1}\n b = {1:1, 2:2}\n assert analytical-analytical*.1 <= kl_divergence(a,b) <= analytical+analytical*.1", "title": "" }, { "docid": "213ca44b7b5291e411ff04a3e863daa8", "score": "0.5356273", "text": "def test_ratio_difference_zero(self):\n ingredients, _ = test_data()\n ratio_1 = create_ratio(ingredients, [1, 2, 3])\n ratio_2 = create_ratio(ingredients, [1, 2, 3])\n difference, _ = percentage_difference(ratio_1, ratio_2)\n self.assertEquals(0, difference)", "title": "" }, { "docid": "09e01f9af7eeb6d43985368945df19d0", "score": "0.5344245", "text": "def hallucinate_merge(self, other):\n start_time = time.time()\n ap = AttributeProjection()\n pw_score,_,_ = self.best_pairwise(other)\n ap.update(self.ment.attributes, self.torch_model.sub_ent_model)\n ap.update(other.ment.attributes, self.torch_model.sub_ent_model)\n\n num_ms = self.num_pts + other.num_pts\n if 'tes' in ap.aproj_sum:\n ap.aproj_sum['tea'] = ap['tes'] / num_ms\n\n ap.aproj_local['my_pw'] = pw_score\n ap.aproj_local['new_edges'] = self.num_pts * other.num_pts\n\n self_entity_score = 1.0\n other_entity_score = 1.0\n\n if self.num_pts > 1 and 'es' in self.ment.attributes.aproj_local:\n self_entity_score = self.ment.attributes.aproj_local['es']\n if self.config.expit_e_score:\n self_entity_score = expit(self_entity_score)\n else:\n assert self.num_pts == 1\n if other.num_pts > 1 and 'es' in other.ment.attributes.aproj_local:\n other_entity_score = other.ment.attributes.aproj_local['es']\n if self.config.expit_e_score:\n other_entity_score = expit(other_entity_score)\n else:\n assert other.num_pts == 1\n\n if self_entity_score >= other_entity_score:\n ap.aproj_local['child_e_max'] = self_entity_score\n ap.aproj_local['child_e_min'] = other_entity_score\n else:\n ap.aproj_local['child_e_max'] = other_entity_score\n ap.aproj_local['child_e_min'] = self_entity_score\n if self.config.expit_e_score:\n assert ap.aproj_local['child_e_max'] <= 1.0\n assert ap.aproj_local['child_e_min'] <= 1.0\n assert ap.aproj_local['child_e_max'] >= -0.0\n assert ap.aproj_local['child_e_min'] >= -0.0\n end_time = time.time()\n new_score = self.torch_model.e_score(ap).data.numpy()[0]\n new_node = ACorefModel(self.config,self.torch_model,Ment(ap,None),None,self.num_pts+other.num_pts,self.pair_to_pw)\n new_node.ment.attributes.aproj_local['es'] = new_score\n return new_node", "title": "" }, { "docid": "a4178e18b59382aa8812abc9d4e305f4", "score": "0.53405094", "text": "def compare_across(args):\n results = load_results(args)\n results_directories = list(results[\"y_test_outer_folds\"].keys())\n\n # The first directory is baseline\n baseline_dir = results_directories[0]\n\n # Iterate over results directories, get models that appear in every results\n # directory as a set, and cast set back to list\n model_names = set()\n for results_dir in results_directories:\n model_names_this = results[\"y_hat_test_outer_folds\"][results_dir]\n model_names.update(set(model_names_this))\n model_names = list(model_names)\n\n # Get true labels from baseline directory\n 
y_baseline = results[\"y_test_outer_folds\"][baseline_dir]\n\n # Iterate over model names\n for model_name in model_names:\n\n # Get labels and estimates values for baseline model\n y_hat_baseline = results[\"y_hat_test_outer_folds\"][baseline_dir][model_name]\n y_hat_baseline_calibrated = results[\"y_hat_test_calibrated_outer_folds\"][\n baseline_dir\n ][model_name]\n\n # Bootstrap sample labels and estimates for baseline directory and this model\n (\n y_bootstrap_baseline,\n y_hat_baseline_bootstrap,\n y_hat_baseline_bootstrap_calibrated,\n ) = generate_bootstrap_distributions(\n seed=args.seed,\n y=y_baseline,\n y_hat=y_hat_baseline,\n y_hat_calibrated=y_hat_baseline_calibrated,\n bootstrap_samplings=args.bootstrap_samplings,\n )\n\n # Iterate over results directories\n for results_dir in results_directories:\n\n # Isolate data in simpler variable names\n y_compare = results[\"y_test_outer_folds\"][results_dir]\n y_hat_compare = results[\"y_hat_test_outer_folds\"][results_dir][model_name]\n y_hat_compare_calibrated = results[\"y_hat_test_calibrated_outer_folds\"][\n results_dir\n ][model_name]\n\n (\n y_bootstrap_compare,\n y_hat_compare_bootstrap,\n y_hat_compare_bootstrap_calibrated,\n ) = generate_bootstrap_distributions(\n seed=args.seed,\n y=y_compare,\n y_hat=y_hat_compare,\n y_hat_calibrated=y_hat_compare_calibrated,\n bootstrap_samplings=args.bootstrap_samplings,\n )\n\n metrics_bootstrap = compare_bootstrap_metrics(\n args=args,\n y_baseline=y_bootstrap_baseline,\n y_compare=y_bootstrap_compare,\n y_hat_baseline=y_hat_baseline_bootstrap,\n y_hat_baseline_calibrated=y_hat_baseline_bootstrap_calibrated,\n y_hat_compare=y_hat_compare_bootstrap,\n y_hat_compare_calibrated=y_hat_compare_bootstrap_calibrated,\n prefix_str=model_name,\n model_name_baseline=baseline_dir,\n model_name_compare=results_dir,\n )\n\n # Convert metrics dictionary into dataframe, and round\n df_metrics = format_bootstrap_metrics_to_dataframe(\n metrics=metrics_bootstrap,\n decimals=args.decimals,\n )\n\n logging.info(\n f\"metrics of {model_name} comparing {baseline_dir} vs {results_dir}\",\n )\n log_dataframe(\n df=df_metrics,\n format_scientific=False,\n )\n\n # Save metrics dataframe to CSV\n save_dataframe_to_csv(\n args=args,\n df=df_metrics,\n fname=f\"metrics_compare_across_{model_name}_{baseline_dir}_vs_{results_dir}.csv\",\n keep_index=True,\n )", "title": "" }, { "docid": "4279bf395354f9031fb33d82940c1862", "score": "0.53184474", "text": "def assert_cv_equal(results_1, results_2):\n\n for field_name in ['metrics', 'n_samples']:\n nose.tools.eq_([r['train'][field_name] for r in results_1],\n [r['train'][field_name] for r in results_2])\n nose.tools.eq_([r['test'][field_name] for r in results_1],\n [r['test'][field_name] for r in results_2])\n\n nose.tools.eq_([tuple(r['approach']['selected_features']) for r in results_1],\n [tuple(r['approach']['selected_features']) for r in results_2])", "title": "" }, { "docid": "f37d127aa46143f92668413b9e3579ee", "score": "0.5318367", "text": "def compare_dictionaries(d1, d2):\n \n score = 0\n total = 0\n \n for i in d1:\n total += d1[i]\n \n for i in d2:\n if i in d1:\n score += d2[i]*(math.log(d1[i]/total))\n else:\n score += math.log(0.5/total)\n \n return score", "title": "" }, { "docid": "aa2c86e5c6fda16761ae7b28fa889953", "score": "0.53118604", "text": "def sup_test_exp(model, dataloaders):\n model.eval()\n\n total = 0\n correct_1 = 0\n correct_2 = 0\n\n with torch.no_grad():\n for (inputs, labels) in dataloaders['sup_test']:\n inputs = inputs.cuda()\n 
labels = labels.cuda()\n\n out_1, out_2 = model(inputs)\n _, pred_1 = torch.max(out_1.data, 1)\n _, pred_2 = torch.max(out_2.data, 1)\n total += labels.size(0)\n correct_1 += (pred_1 == labels).sum().item()\n correct_2 += (pred_2 == labels).sum().item()\n \n return (100 * correct_1 / total), (100 * correct_2 / total)", "title": "" }, { "docid": "f74362f873739d0b46881056707a27b5", "score": "0.5309062", "text": "def test_model_loss(self):\n self.optimizer.zero_grad()\n composed, rep_1, rep_2 = self.model(self.input_1)\n loss_1 = loss_functions.get_loss_cosine_distance(original_phrase=self.input_1[\"l\"], composed_phrase=rep_1,\n dim=1, normalize=False).item()\n composed, rep_1, rep_2 = self.model(self.input_2)\n loss_2 = loss_functions.get_loss_cosine_distance(original_phrase=self.input_1[\"l\"], composed_phrase=rep_2,\n dim=1, normalize=False).item()\n np.testing.assert_equal(math.isnan(loss_1), False)\n np.testing.assert_equal(math.isnan(loss_2), False)\n\n np.testing.assert_equal(loss_1 >= 0, True)\n np.testing.assert_equal(loss_2 >= 0, True)", "title": "" }, { "docid": "0feceba1f43daabb8085a86992adaab6", "score": "0.5291239", "text": "def test_model_blend(self):\n expected_data = np.array([[[0.8]], [[0.4]], [[0]]], dtype=np.float32)\n result = self.plugin_model.process(\n [self.ukv_cube, self.enukx_cube],\n model_id_attr=\"mosg__model_configuration\",\n record_run_attr=\"mosg__model_run\",\n cycletime=self.cycletime,\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertEqual(\n result.attributes[\"mosg__model_configuration\"], \"uk_det uk_ens\"\n )\n self.assertEqual(\n result.attributes[\"mosg__model_run\"],\n \"uk_det:20180910T0300Z:0.500\\nuk_ens:20180910T0300Z:0.500\",\n )\n result_coords = [coord.name() for coord in result.coords()]\n self.assertNotIn(\"model_id\", result_coords)\n self.assertNotIn(\"model_configuration\", result_coords)\n for coord in [\"forecast_reference_time\", \"forecast_period\"]:\n self.assertIn(\"deprecation_message\", result.coord(coord).attributes)\n self.assertIn(\n \"will be removed\", result.coord(coord).attributes[\"deprecation_message\"]\n )", "title": "" }, { "docid": "86204d2f75d64d6287ccc11a21dda6d3", "score": "0.5290807", "text": "def test_align_models():\n\n temp = np.arange((15), dtype=np.float32).reshape((3, 5))\n targ = np.zeros((3, 3, 5))\n targ[:] = temp\n targ[0, 1, 1] += 0.3\n targ[0, 2, 1] += 0.7\n targ[0, 0, 3] -= 0.5\n targ[0, 1, 2] -= 1.3\n targ[1, 0, 1] += 0.7\n targ[1, 2, 1] += 0.2\n targ[1, 2, 3] += 0.8\n targ[1, 2, 2] -= 1.8\n targ[2, 1, 1] += 0.9\n targ[2, 2, 2] -= 0.5\n targ[2, 1, 0] += 0.8\n targ[2, 1, 2] += 0.8\n\n ref = targ.copy()\n ref[1, 2, 3] -= 5.0\n ref[2, 0, 3] -= 1.6\n\n mask = ref[0, :, :]\n mask = ref[0, :, :] * 0 + 1\n mask[1, 1] = 0\n mask[1, 2] = 0\n\n targ_mod = datamodels.CubeModel(data=targ)\n mask_mod = datamodels.ImageModel(data=mask)\n ref_mod = datamodels.CubeModel(data=ref)\n\n am_results = imageregistration.align_models(ref_mod, targ_mod, mask_mod)\n results_sub = am_results.data[:3, :2, 2, :3]\n\n truth_results_sub = np.array(\n [\n [[10.0, 11.7, 12.0], [10.036278, 11.138131, 10.180669]],\n [[10.053974, 11.1953335, 11.993213], [10.36224, 10.805556, 10.274276]],\n [[9.988604, 11.33026, 11.968155], [10.024722, 10.971058, 10.108071]],\n ]\n )\n\n npt.assert_allclose(results_sub, truth_results_sub, atol=1e-6)", "title": "" }, { "docid": "ed7afa063f613eefd503be1995682704", "score": "0.52905893", "text": "def check_probability_matching_few_models():\n\n repetitions_per_model = 10000\n 
prob_models = [0.1, 0.9]\n models = [[0, 1, 1], [1, 1, 0]]\n\n for model, p_model in zip(models, prob_models):\n model = np.tile(model, (repetitions_per_model, 1))\n judgs = np.random.binomial(n=1, p=p_model, size=(repetitions_per_model, 1))\n agent = pop.NetworkAgent(3)\n agent.learn(model, judgs)\n print(np.column_stack((prob_models, agent.produce(models))))", "title": "" }, { "docid": "17f87b8fd9044d83d5f1102985944f98", "score": "0.52876896", "text": "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n for i in d1:\n total += d1[i]\n for j in d2:\n if j in d1:\n score += d2[j] * math.log(d1[j]/total)\n else:\n score += d2[j] * math.log(.5/total)\n return score", "title": "" }, { "docid": "1376ab5f274d17bd7c0ac13065d4c984", "score": "0.5279671", "text": "def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, rpe_comp.RPEBenchmarkComparison) or\n not isinstance(benchmark2, rpe_comp.RPEBenchmarkComparison)):\n self.fail('object was not a RPEBenchmarkComparison')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1.offset, benchmark2.offset)\n self.assertEqual(benchmark1.max_difference, benchmark2.max_difference)", "title": "" }, { "docid": "c8ca830912203fd37c69c745fc29f205", "score": "0.52777815", "text": "def likelihood_ratio(pf, data, **kw):\n # Store the initial values of the parameters\n params = pf.get_list_free_params()\n popt = [None] * len(params)\n for ipar,par in enumerate(params):\n popt[ipar] = par.value\n\n result = Result()\n\n # Store inputs\n result.pf = pf\n result.data = data\n result.mode = kw.get('mode','asymptotic')\n result.t_pf = kw.get('t_pt',None)\n\n # Probability function of the test statistics\n if result.t_pf != None: # Specified as input\n result.mode = 'input'\n elif result.mode == 'asymptotic': # Wilks approximation\n df = [0, 0]\n for i,hypo in enumerate(['h0','h1']):\n if hypo in kw:\n for key in kw[hypo]:\n if type(kw[hypo][key]) == list: continue\n df[i] += 1\n elif hypo == 'h0':\n for par in params:\n if par.poi: df[i] += 1\n result.t_pf = sp.PF(\"chi2(t|df)\", df=df[0]-df[1])\n else: # Generate a toy MC sample (very slow...)\n result.ntoys = kw.get('ntoys', 10000)\n logger.info('Number of toys = %d', result.ntoys)\n if isinstance(data, np.ndarray):\n nevts = data.shape[0]\n else:\n nevts = 1\n t_exp = np.empty(result.ntoys)\n def get_t_exp():\n for iexp in range(result.ntoys):\n if result.ntoys > 20 and iexp % (result.ntoys / 20) == 0:\n logger.info('Pseudo exp %d / %d', iexp, result.ntoys)\n for ipar,par in enumerate(params):\n par.value = popt[ipar]\n pseudo_exp = pf.rvs(size=nevts)\n yield pf.pllr(pseudo_exp, **kw)\n for i, el in enumerate(get_t_exp()): t_exp[i] = el\n result.t_pf = sp.PF(hist=np.histogram(t_exp, bins=200))\n\n # Compute the observed value of the test statistics\n result.t_obs = pf.pllr(data, **kw)\n\n # Extract the pvalue and the Zvalue\n result.Zvalue = math.sqrt(result.t_obs)\n result.pvalue = result.t_pf.pvalue(result.t_obs)\n logger.info('PLLR test: pvalue = %f, Zvalue = %f' % (result.pvalue, \n result.Zvalue))\n\n # Restore the initial values of the parameters\n for ipar,par in enumerate(params):\n par.value = popt[ipar]\n\n return result", "title": "" }, { "docid": "f576120363b144e4390d2100a825e4a9", "score": "0.5273205", "text": "def test_eq_method_evaluates_correctly():\n\n # create sets of images\n u1 = np.random.rand(50, 50)\n v1 = np.random.rand(50, 50)\n u2 = np.random.rand(50, 50)\n v2 = np.random.rand(50, 50)\n u3 = np.random.rand(10, 
10)\n v3 = np.random.rand(10, 10)\n dp1 = dense_predictor.DensePredictor(u1, v1)\n dp1_copy = dense_predictor.DensePredictor(u1, v1)\n dp2 = dense_predictor.DensePredictor(u2, v2)\n dp3 = dense_predictor.DensePredictor(u3, v3)\n\n # check dp1 and dp1_copy return equal\n assert dp1 == dp1_copy\n assert not dp1 == dp2\n assert not dp2 == dp3\n\n # check that NotImplemented is raised if compared to another object\n assert dp1.__eq__(4) == NotImplemented", "title": "" }, { "docid": "4b120daeab31c6dffc4a790a4d481c88", "score": "0.52702284", "text": "def log_likelihood_ratio_test(self):\n if hasattr(self, \"_log_likelihood_null\"):\n ll_null = self._log_likelihood_null\n\n else:\n trivial_dataset = self.start_stop_and_events\n trivial_dataset = trivial_dataset.join(self.weights)\n trivial_dataset = trivial_dataset.reset_index()\n ll_null = (\n CoxTimeVaryingFitter()\n .fit(\n trivial_dataset,\n start_col=self.start_col,\n stop_col=self.stop_col,\n event_col=self.event_col,\n id_col=self.id_col,\n weights_col=\"__weights\",\n strata=self.strata,\n )\n .log_likelihood_\n )\n\n ll_alt = self.log_likelihood_\n test_stat = 2 * (ll_alt - ll_null)\n degrees_freedom = self.params_.shape[0]\n p_value = _chisq_test_p_value(test_stat, degrees_freedom=degrees_freedom)\n return StatisticalResult(\n p_value, test_stat, name=\"log-likelihood ratio test\", degrees_freedom=degrees_freedom, null_distribution=\"chi squared\"\n )", "title": "" }, { "docid": "a985c79954c627a27c4c799e9ab59aa2", "score": "0.5265926", "text": "def compare(keras_model, hls_model, X, plot_type=\"dist_diff\"):\n\n # Take in output from both models\n # Note that each y is a dictionary with structure {\"layer_name\": flattened ouput array}\n ymodel = get_ymodel_keras(keras_model, X)\n _, ysim = hls_model.trace(X)\n\n print(\"Plotting difference...\")\n f = plt.figure()\n if plot_type == \"norm_diff\":\n f = _norm_diff(ymodel, ysim)\n elif plot_type == \"dist_diff\":\n f = _dist_diff(ymodel, ysim)\n\n return f", "title": "" }, { "docid": "2d2248ed1568811192fe7d5ab861892b", "score": "0.5260995", "text": "def test_ConditionalProbDist():", "title": "" }, { "docid": "f3f481166a1c605ad138ed92d58aac4d", "score": "0.5260669", "text": "def perturb_nospec_modelsBased(evals, evecs, original_KL, refs, models_ref_list):\n\n max_basis = original_KL.shape[0]\n N_wv,N_ref,N_pix = models_ref_list.shape\n\n refs_mean_sub = refs - np.nanmean(refs, axis=1)[:, None]\n refs_mean_sub[np.where(np.isnan(refs_mean_sub))] = 0\n\n # perturbed KL modes\n delta_KL_nospec = np.zeros([max_basis, N_wv, N_pix]) # (numKL,N_ref,N_pix)\n\n for k,models_ref in enumerate(models_ref_list):\n models_mean_sub = models_ref # - np.nanmean(models_ref, axis=1)[:,None] should this be the case?\n models_mean_sub[np.where(np.isnan(models_mean_sub))] = 0\n\n evals_tiled = np.tile(evals,(N_ref,1))\n np.fill_diagonal(evals_tiled,np.nan)\n evals_sqrt = np.sqrt(evals)\n evalse_inv_sqrt = 1./evals_sqrt\n evals_ratio = (evalse_inv_sqrt[:,None]).dot(evals_sqrt[None,:])\n beta_tmp = 1./(evals_tiled.transpose()- evals_tiled)\n beta_tmp[np.diag_indices(N_ref)] = -0.5/evals\n beta = evals_ratio*beta_tmp\n\n C = models_mean_sub.dot(refs.transpose())+refs.dot(models_mean_sub.transpose())\n alpha = (evecs.transpose()).dot(C).dot(evecs)\n\n delta_KL = (beta*alpha).dot(original_KL)+(evalse_inv_sqrt[:,None]*evecs.transpose()).dot(models_mean_sub)\n delta_KL_nospec[:,k,:] = delta_KL[:,:]\n\n\n return delta_KL_nospec", "title": "" }, { "docid": "02dc3bfb2b3af8d20f1b647454ddb9f2", "score": "0.52534634", 
"text": "def compare_within(args):\n results = load_results(args)\n results_dir = os.path.split(args.path_to_results[0])[-1]\n model_names = list(results[\"y_hat_test_outer_folds\"][results_dir].keys())\n\n # Define string of baseline model\n model_name_baseline = \"logreg\"\n\n # Iterate over model names\n for model_name in model_names:\n\n # Isolate data in simpler variable names\n y = results[\"y_test_outer_folds\"][results_dir]\n y_hat_compare = results[\"y_hat_test_outer_folds\"][results_dir][model_name]\n y_hat_compare_calibrated = results[\"y_hat_test_calibrated_outer_folds\"][\n results_dir\n ][model_name]\n\n y_hat_baseline = results[\"y_hat_test_outer_folds\"][results_dir][\n model_name_baseline\n ]\n y_hat_baseline_calibrated = results[\"y_hat_test_calibrated_outer_folds\"][\n results_dir\n ][model_name_baseline]\n\n # Generate bootstrap distributions: baseline\n (\n y_bootstrap_baseline,\n y_hat_baseline_bootstrap,\n y_hat_baseline_bootstrap_calibrated,\n ) = generate_bootstrap_distributions(\n seed=args.seed,\n y=y,\n y_hat=y_hat_baseline,\n y_hat_calibrated=y_hat_baseline_calibrated,\n bootstrap_samplings=args.bootstrap_samplings,\n )\n\n # Generate bootstrap distributions: model to compare\n (\n y_bootstrap_compare,\n y_hat_compare_bootstrap,\n y_hat_compare_bootstrap_calibrated,\n ) = generate_bootstrap_distributions(\n seed=args.seed,\n y=y,\n y_hat=y_hat_compare,\n y_hat_calibrated=y_hat_compare_calibrated,\n bootstrap_samplings=args.bootstrap_samplings,\n )\n\n metrics_bootstrap = compare_bootstrap_metrics(\n args=args,\n y_baseline=y_bootstrap_baseline,\n y_compare=y_bootstrap_compare,\n y_hat_baseline=y_hat_baseline_bootstrap,\n y_hat_baseline_calibrated=y_hat_baseline_bootstrap_calibrated,\n y_hat_compare=y_hat_compare_bootstrap,\n y_hat_compare_calibrated=y_hat_compare_bootstrap_calibrated,\n prefix_str=\"\",\n model_name_baseline=model_name_baseline,\n model_name_compare=model_name,\n )\n\n # Convert metrics dictionary into dataframe, and round\n df_metrics = format_bootstrap_metrics_to_dataframe(\n metrics=metrics_bootstrap,\n decimals=args.decimals,\n )\n\n logging.info(\n f\"metrics from {results_dir} comparing {model_name_baseline} vs {model_name}\",\n )\n log_dataframe(\n df=df_metrics,\n format_scientific=False,\n )\n\n # Save metrics dataframe to CSV\n save_dataframe_to_csv(\n args=args,\n df=df_metrics,\n fname=f\"metrics_compare_within_{model_name_baseline}_vs_{model_name}.csv\",\n keep_index=True,\n )", "title": "" }, { "docid": "2d1154291907874ea1cd1632097ed9ee", "score": "0.52446777", "text": "def test_inner_comp_with_nsbj_obj(elitmodels, elit_to_logic):\n sentence = 'I like when you buy clothes'\n tok, pos, dp, cr = elitmodels(sentence)\n mentions, merges = elit_to_logic(tok, pos, dp)\n\n # assert len(mentions) == 5\n (i_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'i']\n (like_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'like']\n (you_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'you']\n (buy_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'buy']\n (clothes_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'clothes']\n\n i_mg = mentions[i_sp]\n assert i_mg.has('user', 'center')\n assert i_mg.has('user', 'focus')\n\n you_mg = mentions[you_sp]\n assert you_mg.has('emora', 'center')\n assert 
you_mg.has('emora', 'focus')\n\n like_mg = mentions[like_sp]\n like_preds = like_mg.predicates(predicate_type='like')\n assert len(like_preds) == 1\n ((s, t, o, i),) = like_preds\n assert o is not None\n assert like_mg.has(i, 'focus')\n assert like_mg.has(i, 'time', 'now')\n assert like_mg.has('like', 'center')\n\n buy_mg = mentions[buy_sp]\n buy_preds = buy_mg.predicates(predicate_type='buy')\n assert len(buy_preds) == 1\n ((s, t, o, i),) = buy_preds\n assert o is not None\n assert buy_mg.has(i, 'focus')\n assert buy_mg.has('buy', 'center')\n\n # todo - clothes should be instantiated!\n clothes_mg = mentions[clothes_sp]\n assert clothes_mg.has('clothing', 'center')\n assert clothes_mg.has('clothing', 'focus')\n\n # assert len(merges) == 4\n assert ((like_sp, 'subject'), (i_sp, 'self')) in merges\n assert ((like_sp, 'object'), (buy_sp, 'self')) in merges\n assert ((buy_sp, 'subject'), (you_sp, 'self')) in merges\n assert ((buy_sp, 'object'), (clothes_sp, 'self')) in merges", "title": "" }, { "docid": "781574587d49bb8deee9536bae49b1e7", "score": "0.5232548", "text": "def test_likeChanges(self):\n # self.sf_parameters1 is circle 1 sf parameters\n sf_params1 = self.sf_parameters1[:]\n sf_params1[5] = np.arctan(sf_params1[5]) # convert from slope to phi\n sf_params1[2] = 7.0 # change tau\n sf_params2 = self.sf_parameters1[:]\n sf_params2[5] = np.arctan(sf_params2[5]) # convert from slope to phi\n sf_params2[3] = 3.0 # change t_start\n sf_params4 = self.sf_parameters1[:]\n sf_params4[5] = np.arctan(15.0) # change sf_slope and convert to phi\n\n assert calculateAge.lnlike(self.sf_parameters1, self.SED, self.SED_err, self.redshift, self.sp) >calculateAge.lnlike(sf_params1, self.SED, self.SED_err, self.redshift, self.sp), \"Changing tau should lower likelihood\"\n assert calculateAge.lnlike(self.sf_parameters1, self.SED, self.SED_err, self.redshift, self.sp) > calculateAge.lnlike(sf_params2, self.SED, self.SED_err, self.redshift, self.sp), \"Changing t_start should lower likelihood\"\n assert calculateAge.lnlike(self.sf_parameters1, self.SED, self.SED_err, self.redshift, self.sp) > calculateAge.lnlike(sf_params4, self.SED, self.SED_err, self.redshift, self.sp), \"Changing sf_slope should lower likelihood\"\n assert calculateAge.lnlike(self.sf_parameters1, self.SED, self.SED_err, self.redshift, self.sp) > calculateAge.lnlike(self.sf_parameters3, self.SED, self.SED_err, self.redshift, self.sp), \"Correct SF parameters should be more likely than another set\"", "title": "" }, { "docid": "7857128f2270084b0ba5f2d15c565904", "score": "0.52254367", "text": "def test_overload_div_operator_sums_correctly():\n\n u1 = np.arange(1, 82).reshape((9, 9))\n u2 = np.arange(101, 182).reshape((9, 9))\n u3 = np.arange(201, 282).reshape((9, 9))\n\n dp1 = dense_predictor.DensePredictor(u1, u2)\n dp2 = dense_predictor.DensePredictor(u2, u3)\n dp3 = dp1 / dp2\n assert np.allclose(dp3.u, u1 / u2)\n assert np.allclose(dp3.v, u2 / u3)", "title": "" }, { "docid": "ae20f557076aaee85ffe4da9d3efdd93", "score": "0.52227527", "text": "def fidelityMPS(A,B):\n return innerProductOBC(A,B)*innerProductOBC(B,A)\\\n /innerProductOBC(A,A)/innerProductOBC(B,B)", "title": "" }, { "docid": "93a384cf312d6a3c574bf8307581d554", "score": "0.52198535", "text": "def show_model_perf(correct_lst, model_lst):\n assert(len(correct_lst) == len(model_lst))\n \n trues, falses = 0, 0\n for cur_pred in range(len(correct_lst)):\n if model_lst[cur_pred] == correct_lst[cur_pred]:\n trues += 1\n else: \n falses += 1\n total = trues + falses\n\n if VERB: \n 
print('right predictions: %d, wrong predictions: %d' % (trues, falses))\n print('right percentage: %f, wrong percentage: %f' % (trues/total, falses/total))\n\n return trues/total", "title": "" }, { "docid": "72e7c4bcde80d2b29de92a217305e871", "score": "0.52115595", "text": "def construct_difference_model(model_1, model_2, norm_type='euclidean'):\n #Get index mappings\n common_dict = {}\n #Using copies of the models so things are modified above\n combined_model = model_1 = model_1.copy()\n model_2 = model_2.copy()\n for reaction_1 in model_1.reactions:\n try:\n reaction_2 = model_2.reactions.get_by_id(reaction_1.id)\n common_dict[reaction_1] = reaction_2\n except:\n continue\n \n #Add a prefix in front of the mutant_model metabolites and reactions to prevent\n #name collisions in DictList\n for the_dict_list in [model_2.metabolites,\n model_2.reactions]:\n [setattr(x, 'id', 'mutant_%s'%x.id)\n for x in the_dict_list]\n the_dict_list._generate_index() #Update the DictList.dicts\n\n \n combined_model.add_reactions(model_2.reactions)\n [setattr(x, 'objective_coefficient', 0.)\n for x in combined_model.reactions]\n #Add in the difference reactions. The mutant reactions and metabolites are already added.\n #This must be a list to maintain the correct order when adding the difference_metabolites\n difference_reactions = [] #Add the difference reactions at the end to speed things up\n difference_metabolites = []\n for reaction_1, reaction_2 in iteritems(common_dict):\n reaction_1._difference_partner = reaction_2\n reaction_2._difference_partner = reaction_1\n difference_reaction = Reaction('difference_%s'%reaction_1.id)\n difference_reactions.append(difference_reaction)\n difference_reaction.upper_bound = 100000\n difference_reaction.lower_bound = -1* difference_reaction.upper_bound\n difference_metabolite = Metabolite('difference_%s'%reaction_1.id)\n difference_metabolites.append(difference_metabolite)\n if norm_type == 'linear':\n difference_metabolite._constraint_sense = 'G'\n reaction_1.add_metabolites({difference_metabolite: -1.}, add_to_container_model=False)\n reaction_2.add_metabolites({difference_metabolite: 1.}, add_to_container_model=False)\n difference_reaction.add_metabolites({difference_metabolite: 1.}, add_to_container_model=False)\n\n combined_model.add_metabolites(difference_metabolites)\n combined_model.add_reactions(difference_reactions)\n return(combined_model)", "title": "" }, { "docid": "d342a705ac68ebf1b293376b5fa19c80", "score": "0.5203969", "text": "def test_inner_comp_with_obj(elitmodels, elit_to_logic):\n sentence = 'I like to buy clothes'\n tok, pos, dp, cr = elitmodels(sentence)\n mentions, merges = elit_to_logic(tok, pos, dp)\n\n # assert len(mentions) == 4\n (i_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'i']\n (like_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'like']\n (buy_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'buy']\n (clothes_sp,) = [span for span, mgraph in mentions.items() if mgraph.features[span][\"span_data\"].string == 'clothes']\n\n i_mg = mentions[i_sp]\n assert i_mg.has('user', 'center')\n assert i_mg.has('user', 'focus')\n\n like_mg = mentions[like_sp]\n like_preds = like_mg.predicates(predicate_type='like')\n assert len(like_preds) == 1\n ((s, t, o, i),) = like_preds\n assert o is not None\n assert like_mg.has(i, 'focus')\n assert like_mg.has(i, 'time', 'now')\n assert 
like_mg.has('like', 'center')\n\n buy_mg = mentions[buy_sp]\n buy_preds = buy_mg.predicates(predicate_type='buy')\n assert len(buy_preds) == 1\n ((s, t, o, i),) = buy_preds\n assert o is not None\n assert buy_mg.has(i, 'focus')\n assert buy_mg.has('buy', 'center')\n\n clothes_mg = mentions[clothes_sp]\n assert clothes_mg.has('clothing', 'center')\n assert clothes_mg.has('clothing', 'focus')\n\n # assert len(merges) == 4\n assert ((like_sp, 'subject'), (i_sp, 'self')) in merges\n assert ((like_sp, 'object'), (buy_sp, 'self')) in merges\n assert ((buy_sp, 'subject'), (i_sp, 'self')) in merges\n assert ((buy_sp, 'object'), (clothes_sp, 'self')) in merges", "title": "" }, { "docid": "31a5718ae0717f7371deb8b7f125fd34", "score": "0.5194077", "text": "def test_nested3(self):\n model = NestedModel().eval()\n custum_options = {\n 'dtype': torch.quint8,\n 'qscheme': torch.per_tensor_affine\n }\n custom_qconfig = QConfig(weight=default_weight_observer(),\n activation=default_observer(**custum_options))\n qconfig_dict = {\n 'fc3': default_qconfig,\n 'sub2': default_qconfig,\n 'sub2.fc1': custom_qconfig\n }\n model = prepare(model, qconfig_dict)\n\n def checkPrepModules(model, before_calib=False):\n if before_calib:\n self.checkObservers(model)\n self.checkNoPrepModules(model)\n self.checkNoPrepModules(model.sub1)\n self.checkNoPrepModules(model.sub1.fc)\n self.checkNoPrepModules(model.sub1.relu)\n self.checkNoPrepModules(model.sub2)\n self.checkHasPrepModules(model.sub2.fc1)\n self.checkHasPrepModules(model.sub2.fc2)\n self.checkHasPrepModules(model.fc3)\n\n checkPrepModules(model, True)\n\n test_only_eval_fn(model, self.calib_data)\n convert(model)\n\n def checkQuantized(model):\n checkPrepModules(model)\n self.checkQuantizedLinear(model.sub2.fc1)\n self.checkQuantizedLinear(model.sub2.fc2)\n self.checkQuantizedLinear(model.fc3)\n test_only_eval_fn(model, self.calib_data)\n\n checkQuantized(model)\n\n # test one line API\n model = quantize(NestedModel().eval(), test_only_eval_fn, self.calib_data, qconfig_dict)\n checkQuantized(model)", "title": "" }, { "docid": "86ed4d74677cb019758e6e524e848713", "score": "0.51915216", "text": "def compare_models(a, b):\n if (a and not b) or (not a and b):\n return False\n if a and b:\n return a.key() == b.key\n if not a and not b:\n return True", "title": "" }, { "docid": "dc3cf6ecd8461329f336f3aeb86edf9d", "score": "0.518445", "text": "def oov(self: object) -> float :\n mcs_test = [mc for (mc, tc) in self.mcs_test]\n mcs_train = [mc for (mc, tc) in self.mcs_train]\n pst, tot = zip(*((int(mct in mcs_train), 1) for mct in mcs_test))\n\n return sum(pst) / sum(tot)", "title": "" }, { "docid": "dc3cf6ecd8461329f336f3aeb86edf9d", "score": "0.518445", "text": "def oov(self: object) -> float :\n mcs_test = [mc for (mc, tc) in self.mcs_test]\n mcs_train = [mc for (mc, tc) in self.mcs_train]\n pst, tot = zip(*((int(mct in mcs_train), 1) for mct in mcs_test))\n\n return sum(pst) / sum(tot)", "title": "" }, { "docid": "6eaeb39e43f7d976f62f3e5b2fc9d7d4", "score": "0.518354", "text": "def _evaluate_model(self, model):\n\n model.eval()\n correct = 0\n len_dataset = len(self.test_loader.dataset)\n\n with torch.no_grad():\n for image, target in self.test_loader:\n output = model(image)\n\n if isinstance(output, tuple):\n output = output[0]\n\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n return correct / len_dataset", "title": "" }, { "docid": "79fae5f4eba10d3a11e4376ae00f2fcb", "score": "0.5181783", "text": "def 
get_test_diagnostics(left_pairs_o,right_pairs_o,sim_labels,threshold,class_id=None, plot_hist=False, breakpoint=None):\n matching = np.zeros(len(sim_labels))\n# l2_normalized_diff = util.l2_normalize(left_pairs_o-right_pairs_o)\n \n l2_normalized_diff = left_pairs_o-right_pairs_o\n l2_distances = sl.norm(l2_normalized_diff,axis=1)\n \n if plot_hist:\n util.get_separation_distance_hist(l2_distances[0:breakpoint],l2_distances[breakpoint:])\n \n false_pos = 0\n false_neg = 0\n inter_class_errors = 0\n p = np.sum(sim_labels)\n n = len(sim_labels) - p\n for i in range(len(sim_labels)):\n if np.isinf(l2_normalized_diff[i,:]).any() or np.isnan(l2_normalized_diff[i,:]).any():\n# print('Got inf or Nan in L2 norm; Change hyperparameters to avoid')\n if sim_labels[i] == 1:\n false_neg = false_neg + 1\n elif l2_distances[i] < threshold:\n matching[i] = 1\n if sim_labels[i] == 0:\n false_pos = false_pos + 1\n if not class_id is None:\n if class_id[i] == 1:\n inter_class_errors += 1\n else:\n if sim_labels[i] == 1:\n false_neg = false_neg + 1\n \n precision = np.sum((matching == sim_labels.T))/len(sim_labels)\n tp = 0\n tn = 0\n for i in range(len(sim_labels)):\n if matching[i] == 1 and sim_labels[i] == 1:\n tp += 1\n elif matching[i] == 0 and sim_labels[i] == 0:\n tn += 1\n recall = tp/p\n tnr = tn/n\n fnr = 1 - recall\n fpr = false_pos/n\n \n return precision, false_pos, false_neg, recall, fnr, fpr, tnr#, inter_class_errors", "title": "" }, { "docid": "1c9f5b577dedc56e3005b971f6ce6db9", "score": "0.5170649", "text": "def results_equal(a, b):\n\n if a.v_is_parameter or b.v_is_parameter:\n raise ValueError('Both inputs are not results.')\n\n if a.v_is_parameter or b.v_is_parameter:\n return False\n\n if not a.v_name == b.v_name:\n return False\n\n if not a.v_location == b.v_location:\n return False\n\n if not a.v_full_name == b.v_full_name:\n return False\n\n akeyset = set(a._data.keys())\n bkeyset = set(b._data.keys())\n\n if akeyset != bkeyset:\n return False\n\n for key in a._data:\n val = a._data[key]\n bval = b._data[key]\n\n if not nested_equal(val, bval):\n return False\n\n return True", "title": "" }, { "docid": "637ce3d26ab6a0d7e1ff7902317e3c74", "score": "0.5160243", "text": "def test_2(self):\n # Set initial constraints\n # Set initial constraints\n constr = {\"interpolation\": {\"flag\": False}}\n\n params_spec, options_spec = generate_random_model(\n point_constr=constr, deterministic=True\n )\n baseline = None\n\n # Solve with and without interpolation code\n for _ in range(2):\n # Process and solve\n respy_obj = RespyCls(params_spec, options_spec)\n respy_obj = simulate_observed(respy_obj)\n\n # Extract class attributes\n states_number_period, periods_emax = dist_class_attributes(\n respy_obj, \"states_number_period\", \"periods_emax\"\n )\n\n # Store and check results\n if baseline is None:\n baseline = periods_emax\n else:\n np.testing.assert_array_almost_equal(baseline, periods_emax)\n\n # Updates for second iteration\n options_spec[\"interpolation\"][\"points\"] = max(states_number_period)\n options_spec[\"interpolation\"][\"flag\"] = True", "title": "" }, { "docid": "6c9741a5d2fe4e9f7769d315aa78e2ea", "score": "0.5134293", "text": "def test_ConditionalProbDistI():", "title": "" }, { "docid": "064367b2621f986da5fee6abfd2ed83f", "score": "0.5132953", "text": "def assert_equal(main_loop1, main_loop2, check_log=True):\n W1 = (main_loop1.model.get_top_bricks()[0].linear_transformations[0]\n .params[0].get_value())\n W2 = (main_loop2.model.get_top_bricks()[0].linear_transformations[0]\n 
.params[0].get_value())\n assert numpy.all(W1 == W2)\n if check_log:\n assert sorted(list(main_loop1.log)) == sorted(list(main_loop2.log))\n assert numpy.all(\n next(main_loop1.epoch_iterator)[\"numbers\"] ==\n next(main_loop2.epoch_iterator)[\"numbers\"])", "title": "" }, { "docid": "991ed489b05c26c61c3e23f5ec271114", "score": "0.5095589", "text": "def calculate_likelihood_diff(b_expected_outcomes, expected_outcomes):\n diff = np.subtract(expected_outcomes, b_expected_outcomes)\n total_diff = np.sum(np.absolute(diff))\n avg_diff = total_diff / len(expected_outcomes)\n return diff, total_diff, avg_diff", "title": "" }, { "docid": "0988e97a03a317d1a497049e8070171b", "score": "0.50942373", "text": "def test_two_layers(self):\n model = TwoLayerLinearModel().eval()\n qconfig_dict = {\n 'fc2': default_qconfig\n }\n model = prepare(model, qconfig_dict)\n\n self.checkNoPrepModules(model)\n self.checkObservers(model)\n self.checkNoPrepModules(model.fc1)\n self.checkHasPrepModules(model.fc2)\n\n test_only_eval_fn(model, self.calib_data)\n convert(model)\n\n def checkQuantized(model):\n self.checkNoPrepModules(model)\n self.checkNoPrepModules(model.fc1)\n self.checkHasPrepModules(model.fc2)\n self.assertEqual(type(model.fc1), torch.nn.Linear)\n self.checkQuantizedLinear(model.fc2)\n test_only_eval_fn(model, self.calib_data)\n\n checkQuantized(model)\n\n # test one line API\n model = quantize(TwoLayerLinearModel().eval(), test_only_eval_fn, self.calib_data, qconfig_dict)\n checkQuantized(model)", "title": "" }, { "docid": "c325d4e1164f9b713dd12494abcf7300", "score": "0.50915456", "text": "def is_ratio_of(self,other):\r\n if self.numerator == self.denominator and other.is_simplified:\r\n return self.numerator == other.numerator\r\n \r\n return False", "title": "" }, { "docid": "b1ac2372e460ad4a32b1e898f8f2c6ba", "score": "0.5089624", "text": "def match_outcome(model, manager_1, manager_2):\n match_weights = model.match_weights\n\n age_p = average_age_win_probability(manager_1, manager_2)\n age_weight = match_weights[1]\n\n market_p = market_value_win_probability(manager_1, manager_2)\n market_weight = match_weights[0]\n\n spi_p = spi_win_probability(manager_1, manager_2)\n spi_weight = match_weights[2]\n\n victory_p = ((market_p * market_weight) + (age_p * age_weight) + (spi_p * spi_weight)) / (market_weight + age_weight + spi_weight)\n draw = get_draw(victory_p)\n\n chance = random.uniform(0, 1)\n result = None\n if chance < draw:\n result = 2\n elif chance < victory_p:\n result = 0\n elif chance > victory_p:\n result = 1\n\n return result", "title": "" }, { "docid": "cbb5d07beac18bd60d6133673e5a6f2e", "score": "0.5087804", "text": "def testEqualsWithDifferentObject(self):\n other_example_file = \"dubrovnik-1-1-pre.txt\"\n other_result = SfmResult(\n gtsam.readBal(gtsam.findExampleDataFile(other_example_file)),\n total_reproj_error=1.1e1,\n )\n\n self.assertNotEqual(EXAMPLE_RESULT, other_result)", "title": "" }, { "docid": "257778d1482007e9558f6c1c65275a1d", "score": "0.50864565", "text": "def test_WittenBellProbDist():", "title": "" }, { "docid": "88f1065806c58a25750c0398357cbcc9", "score": "0.5082483", "text": "def run_mcnemar_tests(model1,model2,X_test,y_test):\n\n y_test = y_test.flatten()\n\n model1_name = \" \".join([x.capitalize() for x in model1.model.split(\"_\")])\n model2_name = \" \".join([x.capitalize() for x in model2.model.split(\"_\")])\n\n #get predictions for models\n model1_predictions=model1.learner.predict(X_test)\n model2_predictions=model2.learner.predict(X_test)\n \n #Values 
in 2x2 Contingency Table\n A = np.sum((model1_predictions == y_test) & (model2_predictions == y_test))\n B = np.sum((model1_predictions == y_test) & (model2_predictions != y_test))\n C = np.sum((model1_predictions != y_test) & (model2_predictions == y_test))\n D = np.sum((model1_predictions != y_test) & (model2_predictions != y_test))\n\n # Compute p-value\n if B > 50 and C > 50: # value of 50 from Raschka\n # Compute p-value from chi^2 score\n\n # Compute chi squared\n try:\n chi_sq=(abs(B - C) - 1.0)**2/(B+C)\n except:\n chi_sq=0.0\n \n p = chi2.sf(x=chi_sq, df=1)\n \n else:\n # Compute exact p-value from binomial test\n p = binom_test(x=[B, C], p=0.5)\n \n table = \"\\\\begin{table}[H]\\n\"\n table += \"\\t\\\\centering\\n\"\n table += (\"\\t\\\\caption{{McNemar test results for {} and {} (p = {:.3e})}}\\n\"\n .format(model1_name.lower(), model2_name.lower(), p))\n table += \"\\t\\\\begin{tabular}{|c||c|c|}\\n\"\n table += \"\\t\\t\\\\hline\\n\"\n table += \"\\t\\t& {0} Correct & {0} Incorrect \\\\\\\\ \\\\hline \\\\hline\\n\".format(model1_name)\n table += \"\\t\\t{} Correct & {:d} & {:d} \\\\\\\\ \\\\hline\\n\".format(model2_name, A, B)\n table += \"\\t\\t{} Incorrect& {:d} & {:d} \\\\\\\\ \\\\hline\\n\".format(model2_name, C, D)\n table += \"\\t\\end{tabular}\\n\"\n table += \"\\end{table}\\n\"\n\n print(table)", "title": "" }, { "docid": "bbfcd76cc691117f2a1f00c3ef5ab7bd", "score": "0.50769234", "text": "def test_ratio_13(self):\n case = self.data.loc[\n (self.data.school == 'Brigham Young University (Clark)') &\n (self.data.year == 2012),\n ].reset_index(drop=True)\n weights = np.array([73, 73, 73, 86, 91, 110, 130, 130, 0]) / 130\n expected = np.array([3, 24, 6, 6, 4, 5, 1, 5, 0])\n expected = np.dot(weights, expected) / sum(expected)\n print(\"Ratio\")\n print(case['ratio'][0])\n print(\"Expected\")\n print(expected)\n assert np.allclose(case['ratio'][0], expected)", "title": "" }, { "docid": "c9b39eb775230f4c22f2725254650adf", "score": "0.5075318", "text": "def _calc_dissimilarity(\n self, model_embedding: Any, real_embedding: Any, context: Any\n ) -> float:\n pass # pragma: no cover", "title": "" }, { "docid": "848d68c99f7661515c0e7ae375fca547", "score": "0.5066264", "text": "def check_probability_matching_other_agent(real_teacher, uncertainty=1.):\n max_model_size = 7\n all_models = generate_list_models(max_model_size)\n agent2 = pop.NetworkAgent(max_model_size)\n if real_teacher:\n agent1 = pop.NetworkAgent(max_model_size)\n else:\n agent1 = pop.ConfidenceTeacher(max_model_size, uncertainty)\n\n distances = []\n for i in range(3000):\n random_indices = np.random.randint(0, len(all_models),\n int(0.9*len(all_models))\n )\n # models are randomly picked rows of all_models\n models = all_models[random_indices]\n production = agent1.sample(models)\n agent2.learn(models, production)\n distances.append(check_agents_similarity(agent1, agent2, all_models))\n\n plt.scatter(range(len(distances)), distances)\n plt.show()", "title": "" }, { "docid": "852cc0e241fe018c0d2ba45b80023bb0", "score": "0.50655115", "text": "def test_models(num_sentences):\n\n print '### TEST MODELS ###'\n\n PERC_LEARN_RATE = 0.35\n PERC_EPOCHS = 1\n\n x_train, x_test, y_train, y_test, suppx, suppy = get_data(max_sentences=num_sentences)\n t, e = mle(x_train, y_train, suppx, suppy)\n\n models = [\n ('HMM', phi_hmm(suppx, suppy, e, t)),\n ('HMM Perceptron', phi_hmm(suppx, suppy, e, t)),\n ('Alternative', phi_alt(suppx, suppy))\n ]\n\n for model_name, (phi, w0) in models:\n print 'model: %s' % model_name,\n\n if 
model_name == 'HMM':\n w = w0\n elif model_name == 'HMM Perceptron':\n w = perceptron(x_train, y_train, suppx, suppy, phi, np.zeros(w0.shape) + 1e-12, PERC_LEARN_RATE, PERC_EPOCHS)[-1]\n else:\n w = perceptron(x_train, y_train, suppx, suppy, phi, w0, PERC_LEARN_RATE, PERC_EPOCHS)[-1]\n\n losses = []\n for x, y in zip(x_test, y_test):\n x_hat = viterbi2(y, suppx, suppy, phi, w)\n losses.append(loss(x, x_hat))\n\n print 'loss: %.5f' % np.mean(losses)", "title": "" }, { "docid": "f351fa7736c1bf4de192280d0d2764b1", "score": "0.50556", "text": "def _compareKernelImages(self, psf1, psf2):\n im1 = psf1.computeKernelImage()\n im2 = psf2.computeKernelImage()\n bboxIntersection = im1.getBBox()\n bboxIntersection.clip(im2.getBBox())\n im1Intersection = afwImage.ImageD(im1, bboxIntersection)\n im2Intersection = afwImage.ImageD(im2, bboxIntersection)\n scale1 = im1.getArray().sum() / im1Intersection.getArray().sum()\n scale2 = im2.getArray().sum() / im2Intersection.getArray().sum()\n im1Arr = scale1 * im1Intersection.getArray()\n im2Arr = scale2 * im2Intersection.getArray()\n self.assertTrue(np.allclose(im1Arr, im2Arr),\n \"kernel images %s, %s do not match\" % (im1Arr, im2Arr))", "title": "" }, { "docid": "379b47eaff6a31e3b38050d18c758c02", "score": "0.504611", "text": "def test_constraints_division_whether_equal(tng):\r\n constraints = gen_constraints(tng)\r\n\r\n constraints2 = constraints_for_links(tng)\r\n constraints2 = constraints_for_sender(tng) + constraints2\r\n constraints2 = constraints_for_receiver(tng) + constraints2\r\n constraints2 = constraints_for_path_ban(tng) + constraints2\r\n constraints2 = constraints_for_offloading(tng) + constraints2\r\n\r\n x = np.ones(2*len(s_list)**2, float)\r\n # for i in range(len(constraints)):\r\n # print(\"Constraint is:\", constraints[i])\r\n # print(\"Constraint %d's value is: %f\" % (i,constraints[i]['fun'](x)))\r\n \r\n # f = gen_objective(gen_args(tng))\r\n # print(\"Objective function:\", f(x))\r\n print(\"for constraints one:\")\r\n for c in constraints:\r\n print(c['fun'](x))\r\n print(\"for constraints two:\")\r\n for c in constraints2:\r\n print(c['fun'](x))", "title": "" }, { "docid": "029984aa5c986b1fb2a7716501f7a9b9", "score": "0.5045237", "text": "def test_harmonic_similarity_populated_2():\n assert metrics.harmonic_similarity(LEFT_POPULATED_2, RIGHT_POPULATED_2) == 0.333", "title": "" }, { "docid": "6219232da794d468ac3a6f2c7641b510", "score": "0.5038728", "text": "def test_overload_div_throws_error_if_masks_different():\n\n u1 = np.arange(1, 82).reshape((9, 9))\n u2 = np.arange(101, 182).reshape((9, 9))\n mask1 = np.random.randint(0, 2, (9, 9))\n\n dp1 = dense_predictor.DensePredictor(u1, u2, mask1)\n dp2 = dense_predictor.DensePredictor(u1, u2)\n with pytest.raises(ValueError):\n dp1 / dp2", "title": "" }, { "docid": "1b1f680a5f0f3dc20353681322840cf2", "score": "0.50379926", "text": "def __gt__(self, other):\n if self.__numerator * other.__denominator - other.__numerator * self.__denominator > 0:\n return True\n else:\n return False", "title": "" }, { "docid": "0c493d41031288862ff4e8634565fc0a", "score": "0.5037091", "text": "def _compare(self, other):\n mod = self.modifier.trade_value - other.modifier.trade_value\n lev = self.level - other.level\n avg = (lev + mod) / 2.0\n return avg", "title": "" }, { "docid": "14f47bbe5073e51b79bab00496e0f296", "score": "0.5031721", "text": "def compare_models(models, model_names, docs, docs_ids, performance=False):\r\n\r\n # output df\r\n cols = ['doc_id','doc_length', 'text']\r\n cols = cols + 
model_names\r\n if performance:\r\n for n in model_names:\r\n cols.append(n+ \"_runtime\")\r\n df_scores = pd.DataFrame(columns = cols)\r\n length, text, runtime = [], [], []\r\n for d in docs:\r\n length.append(len(d.split()))\r\n text.append(d[:15])\r\n df_scores['doc_id'] = docs_ids\r\n df_scores['doc_length'] = length\r\n df_scores['text'] = text\r\n\r\n # for each model calculate docs perplexity\r\n for i, completer in enumerate(models):\r\n # start = time()\r\n # print(score_perplexity(completer.model, completer.tokenizer, [text1]))\r\n # timing(\"--------- Scoring time {} -----------\".format(names[i]), start)\r\n df_scores[model_names[i]] = score_perplexity(completer.model, completer.tokenizer, docs)\r\n # if performance check is needed - calculate prediction runtime for doc's next phrase\r\n if performance:\r\n runtime = []\r\n for d in docs:\r\n # check runtime of full document next words predicion\r\n start = time()\r\n completions, delta, options = completer.predict(d)\r\n runtime.append(time()-start)\r\n df_scores[completer.model_type + \"_runtime\"] = runtime\r\n\r\n df_scores.to_csv(\"./logs/user_session/compare_models\"+\"_\"+today+\".csv\")\r\n return df_scores", "title": "" } ]
c80064b9608786be6f6d43da0b4d693c
Branching is not random. A branch should occur inside the method or go to the beginning of another method. This method analyzes the branching address of a branch instruction and then provides a probability of the address being correct
[ { "docid": "1cd624279e6db30d049094eafcc8f170", "score": "0.7213883", "text": "def _compute_branch_address(self, inst, current_fn, lowest_addr, highest_addr):\n if inst.ignore or not inst.is_branch:\n return\n jmp_addr = inst.jumping_address\n\n if jmp_addr is None:\n # It might be possible that the address is computed dynamically,\n # therefore the jmp_address will be unknown\n inst.scores_by_rule['pbd'] = self._model.just_any_jump_is_valid * 2\n # Award high prob to addresses inside this method or to the begin of other methods\n elif jmp_addr >= current_fn and jmp_addr <= self._functions[current_fn][1]:\n inst.scores_by_rule['pbd'] = self._model.branch_to_this_method\n # Award medium-high prob to a branch to the start of other method\n elif jmp_addr in self._functions:\n inst.scores_by_rule['pbd'] = self._model.branch_to_other_method_start\n # Award low prob to addresses outside the program\n elif jmp_addr > highest_addr or jmp_addr < lowest_addr:\n inst.scores_by_rule['pbd'] = self._model.just_any_jump_is_valid\n # Award low to med prob to anithing else\n else:\n inst.scores_by_rule['pbd'] = self._model.just_any_jump_is_valid * 2", "title": "" } ]
[ { "docid": "1307af128f0a5c78af4461deb19951e6", "score": "0.6226275", "text": "def perform_always_branch(self, data, addr):\n\t\treturn None", "title": "" }, { "docid": "8a73668c32d2ea65e4e433a87fa449c8", "score": "0.5962285", "text": "def determine_next_step(self, branch: int, x: dict) -> int:", "title": "" }, { "docid": "3f4906ab5bd513f111f32587c95cd589", "score": "0.57664937", "text": "def _execute_branch(self) -> None:\n dest_addr = self._current_op.inputs[0].get_addr()\n if dest_addr.is_constant:\n expr = self.state.scratch.ins_addr\n self.state.scratch.statement_offset = dest_addr.offset + self._current_op.seq.uniq\n else:\n expr = dest_addr.offset\n\n self.successors.add_successor(\n self.state,\n expr,\n self.state.scratch.guard,\n \"Ijk_Boring\",\n exit_stmt_idx=DEFAULT_STATEMENT,\n exit_ins_addr=self.state.scratch.ins_addr,\n )\n\n self.state.scratch.exit_handled = True", "title": "" }, { "docid": "32c8dc0bb0d07452a4fc198e18ae8904", "score": "0.56417775", "text": "def test_bat_ref():\n for i in range(1, 10):\n d = 2**i-1\n a = bat_ref(d)\n b = int((d+1)*(log2(d+1)-1)+(d+1)/2)\n assert a==b, (d, a, b)", "title": "" }, { "docid": "dc1005ac4b9cfba3034c7d7c5ff92e2a", "score": "0.56216973", "text": "def makeBranchEval(first_branch):\n\n def score(game, player):\n if not first_branch:\n first_branch.append(game.root)\n if game.root in first_branch:\n return 1.\n return 0.\n\n return score", "title": "" }, { "docid": "4f768c7cba8a06809b0249bdb276ca62", "score": "0.5604568", "text": "def makeBranchEval(first_branch):\r\n\r\n def score(game, player):\r\n if not first_branch:\r\n first_branch.append(game.root)\r\n if game.root in first_branch:\r\n return 1.\r\n return 0.\r\n\r\n return score", "title": "" }, { "docid": "2a79d75514f0293de5967d3dfedec410", "score": "0.5510209", "text": "def branchlength2support(self): # -> None:\n ...", "title": "" }, { "docid": "c05465d1f590621f8d14301a69504bdc", "score": "0.54938525", "text": "def _execute_b(self, opcode):\n self.cpu.log(\"Jump to location nnn + V0\")\n self.cpu.pc = (opcode & 0x0fff) + self.cpu.Vx[0]", "title": "" }, { "docid": "31ff0046a7d7768fe251a60bde6cd8fe", "score": "0.5487819", "text": "def _compute_proper_cfg(self, inst, cpmd, addr):\n\n ###################################################\n # THE INSTRUCTION IS PLACED NEAR SAME CONDITIONAL\n ###################################################\n\n # Compute the probability of the previous instruction having equal conditinoal\n a = addr - 4\n prev_inst = self._program[a] if a in self._program else None\n # Get posterior instruction\n a = addr + 4\n post_inst = self._program[a] if a in self._program else None\n\n # The previous instruction has an score computed. 
We use that\n prev = 0\n # Previous instructions with the same conditional\n same_cond = 0\n # Instructions modifiying the flag register\n flag_mod = 0\n # Instructions both having the same conditional and modifying the registers\n union_count = 0\n # Posterior instructions with the same conditional\n after_same_cond = 0\n\n\n\n ln_prev = 0\n ln_post = 0\n\n if prev_inst:\n for i in prev_inst:\n if not i.ignore:\n ln_prev += 1\n a = i.conditional_field == inst.conditional_field\n b = AReg.CPSR in i.registers_written()\n if a:\n same_cond += 1\n if b:\n flag_mod += 1\n # COMPUTE THE UNION OF BOTH EVENTS\n if a and b:\n union_count += 1\n\n # Compute the probability of the following instruction having equal conditinal\n after = 0\n if post_inst:\n after_same_cond = 0\n for i in post_inst:\n if not i.ignore:\n ln_post += 1\n if i.conditional_field == inst.conditional_field:\n after_same_cond += 1\n\n all_conditional = 14\n p1, p2, p3 = 0, 0, 0\n result = 0\n if inst.conditional_field != all_conditional:\n if same_cond > 0 and flag_mod > 0:\n if after_same_cond > 0:\n p1 = (same_cond + flag_mod - union_count) * after_same_cond / (ln_prev * ln_post)\n p1 *= self._model.branch_after_cpsr_and_near_cond_are_equals\n\n p2 = (same_cond + flag_mod - union_count) / ln_prev\n p2 *= self._model.branch_after_cpsr_and_prev_cond_are_equals\n\n result = max(p1, p2)\n\n elif same_cond > 0:\n if after_same_cond > 0:\n p1 = same_cond * after_same_cond / (ln_prev * ln_post)\n p1 *= self._model.both_conditionals_are_equals\n\n p2 = same_cond / len(prev_inst)\n p2 *= self._model.prev_conditionals_are_equals\n result = max(p1, p2)\n\n elif flag_mod > 0:\n if after_same_cond > 0:\n p1 = flag_mod * after_same_cond / (ln_prev * ln_post)\n p1 *= self._model.branch_after_cpsr_and_after_cond_are_equals\n\n p2 = flag_mod / ln_prev\n p2 *= self._model.branch_after_cpsr\n result = max(p1, p2)\n else:\n if flag_mod > 0:\n p3 = flag_mod / ln_prev\n p3 *= 1 - self._model.branch_after_cpsr\n result = p3\n elif same_cond > 0:\n if after_same_cond > 0:\n p1 = same_cond * after_same_cond / (ln_prev * ln_post)\n p1 *= self._model.both_conditionals_are_equals\n p2 = same_cond / len(prev_inst)\n p2 *= self._model.prev_conditionals_are_equals\n result = max(p1, p2)\n elif after_same_cond > 0:\n p1 = after_same_cond / ln_post\n p1 *= self._model.prev_conditionals_are_equals\n result = p1\n\n if result == 0:\n result = self._model.low_probability\n elif result > 1:\n raise RuntimeError('Invalid probability')\n\n inst.scores_by_rule['pcfg'] = result", "title": "" }, { "docid": "361e65989bf9cdbb2a301f85f822d272", "score": "0.54786503", "text": "def check_balanced():\n\n pass", "title": "" }, { "docid": "5e02221c25f52b593c6bc68dbcebb6fb", "score": "0.547078", "text": "def get_resulting_branch_fitness(self) -> float:\n return self._approach_level + ff.normalise(self._branch_distance)", "title": "" }, { "docid": "d66873dbcda1ba71fc73d3af2b1e3811", "score": "0.54619896", "text": "def __init__(self, reg = [0] * 8, ram = [0] * 256, pc = 0):\n self.reg = reg\n self.ram = ram\n self.pc = pc\n self.sp = self.reg[7]\n self.fl = [0] * 8\n self.running = True \n\n #branchtable\n self.branchtable = {\n HLT: self.hlt,\n LDI: self.ldi,\n PRN: self.prn,\n MUL: self.mul,\n ADD: self.add,\n PUSH: self.push,\n POP: self.pop,\n CALL: self.call,\n RET: self.ret,\n CMP: self.compare,\n JMP: self.jmp,\n JEQ: self.jeq,\n JNE: self.jne\n }", "title": "" }, { "docid": "6d7ccc25901efae71db0fb511261f15a", "score": "0.5452825", "text": "def Branches():\n\n global 
Asm\n\n opcode = dec.Asm.Instructions[dec.Asm.Mnemonic][1]\n dec.Asm.Timing = dec.Asm.Instructions[dec.Asm.Mnemonic][2]\n\n dest = assem.EvalExpr()[0]\n\n offset = (dest - dec.Asm.BOL_Address - 2) // 2\n\n if opcode == 0xD000 or opcode == 0xD800:\n # BRA and CALL, use an 11-bit offset\n minoffset = -0x400\n maxoffset = 0x3FF\n mask = 0x7FF\n else:\n # Others use an 8-bit offset\n minoffset = -0x80\n maxoffset = 0x7F\n mask = 0xFF\n\n if dec.Asm.Pass == 2:\n # Do the range check only during pass 2\n if dest % 2 != 0:\n # Oops, the destination must be an even address\n errors.DoError('range', False)\n if offset < minoffset or offset > maxoffset:\n # Oops, out of range\n errors.DoError('range', False)\n\n CodeWord(opcode + (offset & mask))\n\n NoMore()", "title": "" }, { "docid": "99dda71c29c964224d23fc457ae8f3a1", "score": "0.54254055", "text": "def trace_fn_randomwalk(self, _, pkr):\n results = pkr.inner_results.inner_results\n return (\n results.accepted_results.target_log_prob,\n ~(results.log_accept_ratio > -1000.0),\n results.is_accepted,\n )", "title": "" }, { "docid": "1e69497327c39fde8507b3c284ba5da0", "score": "0.54081976", "text": "def _execute_branchind(self) -> None:\n expr = self._get_value(self._current_op.inputs[0])\n\n self.successors.add_successor(\n self.state,\n expr,\n self.state.scratch.guard,\n \"Ijk_Boring\",\n exit_stmt_idx=DEFAULT_STATEMENT,\n exit_ins_addr=self.state.scratch.ins_addr,\n )\n\n self.state.scratch.exit_handled = True", "title": "" }, { "docid": "7aedf5bfeab99cfe699bc83f740f6793", "score": "0.53953665", "text": "def make_decision(probability):\n return random() < probability", "title": "" }, { "docid": "83cb32278e1a5060068512d53e04cccd", "score": "0.5382021", "text": "def sim_point(gamestate, vars):\n r = random.random()\n if gamestate['server'] =='a':\n return r <= vars.a_hold_pct\n else:\n return r >= vars.b_hold_pct", "title": "" }, { "docid": "eba904d738fd8dbcfd7a8489c531cabf", "score": "0.5338927", "text": "def always_branch(self, data, addr):\n\t\tdata = str(data)\n\t\tbuf = (ctypes.c_ubyte * len(data))()\n\t\tctypes.memmove(buf, data, len(data))\n\t\tif not core.BNArchitectureAlwaysBranch(self.handle, buf, addr, len(data)):\n\t\t\treturn None\n\t\tresult = ctypes.create_string_buffer(len(data))\n\t\tctypes.memmove(result, buf, len(data))\n\t\treturn result.raw", "title": "" }, { "docid": "3900a891a98240b30a42c0f566fab13f", "score": "0.5321005", "text": "def pseudo_code():\n pass", "title": "" }, { "docid": "ed0c4034746ceaac322507b8e9edb7f9", "score": "0.5305507", "text": "def HeuristicBP(self, state):\n bp = 0\n for i in range(self.pancake_num - 1):\n if abs(state[i] - state[i+1]) != 1:\n bp += 1\n if state[self.pancake_num - 1] != self.pancake_num:\n bp += 1\n return bp", "title": "" }, { "docid": "bc04d50feb0bedcebc2e6ab8b7883da8", "score": "0.529164", "text": "def event_m20_24_x50(z80=20243000):\r\n \"\"\"State 0,1: [Lib] [BEST] [Reproduction] Phantom management of Andy _SubState\"\"\"\r\n call = event_m20_24_x47(z80=z80)\r\n if call.Get() == 1:\r\n pass\r\n elif call.Get() == 0:\r\n \"\"\"State 3: [Lib] [BEST] [Condition] Phantom management of Andy_SubState\"\"\"\r\n call = event_m20_24_x48(z80=z80)\r\n if call.Get() == 1:\r\n pass\r\n elif call.Get() == 0:\r\n \"\"\"State 2: [Lib] [BEST] [Execution] Phantom management of Andy_SubState\"\"\"\r\n assert event_m20_24_x49(z80=z80)\r\n \"\"\"State 5: Rerun\"\"\"\r\n return 1\r\n \"\"\"State 4: Finish\"\"\"\r\n return 0", "title": "" }, { "docid": "47e8ffc1a6ebc6651519637dafb6e39f", "score": 
"0.52803355", "text": "def find_branch_points(img):\n kernel = numpy.array([[0,1,0],[1,0,1],[0,1,0]])\n img=numpy.asarray(img,numpy.uint8)\n branch_points = img*ndimage.convolve(img,kernel,mode='constant')\n return (branch_points >= 3)", "title": "" }, { "docid": "1e3856a5123fe565b42bce3b76e98ecc", "score": "0.5263602", "text": "def test_part_2b_probability(self):\n state_sequence, probability = self.part_2b_answer\n\n msg = ('incorrect probability. received {}, but expected {}'\n '').format(round(probability, 6), self.correct_probability)\n# print \"my probability\", probability\n self.assertTrue(round(probability, 6) == self.correct_probability, msg)", "title": "" }, { "docid": "534a28ab84aac425a0e4a39d7af00688", "score": "0.52608055", "text": "def test_ball_probability(balls, result):\n from ball_bags_and import ball_probability\n assert ball_probability(balls) == result", "title": "" }, { "docid": "8f2c0dc8c7bc5d8a17a4d4c3009690f3", "score": "0.52488846", "text": "def predictable(self, branch, parent):\n y = branch.y\n X = branch.X\n weights = branch.weights\n if len(set(y)) == 1: # branch is pure\n branch.predict = y[0]\n return True\n elif X.shape[0] == 0: # no sample\n # create a node here, and predict using parent's\n # majority vote\n y_parent = parent.y\n weights_parent = parent.weights\n branch.predict = self.majority_vote(y_parent, weights_parent)\n return True\n elif branch.level == self.max_depth or len(branch.possible_features) == 0:\n # the tree reaches max depth or no remaining feature\n # predict using majority vote\n branch.predict = self.majority_vote(y, weights)\n return True\n else:\n return False", "title": "" }, { "docid": "19b67ae604dba7df7bf23446daaa41cc", "score": "0.52346766", "text": "def _jump_to_branch_loc(self):\n if any(self.branch_point):\n bps = [i for i, point in enumerate(self.branch_point[:]) if\n not point[1]]\n if any(bps):\n idx = max(bps)\n coord = self.branch_point[idx][0]\n self.set_viewer_loc(coord)\n else:\n msg = 'no branch points found'\n self.upd_msg(msg)\n else:\n msg = 'no branch point found'\n self.upd_msg(msg)", "title": "" }, { "docid": "bd718c3c2168dfbc21b99d8e190f1cc7", "score": "0.52291393", "text": "def callp(self,pred):\n self.state.pc += 1 #return to following instruction\n self.state.calls.append(Interpreter.CallStackFrame(self.state))\n self.state.jumpTo = pred\n self.state.pc = -1", "title": "" }, { "docid": "1aecc4c209295d9c813d906d12e9f7f3", "score": "0.52265984", "text": "def lemur(branches):\n\n assert branches[0] == 0, \"First branch must be alive\"\n assert branches[-1] == 0, \"Last branch must be alive\"\n\n # given a bunch of 0s and 1s, i have to return the num of jumps it takes the lemur\n # to reach the last branch (last 0)\n # she can only hop a distance of two branches max\n # 0 - 0 - 1 - 0 - 0 - 1 - 0\n # >>> 4\n # in a simpler problem, if there were ONLY 0s, the max num is half the 0s\n # 0 - 0 - 0 - 0\n # >>> 2\n # if i add the 1s, how does the logic change?\n # 0 - 1 - 0 - 1 - 0 - 1 - 0\n # >>> 3\n # HINT: there will never be two 1s in a row\n # one-to-three dead branches have the impact of one additional hop\n # every three dead branches should add one hop\n # 0 - 1 - 0 - 0 - 1 - 0 - 0 - 1 - 0 - 1 - 0\n # >>> 6\n # 0 - 0 - 0 - 0 - 0 - 0 - 0\n # >>> 3\n # in this case, though, the 1s doubled the hops BECAUSE of their placement\n # the lemur can't hop two at a time because it has to scoot over one and then bound over the branch\n # if the pattern goes 0, 1, 0, 1 the dead branches add one hop\n # if the pattern 
goes 0, 1, 0, 0, 1, the dead branches double the hops\n # How do I check for both cases?\n # HINT: Involves loops and lists.\n\n if len(branches) < 2: # If she's already where she needs to be, return zero jumps\n return 0\n \n # Let's handle our simplest case where no dead branches are involved\n if 1 not in set(branches): # I'm looking in a set because that's O(1)\n return len(branches) / 2", "title": "" }, { "docid": "6bbd3bfcbc95094d0f6d5946658db188", "score": "0.5205197", "text": "def perform_invert_branch(self, data, addr):\n\t\treturn None", "title": "" }, { "docid": "d1414150ae3c8ef2456bfcd25e4d60fc", "score": "0.51767564", "text": "def test_jmp(self):\n for i in range(NUM_TESTS):\n intel_machine.re_init()\n label_addr = random.randint(FIRST_INST_ADDRESS, MAX_INSTRUCTIONS)\n intel_machine.labels[\"test_label\"] = label_addr\n assemble(\"jmp test_label\", intel_machine)\n self.assertEqual(intel_machine.get_ip(), label_addr)", "title": "" }, { "docid": "9806008b19860e9f1aaf4c81f8a54f67", "score": "0.51750594", "text": "def perform_is_always_branch_patch_available(self, data, addr):\n\t\treturn False", "title": "" }, { "docid": "6ce20a8c1fcb383a391fe8c8096b61ff", "score": "0.5166487", "text": "def __init__(self):\n self.running = True\n self.ram = [0] * 256\n self.reg = [0] * 8\n self.reg[7] = 0xF4\n self.pc = 0\n self.ir = 0\n self.fl = 0b00000000\n \n self.branchtable = {}\n self.branchtable[0b00000001] = self.HLT \n self.branchtable[0b10000010] = self.LDI \n self.branchtable[0b01000111] = self.PRN\n self.branchtable[0b10100000] = self.ADD\n self.branchtable[0b10100010] = self.MUL \n self.branchtable[0b01000101] = self.PUSH \n self.branchtable[0b01000110] = self.POP\n self.branchtable[0b01010000] = self.CALL\n self.branchtable[0b00010001] = self.RET\n self.branchtable[0b10100111] = self.CMP\n self.branchtable[0b01010100] = self.JMP\n self.branchtable[0b01010101] = self.JEQ\n self.branchtable[0b01010110] = self.JNE", "title": "" }, { "docid": "ea0a1a0f57a1aa68d29791d6a64ea4a8", "score": "0.5154985", "text": "def fib_reserve_lookup(cond):\n return -1", "title": "" }, { "docid": "dad4c7a51c83e33b29dad92d987e58be", "score": "0.512656", "text": "def branch_to(self, state):\n raise NotImplementedError()", "title": "" }, { "docid": "63424e764b37a947884becd6b5b398f7", "score": "0.512499", "text": "def berry_finder(t):\n if label(t) == \"berry\":\n return True\n for b in branches(t):\n if berry_finder(b):\n return True\n return False", "title": "" }, { "docid": "a404ece068fdb1127094afbb023ff3e6", "score": "0.5114816", "text": "def test_call(self):\n for i in range(NUM_TESTS):\n intel_machine.re_init()\n call_instr_addr = random.randint(FIRST_INST_ADDRESS,\n MAX_INSTRUCTIONS)\n label_addr = random.randint(FIRST_INST_ADDRESS,\n MAX_INSTRUCTIONS)\n\n code_to_run = [NO_OP] * (MAX_INSTRUCTIONS + 1)\n code_to_run[call_instr_addr] = \"call \" + TEST_LABEL + \"\\n\"\n prev_label_info = code_to_run[label_addr]\n code_to_run[label_addr] = TEST_LABEL + \": \" + prev_label_info\n\n intel_machine.labels[TEST_LABEL] = label_addr\n intel_machine.set_ip(call_instr_addr)\n\n # We step once through the code, executing only `call`.\n assemble(\"\".join(code_to_run), intel_machine, step=True)\n\n self.assertEqual(intel_machine.get_ip(), label_addr)", "title": "" }, { "docid": "ea589ed5037529d483575e91e567b715", "score": "0.5112417", "text": "def branch_for_instance(self, x: dict) -> int:", "title": "" }, { "docid": "f98ada7da1bb845a0d37f040a2884ba6", "score": "0.5110153", "text": "def lemur(branches):\n\n assert 
branches[0] == 0, \"First branch must be alive\"\n assert branches[-1] == 0, \"Last branch must be alive\"\n\n if len(branches) == 1:\n return 0\n\n if len(branches) <= 3:\n return 1\n\n min_jumps_to_location = [0 if x == 0 else float('inf') for x in branches]\n if min_jumps_to_location[1] == 0:\n min_jumps_to_location[1] = 1\n\n for i in xrange(2, len(branches)):\n if branches[i] == 0:\n min_jumps_to_location[i] = 1 + min(min_jumps_to_location[i-2],\n min_jumps_to_location[i-1])\n\n return min_jumps_to_location[-1]", "title": "" }, { "docid": "fdae808cdfd06c4cd88354a9aebf2992", "score": "0.5106651", "text": "def test_houses_visited(instructions, total_houses):\n assert houses_visited(instructions) == total_houses", "title": "" }, { "docid": "ea0857247c5240343a7246fb86a1ac43", "score": "0.5089158", "text": "def cpu_jump_to_subroutine(self):\n self.cpu_memory[self.cpu_registers['sp']] = self.cpu_registers['pc'] & ADDRESS_2\n self.cpu_registers['sp'] += 1\n self.cpu_memory[self.cpu_registers['sp']] = (self.cpu_registers['pc'] & ADDRESS_14) >> 8\n self.cpu_registers['sp'] += 1\n self.cpu_registers['pc'] = self.cpu_operand & ADDRESS_13", "title": "" }, { "docid": "cb6ba8d09cf6ad0a36df364f7715f2f0", "score": "0.50851697", "text": "def decision(self):\n\t\trand = random.random()\n\t\tprint rand\n\t\treturn rand < self.stickProb", "title": "" }, { "docid": "0fbee87b9deeff608389e33d2b13c19c", "score": "0.507741", "text": "def _execute_cbranch(self) -> None:\n exit_state = self.state.copy()\n cond = self._get_value(self._current_op.inputs[1])\n dest_addr = self._current_op.inputs[0].get_addr()\n\n if dest_addr.is_constant:\n expr = exit_state.scratch.ins_addr\n exit_state.scratch.statement_offset = dest_addr.offset + self._current_op.seq.uniq\n else:\n expr = dest_addr.offset\n\n self.successors.add_successor(\n exit_state,\n expr,\n cond != 0,\n \"Ijk_Boring\",\n exit_stmt_idx=DEFAULT_STATEMENT,\n exit_ins_addr=self.state.scratch.ins_addr,\n )\n\n cont_state = self.state\n cont_condition = cond == 0\n cont_state.add_constraints(cont_condition)\n cont_state.scratch.guard = claripy.And(cont_state.scratch.guard, cont_condition)", "title": "" }, { "docid": "d9a6072c69a22730cb6a5b088d7a9516", "score": "0.50736094", "text": "def _branch(self, int_res):\n btype = int_res[0]\n if btype == Node.FRACTIONAL_TEST_ON_VEHICLE:\n # create two branches\n constr1 = BranchConstr(int_res[1], None, int_res[-1], BranchConstr.TYPE_TEST_ONE_VEHICLE,\n BranchConstr.FIX_TO_ONE)\n constr2 = BranchConstr(int_res[1], None, int_res[-1], BranchConstr.TYPE_TEST_ONE_VEHICLE,\n BranchConstr.FIX_TO_ZERO)\n elif btype == Node.FRACTIONAL_TEST_PAIR:\n constr1 = BranchConstr(int_res[1], int_res[2], None, BranchConstr.TYPE_TEST_PAIR_TOGETHER,\n BranchConstr.FIX_TO_ONE)\n constr2 = BranchConstr(int_res[1], int_res[2], None, BranchConstr.TYPE_TEST_PAIR_TOGETHER,\n BranchConstr.FIX_TO_ZERO)\n elif btype == Node.FRACTIONAL_TEST_ORDER_PAIR_ON_VEHICLE:\n constr1 = BranchConstr(int_res[1], int_res[2], int_res[-1], BranchConstr.TYPE_TEST_PAIR_ORDER_ON_VEHICLE,\n BranchConstr.FIX_TO_ONE)\n constr2 = BranchConstr(int_res[1], int_res[2], int_res[-1], BranchConstr.TYPE_TEST_PAIR_ORDER_ON_VEHICLE,\n BranchConstr.FIX_TO_ZERO)\n else:\n print \"Integrality result btype error\"\n constr1 = None\n constr2 = None\n\n # two branches created\n\n constr_list1 = self._branch_constr_list[:]\n constr_list2 = self._branch_constr_list[:]\n constr_list1.append(constr1)\n constr_list2.append(constr2)\n node1 = Node(self._col_set, constr_list1, self._bp_solver)\n 
node2 = Node(self._col_set, constr_list2, self._bp_solver)\n\n return node1, node2", "title": "" }, { "docid": "5da5ed1be403eeb91dc30df6770824bd", "score": "0.50697196", "text": "def test_brute():\n hc.reset()\n hc.set_my_ios(hash_file='tests/hashes/4dig.txt',\n masks_file='tests/masks/4dig.hcmask',\n outfile='tests/crk_bf.txt')\n hc.brute_force(increment=True)", "title": "" }, { "docid": "310403b4d89eec60b21dab1a63e1651d", "score": "0.50607795", "text": "def lemur(branches):\n\n jumps = 0\n\n # if only one branch no jumps needed\n if len(branches) == 1:\n return jumps\n\n # if 3 or fewer branches will never need more than 1 jump\n elif len(branches) <= 3:\n jumps += 1\n return jumps\n\n # when only 3 or fewer branches left will have number of jumps to return\n while len(branches) >= 3:\n\n jumps += 1\n\n # if the branch at furthest possible distance is not dead - jump to that\n if branches[2] != 1:\n branches = branches[2:]\n\n # if the branch at furthest possible distance is dead - jump to the prev\n else:\n branches = branches[1:]\n\n return jumps", "title": "" }, { "docid": "3a8edf7560a817557dfbd73b0bc9b53c", "score": "0.50601184", "text": "def bt(p0,p1,bs):\n if numba_isclose((p1-p0),0.0,abs_tol=0.001):\n return bs[-1]\n else: return np.abs(p1-p0)/(p1-p0)", "title": "" }, { "docid": "7736f901155ec6ae8c5bb18b872aa89c", "score": "0.5046352", "text": "def func_aux_b(n, p):\n cont = 0\n for i in range(1000):\n G, G2 = random_graph(n, p)\n v1, v2, v3, v4, v5 = graph_stats(G2) #v4 es la metrica de bridges del grafo\n cont += v4\n ans = cont / 1000\n return ans", "title": "" }, { "docid": "1a3bd7b6fa13f3f249c7e4ba16ca7285", "score": "0.5030071", "text": "def perform_is_never_branch_patch_available(self, data, addr):\n\t\treturn False", "title": "" }, { "docid": "5389ad5b2dd12c0c138d59f53fcf67a2", "score": "0.50108886", "text": "def evaluate_state(self):", "title": "" }, { "docid": "49a372842084a5cc7b98c72c2c0e3632", "score": "0.50097007", "text": "def perform_is_invert_branch_patch_available(self, data, addr):\n\t\treturn False", "title": "" }, { "docid": "216d9f851c63b24b5da53a8d3c4032a7", "score": "0.49931443", "text": "def test_fibonacci_case_17():\n a = 17\n actual = 1597\n assert fibonacci(a) == actual", "title": "" }, { "docid": "1abce4b57b8ddfa0ff984cd0b1e27bdb", "score": "0.4988724", "text": "def randomness_reduction(self, entropy_src, entropy_branch):\n return (round(entropy_src - entropy_branch, 3))", "title": "" }, { "docid": "d6d4c1eec2f0aaa11e2bf886f488c5a5", "score": "0.4984068", "text": "def cpu_jump_to_address(self):\n self.cpu_registers['pc'] = self.cpu_operand & ADDRESS_13", "title": "" }, { "docid": "dc32b4d339a0f0729886b2d4a516cc99", "score": "0.49778956", "text": "def test_fibonacci_case_1():\n a = 1\n actual = 1\n assert fibonacci(a) == actual", "title": "" }, { "docid": "e5c82428a4fd725de6316c93378cd030", "score": "0.49774992", "text": "def __call__(self, prediction, reference):\n\n def length(nodes):\n return np.sum([\n self.graph.edges[edge].get(self.weight, 1.0)\n for edge in zip(nodes[:-1], nodes[1:])\n ])\n\n coverage = np.mean([\n np.exp(-np.min([ # pylint: disable=g-complex-comprehension\n self.distance[u][v] for v in prediction\n ]) / self.threshold) for u in reference\n ])\n expected = coverage * length(reference)\n score = expected / (expected + np.abs(expected - length(prediction)))\n return coverage * score", "title": "" }, { "docid": "8aaff73eedb4f02ed147109bd84e1018", "score": "0.4975684", "text": "def _op_prob_and_check_equal_registers(self, opcode, 
addr, push_reg_readed):\n reg_equals_inst = False\n c, t = 0, 0\n for i in self._program[addr]:\n if not i.ignore:\n t += 1\n # Include the register information\n # i.e. we are looking for a pop with equal registers\n if i.is_a(opcode):\n c += 1\n if not reg_equals_inst:\n a = True\n pop_reg_read = i.registers_written()\n for r in pop_reg_read:\n if not (r in push_reg_readed or r is AReg.PC):\n a = False\n break\n for r in push_reg_readed:\n if not (r in pop_reg_read or r is AReg.LR or r is AReg.SP):\n a = False\n break\n if a:\n reg_equals_inst = True\n p1 = c / t\n return p1, reg_equals_inst", "title": "" }, { "docid": "7ed16e0ee16425632e1defb0f237191e", "score": "0.49676925", "text": "def test_find_bouncy_percentage(self):\n pct_50, _, _ = find_bouncy_percentage(50)\n\n self.assertEqual(pct_50, 538)\n\n pct_90, _, _ = find_bouncy_percentage(90)\n\n self.assertEqual(pct_90, 538)", "title": "" }, { "docid": "2e65d096fc4ff535dc99555880c78e40", "score": "0.4967685", "text": "def follow_branch(node, branch):\n count = 0\n if branch:\n matches = [child for child in node.children if match_node_pattern(child, branch[0])]\n if not matches:\n return(0)\n\n count += sum(follow_branch(match, branch[1:]) for match in matches)\n return(count)\n else:\n return(1)", "title": "" }, { "docid": "a5d85447e2f21172670f3257ac575e79", "score": "0.49663544", "text": "def simulate_match(pl1, pl2):\n score1 = score2 = 0\n leading = 1\n for i in range(30):\n if leading == 1:\n if (random() < pl1) & (random() > pl2):\n score1 += 1\n elif (random() < pl1) & (random() < pl2): #chaos 50%\n if random() < 0.5:\n score1 += 1\n else:\n leading = 2\n else:\n if (random() < pl2) & (random() > pl1):\n score2 += 1\n elif (random() < pl2) & (random() < pl1): #chaos 50%\n if random() < 0.5:\n score2 += 1 \n else: leading = 1\n if score1 > score2:\n return 1\n else:\n return 2", "title": "" }, { "docid": "85965b29fda412fdb92f153839aab14c", "score": "0.4961949", "text": "def prob_dealer_bust(showing):\n case_1 = (shoe[0] / cards_left) * dealer_cards_for_bust(showing + 1) / cards_left\n case_2 = (shoe[1] / cards_left) * dealer_cards_for_bust(showing + 2) / cards_left\n case_3 = (shoe[2] / cards_left) * dealer_cards_for_bust(showing + 3) / cards_left\n case_4 = (shoe[3] / cards_left) * dealer_cards_for_bust(showing + 4) / cards_left\n case_5 = (shoe[4] / cards_left) * dealer_cards_for_bust(showing + 5) / cards_left\n case_6 = (shoe[5] / cards_left) * dealer_cards_for_bust(showing + 6) / cards_left\n case_7 = (shoe[6] / cards_left) * dealer_cards_for_bust(showing + 7) / cards_left\n case_8 = (shoe[7] / cards_left) * dealer_cards_for_bust(showing + 8) / cards_left\n case_9 = (shoe[8] / cards_left) * dealer_cards_for_bust(showing + 9) / cards_left\n case_10 = (shoe[9] / cards_left) * dealer_cards_for_bust(showing + 10) / cards_left\n #prob that the dealer will bust on the next card\n bust_on_next = case_1 + case_2 + case_3 + case_4 + case_5 + case_6 + case_7 + case_8 + case_9 + case_10\n return bust_on_next", "title": "" }, { "docid": "50ab89a7d4ec3c565c50de4adcd75bbb", "score": "0.4959219", "text": "def bf_resolve_jump(state):\n # CCall won't give us the addr of the current instruction, so we have to figure that out. 
Ugh.\n real_ip = state.se.any_int(state.ip)\n offset = 0\n while True:\n inst = chr(state.mem_concrete(real_ip + offset, 1))\n if inst == \"]\" or inst == '[':\n addr = real_ip + offset\n break\n offset += 1\n # We don't have a persistent place to compute the jump table, and because brackets can be nested, we must construct\n # the full table each time instead of doing a scan back/forward.\n # Some day, if we ever get a nice place to put this, this should only be computed once.\n jtable = _build_jump_table(state)\n real_ip = state.se.any_int(addr)\n try:\n return (claripy.BVV(jtable[real_ip], 64), [])\n except KeyError:\n raise ValueError(\"There is no entry in the jump table at \" + repr(real_ip))", "title": "" }, { "docid": "cf714f98b1620bb00e9e056ad90cae41", "score": "0.49571583", "text": "def _jumpThreshold(self): # DEBUGGING\n return (self.instructionRamData >> 10) & (2 ** 16 - 1)", "title": "" }, { "docid": "fd03f5652416371522ad4bf68c13e470", "score": "0.49533373", "text": "def test_fibonacci_case_0():\n a = 0\n actual = 0\n assert fibonacci(a) == actual", "title": "" }, { "docid": "f940fed9c29f4d68c324a586487c3752", "score": "0.49448133", "text": "def test_fibonacci(self):\n self.assertEqual(samtestcode.fibonacci(9),34)", "title": "" }, { "docid": "91910f5b2f9799cb046f12be5bd22e99", "score": "0.4943745", "text": "def test_viterbi_probability(self):\n state_sequence, probability = self.part_1b_answer\n\n msg = ('incorrect probability. received {}, but expected {}'\n '').format(round(probability, 6), self.correct_probability)\n print \"my\", probability, \"true\", self.correct_probability\n self.assertTrue(round(probability, 6) == self.correct_probability, msg)", "title": "" }, { "docid": "459b4ae4bb9925ce88eaa47966eba06f", "score": "0.4942266", "text": "def ballHit(state):\n paddle_xpos = state[TOP_PADDLE_ROW, SCREEN_L:SCREEN_R, 0]\n paddlex = next((i for i, x in enumerate(paddle_xpos) if x), MIDDLE_X)\n\n ball_xpos = np.sum(state[BOTTOM_BLOCK_ROW:TOP_PADDLE_ROW, SCREEN_L:SCREEN_R, 0], axis=0)\n ballx = next((i for i, x in enumerate(ball_xpos) if x != 0), MIDDLE_X)\n\n ball_ypos = np.sum(state[BOTTOM_BLOCK_ROW:TOP_PADDLE_ROW, SCREEN_L:SCREEN_R, 0], axis=1)\n # unlike featureExtractor, add BOTTOM_BLOCK_ROW because bally otherwise offset relative to TOP_PADDLE_ROW\n # (don't need to do this for ballx and paddlex because they're in the same frame of reference (only black space considered))\n bally = next((i for i, x in enumerate(ball_ypos) if x != 0), MIDDLE_Y) + BOTTOM_BLOCK_ROW\n\n ##reward if exact hit: ballx matches paddlex and bally matches paddle y\n if ballx - paddlex < PADDLE_LEN and ballx - paddlex > 0 and abs(bally - TOP_PADDLE_ROW) < 10 :\n ## alternatively, reward if ball really close to hitting paddle\n #if abs(ballx - paddlex) < 20 and abs(bally - TOP_PADDLE_ROW) < 15 :\n return True\n return False", "title": "" }, { "docid": "d609f39f812103ac18510a8bc08c830c", "score": "0.49418616", "text": "def probability(self, state, action):\n pass", "title": "" }, { "docid": "64ff64f91531acaaea2d7d4239f2f82c", "score": "0.49403083", "text": "def jumpahead(self, n):\n pass", "title": "" }, { "docid": "64ff64f91531acaaea2d7d4239f2f82c", "score": "0.49403083", "text": "def jumpahead(self, n):\n pass", "title": "" }, { "docid": "a29f217b27b6cf08d02d65e12f451461", "score": "0.49375582", "text": "def decide(self, func_addr):\n pass", "title": "" }, { "docid": "177cfc886e269781026b6db73c58b60b", "score": "0.49363175", "text": "def describe_condition_for_branch(self, branch: int, 
shorten=False) -> str:", "title": "" }, { "docid": "2d4620fd830288937c544c6fcba18c24", "score": "0.49282894", "text": "def __init__(self):\n self.register = [0] * 8\n self.memory = [0] * 256 #256 bytes of memory\n self.pc = 0 # Program Counter, address of the currently executing instruction\n self.fl = 0 # Flag\n self.branchtable = {}\n self.branchtable[HLT] = self.handle_hlt\n self.branchtable[LDI] = self.handle_ldi\n self.branchtable[PRN] = self.handle_prn\n self.branchtable[MUL] = self.handle_mul\n self.branchtable[ADD] = self.handle_add\n self.branchtable[CMP] = self.handle_cmp\n \n self.branchtable[PUSH] = self.push\n self.branchtable[POP] = self.pop\n self.branchtable[CALL] = self.call\n self.branchtable[RET] = self.ret\n self.branchtable[JMP] = self.jmp\n self.branchtable[JNE] = self.jne\n self.branchtable[JEQ] = self.jeq\n\n self.sp = 7\n self.register[self.sp] = 0xf4 # initialize stack pointer to empty stack", "title": "" }, { "docid": "e9a8380467210aa4247bbacbbbc17958", "score": "0.4926496", "text": "def test_jne(self):\n for i in range(NUM_TESTS):\n intel_machine.re_init()\n label_addr = random.randint(FIRST_INST_ADDRESS, MAX_INSTRUCTIONS)\n intel_machine.labels[\"test_label\"] = label_addr\n zero_flag = random.getrandbits(1)\n intel_machine.flags[\"ZF\"] = zero_flag\n assemble(\"jne test_label\", intel_machine)\n if(not zero_flag):\n self.assertEqual(intel_machine.get_ip(), label_addr)\n else:\n self.assertEqual(intel_machine.get_ip(), 1)", "title": "" }, { "docid": "8ed2ce8854288e37dfe53b477cdb2cf8", "score": "0.4920139", "text": "def a_star_heuristic(state):\n\n\n return 0.0", "title": "" }, { "docid": "328871c3b44386d6be63259d22d5ba62", "score": "0.49123836", "text": "def test_fibonacci_case_5():\n a = 5\n actual = 5\n assert fibonacci(a) == actual", "title": "" }, { "docid": "bbad7e3c6ff5d5d3c97bce1ad6d2f5bd", "score": "0.4910638", "text": "def jumpahead(self, n):\n pass", "title": "" }, { "docid": "e9ad9801d99fb631b3b20528fd516e68", "score": "0.49017912", "text": "def get_random_branching_node(self):\n (I,) = np.where(self.branch_order[self.n_soma:] == 2)\n if(len(I) == 0):\n n = Node()\n n.type = 'empty'\n else:\n I += self.n_soma\n i = np.floor(len(I)*np.random.rand())\n n = self.nodes_list[I[i]]\n return n", "title": "" }, { "docid": "97ab1283c554efb2e1a9be1a0943ec19", "score": "0.48956534", "text": "def compute_expected_return(gamble):\r\n return ((gamble[2] * gamble[3]) +\r\n (gamble[4] * gamble[5]) +\r\n (gamble[6] * gamble[7]))", "title": "" }, { "docid": "541ba2ce19a09328704b1ebcd5980a67", "score": "0.48887214", "text": "def test_success_prob(n_basis, br, prob_ref):\n prob = qml.resource.FirstQuantization.success_prob(n_basis, br)\n\n assert prob == prob_ref", "title": "" }, { "docid": "29f3dc5ed14a087532c1e0f2292400a3", "score": "0.48673746", "text": "def test_check_returned_value(self):\n self.assertTrue(self.bgen._return_probs)", "title": "" }, { "docid": "aa89b7c47ba8255cebfd78597e6af7c2", "score": "0.48665473", "text": "def test_binary_balance(target_analysis_binary):\n\n _, _, y_train, y_test = target_analysis_binary\n plot.target_analysis(y_train)", "title": "" }, { "docid": "7d9d9e1fdc78d7a161cb47075ed9c288", "score": "0.48632568", "text": "def is_always_branch_patch_available(self, data, addr):\n\t\tdata = str(data)\n\t\tbuf = (ctypes.c_ubyte * len(data))()\n\t\tctypes.memmove(buf, data, len(data))\n\t\treturn core.BNIsArchitectureAlwaysBranchPatchAvailable(self.handle, buf, addr, len(data))", "title": "" }, { "docid": "3c6ff994c7c2dabbe9cb291e786b5ecd", 
"score": "0.48620036", "text": "def main_8():\n\n n = 8\n stabs = \"ZIZZZIII IZZZIZII ZIIIZIZZ XXXIIIXI XXIXIIIX IIXIXXXI\"\n logops = \"IIXXIIII XIIIXIII ZZIIIIII IIZIIIZI\"\n code = Code(n, stabs, logops)\n code.check()\n\n #vs = list(code.get_encoded())\n v0 = code.get_encoded()\n v1 = code.logops[0] * v0\n v2 = code.logops[1] * v0\n v3 = code.logops[1] * v1\n\n for stab in code.stabs:\n assert stab*v0 == v0\n assert stab*v1 == v1\n assert stab*v2 == v2\n assert stab*v3 == v3\n\n assert v0 != v1\n\n # these are n-qubit operators:\n CZ = lambda i,j : Z.control(i-1, j-1, rank=n)\n CX = lambda i,j : X.control(i-1, j-1, rank=n)\n SWAP = lambda i,j : CX(i,j)*CX(j,i)\n\n #A = CZ(1,3)*CZ(4,5)*CZ(6,8)*CZ(2,7) # not a weak duality !\n\n for A in [\n #(I @ I @ I @ I @ I @ I @ I @ I)\n (I @ S @ I @ ~S @ S @ I @ ~S @ I)*CZ(1,6)*CZ(3,8), # logical S @ ~S\n #(S @ I @ ~S @ I @ I @ S @ I @ ~S)*CZ(2,5)*CZ(4,7), # logical S @ ~S\n #(I @ ~S @ I @ S @ ~S @ I @ S @ I)*CZ(1,6)*CZ(3,8), # logical ~S @ S\n #(S @ I @ I @ ~S @ I @ S @ ~S @ I)*CZ(2,5)*CZ(3,8), # logical S @ ~S\n #(H @ H @ H @ H @ H @ H @ H @ H) * SWAP(1,6) * SWAP(3,8), # fail....\n ]:\n\n #print(opstr(A))\n \n assert A*code.P == code.P*A\n #P1 = A*code.P*~A\n #assert(P1 == code.P)\n \n vs = [v0, v1, v2, v3]\n op = []\n for u in vs:\n u = A*u\n row = []\n for v in vs:\n r = u.dag() * v\n row.append(r)\n print(\"%.2f+%.2fj\"%(r.real, r.imag), end=\" \")\n print()\n op.append(row)\n \n op = numpy.array(op)\n #op.shape = (2,2,2,2)\n #print(op)\n\n print()", "title": "" }, { "docid": "a72c850deeb3f114b797515aed9116e3", "score": "0.48567745", "text": "def test_worker():\n assert len(set(visited)) == 16", "title": "" }, { "docid": "d1dcdb61d8dade09e9a88d7f5438f2c3", "score": "0.48492956", "text": "def __init__(self):\n self.ram = [0] * 256\n self.reg = [0] * 8\n self.SP = 7\n self.reg[self.SP] = 0xF4\n self.IR = 0\n self.pc = 0 # counter\n self.flag_E = 0\n self.flag_G = 0\n self.flag_L = 0 \n self.running = True\n self.branchtable = {}\n self.branchtable[LDI] = self.handle_LDI\n self.branchtable[PRN] = self.handle_PRN\n self.branchtable[MUL] = self.handle_MUL\n self.branchtable[ADD] = self.handle_ADD\n self.branchtable[HLT] = self.handle_HLT\n self.branchtable[PUSH] = self.handle_PUSH\n self.branchtable[POP] = self.handle_POP\n self.branchtable[CALL] = self.handle_CALL\n self.branchtable[RET] = self.handle_RET\n self.branchtable[CMP] = self.handle_CMP\n self.branchtable[JMP] = self.handle_JMP\n self.branchtable[JNE] = self.handle_JNE\n self.branchtable[JEQ] = self.handle_JEQ\n #stretch\n self.branchtable[AND] = self.handle_AND\n self.branchtable[OR] = self.handle_OR\n self.branchtable[XOR] = self.handle_XOR\n self.branchtable[NOT] = self.handle_NOT\n self.branchtable[SHL] = self.handle_SHL\n self.branchtable[SHR] = self.handle_SHR\n self.branchtable[MOD] = self.handle_MOD", "title": "" }, { "docid": "f43c56008fbc9f68fe4183e457631092", "score": "0.48478696", "text": "def B(p):\n if p < 0:\n raise ValueError(\"Something wrong has happened\")\n elif p == 1:\n return 0\n elif p == 0:\n return 1\n return -(p * np.log2(p) + (1 - p) * np.log2(1 - p))", "title": "" }, { "docid": "d929e0c613d3820492298926406372a0", "score": "0.48471344", "text": "def randomstate():\n return check_random_state(1337)", "title": "" }, { "docid": "29dfd815e3157222aa23f05b7c29600b", "score": "0.4846954", "text": "def test_evaluate_fitness(self):\n\t\ttest = BitPopulation([self.chromosome_a, self.chromosome_a, self.chromosome_a, self.chromosome_a], 1, 
\"0100\")\n\t\ttest.evaluate_fitness()\n\t\tself.assertEqual(test.optimal, True)\n\t\tself.population.evaluate_fitness()\n\t\tself.assertEqual(self.population.optimal, False)", "title": "" }, { "docid": "2ede10cda50be6ecc3752f3f68e2ebcd", "score": "0.48386478", "text": "def dfs_tree_randomised(self): \n stack = [self.start]\n found = False\n time = 0\n while not found and stack:\n #pop the deepest node from the stack\n current_state = stack.pop()\n if time < 100:\n print(\"Parent state: \", current_state)\n #get the neighbouring states\n neighbours = list(self.get_next_states(current_state))\n if time < 100:\n print(\"Its neighbouring states: \", neighbours)\n #randomly picks a state from the neighbouring states\n neighbour = neighbours[random.randrange(len(neighbours))]\n #count the explored nodes\n time += 1\n #check if the state is the goal state\n if neighbour[0:self.tiles_no] == self.goal[0:self.tiles_no]:\n found = True\n break\n #add node to the stack\n stack.append(neighbour) \n if found:\n print(time)\n return time", "title": "" }, { "docid": "44027ab3695da6e96a059b46db1e54dd", "score": "0.48370445", "text": "def check(self, workbench, stack):\n raise NotImplementedError # pragma NO COVERAGE", "title": "" }, { "docid": "608277c12b751ad0819b8a44ad342bb0", "score": "0.4835149", "text": "def opt_branch_len(node):\n if not hasattr(node, \"branch_neg_log_prob\") or node.branch_neg_log_prob is None:\n return 0.0\n return min_interp(node.branch_neg_log_prob)", "title": "" }, { "docid": "e9b355eb4c744a3f16d892e0f6e8f605", "score": "0.4833707", "text": "def calculate_hit_roll(probability):\r\n hit_roll = ((random.randrange(0, 100) + (random.randrange(0, 100))) / 2)\r\n if hit_roll <= probability:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "089ec45d10fb00723b9c2b17ea041db8", "score": "0.48291737", "text": "def go(myarray: List[float], branch_factor: int):\n pass", "title": "" }, { "docid": "3051c0f53c28fd0f476726779a632219", "score": "0.4829135", "text": "def test_match(self):\n\n string = 'string to be hashed'\n known_hash = -2513295840610095741\n result = self._bt['func'](string)\n self.assertEqual(result, known_hash)", "title": "" }, { "docid": "5b4a039a101727764cd738eebdc204c6", "score": "0.4828621", "text": "def test_objective_function(states1):\n assert objective_function(states1) == 17", "title": "" }, { "docid": "2d99f10a37f09d798e66ad1573ec493c", "score": "0.48263913", "text": "def test_positive_case(test_input):\n assert check_fibonacci(test_input)", "title": "" }, { "docid": "079d8b46db79379b0b51600ad0ca571f", "score": "0.482054", "text": "def _compute_registers(self, inst, cpmd):\n if inst.is_branch or inst.is_push_pop:\n return\n r = inst.storages_used()\n try:\n # Assuming independence is faster than the actual probabilities\n av = 1\n for rr in r:\n #av *= self._collector.storage_count[rr] / cpmd.address_with_reg[rr]\n av = min(av, self._collector.storage_count[rr] / cpmd.address_with_reg[rr])\n pr = av\n except KeyError:\n pr = 0\n inst.scores_by_rule['pr'] = pr", "title": "" }, { "docid": "ec4d1b8daced5236d70090928fd24b6e", "score": "0.48165113", "text": "def _indicator(probability):\n if probability < 0 or probability > 1:\n raise ValueError('probability should be from 0 to 1')\n\n return random.random() <= probability", "title": "" }, { "docid": "b75f9916029d170a8ebfe068d5396ed7", "score": "0.48144552", "text": "def test_jumping(program: solution2.OpCodes, comparison: int, expected: int, monkeypatch):\n monkeypatch.setattr(\"builtins.input\", 
lambda msg: comparison)\n result_output = []\n monkeypatch.setattr(\"builtins.print\", lambda val: result_output.append(val))\n\n processor = solution2.Processor()\n processor(program)\n assert result_output[0] == expected", "title": "" }, { "docid": "651a7bfb0875c59708f79b76d182641f", "score": "0.48119867", "text": "def expect_prob(A, B):\n return 1 / (1 + 10 ** ((B - A) / 400))", "title": "" } ]
b01a5967c443eb60787ab383abd1670a
Loads the given schema file
[ { "docid": "f636b948ad3143cd695eff59d138a523", "score": "0.72388625", "text": "def _load_json_schema(filename):\n\n relative_path = join('.', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())", "title": "" } ]
[ { "docid": "6e4995ac5e247b12c5dfe531342cb979", "score": "0.77058876", "text": "def _load_schema_from_local_file(file_name):\n print \"Loading schema from file '{}'\".format(file_name)\n try:\n with open(file_name) as schema_file:\n schema = pickle.load(schema_file)\n return schema\n except Exception as err:\n print err, \"\\nUnable to load schema from file '{}'\".format(file_name)\n sys.exit(1)\n pass", "title": "" }, { "docid": "180d1768290f68b2b62102bfc0c4aec2", "score": "0.7254769", "text": "def load_json_schema(self, filename):\n absolute_path = '{}/{}'.format(self.schema_location, filename)\n with open(absolute_path) as schema_file:\n schemas_file = schema_file.read()\n return jsonref.loads(schemas_file, jsonschema=True)", "title": "" }, { "docid": "3bd831f7e9151442d2dac31bcb752921", "score": "0.722566", "text": "def load_schemas():\n with open(app.config['BASE_DIR'] + '/scripts/db/schema.sql', 'r') as ddlfile:\n ddltext = ddlfile.read()\n db.session().execute(text(ddltext))\n std_commit()", "title": "" }, { "docid": "91b1b9037ea7fd4587c23922a4daa8a6", "score": "0.71481013", "text": "def load_schema(self):\n if self.type == 'fid':\n self._schema = SchemaFid(self)\n elif self.type == '2dseq':\n self._schema = Schema2dseq(self)\n elif self.type == 'rawdata':\n self._schema = SchemaRawdata(self)\n elif self.type == 'ser':\n self._schema = SchemaSer(self)\n elif self.type == 'traj':\n self._schema = SchemaTraj(self)", "title": "" }, { "docid": "477423fe961716ab75cbbed122dcab98", "score": "0.7068649", "text": "def load_schema(self) -> dict:\n with open(self.path, encoding=\"utf-8\") as file:\n content = file.read()\n return json.loads(content) if \".json\" in self.path else yaml.load(content, Loader=yaml.FullLoader)", "title": "" }, { "docid": "b5824b4633825f9cd4c0a55feb053b83", "score": "0.7038991", "text": "def load_schema(file_name):\n if not os.path.isfile(file_name):\n print (\"Unable to locate file %s\" % file_name)\n sys.exit(1) \n f = open(file_name)\n file_contents = f.read()\n f.close()\n# \n# with file(file_name) as f:\n# file_contents = f.read()\n try:\n # use use ast.literal_eval to parse\n pydict = ast.literal_eval(file_contents)\n except Exception as e:\n print (\"** Unable to parse file '%s' (should be mostly JSON)\" % file_name)\n print (\"Error is: %s\" % e)\n sys.exit(1)\n assert isinstance(pydict, dict), \"** File '%s does not contain python dictionary\" % file_name\n return pydict", "title": "" }, { "docid": "7c52e2d7b2dbd64e97210fe43d841ac0", "score": "0.6963822", "text": "def read_schema(schema):\n with open(schema_path(schema)) as schema_file:\n return schema_file.read()", "title": "" }, { "docid": "8742881a0cef062cd5b8cac9f2f37dd0", "score": "0.6871677", "text": "def _parse_schema(filename, file_data_dict):\r\n if filename not in file_data_dict:\r\n raise RuntimeError(\"Could not parse XML schema file '%s', no such file\" % filename)\r\n \r\n schema_data = file_data_dict[filename]\r\n \r\n import lxml.etree\r\n \r\n parser = lxml.etree.XMLParser()\r\n class Resolver(lxml.etree.Resolver):\r\n def resolve(self, url, id, context):\r\n if url not in file_data_dict:\r\n log.error(\"Could not resolve schema file '%s', no such file\" % url)\r\n raise RuntimeError(\"No file named '%s'\" % url)\r\n data = file_data_dict[url]\r\n return self.resolve_string(data, context)\r\n parser.resolvers.add(Resolver())\r\n \r\n try:\r\n schema_doc = lxml.etree.fromstring(schema_data, parser=parser)\r\n schema = lxml.etree.XMLSchema(schema_doc)\r\n except lxml.etree.LxmlError, e:\r\n 
raise RuntimeError(\r\n \"Error parsing schema file '%s': %s: %s\" \\\r\n % (filename, e.__class__.__name__, str(e)))\r\n return schema", "title": "" }, { "docid": "801f652554ff5676634a10dce080aef9", "score": "0.6781162", "text": "def importFile(self):\n\n self.log(\"Importing schemas from file \" + self.file_path)\n LOG(\"Importing schemas from file\", INFO, self.file_path)\n\n doc = ElementTree(file=self.file_path)\n self.updateSchemas(doc.getroot())", "title": "" }, { "docid": "a8bf8740826efc6e8f11f3321b1172a8", "score": "0.6778249", "text": "def read_schema(path):\n result = schema_pb2.Schema()\n contents = file_io.read_file_to_string(path)\n text_format.Parse(contents, result)\n return result", "title": "" }, { "docid": "04300570f592f3a366ae630a6210a2b0", "score": "0.6759979", "text": "def _load_xsd(self):\n \n try:\n self._xsd_doc = et.XMLSchema(et.parse(self._xsd_file)) # @UndefinedVariable\n except (et.XMLSyntaxError, et.XMLSchemaParseError), e: # @UndefinedVariable\n raise ConfigError(e.message, self._xsd_file)", "title": "" }, { "docid": "9db51c5acd3a457554a4b5cc63bbc392", "score": "0.6712271", "text": "def load_schema(version, name='isp'):\n schemapath = os.path.join(path, str(version), '%s.json'%(name,))\n with open(schemapath) as f:\n return json.load(f)", "title": "" }, { "docid": "3e10fb63bada0e2df3fc16d54ccfacc9", "score": "0.6710026", "text": "def _load_schema(buf):\n reader = pa.RecordBatchStreamReader(buf)\n return reader.schema", "title": "" }, { "docid": "1d6b2c43856b09e88f12500ae61df201", "score": "0.6645302", "text": "def load_schema(self):\n # entity types are given explicitly in self.natural_entity_types\n super().load_schema()\n\n # load relation types from file\n with open(os.path.join(self.data_dir(), f'schemas.json'), 'r') as f:\n types = json.load(f)\n\n # self.entity_types = {name: EntityType(\n # natural=self.to_natural_entity_type(name),\n # ) for name in types[2].values()}\n\n self.relation_types = {name: RelationType(\n natural=self.to_natural_relation_type(name)\n ) for name in types[0].values()}", "title": "" }, { "docid": "22541d0d38dbf9c5a21b44341fab7afb", "score": "0.66235715", "text": "def load_schema(self) -> dict:\n raise NotImplementedError(\"The `load_schema` method has to be overwritten.\")", "title": "" }, { "docid": "220dba71e9a58df346a94a50b527b0cf", "score": "0.6621565", "text": "def test_init_schema_file():\n ZenodoMetadata(metadata=metadata,\n schema=schemafile)", "title": "" }, { "docid": "6971154d2b514e8dd4fce98a342b8c20", "score": "0.65907395", "text": "def get_schema(filename):\n with open(f'{filename}.json', 'r') as file:\n schema = json.load(file)\n return schema", "title": "" }, { "docid": "f0744d392477e58f0766d46ad63d7208", "score": "0.6502343", "text": "def from_path(path: PathLike) -> BaseSchema:\n with open(path) as fd:\n return from_file(fd)", "title": "" }, { "docid": "53b146b27f350ebabb166318b8b7718d", "score": "0.64939284", "text": "def load_schema(self, schema, uri=None):\n if not uri:\n uri = schema.get('id')\n if not uri:\n raise ValueError(\"No id property found; set uri param instead.\")\n\n # check the schema\n vcls = jsch.validator_for(schema)\n vcls.check_schema(schema)\n\n # now add it\n self._schemaStore[uri] = schema", "title": "" }, { "docid": "b02185f9601ef7771828571afa9ad007", "score": "0.64895576", "text": "def load_schema(self) -> dict:\n return loads(dumps(self.schema_generator.get_schema(public=True)))", "title": "" }, { "docid": "9007c7a7d3437546f3eec30d050e7672", "score": "0.6487612", "text": "def 
get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "title": "" }, { "docid": "9389edb1854d1ba85893b60a3b1527e0", "score": "0.6486269", "text": "def load_schema(self):\n super().load_schema() # this is to initialize the fake entity types 'head' and 'tail'\n\n with open(os.path.join(self.data_dir(), 'pid2name.json'), 'r') as f:\n data = json.load(f)\n self.relation_types = {\n short: RelationType(short=short, natural=description[0])\n for short, description in data.items()\n }", "title": "" }, { "docid": "1c553b3b0670548597884a3cf167adee", "score": "0.64778817", "text": "def load_schemas(self, path, schema={}):\n from glob import glob\n location = os.path.join(path, \"*.json\")\n files = glob(location)\n \n for file in files:\n with open(file) as file_object:\n obj = json.load(file_object)\n name = os.path.split(file)[1]\n name = os.path.splitext(name)[0]\n schema[name] = obj\n return schema", "title": "" }, { "docid": "a7621ef0b2347b8ae15fc037c48408a2", "score": "0.6455446", "text": "def validate(schema_file, json_file):\n\n click.echo(\"schema_file_name: {}\".format(schema_file))\n click.echo(\"json_file {}\".format(json_file))\n print(type(schema_file))\n json_file_path = Path.cwd() / json_file\n assert json_file_path.exists(), \"Json file path {} does not exist\".format(json_file_path)\n schema_file_path = Path.cwd() / schema_file\n assert schema_file_path.exists()\n\n # Load schema\n\n # Load file\n\n validator = Draft4Validator(valid_schema)", "title": "" }, { "docid": "f09fb124207d0d368683a615ea01141c", "score": "0.6431682", "text": "def load_schema_raw(name):\n json_obj = load_schema(name)\n return json_obj", "title": "" }, { "docid": "2a8952795f70f68641514fda6cb40dd1", "score": "0.6425977", "text": "def load_api_data(path):\n if path[0] != '/':\n raise ValueError('path to json schema is not absoltue.')\n\n with open(path, 'rt') as f:\n api_schema = json.load(f)\n\n return (\n api_schema,\n jsonschema.RefResolver(\n 'file://' + os.path.dirname(path) + '/',\n api_schema\n ),\n )", "title": "" }, { "docid": "43897af722f7cb7f914f2c052fc1a8c0", "score": "0.64003575", "text": "def get_schema_file_data(file):\r\n resource_path = CONFML_SCHEMA_DIR + '/' + file\r\n if pkg_resources.resource_exists('cone.validation', resource_path):\r\n data = pkg_resources.resource_string('cone.validation', resource_path)\r\n return data\r\n else:\r\n msg = \"Could not get schema file '%s': Package resource '%s' does not exist\" \\\r\n % (file, resource_path)\r\n raise ValueError(msg)", "title": "" }, { "docid": "3e56436f04c700246b1bad6de61b727a", "score": "0.6363927", "text": "def load(cls, path, schema=SCHEMA):\n with open(path, 'r') as f:\n data = yaml.load(f)\n jsonschema.validate(data, schema)\n return cls.from_json(data)", "title": "" }, { "docid": "88f9dc7160cc5d48e18b93379eae75c4", "score": "0.63378185", "text": "def load(self, config_data):\n self.config_data = config_data\n\n # Load the schema file based on the config that was provided\n try:\n schema_name = self.config_data['schema']['name']\n except KeyError as err:\n raise ConfigFileError(\"The specified schema was not found: {}. 
Try to update your ingest client library or double check your ingest job configuration file\".format(self.config_data['schema']['name']))\n with open(os.path.join(resource_filename(\"ingestclient\", \"schema\"), \"{}.json\".format(schema_name)), 'rt') as schema_file:\n self.schema = json.load(schema_file)", "title": "" }, { "docid": "67784544d89e31ae4e54f08836a1eded", "score": "0.6252947", "text": "def load_schema(self) -> dict:\n odict_schema = self.schema_generator.get_schema(None, True)\n return loads(dumps(odict_schema.as_odict()))", "title": "" }, { "docid": "8859bc1318c0ae958bf3d7c9ceae3376", "score": "0.62508506", "text": "def load(cfg, name):\n schema_path = os.path.join(cfg[\"path\"][\"data\"], make_subpath(name))\n return json.load(open(schema_path))", "title": "" }, { "docid": "320876126aa81dcdc220449aff7f6158", "score": "0.6244164", "text": "def load_ref_schema(ref_schema_uri):\n sub_schema = generate_schema_name_from_uri(ref_schema_uri)\n return import_schema_to_json(sub_schema)", "title": "" }, { "docid": "ce8c5aaab155d8609751f39e2cafda9c", "score": "0.62297195", "text": "def from_file(file: Union[IO[str], str]) -> BaseSchema:\n raw = yaml.safe_load(file)\n return from_dict(raw)", "title": "" }, { "docid": "1be7bf64a69eb04fd3125150389ec4bd", "score": "0.62004167", "text": "def loadXmlFromFile(self):\n \n try:\n xmlFD = open(\"db_mapping.xml\")\n except IOError:\n logging.debug('Error - invalid file name or path')\n return None\n else:\n try:\n self.doc = parse(xmlFD)\n except Exception:\n logging.debug(\"Error - loading fail\")\n else:\n logging.debug(\"XML document loading complete\")\n return None", "title": "" }, { "docid": "a2ba531993df6ffb32fb84f3ab881477", "score": "0.6130473", "text": "def load_schema(self):\n if self.natural_entity_types is not None:\n self.entity_types = {short: EntityType(\n short=short,\n natural=natural,\n ) for short, natural in self.natural_entity_types.items()}\n\n if self.natural_relation_types is not None:\n self.relation_types = {short: RelationType(\n short=short,\n natural=natural,\n ) for short, natural in self.natural_relation_types.items()}", "title": "" }, { "docid": "8523896e3a2ee37d2a28a7c897a72839", "score": "0.60734844", "text": "def validate_schema(file_path, file_type):\n schema = get_schema(file_type)\n if (schema is None):\n paasta_print(f'{SCHEMA_NOT_FOUND}: {file_path}')\n return\n validator = Draft4Validator(schema, format_checker=FormatChecker())\n basename = os.path.basename(file_path)\n extension = os.path.splitext(basename)[1]\n try:\n config_file = get_file_contents(file_path)\n if extension == '.yaml':\n config_file_object = yaml.safe_load(config_file)\n elif extension == '.json':\n config_file_object = json.loads(config_file)\n else:\n config_file_object = config_file\n except Exception:\n paasta_print(f'{FAILED_READING_FILE}: {file_path}')\n raise\n try:\n validator.validate(config_file_object)\n except ValidationError:\n paasta_print(f'{SCHEMA_INVALID}: {file_path}')\n\n errors = validator.iter_errors(config_file_object)\n paasta_print(' Validation Message: %s' % exceptions.best_match(errors).message)\n else:\n paasta_print(f'{SCHEMA_VALID}: {basename}')\n return True", "title": "" }, { "docid": "5b63f956bf1d9582676abd2ae5542dab", "score": "0.6032589", "text": "def _load_schemas():\n schema_dir = _get_schema_dir()\n for schema_file in os.listdir(schema_dir):\n with open(os.path.join(schema_dir, schema_file)) as f:\n for schema in yaml.safe_load_all(f):\n name = schema['metadata']['name']\n if name in _SCHEMAS:\n 
raise RuntimeError(\n 'Duplicate schema specified for: %s.' % name)\n _SCHEMAS[name] = _get_schema_info(name, schema['data'])", "title": "" }, { "docid": "b3e35ed6aa0e85220fa6e84970b1e54b", "score": "0.60122687", "text": "def test_correct_schema(filename, schema, test_data):\n filename = test_data + filename\n schema = scl.yaml_load(ZEPHYR_BASE +'/scripts/schemas/twister//' + schema)\n data = TwisterConfigParser(filename, schema)\n data.load()\n assert data", "title": "" }, { "docid": "380350d9d34e3885ebce62b02ce8f090", "score": "0.60099876", "text": "def load_config_schema(key):\n return json.loads((CONFIGS / \"{}.schema.json\".format(key)).read_text())", "title": "" }, { "docid": "51286a2867b2b959d5d2074330db4811", "score": "0.6005238", "text": "def load_test_data_json():\n absolute_path = join(dirname(__file__), 'test_data.json')\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())", "title": "" }, { "docid": "0646d8a0af83009a1ac75a2eba6ad9ec", "score": "0.59886354", "text": "def read_schema() -> pd.DataFrame:\n with _get_zip_file().open(SCHEMA_2019) as file:\n return pd.read_csv(file)", "title": "" }, { "docid": "131aeb98b8c5e6339e4c87ada664d040", "score": "0.59846824", "text": "def load_schema(schemafield):\n basedir = os.path.dirname(__file__)\n schema_json = json.load(open(os.path.join(basedir, 'schema.json')))\n def make_field(spec):\n spec['field_type'] = spec.pop('type')\n if 'fields' in spec:\n spec['fields'] = [make_field(f) for f in spec['fields']]\n return schemafield(**spec)\n return [make_field(s) for s in schema_json]", "title": "" }, { "docid": "04c314182fa58597025143fcddaa28e6", "score": "0.59693545", "text": "def get_schema():", "title": "" }, { "docid": "5751cc82b152c32ea13de24e867557c2", "score": "0.59206843", "text": "def load(filename):", "title": "" }, { "docid": "5751cc82b152c32ea13de24e867557c2", "score": "0.59206843", "text": "def load(filename):", "title": "" }, { "docid": "b21b15edc2db5c4ba9771720eec8df27", "score": "0.5907031", "text": "def load_schema(path):\n signatures = {}\n with open(path, 'r') as f:\n for line in f:\n parts = line.strip().split(' ')\n assert 3 <= len(parts)\n dist = parts[0]\n assert dist == 'bernoulli'\n feature = parts[1]\n domains = tuple(parts[2:])\n signatures[feature] = domains\n return signatures", "title": "" }, { "docid": "b6a3bb03e6d7ca2571d0f99d2b085f7a", "score": "0.590207", "text": "def read_dataset_schema(schema_path: str) -> Dict[str, List[TableColumn]]:\n schema: Dict[str, List[TableColumn]] = defaultdict(list)\n for i, line in enumerate(open(schema_path, \"r\")):\n if i == 0:\n header = [x.strip() for x in line.split(\",\")]\n elif line[0] == \"-\":\n continue\n else:\n data = {key: value for key, value in zip(header, [x.strip() for x in line.split(\",\")])}\n\n table = data.get(\"Table Name\", None) or data.get(\"Table\")\n column = data.get(\"Field Name\", None) or data.get(\"Field\")\n is_primary_key = data.get(\"Primary Key\") == \"y\"\n schema[table.upper()].append(TableColumn(column.upper(), data[\"Type\"], is_primary_key))\n\n return {**schema}", "title": "" }, { "docid": "605e871dff6275cde502d423a5c1f3a7", "score": "0.5899586", "text": "def from_uri(uri: str) -> BaseSchema:\n response = urlopen(uri)\n data = response.read()\n return from_file(data)", "title": "" }, { "docid": "45d44bece2b8e43ba6d3e5f173eb17f6", "score": "0.58993536", "text": "def load(self, filename):\n if filename:\n filename = os.path.abspath(filename)\n else:\n filename = 
os.path.abspath(self.SpreadsheetFileName)\n \n if (not filename) or (not os.path.exists(filename)):\n log_func.warning(u'Unable to load file <%s>' % filename)\n return None\n \n ext = os.path.splitext(filename)[1]\n if ext in ('.ODS', '.ods', '.Ods'):\n return self.loadODS(filename)\n elif ext in ('.XML', '.xml', '.Xml'):\n return self.loadXML(filename)\n else:\n log_func.warning(u'Unsupported file type <%s>' % ext)\n return None", "title": "" }, { "docid": "b1829423c1fed2c0d3006837f0222ab7", "score": "0.589787", "text": "def import_schema_to_json(name, store_it=False):\n\n schema_file = u\"%s.json\" % name\n file_path = os.path.join(SCHEMA_ROOT, schema_file)\n log.debug(u\"trying to load %s \" % file_path)\n schema = None\n try:\n schema_file = open(file_path, \"r\").read()\n except IOError, e:\n log.error(u\"file not found %s\" % e)\n msg = \"Could not find schema file. %s\" % file_path\n raise SalesKingException(\"SCHEMA_NOT_FOUND\", msg)\n schema = json.loads(schema_file)\n\n if schema is None:\n msg = \"loading failed foo %s\" % name\n raise SalesKingException(\"SCHEMA_NOT_FOUND\", msg)\n return schema", "title": "" }, { "docid": "6bd69e8d136f8b9c22510abd034ce3cf", "score": "0.58921456", "text": "def _load_data(data_file_path: Path) -> None:\n app.logger.info(f\"Loading data from {str(data_file_path.absolute())} ...\")\n\n with open(Path(data_file_path), \"r\") as data_file:\n _data = data_file.read()\n try:\n data = json.loads(_data)\n except ValueError:\n echo(\n f\"* data in {click_style(str(data_file_path.absolute()), fg='blue')} contains \"\n f\"{click_style('invalid JSON', fg='red')} and cannot be validated\"\n )\n raise ValueError(f\"{str(data_file_path.absolute())} is invalid JSON\")\n\n with resources.path(\n \"bas_web_map_inventory.resources.json_schemas\", \"data-resources-schema.json\"\n ) as data_resources_schema_file_path:\n with open(data_resources_schema_file_path, \"r\") as data_resources_schema_file:\n data_resources_schema_data = data_resources_schema_file.read()\n try:\n data_resources_schema = json.loads(data_resources_schema_data)\n jsonschema_validate(instance=data, schema=data_resources_schema)\n echo(\n f\"* data resources in {click_style(str(data_file_path.absolute()), fg='blue')} have \"\n f\"{click_style('valid', fg='green')} syntax\"\n )\n except ValidationError:\n echo(\n f\"* data sources in {click_style(str(data_file_path.absolute()), fg='blue')} have \"\n f\"{click_style('invalid', fg='red')} syntax\"\n )\n raise ValueError(f\"{str(data_file_path.absolute())} does not validate against JSON schema\")\n\n servers = Servers()\n for server in data[\"servers\"]:\n server = Server(\n server_id=server[\"id\"],\n label=server[\"label\"],\n hostname=server[\"hostname\"],\n server_type=server[\"type\"],\n version=server[\"version\"],\n )\n servers[server.id] = server\n\n namespaces = Namespaces()\n for namespace in data[\"namespaces\"]:\n namespace = Namespace(\n namespace_id=namespace[\"id\"],\n label=namespace[\"label\"],\n title=namespace[\"title\"],\n namespace=namespace[\"namespace\"],\n server=servers[namespace[\"relationships\"][\"servers\"]],\n )\n namespaces[namespace.id] = namespace\n\n repositories = Repositories()\n for repository in data[\"repositories\"]:\n repository = Repository(\n repository_id=repository[\"id\"],\n label=repository[\"label\"],\n title=repository[\"title\"],\n repository_type=repository[\"type\"],\n hostname=repository[\"hostname\"],\n database=repository[\"database\"],\n schema=repository[\"schema\"],\n 
namespace=namespaces[repository[\"relationships\"][\"namespaces\"]],\n )\n repositories[repository.id] = repository\n\n styles = Styles()\n for style in data[\"styles\"]:\n _namespace = None\n if style[\"relationships\"][\"namespaces\"] is not None:\n _namespace = namespaces[style[\"relationships\"][\"namespaces\"]]\n style = Style(\n style_id=style[\"id\"],\n label=style[\"label\"],\n title=style[\"title\"],\n style_type=style[\"type\"],\n namespace=_namespace,\n )\n styles[style.id] = style\n\n layers = Layers()\n for layer in data[\"layers\"]:\n _styles = []\n for style_id in layer[\"relationships\"][\"styles\"]:\n _styles.append(styles[style_id])\n layer = Layer(\n layer_id=layer[\"id\"],\n label=layer[\"label\"],\n title=layer[\"title\"],\n layer_type=layer[\"type\"],\n geometry_type=layer[\"geometry\"],\n services=layer[\"services\"],\n table_view=layer[\"table_view\"],\n namespace=namespaces[layer[\"relationships\"][\"namespaces\"]],\n repository=repositories[layer[\"relationships\"][\"repositories\"]],\n styles=_styles,\n )\n layers[layer.id] = layer\n\n layer_groups = LayerGroups()\n for layer_group in data[\"layer-groups\"]:\n _namespace = None\n if layer_group[\"relationships\"][\"namespaces\"] is not None:\n _namespace = namespaces[layer_group[\"relationships\"][\"namespaces\"]]\n _layers = []\n for layer_id in layer_group[\"relationships\"][\"layers\"]:\n _layers.append(layers[layer_id])\n _styles = []\n for style_id in layer_group[\"relationships\"][\"styles\"]:\n _styles.append(styles[style_id])\n layer_group = LayerGroup(\n layer_group_id=layer_group[\"id\"],\n label=layer_group[\"label\"],\n title=layer_group[\"title\"],\n services=layer_group[\"services\"],\n namespace=_namespace,\n layers=_layers,\n styles=_styles,\n )\n layer_groups[layer_group.id] = layer_group\n\n app.config[\"data\"] = {\n \"servers\": servers,\n \"namespaces\": namespaces,\n \"repositories\": repositories,\n \"styles\": styles,\n \"layers\": layers,\n \"layer_groups\": layer_groups,\n }", "title": "" }, { "docid": "b43dd8bf7bf091b26424ff8555ccd1b7", "score": "0.5876242", "text": "def from_db_file(cls, file):\n raise ValueError(\"MongograntStore doesn't implement from_db_file\")", "title": "" }, { "docid": "96be87e6e849c93a8f778e87107aa9b2", "score": "0.5868699", "text": "def load_local_schemas(schemas_path):\n # Loads all schemas found in path into\n # the in memory schema cache.\n logging.info(\"Loading local schemas from %s \" % schemas_path)\n\n if not os.path.isdir(schemas_path):\n raise RuntimeError(\n \"Could not load local schemas. 
\"\n \"%s is not a directory \" % schemas_path\n )\n\n for path, subdirs, files in os.walk(schemas_path):\n for f in files:\n url = 'file://' + os.path.join(path, f)\n logging.info(\"Loading schema from %s\" % url)\n scid = scid_from_uri(url)\n validate_scid(scid)\n\n if scid:\n cache_schema(\n scid,\n url_get_schema(url)\n )", "title": "" }, { "docid": "8eb88b034d2400ff300e3b6e4b0d92d6", "score": "0.5864958", "text": "def load(self, filename):\n pass", "title": "" }, { "docid": "ab81758fe388df1da632da66aaefad49", "score": "0.5862627", "text": "def loadFile(self):\n handler = open(self.file, 'ro')\n for i in handler:\n predata = self.remove_tags(i.strip())\n if predata[:2].upper() == 'AS':\n asn = predata.split(' ', 1)[0][2:]\n name = predata.split(' ', 1)[1]\n self.database[asn] = name\n else:\n continue", "title": "" }, { "docid": "ef6d910a9f78df73fa5fe0edfc9b019e", "score": "0.585899", "text": "def _loadFromFile(self):\r\n try:\r\n f = open(self._filename, \"r\")\r\n except IOError:\r\n # file not exist\r\n return\r\n line = f.readline().strip()\r\n while line != \"\":\r\n attrs = line.split(\" \")\r\n if attrs[1] == -1:\r\n self.addVertex(attrs[0])\r\n else:\r\n #self.addEdge(int(attrs[0]), int(attrs[1]), int(attrs[2]))\r\n self.addEdgeUndirectedGraph(int(attrs[0]), int(attrs[1]),int(attrs[2]))\r\n line = f.readline().strip()\r\n f.close()", "title": "" }, { "docid": "eb5358d95eeb66384a26c653e194051e", "score": "0.5857467", "text": "def _sbi_schema():\n schema_path = join(dirname(__file__), '..', 'schema',\n 'sbi_configuration_0.1.0.json')\n with open(schema_path, 'r') as file:\n schema_str = file.read()\n schema = json.loads(schema_str)\n return schema", "title": "" }, { "docid": "9707cac5396d670b8b8dd6714e53f663", "score": "0.58485234", "text": "def get_schema(file_type):\n schema_path = 'schemas/%s_schema.json' % file_type\n try:\n schema = pkgutil.get_data('paasta_tools.cli', schema_path).decode()\n except IOError:\n return None\n return json.loads(schema)", "title": "" }, { "docid": "a59ad87801aa656d0584d5099ee33687", "score": "0.5845724", "text": "def __init__(self, schema, add_all):\n self.schema = json.load(open(schema, 'r'))\n self.add_all = add_all", "title": "" }, { "docid": "d72c25ca14ebbddb83eb6810a36812f5", "score": "0.58343285", "text": "def init_db():\n db = get_db()\n with app.open_resource('schema.sql',mode='r') as f:\n db.cursor().executescript(f.read())", "title": "" }, { "docid": "9312f42f3f39568df67a4ac015e90b09", "score": "0.58277917", "text": "def __init__(self, file='/tmp/database.sqlite'):\n self.database = sqlite3.connect(file)\n\n with open(os.path.join(os.getcwd(), './model/schema.sql')) as schema:\n self.database.executescript(schema.read())", "title": "" }, { "docid": "e8300891c86b8e35bf21f17a7707eed6", "score": "0.581642", "text": "def _load_xsd(self):\n \n super(ResourceConfig, self)._load_xsd()", "title": "" }, { "docid": "1555d44855c26e77100ae492fa0e8cc3", "score": "0.58117604", "text": "def test_incorrect_schema(filename, schema, test_data):\n filename = test_data + filename\n schema = scl.yaml_load(ZEPHYR_BASE +'/scripts/schemas/twister//' + schema)\n with pytest.raises(Exception) as exception:\n scl.yaml_load_verify(filename, schema)\n assert str(exception.value) == \"Schema validation failed\"", "title": "" }, { "docid": "26bf87368f6be98b1cc5fabf7a0771f5", "score": "0.5806613", "text": "def loadFromFile(fileName):\n rel = Relation()\n\n with open(fileName, \"r\") as f:\n lines = f.readlines()\n\n try:\n relName = \"\"\n fieldNames = []\n 
fieldTypes = []\n dataPart = False\n datasets = []\n classColName = None\n skipCols = []\n skipCounter = 0\n for l in lines:\n l = l.strip()\n if \"\" == l or \"%\" == l[0]:\n continue\n\n if \"@\" == l[0]:\n if not dataPart:\n fields = re.split(\"\\s+\", l.strip())\n if \"@RELATION\" == fields[0].upper():\n relName = fields[1]\n elif \"@ATTRIBUTE\" == fields[0].upper():\n if \"NUMERIC\" == fields[2].upper() or \"REAL\" == fields[2].upper():\n fieldTypes.append(float)\n fieldNames.append(fields[1])\n else:\n classColName = fields[1]\n skipCols.append(skipCounter)\n skipCounter += 1\n elif \"@DATA\" == fields[0].upper():\n if len(fieldNames) != 0:\n if classColName is None:\n # class column is numeric, but we need a string\n classColName = fieldNames[-1]\n fieldTypes[-1] = str\n else:\n skipCols.pop() # last column is class column, don't skip it\n fieldNames.append(classColName)\n fieldTypes.append(str)\n dataPart = True\n rel.relName = relName\n rel.fieldNames = fieldNames\n elif dataPart:\n fieldsTmp = re.split(\",\", l.strip())\n fields = []\n for i, f_ in enumerate(fieldsTmp):\n if i not in skipCols:\n fields.append(f_)\n\n for i, t in enumerate(fieldTypes):\n fields[i] = t(fields[i])\n\n if len(fields) > 1:\n rel.allClasses.add(fields[-1])\n datasets.append(fields)\n rel.datasets = datasets\n rel.numDatasets = len(datasets)\n rel.activeClasses = set(rel.allClasses)\n except:\n raise Exception(\"ARFF parsing error!\")\n\n return rel", "title": "" }, { "docid": "0848d772285c1790b324990ea451458a", "score": "0.5804216", "text": "def init_db():\n\n # open the schema file and close it when done\n with current_app.open_resource(\"schema.sql\") as f:\n # get the database connection, save and close when done\n with get_db() as con:\n # begin a transaction\n with con.cursor() as cur:\n # use the file's text to execute the SQL queries within\n cur.execute(f.read())", "title": "" }, { "docid": "fb010b734d82e82ddccc9ea806282387", "score": "0.5803747", "text": "def load(path):\n with open(path, \"r\") as file:\n db_json = json.load(file)\n print(db_json)\n db = {\n lf_id: LabelingFunction.read_json(lf_json) \\\n for lf_id, lf_json in db_json.items()\n }\n return LFDB(db=db)", "title": "" }, { "docid": "8f61c4561a35fdda6a4fdfae10ace80b", "score": "0.57923263", "text": "def parseSchema(inFName):\n\n if not os.path.isfile(inFName):\n sys.stderr.write(\"File '%s' does not exist\\n\" % inFName)\n sys.exit(1)\n\n in_table = None\n in_col = None\n in_colDescr = None\n table = {}\n\n colNum = 1\n\n iF = open(inFName, mode='r')\n for line in iF:\n m = _tableStart.search(line)\n if m is not None and not _isCommentLine(line):\n tableName = m.group(1)\n table[tableName] = {}\n colNum = 1\n in_table = table[tableName]\n in_col = None\n elif _tableEnd.match(line):\n m = _engineLine.match(line)\n if m is not None:\n engineName = m.group(2)\n in_table[\"engine\"] = engineName\n in_table = None\n elif in_table is not None: # process columns for given table\n m = _columnLine.match(line)\n if m is not None:\n firstWord = m.group(1)\n if _isIndexDefinition(firstWord):\n t = \"-\"\n if firstWord == \"PRIMARY\":\n t = \"PRIMARY KEY\"\n elif firstWord == \"UNIQUE\":\n t = \"UNIQUE\"\n idxInfo = {\"type\" : t,\n \"columns\" : _retrIdxColumns(line)\n }\n in_table.setdefault(\"indexes\", []).append(idxInfo)\n else:\n in_col = {\"name\" : firstWord,\n \"displayOrder\" : str(colNum),\n \"type\" : _retrType(line),\n \"notNull\" : _retrIsNotNull(line),\n }\n dv = _retrDefaultValue(line)\n if dv is not None:\n 
in_col[\"defaultValue\"] = dv\n colNum += 1\n if \"columns\" not in in_table:\n in_table[\"columns\"] = []\n in_table[\"columns\"].append(in_col)\n elif _isCommentLine(line): # handle comments\n if in_col is None: # table comment\n\n if _containsDescrTagStart(line):\n if _containsDescrTagEnd(line):\n in_table[\"description\"] = _retrDescr(line)\n else:\n in_table[\"description\"] = _retrDescrStart(line)\n elif \"description\" in in_table:\n if _containsDescrTagEnd(line):\n in_table[\"description\"] += _retrDescrEnd(line)\n else:\n in_table[\"description\"] += _retrDescrMid(line)\n else:\n # column comment\n if _containsDescrTagStart(line):\n if _containsDescrTagEnd(line):\n in_col[\"description\"] = _retrDescr(line)\n else:\n in_col[\"description\"] = _retrDescrStart(line)\n in_colDescr = 1\n elif in_colDescr:\n if _containsDescrTagEnd(line):\n in_col[\"description\"] += _retrDescrEnd(line)\n in_colDescr = None\n else:\n in_col[\"description\"] += _retrDescrMid(line)\n\n # units\n if _isUnitLine(line):\n in_col[\"unit\"] = _retrUnit(line)\n\n # ucds\n if _isUcdLine(line):\n in_col[\"ucd\"] = _retrUcd(line)\n\n iF.close()\n return table", "title": "" }, { "docid": "704a5039c4310ab4f652591a88139e05", "score": "0.5787634", "text": "def load_schema(self):\n\n with open(self.schema_path, encoding=\"UTF-8\") as stream:\n config = next(yaml.safe_load_all(stream))\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n # For Open SDG, certain fields are required. We have to hardcode these\n # here, because _prose.yml has no mechanism for requiring fields.\n # TODO: Should we just add \"required\" properties in _prose.yml, purely\n # for this purpose?\n schema['required'] = ['reporting_status']\n\n # Convert the Prose.io metadata into JSON Schema.\n for field in config['prose']['metadata']['meta']:\n is_required = field['name'] in schema['required']\n key = self.alter_key(field['name'])\n jsonschema_field = self.prose_field_to_jsonschema(field['field'], is_required)\n schema['properties'][key] = jsonschema_field\n self.add_item_to_field_order(key)\n\n # And similarly, there are certain conditional validation checks.\n schema['allOf'] = [\n # If reporting_status is 'complete', require data_non_statistical.\n {\n 'if': {\n 'properties': { 'reporting_status': { 'enum': ['complete'] } }\n },\n 'then': {\n 'required': ['data_non_statistical']\n }\n },\n # If graphs will display, require graph_title and graph_type.\n {\n 'if': {\n 'properties': {\n 'reporting_status': { 'enum': ['complete'] },\n 'data_non_statistical': { 'const': False }\n }\n },\n 'then': {\n 'required': ['graph_title', 'graph_type']\n }\n }\n ]\n\n self.schema = schema", "title": "" }, { "docid": "d327a893179786307143213f1cc235c8", "score": "0.5767997", "text": "def load_database(self):\n pass", "title": "" }, { "docid": "dc660741eaef1511acf54ce7ee15a3e4", "score": "0.57260156", "text": "def with_schema_dir(cls, dirpath, ejsprefix='$'):\n return cls(loader.SchemaLoader.from_directory(dirpath),\n ejsprefix=ejsprefix)", "title": "" }, { "docid": "0c840d25fc3e0472418618a8bfaeb65a", "score": "0.57168776", "text": "def _init_db(self, db, schema: str):\n with self._app.app_context():\n with self._app.open_resource(schema, mode=\"r\") as f:\n db.cursor().executescript(f.read())\n db.commit()", "title": "" }, { "docid": "fd6c7f477f4f33544553f1c03b66891a", "score": "0.57158625", "text": "def test_valid_schema_from_xsd(self):\n\n self.assertIs(self.schema.validate(self.valid_xml_file), True)", "title": "" }, { "docid": 
"1d94b9bb600dd11828f6bb087a837d75", "score": "0.57154566", "text": "def load(self,f):\n if type(f) is str:\n f = open(f,\"r\")\n line = f.readline()\n self.load_line(line)", "title": "" }, { "docid": "bfd35aa04d5c4c97dd6147d9343b83fb", "score": "0.5710985", "text": "def _avro_fetch_schema(self, schema_name):\n http_client = httpclient.AsyncHTTPClient()\n url = self._avro_schema_url(schema_name)\n LOGGER.info('Loading schema for %s from %s', schema_name, url)\n try:\n response = yield http_client.fetch(url)\n except httpclient.HTTPError as error:\n LOGGER.error('Could not fetch Avro schema for %s (%s)', schema_name,\n error)\n raise ValueError('Error fetching avro schema')\n raise gen.Return(response.body)", "title": "" }, { "docid": "8ffbca6b7dac45b49fa17522c5f6c7ec", "score": "0.5705933", "text": "def _load_xsd(self):\n \n super(ToolConfig, self)._load_xsd()", "title": "" }, { "docid": "73598220ea7ce97b02579780d2890012", "score": "0.57034564", "text": "def validate_schema_helper(filename):\n with open(\"./validation/schemas/jsonschema-draft-v7.json\") as handle:\n schema = load(handle)\n\n with open(filename) as handle:\n json_to_check = load(handle)\n\n validator = Draft7Validator(schema)\n errors = [e for e in validator.iter_errors(json_to_check)]\n output_validation_errors(errors, filename)", "title": "" }, { "docid": "42a0883ababd9237809e807be0f4663a", "score": "0.56995803", "text": "def loadsqljson(file,filename = \"sql.json\"):\n path = pathlib.Path(file).resolve()\n parent = path.parent\n definitions = (parent / filename).resolve()\n try:\n definitions.relative_to(parent)\n except:\n raise ValueError(\"sql configuration file must be located relative to the module's root file!\")\n with open(definitions,'r') as f:\n data = json.load(f)\n return data", "title": "" }, { "docid": "5e1ed35efa96de07e5e3d8a74fef75c9", "score": "0.56963414", "text": "def test_configuration_schema():\n with open_text(\n \"memote.experimental.schemata\", \"configuration.json\", encoding=\"utf-8\"\n ) as file_handle:\n schema = json.load(file_handle)\n Draft4Validator.check_schema(schema) # Will raise an exception if invalid.", "title": "" }, { "docid": "7aa32c8e5c28394a977943f4ddc40670", "score": "0.5673164", "text": "def get_xmlschema_obj():\n # get the xsd files path\n path = os.path.dirname(__file__) + '\\\\schema\\\\'\n # get all files name\n list_all_file = os.listdir(path)\n \n for filename in list_all_file:\n try:\n full_path = path + filename\n xmlschema_doc = ElementTree.parse(full_path).getroot()\n xmlschema = ElementTree.XMLSchema(xmlschema_doc)\n Parse.m_xmlschema_list.append(xmlschema)\n except Exception,e:\n err_info = \"Parse xml schema files occures error:%s\" % e\n log.debug_info(err_info)", "title": "" }, { "docid": "95b7a95bf9caab6a562492ee2b376d08", "score": "0.5665148", "text": "def load_entity_type(self, filename):\n return None", "title": "" }, { "docid": "7ba9312e1377bd6e722b9146d1859ebb", "score": "0.5655998", "text": "def init_db(self):\n db = self.db\n cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n with self.app.open_resource('schema.sql', mode='r') as f:\n cur.execute(f.read())\n db.commit()", "title": "" }, { "docid": "a54d1d6513408a443a4279920681cc97", "score": "0.56558895", "text": "def loadFile(filenames,initSchema=None):\n db = MatrixDB(initSchema=initSchema)\n db.startBuffers()\n for f in filenames.split(\":\"):\n db.bufferFile(f)\n logging.info('buffered file %s' % f)\n db.flushBuffers()\n logging.info('loaded database has %d relations and %d non-zeros' % 
(db.numMatrices(),db.size()))\n return db", "title": "" }, { "docid": "57a57cf3081e0b97961196cbac2d5221", "score": "0.565526", "text": "def init_db() -> None:\n\n con = data.connection()\n\n if not os.path.isfile(data.SCHEMA_PATH):\n raise AttributeError(\n f\"No schema.sql found. Please move it to {data.SCHEMA_PATH} or edit arxivedits/data.py's SCHEMA_PATH variable.\"\n )\n\n with open(data.SCHEMA_PATH) as file:\n con.executescript(file.read())", "title": "" }, { "docid": "5e961a2b784900a4a99e17ae2fbc1a45", "score": "0.5650483", "text": "def load (self, filename):\n if self.filename is None:\n self.filename = filename\n\n stream = open(self.filename, \"rb\")\n for doc in yaml.load_all(stream):\n for table in doc:\n self.add(table)\n stream.close()", "title": "" }, { "docid": "5dc1dc37f27c439d0bf7e047ac703b33", "score": "0.56474364", "text": "def load_schema(name):\n\n schema = import_schema_to_json(name)\n\n #salesking specific swap\n #//set link relation as key name to make it easier to call these\n for item in schema['links']:\n #//set link relation as key name to make it easier to call these\n # foreach($schema->links as $key => $link)\n # {\n # $schema->links[$link->rel] = $link;\n # unset($schema->links[$key]);\n # }\n # this here seems not to work as expected\n # something is wrong\n href_value = item['href']\n rel_value = item['rel']\n schema[rel_value] = href_value\n del item\n\n ## sk use nesting of schema\n ## dynamically loading\n for prop in schema['properties']:\n value = schema['properties'][prop]\n # arrays may contain the nesting\n is_type_array = (value['type'] == 'array')\n is_type_object = (value['type'] == 'object')\n if ((is_type_array or is_type_object)\n and (_value_properties_are_referenced(value))):\n schema = _load_referenced_schema_from_properties(value, schema, prop)\n\n if is_type_array and _value_is_default_any(value) and _value_has_items_key(value):\n schema = _load_referenced_schemes_from_list(value['items'], value, schema, prop)\n\n if _value_is_required(value):\n # remove required\n schema['properties'][prop]['required'] = False\n \n # hack to bypass text format valitation to string\n if _value_is_type_text(value):\n log.debug(\"patched text to string\")\n schema['properties'][prop]['type'] = u\"string\"\n \n #ignore the readonly properties auto validation\n #if 'readonly' in value.keys() and value['readonly'] == True:\n # log.debug(\"patched required validation to none required\")\n # schema['properties'][property]['readonly'] = False\n\n # sk works on title and not name\n schema['name'] = schema['title']\n ## go one level deeper as we now have some replacements\n\n # put it to storage when done\n # if not JsonSchemaStore.is_stored(name) and (schema is not None):\n # JsonSchemaStore.copy_to_store(name, schema)\n return schema", "title": "" }, { "docid": "2aa7c40653bccedeca28b2749b5db415", "score": "0.5642843", "text": "def _load_file(self, path: Text) -> Column:\n result = dict()\n if os.path.exists(path):\n with open(path, \"rb\") as f:\n result = dill.load(f)\n return result", "title": "" }, { "docid": "693944c732c4d2dce5bcb71ad07a5a0a", "score": "0.56410134", "text": "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "title": "" }, { "docid": "85ae2ac777c0927042beea1ea1ec89dd", "score": "0.56361204", "text": "def load(file): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": 
"fcd1b843e4c8f9637e9e89e267b184d0", "score": "0.5635887", "text": "def get_schema(post_args):\n\n if post_args['schema']:\n schema_file = post_args['schema'].stream\n return json.load(schema_file)\n elif post_args['schema_url']:\n resp = requests.get(post_args['schema_url'])\n return resp.json()", "title": "" }, { "docid": "41f7d148a8ab6cf9077e7ceb26f596c3", "score": "0.5618444", "text": "def verify_schema(self, schema_filename, schema_name, sample, status_code=None):\n schema = self.load_json_schema(schema_filename)\n if status_code is None:\n schema = schema[schema_name]\n else:\n schema = schema[schema_name][status_code]\n try:\n jsonschema.validate(sample, schema)\n except jsonschema.ValidationError as e:\n print(sample)\n raise jsonschema.ValidationError(\n 'Validation error for schema {}: {}'.format(schema_name, e.message))", "title": "" }, { "docid": "09c565625c246e402a6a927175d3897f", "score": "0.5616667", "text": "def load(self, path):", "title": "" }, { "docid": "09c565625c246e402a6a927175d3897f", "score": "0.5616667", "text": "def load(self, path):", "title": "" }, { "docid": "6b6ce4bba3574dfe2d82ab4bdfdc8bf0", "score": "0.56023973", "text": "def init_schemas():\n with open(datadir / 'schemas.yml') as f:\n schema_data = yaml.safe_load(f)\n\n for schema_id in (schema_ids := [s.value for s in ODPMetadataSchema] +\n [s.value for s in ODPTagSchema] +\n [s.value for s in ODPVocabularySchema]):\n schema_spec = schema_data[schema_id]\n schema_type = schema_spec['type']\n schema = Session.get(Schema, (schema_id, schema_type)) or Schema(id=schema_id, type=schema_type)\n schema.uri = schema_spec['uri']\n\n if (md5 := schema_md5(schema.uri)) != schema.md5:\n schema.md5 = md5\n schema.timestamp = datetime.now(timezone.utc)\n print(f'Updated MD5 and timestamp for schema {schema_id}')\n\n schema.save()\n\n if orphaned_yml_schemas := [schema_id for schema_id in schema_data if schema_id not in schema_ids]:\n print(f'Warning: orphaned schema definitions in schemas.yml {orphaned_yml_schemas}')\n\n if orphaned_db_schemas := Session.execute(select(Schema.id).where(Schema.id.not_in(schema_ids))).scalars().all():\n print(f'Warning: orphaned schema definitions in schema table {orphaned_db_schemas}')", "title": "" }, { "docid": "2c48692863fff65fddeb9cd227cce37d", "score": "0.55878985", "text": "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "title": "" }, { "docid": "2c48692863fff65fddeb9cd227cce37d", "score": "0.55878985", "text": "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "title": "" }, { "docid": "69a9f3a151eb9f3c4c3c8319624b2f16", "score": "0.55855083", "text": "def load_model(self, filename):\n raise NotImplementedError", "title": "" }, { "docid": "15bfcaa758e70f9bf83c08b6a17e8f7c", "score": "0.55810344", "text": "def load_error_json_schema():\n schema_path = 'data/marathon/error.schema.json'\n schema_bytes = pkg_resources.resource_string('dcos', schema_path)\n return json.loads(schema_bytes.decode('utf-8'))", "title": "" }, { "docid": "e81894f6afa91e7156ec55a30c71cdae", "score": "0.557908", "text": "def _get_schema(self):\n from intake.source.base import Schema\n\n self.urlpath = self._get_cache(self.urlpath)[0]\n\n if self._ds is None:\n self._open_dataset()\n metadata = {}\n\n self._schema = Schema(\n datashape=None, dtype=None, shape=None, 
npartitions=None, extra_metadata=metadata\n )\n\n return self._schema", "title": "" }, { "docid": "750e5918a837db619346a2153cc23e5b", "score": "0.55560845", "text": "def init():\n create_schema()", "title": "" }, { "docid": "fcdbe11d185a236b937366738ba8e297", "score": "0.554294", "text": "def read_metadata(path):\n schema_file = os.path.join(path, 'schema.pbtxt')\n legacy_schema_file = os.path.join(path, 'v1-json', 'schema.json')\n if file_io.file_exists(schema_file):\n text_proto = file_io.FileIO(schema_file, 'r').read()\n schema_proto = text_format.Parse(text_proto, schema_pb2.Schema(),\n allow_unknown_extension=True)\n elif file_io.file_exists(legacy_schema_file):\n schema_json = file_io.FileIO(legacy_schema_file, 'r').read()\n schema_proto = _parse_schema_json(schema_json)\n else:\n raise IOError(\n 'Schema file {} does not exist and neither did legacy format file '\n '{}'.format(schema_file, legacy_schema_file))\n return dataset_metadata.DatasetMetadata(schema_proto)", "title": "" } ]
f3ac5f2be00b23cd61aace8c8e723530
linepair is the sequence of two lines; the first runs the function and assigns the output, and the second generates an array of indices where the output error exceeds a tolerance.
[ { "docid": "2cee509d40c4a062da1f7d729e373d6b", "score": "0.54965043", "text": "def __init__(self, linepair):\n\n self.linepair = linepair\n self.runline = linepair[0]\n self.testline = linepair[1]\n\n # parse the line that runs the function\n head, tail = self.runline.split('=')\n self.outstrings = [s.strip() for s in head.split(',')]\n self.outstr = ','.join(self.outstrings)\n funcstr, argpart = tail.split('(', 1)\n self.name = funcstr.strip()\n self.argstrings = [s.strip() for s in argpart[:-1].split(',')]\n self.argstr = ','.join(self.argstrings)\n\n # parse the line that checks the results\n head, tail = self.testline.split('=', 1)\n self.resultstr = head.strip() # cv.I*\n head, tail = tail.split('(', 1)\n self.teststr = tail.strip()[:-1] # argument of \"find()\"\n self.teststr = self.teststr.replace('abs(', 'np.abs(')\n\n # To be set when run() is successful\n self.outlist = None\n self.result = None # will be a reference to the cv.I* array\n self.passed = None # will be set to True or False\n\n # To be set if run() is not successful\n self.exception = None", "title": "" } ]
[ { "docid": "8ce386777d1ed111f3cb2783d0814d00", "score": "0.61191684", "text": "def get_parallel_line_pairs(slope_bucket : Dict[int, List[List[tuple]]]) -> List[List[tuple]]:\n parallel_line_pairs = []\n for slope, lines in slope_bucket.items():\n print(f'Looping for slope : {slope}:\\n')\n \n # fetch points in pair of two and form their pairs:\n index, i = 0, 0\n while (i < len(lines)):\n index = i # index = 2 * i\n line1 = lines[index]\n print(f'Now making pairs for line1: {line1}:-')\n \n def _get_line2(index, counter):\n return lines[index + counter] if (index + counter < len(lines)) else None\n \n counter = 1\n line2 = _get_line2(index, counter)\n \n # Looping to form the maximum possible pairs with line1.\n while (line2):\n distance = get_distance_between_two_parallel_lines(line1, line2)\n print(f'Dist: {distance} for {line1, line2}')\n \n #edge cases:\n # Lines should not fall into each other\n if round(distance) == 0: \n print('REJECTED: round(distance) == 0')\n counter += 1\n line2 = _get_line2(index, counter)\n continue\n \n # Lines should be overlapping\n if not are_lines_overlapping(line1, line2, slope):\n print('REJECTED: not are_lines_overlapping(line1, line2, slope)')\n counter += 1\n line2 = _get_line2(index, counter)\n continue \n \n # Lines should be inside a threshold\n if distance > MAXIMUM_DISTANCE_BETWEEN_CENTRE_LINES: \n print('REJECTED: distance > MAXIMUM_DISTANCE_BETWEEN_CENTRE_LINES')\n # break \n counter += 1\n line2 = _get_line2(index, counter)\n continue \n \n if does_lines_belong_to_same_polyline(line1, line2):\n print('REJECTED: does_lines_belong_to_same_polyline(line1, line2)')\n counter += 1\n line2 = _get_line2(index, counter)\n continue \n\n if not is_almost_parallel(line1, line2):\n print('REJECTED: is_almost_parallel(line1, line2)')\n counter += 1\n line2 = _get_line2(index, counter)\n continue \n \n print(f'Forming a pair b/w {line1, line2}, slope: {slope}\\n')\n #Make pair of these lines\n parallel_line_pairs.append((line1, line2))\n \n # Storing the width\n parallel_line_pair_meta[str((line1, line2))] = distance\n \n counter += 1\n line2 = _get_line2(index, counter)\n \n i +=1\n \n \n return parallel_line_pairs", "title": "" }, { "docid": "434fcf9afb6ae9587ba4eaef25f5a725", "score": "0.59136295", "text": "def error_management(result_list: list, source_points, target_points, n_row = 5):\n num = []\n for i,ent1 in enumerate(result_list):\n if ent1[1][-1] < 0:\n continue\n row1 = int(i/n_row)\n col1 = i % n_row\n max=0\n for j,ent2 in enumerate(result_list):\n if ent2[1][-1]<0:\n continue\n row2 = int(j/n_row)\n col2 = j%n_row\n val1 = ent1[1][0]-(col1-col2)*100*np.cos(np.deg2rad(ent1[0]))+(row1-row2)*100*np.sin(np.deg2rad(ent1[0]))\n val2 = ent1[1][1]-(row1-row2)*100*np.cos(np.deg2rad(ent1[0]))-(col1-col2)*100*np.sin(np.deg2rad(ent1[0]))\n if np.absolute(val1-ent2[1][0])<25 and np.absolute(val2-ent2[1][1])<25:\n max += 1\n if max > 7:\n num.append(i)\n num = np.array(num).astype(np.int32)\n source_points = np.asarray(source_points)\n target_points = np.asarray(target_points)\n source_points = source_points[num]\n target_points = target_points[num]\n source_points = np.fliplr(source_points)\n target_points = np.float32(target_points)*2\n target_points = np.fliplr(target_points)\n return source_points,target_points", "title": "" }, { "docid": "73f906a9050f0f18cf28e3001f637e70", "score": "0.57838476", "text": "def pairChecking(self, avg_fam_sized):\n i=0\n all_pairs_list=[] ## this will hold all of the pairs\n #Looks for pairs\n ## two loops here to 
compare every element against on another in the list.\n while i<len(avg_fam_sized)-1:\n j=i+1\n ref_line=avg_fam_sized[i]\n while j<len(avg_fam_sized):\n check_line=avg_fam_sized[j]\n if self.similarSlope(ref_line[0],check_line[0],.10)==True:\n if self.searchByNormalVector(ref_line,check_line)==True: #Checks to see if the line groups are pairs\n pair_size= ref_line[1]+check_line[1] #says how many individual lines are involved in the specific pair.\n pair=[ref_line,check_line,pair_size]\n all_pairs_list.append(pair) #if so, add them to the all pairs list.\n j+=1\n i+=1\n all_pairs_list.sort(reverse=True, key=lambda x: x[2]) #Finds the largest pairs, sorts them to the top of the list based on pair size\n if len(all_pairs_list)<2:\n self.message = \"Cannot find 2 pairs of lines. Measure on SolidWorks\"\n raise SystemError(\"Cannot find 2 pairs of lines!\")\n self.removeSimPairs(all_pairs_list.copy()) ## makes sure there are no duplicates pairs ( 1 line family existing in 2 pairs.\n\n self.clean_pairs_list= self.clean_pairs_list[:2] #Takes the top 2 pairs from clean pairs list\n\n #Breaks apart the top 2 pairs. Sorts each family from left to right\n top_lines_list=[self.clean_pairs_list[0][0],self.clean_pairs_list[0][1], self.clean_pairs_list[1][0],self.clean_pairs_list[1][1]]\n top_lines_list.sort(key=lambda x: x[0][2][0])\n self.left_line.append([top_lines_list[0][0], top_lines_list[1][0]])\n self.right_line.append([top_lines_list[2][0], top_lines_list[3][0]])", "title": "" }, { "docid": "77f65b0f48d3e0a2c2e750216af68e26", "score": "0.5640563", "text": "def spatially_analyze_pair(self, input_f, output_f):", "title": "" }, { "docid": "fa82e74d32d18062feca3f1f649200bc", "score": "0.5521013", "text": "def findTraceLines(self, points):", "title": "" }, { "docid": "97b6734026296e39d0a4b1520325669d", "score": "0.5517777", "text": "def error(line, data): #error function for a line\n \n #Metric: Sum of squared Y-axis differences, y2 - c0.x1 + c1\n err = np.sum((data[:,1] - line[0]*data[:, 0] + line[1])**2)\n return err", "title": "" }, { "docid": "17f2d684cd3c7171ad36a17ecd3a6ee7", "score": "0.5498572", "text": "def error_function(line_var, data):\n sum_of_squares_diffs = np.sum((data[:, 1] - (line_var[0] * data[:, 0] + line_var[1])) ** 2)\n return sum_of_squares_diffs", "title": "" }, { "docid": "d3b9dfc5f8bd9186458ccdcd46cd4b6c", "score": "0.5438695", "text": "def single_pair(select_point, temp_dif_select,spec,spec_min,spec_max,temp_bound,model):\n spec_location=int(temp_dif_select[select_point,0]) # first entry is row, second entry is column\n location_similar=int(temp_dif_select[select_point,1])\n temp_truth=temp_dif_select[select_point,3]\n temp_truth_sim=temp_dif_select[select_point,4]\n\n #%% according to location, find\n spec_select=spec[spec_location]\n spec_select_norm=(spec_select-spec_min)/(spec_max-spec_min)\n spec_select_norm_res=np.reshape(spec_select_norm,[1,-1])\n\n spec_select_sim=spec[location_similar]\n spec_select_sim_norm=(spec_select_sim-spec_min)/(spec_max-spec_min)\n spec_select_sim_norm_res=np.reshape(spec_select_sim_norm,[1,-1])\n #%%\n temp_pred_norm=model.predict(spec_select_norm_res)\n temp_pred_sim_norm=model.predict(spec_select_sim_norm_res)\n #%%\n temp_pred=temp_pred_norm*(temp_bound[1]-temp_bound[0])+temp_bound[0]\n abs_err=np.abs(temp_pred-temp_truth)\n\n temp_pred_sim=temp_pred_sim_norm*(temp_bound[1]-temp_bound[0])+temp_bound[0]\n abs_err_sim=np.abs(temp_pred_sim-temp_truth)\n return np.array([spec_location, 
location_similar,temp_truth,temp_truth_sim,temp_pred[0],temp_pred_sim[0]])", "title": "" }, { "docid": "2f360b428dbfe506efcb4295e0618d82", "score": "0.54321176", "text": "def test_get_right_number_of_lines(energy_low, energy_high, element, result):\n assert len(get_lines(energy_low, energy_high, element=element)) == result", "title": "" }, { "docid": "b82149778be00ad9a85722610345fec0", "score": "0.54282373", "text": "def line_error(params, args):\n x, y = args\n m, b = params[0:2]\n y_star = m * x + b\n\n return y - y_star", "title": "" }, { "docid": "db82dfef2e9821e5b84ce4f1e7c20392", "score": "0.54243743", "text": "def pair_jumps(array,threshold):\n pass", "title": "" }, { "docid": "9bd4ff6931b762f3c5a21b6f88e80b3d", "score": "0.53686094", "text": "def test_method_on_function(method,func,limits,analytical_solution,N_min,N_max):\n\n errors = [];\n hs = [];\n for N in range(N_min,N_max+1):\n (I,h) = method(func,limits,N);\n error = np.abs(I - analytical_solution);\n\n errors.append(error);\n hs.append(h);\n\n return (errors,hs);", "title": "" }, { "docid": "b8ae730d16eef2f68a3bbe2d1b268b50", "score": "0.5360841", "text": "def find_pairs(self, lines):\n near_lines = []\n for l in lines:\n near = []\n for i in xrange(len(lines)):\n if abs(l[1]-lines[i][1]) < self.angle_thresh:\n near.append(i)\n near_lines.append(near)\n\n # Find maximally distant pairs of parallel lines\n pairs = {}\n for near in near_lines:\n if len(near) < 2:\n continue\n sl = sorted(near, key=lambda x: lines[x][0])\n pairs[len(pairs)] = {\n \"min\": sl[0],\n \"max\": sl[-1],\n \"rho\": (lines[sl[0]][0] + lines[sl[-1]][0])/2.0,\n \"theta\": (lines[sl[0]][1] + lines[sl[-1]][1])/2.0,\n }\n return pairs", "title": "" }, { "docid": "6836d4fa4cc95cc0d731104341ec304f", "score": "0.5354157", "text": "def efficient_closest_pair_routine(points):\n n=len(points) #First checks the number of points being returned in recursion to the fucntion\n if n<=2: #Means that we have reached the final level of division\n if n==1: #If just one value remaining after dividing the previous level, we take the distance between points and the 2nd points as None, hence returning just the single point\n return [None,points[0],None]\n elif n==2: #If two values remain, we just find out the distance between the two and return it along with the two points\n return [dist(points[0],points[1]),points[0],points[1]]\n \n mid=n//2 #Middle point to draw the middle line\n s1=points[:mid] #The points are divided into two sets\n s2=points[mid:]\n d1=efficient_closest_pair_routine(s1) #The function is called recursively, and assigned to the new sets d1 and d2, for s1 and s2 respectively, until final level is reached\n d2=efficient_closest_pair_routine(s2)\n d=0\n if d1[0]==None and d2[0]!=None: #Takes care of the single point left case by assinging the mindistance value accordingly\n d=d2[0]\n elif d1[0]!=None and d2[0]==None:\n d=d1[0]\n else: #If the last level has two sets of two points\n d=min(d1[0],d2[0])\n\n if d==d1[0]: #Checks to which set does d belong to, and assigns the set to final\n final=d1\n else :\n final=d2\n\n for x in points: #Loop checks for point lying outside the line-d line+d set and eliminates them\n if x[0]<(points[mid][0]-d) or x[0]>(points[mid][0]+d):\n points.remove(x)\n\n points=sort_points_by_Y(points) #Points sorted by y coordinate\n l=closest_pair_in_strip(points,d) #Function is ran, in which the points are compared with the adjacent next 5 points\n\n if l == -1: #If the prev function returns -1, it means that the preexisting value for d 
was the least and hence returns final, which holds the points for that distance and the distance\n return final\n else: #If there is a new least value extracted from the prev function, we return that value\n return l", "title": "" }, { "docid": "8e1d92b94617d49abb715cffeea9ddeb", "score": "0.5294852", "text": "def warn_x_std(value_arr, path=None, ret_inv_lst=False, x=3):\n avg = np.average(value_arr)\n std = np.std(value_arr)\n invalid_index = list()\n\n for idx, value in enumerate(value_arr):\n if abs(value - avg) >= x * std:\n if not ret_inv_lst:\n if path:\n error_msg = (\n \"Line: %d, Value: %s is not located in three std range, path: %s\"\n % ((idx + 1), value, path)\n )\n else:\n error_msg = (\n \"Line: %d, Value: %s is not located in three std range of list: %s\"\n % ((idx + 1), value, \", \".join(map(str, value_arr)))\n )\n raise RuntimeError(error_msg)\n else:\n invalid_index.append(idx)\n\n print(\"[DEBUG] Len of value_arr: %d\" % len(value_arr))\n return (value_arr, invalid_index)", "title": "" }, { "docid": "37dc04f0fd2150931b96533dfb9f3055", "score": "0.5291188", "text": "def severity(NBR, NBRDist, CDist, ChangeDir, NBRoutlier, CDistoutlier, t,method='NBRdist'):\n sevindex = 0\n startdate = 0\n duration = 0\n \n notnanind = np.where(~np.isnan(CDist))[0] # remove the nan values for each pixel\n\n if method == 'NBR': # cosdist above the line and NBR<0\n outlierind = np.where((CDist[notnanind] > CDistoutlier) & (NBR[notnanind] < 0))[0]\n cosdist = CDist[notnanind]\n\n elif method == 'NBRdist': # both cosdist and NBR dist above the line and it is negative change\n outlierind = np.where((CDist[notnanind] > CDistoutlier) &\n (NBRDist[notnanind] > NBRoutlier) &\n (ChangeDir[notnanind] == 1))[0]\n\n cosdist = CDist[notnanind]\n else:\n raise ValueError\n t = t.astype('datetime64[ns]')\n t = t[notnanind]\n outlierdates = t[outlierind]\n n_out = len(outlierind)\n area_above_d0 = 0\n if n_out >= 2:\n tt = []\n for ii in range(0, n_out):\n if outlierind[ii] + 1 < len(t):\n u = np.where(t[outlierind[ii] + 1] == outlierdates)[0] # next day have to be outlier to be included\n # print(u)\n\n if len(u) > 0:\n t1_t0 = (t[outlierind[ii] + 1] - t[outlierind[ii]]) / np.timedelta64(1, 's') / (60 * 60 * 24)\n y1_y0 = (cosdist[outlierind[ii] + 1] + cosdist[outlierind[ii]]) - 2 * CDistoutlier\n area_above_d0 = area_above_d0 + 0.5 * y1_y0 * t1_t0 # calculate the area under the curve\n duration = duration + t1_t0\n tt.append(ii) # record the index where it is detected as a change\n\n if len(tt) > 0:\n startdate = t[outlierind[tt[0]]] # record the date of the first change\n sevindex = area_above_d0\n \n\n return sevindex, startdate, duration", "title": "" }, { "docid": "c0c8b825b6a69b25a3fee367d67c6829", "score": "0.525903", "text": "def main():\n errLimit = 1.0\n # grn=genfromtxt('OriginalSpots/SpA0001.txt', skiprows=1)\n # red=genfromtxt('OriginalSpots/SpA0002.txt', skiprows=1)\n grn=genfromtxt('PolishedSpots/PSpA0001.txt', skiprows=1)\n red=genfromtxt('PolishedSpots/PSpA0002.txt', skiprows=1)\n if red.shape[0] < 7:\n print \"There are not enough beads for a unique interpolation\"\n return\n\n # Remove the spots with which polishment failed. 
Keep the id and x-y location.\n grntrim=column_stack([grn[grn[:,-1]==1,0], grn[grn[:,-1]==1,4:6]])\n redtrim=column_stack([red[red[:,-1]==1,0], red[red[:,-1]==1,4:6]])\n\n #Calculate the paired distance\n sparseg = calcpairdist(grntrim)\n sparser = calcpairdist(redtrim)\n \n savetxt(\"sparsepairdgrn.txt\", sparseg, fmt='%d\\t%10.5f\\t%10.5f\\t%d\\t%d\\t%d\\t%d\\t%d\\t%10.5f\\t%10.5f\\t%10.5f\\t%10.5f\\t%10.5f')\n savetxt(\"sparsepairdred.txt\", sparser, fmt='%d\\t%10.5f\\t%10.5f\\t%d\\t%d\\t%d\\t%d\\t%d\\t%10.5f\\t%10.5f\\t%10.5f\\t%10.5f\\t%10.5f')\n\n \n grnout,redout = comparepaird(sparseg,sparser)\n cGtoR, cRtoG, eGtoR, eRtoG = calccoeff(grnout, redout) \n coeff = hstack((cGtoR, cRtoG))\n savetxt(\"coefficients_ols_nonlimit.txt\",coeff)\n\n print \"Error GtoR > \",errLimit,\" : \",(eGtoR>errLimit).nonzero()\n print \"Error RtoG > \",errLimit,\" : \",(eRtoG>errLimit).nonzero() \n savetxt(\"alignedbeadspairNonLimit.txt\", column_stack([grnout,redout,eGtoR,eRtoG]), fmt='%d\\t%10.5f\\t%10.5f\\t%d\\t%10.5f\\t%10.5f\\t%10.5f\\t%10.5f')\n indLimit = (eGtoR<=errLimit) * (eRtoG<=errLimit)\n print \"indLimit==False :\",(indLimit==False).nonzero()\n cGtoR, cRtoG, eGtoR, eRtoG = calccoeff(grnout[indLimit], redout[indLimit])\n coeff = hstack((cGtoR, cRtoG))\n savetxt(\"coefficients_ols_limit.txt\",coeff)\n\n savetxt(\"alignedbeadspairLimit.txt\", column_stack([grnout[indLimit],redout[indLimit],eGtoR,eRtoG]), fmt='%d\\t%10.5f\\t%10.5f\\t%d\\t%10.5f\\t%10.5f\\t%10.5f\\t%10.5f')", "title": "" }, { "docid": "b6c0ff0d83c4bffce5494907ce5773d6", "score": "0.5220362", "text": "def lineseg(x, y, tol):\n # indices of the first and last point of the contour\n fst = 0\n lst = len(x)\n\n # list to hold the list of segmented points\n seglist = [[x[0], y[0]]]\n\n while fst < lst:\n # find the size and position of the maximum deviation\n maxdev, idxmaxdev = maxlinedev(x[fst:lst], y[fst:lst])\n\n # while the maximum deviation from the line > tol shorten the line to\n # the point of maximum deviation by adjusting the last point\n while maxdev > tol:\n lst = fst + idxmaxdev + 1\n maxdev, idxmaxdev = maxlinedev(x[fst:lst], y[fst:lst])\n\n # add the last point for which deviation is less than tol to the\n # segemented list\n seglist.append([x[lst-1], y[lst-1]])\n # print('seglist: ', seglist)\n\n # rest the first and the last point for the next iteration\n fst = lst\n lst = len(x)\n\n return np.asarray(seglist)", "title": "" }, { "docid": "e9de66f7b75fab3e1134911e80ea2607", "score": "0.5186641", "text": "def polygon_line(line, tolerance = 0.001):\n\n last = len(line) - 1\n if last < 1: return line\n dims = len(line[0])\n manhattan = 0.0\n for d in range(dims): manhattan += abs(line[0][d] - line[last][d])\n if manhattan > tolerance: return line\n return line[:-1]", "title": "" }, { "docid": "f8784a3d0209ead99d11b91032a43512", "score": "0.5180951", "text": "def getFinalAngle(self):\n self.grouped_list.sort(key=len, reverse=True)\n if len(self.grouped_list)<4: ##This means there are not enough lines to make 2 pairs.\n self.message=self.message+\" Not enough line groups found! Measure on Solidworks\"\n raise SystemError(\" Not enough line groups found! Measure on Solidworks\")\n avg_fam_sized = []\n counter = 0\n\n while counter < 10 and counter < len(self.grouped_list): #If the line group is less than the 10th largest, we don't want it. The two biggest pairs should not exist\n # outside of the top 10 groups. 
AFter the top ten, things get innaccurate.\n avg_fam_sized.append([self.get_bin_angle(self.grouped_list[counter]),len(self.grouped_list[counter])]) #place the line, and the group size (line weight) into array\n # self.draw_line(avg_fam_sized[counter][0], 0, 0, 255) ##Used for debugging. Draws each line family in blue.\n counter += 1\n\n #Creates a list of the largest 4 line groups.\n average_family_list = avg_fam_sized.copy()\n average_family_list = average_family_list[:4]\n average_family_list.sort(key=lambda x: x[0][2][0]) #Sorts these lines from left to right based off the x value of their intercept vector.\n\n\n #Out of the top 4 lines, if the two leftmost and two rightmost lines are not similar\n # angles, the program prints a message letting the user know the angle may be inacccurate. The pairing algorithm has to be used.\n if self.similarSlope(average_family_list[0][0], average_family_list[1][0], .10) == False or self.similarSlope(average_family_list[2][0], average_family_list[3][0], .10) == False:\n self.message = self.message + \" Angle may be innacurate. Look at the red and green lines on the image. If they do not match up with B-Flex, measure on Solidworks\"\n # self.pairChecking(avg_fam_sized)\n # else:\n # self.left_line.append([average_family_list[0][0],average_family_list[1][0]])\n # self.right_line.append([average_family_list[2][0],average_family_list[3][0]])\n #\n # self.left_line.append([average_family_list[0][0], average_family_list[1][0]])\n # self.right_line.append([average_family_list[2][0],average_family_list[3][0]])\n\n self.pairChecking(avg_fam_sized) #Searches for the top 2 pairs\n\n left_line=self.left_line[0]\n right_line= self.right_line[0]\n #Draws the line families from the top 2 groups (used to calculate final angle) in yellow.\n self.draw_line(left_line[0],191,183,73);self.draw_line(left_line[1],191,183,73)\n self.draw_line(right_line[0],191,183,73);self.draw_line(right_line[1],191,183,73)\n\n actual_left = self.get_bin_angle(left_line) #combines the left 2 line families\n actual_right = self.get_bin_angle(right_line) #combines the right 2 line families.\n self.draw_line(actual_left, 0, 255, 0) #Draws left line in green\n self.draw_line(actual_right, 255, 0, 0) #Draws right line in red.\n if (actual_left[1][1] * actual_right[1][\n 1] > 0): ##makes sure the 2 vectors are in opposite diretions, so the angle is the large one (170 rather than 10)\n actual_left[1] = actual_left[\n 1] * -1 ## This makes angle calculations only valid if the actual angle is greater than 90\n final_angle = np.arccos(np.dot(actual_left[1], actual_right[1]) / (\n np.linalg.norm(actual_left[1]) * np.linalg.norm(actual_right[1])))\n final_angle = final_angle * 180 / math.pi # TODO need to make sure giving the correct angle here.\n if self.edgecase180(actual_left, actual_right) == True:\n final_angle = 180 + 180 - final_angle\n return final_angle", "title": "" }, { "docid": "a57e52c411ea2c55124a5ffbe004655f", "score": "0.517056", "text": "def calculate_err_lines(UL_array):\n lower = np.abs(np.diff(UL_array, axis=1))[:, 0]\n upper = np.sum(UL_array, axis=1)\n return lower, upper", "title": "" }, { "docid": "e079f65576c26bfbcbb32554b9de6657", "score": "0.51439863", "text": "def efficient_closest_pair(points):\n points=sort_points_by_X(points) #Points are sorted by x coordinate and passed into the routine function, as asked for\n fin=efficient_closest_pair_routine(points)\n return fin #Final answer returned", "title": "" }, { "docid": "4051a0bc6cb4ee43f5f909f4351b45ee", "score": 
"0.51377016", "text": "def extract_paths(x,i_pairs):\n paths=[]\n indcs=[]\n for i,x_ in enumerate(x):\n if (x_ > 0.5):\n pr=i_pairs[i]\n fnd=False\n j=0\n while (j < len(paths)) and not fnd:\n if (paths[j][-1][1] == pr[0]):\n paths[j].append(pr)\n indcs[j].append(i)\n fnd=True\n j+=1\n if not fnd:\n # start new path\n paths.append([pr])\n indcs.append([i])\n return (paths,indcs)", "title": "" }, { "docid": "a928aa788d0a6256c6f005b19b4823d3", "score": "0.5092025", "text": "def line_process_loss(I_opt,I_computed):\n\n\t#computed the closest;\n\tclosest=[find_nearest_element(I_opt,x) for x in I_computed];\n\n\tS=0;\n\tfor (i,c) in zip(I_computed,closest):\n\t\tS+=int(np.abs(i-c));\n\n\treturn S;", "title": "" }, { "docid": "2c961aeabf6b8be24e97059a8e92f53c", "score": "0.50817436", "text": "def xy(self,output):\n #output[-2] -average in micsec waiting for PLL unlock\n #output[-1] -average in micsec waiting for PLL lock\n self.vb.io.write(\"average unlock wait:%s average total lock wait:%s\\n\"\n %(output[-2],output[-1]))\n ll=len(output)-2\n if ll:\n xy=[]\n for i in range(0,ll,2):\n xynow=(float(output[i]),float(output[i+1]))\n flag=1\n for j in xy:\n if (j == xynow):\n flag=0\n if flag:\n xy.append(xynow)\n else:\n xy=None\n return xy", "title": "" }, { "docid": "e1e4144f53863b620e96557efebabb89", "score": "0.50591075", "text": "def error(line, data):\n\t# Metric: Sum of squared Y-axis difference\n\terr = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n\treturn err", "title": "" }, { "docid": "68d10fa540cb7a26fe2c467ae8963ac3", "score": "0.50581396", "text": "def guess_segments_lines(segments, lines, nearline_tolerance=5.0):\n ys = segments[:, 1]\n closeness = numpy.abs(numpy.subtract.outer(ys, lines)) # each row a y, each collumn a distance to each line\n line_of_y = numpy.argmin(closeness, axis=1)\n distance = numpy.min(closeness, axis=1)\n bad = distance > numpy.mean(distance) + nearline_tolerance * numpy.std(distance)\n line_of_y[bad] = -1\n return line_of_y", "title": "" }, { "docid": "a3b6fd92e4174021df7141ff6b301652", "score": "0.50525224", "text": "def fit_line(x,y,yerr,initial_index=50,final_ratio=0.5,step=10):\n pinit=[1,-1]\n final_index=int(final_ratio*len(x))\n out = optimize.leastsq(errfunc, pinit, args=(x[initial_index:final_index:step],y[initial_index:final_index:step],yerr[initial_index:final_index:step]), full_output=1)\n pfinal = out[0] #fitting coefficients\n cov=out[1] #Covariance\n \n return pfinal,cov", "title": "" }, { "docid": "59c5767288e4852e82e98d61ca8351d0", "score": "0.5050679", "text": "def error(line, data):\n # Metric: Sum of squared Y-axis differences\n err = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n return err", "title": "" }, { "docid": "5a95484c88593d2ce6187c22d64b284b", "score": "0.50417197", "text": "def matchTruth_ell1(true_timestamps, d, coeffs, segment_indices, offset, threshrange):\r\n\r\n dlen, numOfelements = np.shape(d)\r\n\r\n threshlen = len(threshrange)\r\n truemiss = np.zeros((numOfelements, threshlen))\r\n falsealarm = np.zeros((numOfelements, threshlen))\r\n nonzerocoeffs = np.zeros((numOfelements, threshlen))\r\n match = np.zeros((numOfelements, threshlen))\r\n\r\n fa_coeffs = {t:{} for t in threshrange}\r\n true_coeffs = {t:{} for t in threshrange}\r\n\r\n print(\"Computing error statistics for threshold of {} ~ {} with interval {:.3f}\".format(threshrange[0], threshrange[-1], threshrange[1]-threshrange[0]))\r\n for tidx, threshold in enumerate(tqdm(threshrange)):\r\n\r\n fa = 
fa_coeffs[threshold]\r\n\r\n true_match = {true_idx:[] for true_idx in true_timestamps}\r\n numOfmatch = np.zeros(numOfelements, dtype=int)\r\n numOfnonzerocoeffs = np.zeros(numOfelements, dtype=int)\r\n\r\n # Iterate through data\r\n for key in coeffs.keys():\r\n coeffs_seg = coeffs[key]\r\n thresholded_coeffs={fidx:{} for fidx in range(numOfelements)}\r\n for fidx in range(numOfelements):\r\n if len(coeffs_seg[fidx]['amp'])>0:\r\n indices = np.where(coeffs_seg[fidx]['amp'] * np.min(d[:, fidx]) < threshold)[0]\r\n else:\r\n indices = []\r\n\r\n if len(indices)>0:\r\n thresholded_coeffs[fidx]['idx'] = coeffs_seg[fidx]['idx'][indices]\r\n thresholded_coeffs[fidx]['amp'] = coeffs_seg[fidx]['amp'][indices]\r\n else:\r\n thresholded_coeffs[fidx]['idx'] = []\r\n thresholded_coeffs[fidx]['amp'] = []\r\n\r\n fa[key]={fidx:{'idx':np.array([], dtype=int), 'amp': np.array([])} for fidx in np.arange(numOfelements)}\r\n for fidx in np.arange(numOfelements):\r\n if len(coeffs_seg[fidx]['idx'])>0:\r\n # Need to align the local segment indices with the global indices\r\n coeffs_ts_start = segment_indices[key][0]\r\n\r\n indices = thresholded_coeffs[fidx]['idx']\r\n numOfnonzerocoeffs[fidx] += len(indices)\r\n\r\n for idx_iter, idx_value in enumerate(indices):\r\n timestamp = idx_value + coeffs_ts_start\r\n match_ts = true_timestamps[(timestamp < true_timestamps) & (true_timestamps<timestamp+offset)]\r\n\r\n if len(match_ts)>0: # Corresponding intracellular exists\r\n for elem in match_ts:\r\n true_match[elem].append(fidx)\r\n else: # The code corresponds to the false alarm\r\n fa[key][fidx]['idx'] = np.append(fa[key][fidx]['idx'], idx_value)\r\n amp = thresholded_coeffs[fidx]['amp'][idx_iter]\r\n fa[key][fidx]['amp'] = np.append(fa[key][fidx]['amp'], amp)\r\n\r\n for key, value in true_match.items():\r\n for fidx in np.arange(numOfelements):\r\n if fidx in value:\r\n numOfmatch[fidx] += 1\r\n\r\n truemiss[:, tidx] = len(true_timestamps) - numOfmatch\r\n falsealarm[:, tidx] = (numOfnonzerocoeffs - numOfmatch)/numOfnonzerocoeffs\r\n nonzerocoeffs[:, tidx] = numOfnonzerocoeffs\r\n match[:, tidx] = numOfmatch\r\n\r\n fa_coeffs[threshold] = fa\r\n true_coeffs[threshold] = true_match\r\n\r\n return truemiss, falsealarm, nonzerocoeffs, match, fa_coeffs, true_coeffs", "title": "" }, { "docid": "e7433488d84713671494e482493d16cc", "score": "0.5037586", "text": "def line_search(x_k, p_k, alpha_max, c1, c2):\n \n a_0 = 0 # Inital value of alpha (alpha_0)\n \n phi_zero = booth_func(x_k + a_0*p_k) # Function value at alpha_0\n grad_phi_zero = grad_booth_func(x_k + a_0*p_k) @ p_k # Directional Gradient of function at alpha_0 in direction of p_k\n \n # Add intial value to array\n a_arr = [a_0]\n \n # Set a_i to a random value in range [0, alpha_max]\n a_i = np.random.random_sample()*alpha_max # current alpha\n \n # Add current alpha(a_i) to array \n a_arr.append(a_i)\n \n # Counter/number of iterations\n i = 1\n \n phi_a_i_1 = phi_zero # value of function at previous alpha (i.e. at alpha_{i-1})\n a_i_1 = a_0 # previous value of alpha (alpha_{i-1})\n \n # Repeat the loop till a stop condition is met\n while True:\n # Find function value at next x_k (i.e. at x_{k+1}\n phi_a_i = booth_func(x_k + a_i*p_k)\n \n # First Wolfe condition/Armijo Condition. 
Use zoom to find good alhpa between previous and current alpha\n if phi_a_i > (phi_zero + c1*a_i*grad_phi_zero) or (phi_a_i >= phi_a_i_1 and i > 1):\n return zoom_func(x_k, p_k, a_i_1, a_i, c1, c2)\n \n # Find directional gradient at x_{k+1} \n grad_phi_a_i = grad_booth_func(x_k + a_i*p_k) @ p_k\n \n # Second Wolfe condition/Curvature condition\n if (np.abs(grad_phi_a_i) <= -c2*grad_phi_zero):\n return a_i\n \n # If direction gradient is positive, Use zoom to find good alhpa between current alpha and alpha_max\n if grad_phi_a_i >= 0:\n return zoom_func(x_k, p_k, a_i, alpha_max, c1, c2)\n \n # Save current alpha and functional value as previous values\n a_i_1 = a_i\n phi_a_i_1 = phi_a_i\n \n # Add current alpha(a_i) to array \n a_i = (alpha_max - a_i)*np.random.random_sample() + a_i\n \n # Update number of iterations\n i += 1", "title": "" }, { "docid": "ecc33353009138cb6f6e906b3ce0d2db", "score": "0.50362724", "text": "def check_if_valid(self, a, b):\n \"\"\"\n in_free_space = True\n A = b[1] - a[1]\n B = a[0] - b[0]\n C = A * (a[0]) + B * (a[1])\n if B < 0:\n #print(\"The line passing through points P and Q is:\",A, \"x \", B, \"y = \", C, \"\\n\")\n else:\n #print(\"The line passing through points P and Q is: \",A, \"x + \", B, \"y = \", C, \"\\n\")\n x_min = int(a[0] * 10)\n x_max = int(b[0] * 10)\n y_min = int(a[1] * 10)\n y_max = int(b[1] * 10)\n if x_min > x_max:\n range_x_min = x_max\n range_x_max = x_min\n else:\n range_x_min = x_min\n range_x_max = x_max\n\n if y_min > y_max:\n range_y_min = y_max\n range_y_max = y_min\n else:\n range_y_min = y_min\n range_y_max = y_max\n print(range_x_min, range_x_max, 'y', range_y_min, range_y_max)\n lista_punktow = []\n for i in range(range_x_min, range_x_max + 1):\n for j in range(range_y_min, range_y_max + 1):\n if C + 0.1 > A * i * 0.1 + B * j * 0.1 > C - 0.1:\n lista_punktow.append(np.array([i, j]))\n print(lista_punktow)\n for i in lista_punktow:\n if self.map[i[1]][i[0]] == 0:\n in_free_space = True\n print('TRUE')\n else:\n in_free_space = False\n print('FALSE')\n break\n \"\"\"\n in_free_space = True\n x_range = np.linspace(a[0]+10, b[0]+10, num=1000)\n y_range = np.linspace(a[1]+10, b[1]+10, num=1000)\n for i in range(len(x_range)):\n x = int(x_range[i]*20)\n y = int(y_range[i]*20)\n #print(self.map[383, 383])\n for i in range(-rozmiar_x/2, rozmiar_x/2):\n for j in range(-rozmiar_y / 2, rozmiar_y / 2):\n obstacle = self.map[y+j, x+i]\n if obstacle == 100:\n #print('sciana')\n in_free_space = False\n else:\n pass\n\n return in_free_space", "title": "" }, { "docid": "4ece4742f04f6e02abfe83099a980da9", "score": "0.50012904", "text": "def points_at_interval(lines, interval):\r\n points = []\r\n for line in lines:\r\n x1, y1, x2, y2 = line\r\n points.append([x1, y1])\r\n points.append([x2, y2])\r\n if dist(x1, y1, x2, y2) > interval:\r\n i = 1\r\n while i > 0:\r\n s = ((2 * (y1 - y2) * (x2 * y1 - y2 * x1)) + (\r\n x1 ** 2 + y1 ** 2 - x2 ** 2 - y2 ** 2 - (interval * i) ** 2 + (\r\n (dist(x1, y1, x2, y2) - (interval * i)) ** 2)) * (x1 - x2)) / (\r\n 2 * ((x2 - x1) ** 2 + (y2 - y1) ** 2))\r\n t = ((2 * (x2 - x1) * (x2 * y1 - y2 * x1)) + (\r\n x1 ** 2 + y1 ** 2 - x2 ** 2 - y2 ** 2 - (interval * i) ** 2 + (\r\n (dist(x1, y1, x2, y2) - (interval * i)) ** 2)) * (y1 - y2)) / (\r\n 2 * ((x2 - x1) ** 2 + (y2 - y1) ** 2))\r\n points.append([s, t])\r\n i = i + 1\r\n if dist(s, t, x2, y2) < interval:\r\n i = 0\r\n\r\n return points", "title": "" }, { "docid": "dd0bd58140a83485b2d891baec75ca77", "score": "0.4982772", "text": "def test_4():\n x_val = x + 
np.random.rand(1)\n y_val = func(x_val)\n error = ErrorEstimation(surr_object=pce_model(polys, x, y))\n assert round(error.validation(x_val, y_val), 3) == 0.432", "title": "" }, { "docid": "fe05405730036d1122750cbe523ae277", "score": "0.4971671", "text": "def error(self, X, y):", "title": "" }, { "docid": "72dbf0659bb717e57627c1d9e6a75891", "score": "0.4961871", "text": "def get_centre_lines_from_pairs(parallel_line_pairs : List[List[tuple]]) -> List[\"CentreLine\"]:\n def get_centered_line_segments(line1, line2) -> list:\n \"\"\"Function which returns a centered line segments from parallel pairs.\n\n Args:\n line1 (List of Tuples): A line is a collection of tuple of points in the following manner [(x1, y1), (x2, y2)].\n line2 (List of Tuples): A line is a collection of tuple of points in the following manner [(x1, y1), (x2, y2)].\n \n Returns:\n line (List of Tuples): A linesegment in middle of line1 and line2.\n \"\"\"\n # Calculate lenght of both the line segment.\n line1_length = get_length_of_line_segment(line1)\n line2_length = get_length_of_line_segment(line2)\n \n smaller_line = line1 if line1_length <= line2_length else line2\n bigger_line = line1 if smaller_line == line2 else line2\n \n # We need to get perpendicular points from the smaller line to the bigger line\n p1 = smaller_line[0]\n perpendicular_point1 = find_perpendicular_point(p1, bigger_line[0], bigger_line[1])\n \n # Check if the perpendicular point exists on the other line or not:\n if is_between(perpendicular_point1, bigger_line[0], bigger_line[1]):\n # Find centre point in this case\n centre_point1 = get_mid_points_between_points(p1, perpendicular_point1)\n else:\n # we need to get the point from the bigger line now\n for point in bigger_line:\n perpendicular_point1 = find_perpendicular_point(point, smaller_line[0], smaller_line[1])\n # if this perpendicular point lies in the smaller line then break\n if is_between(perpendicular_point1, smaller_line[0], smaller_line[1]):\n # calculate the centre point first\n centre_point1 = get_mid_points_between_points(point, perpendicular_point1) \n break\n \n # We need to get perpendicular points from the smaller line to the bigger line\n p2 = smaller_line[1]\n perpendicular_point2 = find_perpendicular_point(p2, bigger_line[0], bigger_line[1])\n \n # Check if the perpendicular point exists on the other line or not:\n if is_between(perpendicular_point2, bigger_line[0], bigger_line[1]):\n # Find centre point in this case\n centre_point2 = get_mid_points_between_points(p2, perpendicular_point2)\n else:\n # we need to get the point from the bigger line now\n for point in bigger_line:\n perpendicular_point2 = find_perpendicular_point(point, smaller_line[0], smaller_line[1])\n # if this perpendicular point lies in the smaller line then break\n if is_between(perpendicular_point2, smaller_line[0], smaller_line[1]):\n # calculate the centre point first\n centre_point2 = get_mid_points_between_points(point, perpendicular_point2)\n break\n \n return [centre_point1, centre_point2]\n \n \n centre_lines = []\n number = 1\n for pair in parallel_line_pairs:\n line1, line2 = pair\n \n if line2:\n x1, y1, x2, y2 = get_line_points_2d(line1)\n \n print(f'Now calculating line segment {line1, line2}') \n \n line_segment = get_centered_line_segments(line1, line2)\n \n print(f\"\"\"\n line1: {line1},\n line2: {line2},\n line_segment: {line_segment}\\n\\n\n \"\"\")\n \n width = parallel_line_pair_meta[str(pair)]\n if find_distance(line_segment[0], line_segment[1]) >= 5:\n centre_line = 
CentreLine(number, line_segment[0], line_segment[1], line_segment[0], line_segment[1], width)\n \n centre_lines.append(centre_line)\n \n number += 1\n \n return centre_lines", "title": "" }, { "docid": "05887d8a21d58ac9fe3d91e382e81201", "score": "0.49599773", "text": "def _line_points(a, b):\n x1, y1 = [int(x) for x in a]\n x2, y2 = [int(y) for y in b]\n if x1 == x2:\n return [(x1, y1 + (y if y2 > y1 else -y)) for y in range(0, abs(y1 - y2) + 1)]\n elif x1 > x2:\n return _line_points(b, a)\n\n dx = x2 - x1\n dy = y2 - y1\n if abs(dy) > abs(dx):\n return [(p[1], p[0]) for p in _line_points((y1, x1), (y2, x2))]\n\n points = []\n derr = abs(dy / dx)\n err = 0.0\n y = y1\n for x in range(x1, x2+1):\n points.append((x, y))\n err += derr\n if err > 0.5:\n y += 1 if dy > 0 else -1\n err -= 1.0\n return points", "title": "" }, { "docid": "14b4213656aa3409a20120fc135d634e", "score": "0.49408123", "text": "def get_pairs(lines):\n pairs = []\n for l in lines:\n if l.startswith('] def'):\n break\n elif l.startswith('['):\n first,second = l.strip('\\n[]').split()\n pairs.append([int(first)-1,int(second)-1])\n return pairs", "title": "" }, { "docid": "87947184fbba40cc5a9527a54721947a", "score": "0.493228", "text": "def straight_line_residuals(a,x,y):\n\n residuals = y - straight_line_function(a,x)\n\n return residuals", "title": "" }, { "docid": "f1273da193c3c88b9dc1882362b2521f", "score": "0.49293476", "text": "def closest_points(self, line):\n mu1, mu2 = self.closest_points_params(line)\n return self.point_on_line(mu1), line.point_on_line(mu2)", "title": "" }, { "docid": "7735a0fe55c898398429ed246e38428b", "score": "0.49156538", "text": "def error_matrix(radius, size_factor=1):\n\n if size_factor == 0: size_factor = 1 #0 is for non-interpolated algo...\n radius_large = radius * size_factor \n \n mx_index= np.zeros((radius_large +1 , radius, 2)).astype(int)\n mx_err = np.zeros((radius_large +1 , radius))\n mx_mask = np.zeros(mx_err.shape).astype(bool)\n\n min_err = {}\n\n j=0 #keep 0 line empty\n\n for m in range (0, radius_large+1 ): # 45 deg line is added (+1) \n\n x_f, y_f = radius, radius #x0,y0\n\n #dy = x; dx = y \n dy,dx= m, radius_large #SWAPPED x and y! MESSY\n\n \n #x and y = delta x and y but y is steep!\n #fist line is min y then it ascends till 45°\n\n D=0\n for i in xrange (0, radius ): #restrict iteration to actual radius! \n x_f += 1\n if 2* (D + dy) < dx:\n D += dy # y_f remains\n else:\n y_f += 1\n D += dy - dx\n \n #reverse x,y for data array!\n yx= (y_f,x_f)\n mx_index[j,i,0:2]=yx\n \n if D: e=D/dx; err=abs(e)\n else: e, err = 0,0\n\n mx_err[j,i]=e\n # keep pixel dictionary to sort out best pixels\n try:\n err_old = min_err[yx][0] \n if err < err_old: min_err[yx]=[err,j,i]\n except:\n min_err[yx]=[err,j,i]\n \n j+=1\n \n\n #check-out minimum errors\n # numpy style would be np.argmin.at!\n for key in min_err:\n ix=min_err[key][1:3]\n er = min_err[key][0]\n mx_mask[ix[0], ix[1]]= 1\n\n mx_err_dir = np.where(mx_err > 0, 1, -1)\n mx_err_dir[mx_err == 0]=0 #should use some multiple criteria in where... 
\n\n #take the best pixels \n #cannot simply use indices as pairs [[x,y], [...]]- np thing...\n #cannot use mx : has a lot of duplicate indices\n\n \n mx_err_index = mx_index [:,:, 0] + mx_err_dir\n # we do not need negative errors any more\n return mx_index, mx_err_index, np.absolute(mx_err), mx_mask", "title": "" }, { "docid": "9cb96589b70e72fb872b34f44d9bf457", "score": "0.48997262", "text": "def analysis_line_linearity(target_list, measure_list_mm, boundary_index_list, linearity_list):\n\n boundary_max_linearity = [0, [0, 0]]\n center_max_linearity = [0, [0, 0]]\n for i in range(len(linearity_list)):\n for j in range(len(linearity_list[i])):\n if j in boundary_index_list[i]:\n if linearity_list[i][j] > boundary_max_linearity[0]:\n boundary_max_linearity[0] = linearity_list[i][j]\n boundary_max_linearity[1] = [i, j]\n else:\n if linearity_list[i][j] > center_max_linearity[0]:\n center_max_linearity[0] = linearity_list[i][j]\n center_max_linearity[1] = [i, j]\n print(\"\\tLinearity:\")\n print_linearity_list = [[\"boundary\", boundary_max_linearity], [\"center \", center_max_linearity]]\n for print_linearity in print_linearity_list:\n name = print_linearity[0]\n max_L = print_linearity[1][0]\n max_i = print_linearity[1][1][0]\n max_j = print_linearity[1][1][1]\n print(\"\\t\\tMax %s linearity %f -------------- target: (%f %f)->(%f, %f), measure: (%f, %f), line %d\" \\\n % ( name, max_L, target_list[max_i][0], target_list[max_i][1], target_list[max_i][2], target_list[max_i][3], \\\n measure_list_mm[max_i][max_j][0], measure_list_mm[max_i][max_j][1], max_i + 1 ))\n return boundary_max_linearity, center_max_linearity", "title": "" }, { "docid": "8d203b76b25be5254cbf4e88d469dcf6", "score": "0.4899218", "text": "def line_checks(checks):\n def run_line_checks(file_path):\n errors = []\n ln = 0\n with open(file_path) as fp:\n for line in fp:\n ln += 1\n for check in checks:\n err = check(line)\n if err is not None:\n errors.append((ln, err))\n return errors\n return run_line_checks", "title": "" }, { "docid": "39ecd2217f24cccedb7d9bcfa7044193", "score": "0.48943546", "text": "def test_straight_line(self):\n test_x = np.linspace(0, 9, 10)\n test_y = np.linspace(0, 18, 10)\n result_y = utils.straight_line(test_x, 2, 0)\n assert_almost_equal(result_y, test_y)", "title": "" }, { "docid": "97c78e482782e14b6b1b47d4b2a03b6d", "score": "0.4885796", "text": "def get_line(start, end):\n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n #points_y = []\n for x in range(x1, x2 + 1):\n coord = (y, x) if is_steep else (x, y)\n points.append(coord)\n #points_y.append(coord[1])\n\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n #points_y.reverse()\n #points_x=np.asarray(points_x)\n #points=np.asarray(points)\n\n return points", "title": "" }, { "docid": "fd3446995cfd3a088d205ce9f8e53506", "score": "0.48780882", "text": "def linesweep(self):\r\n \r\n 
segments = [] # will contain line segments according to by significance \r\n # sorted center-neighbor value ranges\r\n \r\n vals = [] # container for the later estimation of dataset parameters\r\n # for iteration\r\n \r\n cur.execute('SELECT * FROM line_sweep')\r\n if not cur.fetchone():\r\n cur.execute(\"\"\"INSERT INTO line_sweep \r\n SELECT loc.\"CenterID\", nb.\"PolygonID\", nb.\"Center\", nb.\"Neighbor\", loc.\"min\", loc.\"Note\"\r\n FROM \"locExtremePairs\" loc, \"neighborPairs\" nb\r\n WHERE loc.\"CenterID\" = nb.\"CenterID\" and loc.\"min\"=ABS(nb.\"Difference\")\r\n GROUP BY loc.\"CenterID\", nb.\"PolygonID\", nb.\"Center\", nb.\"Neighbor\"\r\n ORDER BY loc.\"min\" DESC;\"\"\")\r\n con.commit()\r\n\r\n cur.execute(\"SELECT rowid, centerid, polygonid, center, neighbor, min, note FROM line_sweep\")\r\n data = cur.fetchall()\r\n \r\n # getting data set specific value ranges and create line segments\r\n for i, line in enumerate(data):\r\n\r\n # get values for center, neighbor and significance\r\n uid = line[0]\r\n center_val = line[3]\r\n neighbor_val = line[4]\r\n\r\n #create line segments\r\n segments.append([uid, LineString([(center_val, i+1), (neighbor_val, i+1)])])\r\n\r\n # get value range\r\n vals.append(center_val)\r\n\r\n \r\n # dataset parameters for intersection search\r\n minval = min(vals)\r\n maxval = max(vals)\r\n valrange = len(vals)\r\n #calc = abs((maxval-minval)/valrange)\r\n #min_dif = (calc/10 if 0.9 < calc < 1.1 else calc) # <-- ???how to determine the optimal sweep interval with respect to dataset value range???\r\n min_dif = self.swp\r\n \r\n # intersection search\r\n sweep = minval # set starting point for iteration\r\n \r\n # result of intersection search, containing: \r\n #(# of intersections, sweep, respective segment-ids)\r\n intersection = [] \r\n \r\n # until the max value of the dataset is not reached the sweep will iterate \r\n # through the value range with the given sweep interval and check for \r\n # intersections with the given set of line segments\r\n \r\n while sweep <= maxval:\r\n match_segments = [segment[0] for i, segment in enumerate(segments) \r\n if segment[1].contains(Point(sweep, i+1))]\r\n \r\n sweep += min_dif\r\n \r\n intersection.append((len(match_segments), round(sweep,2), [x for x in match_segments]))\r\n \r\n if sys.version_info.major < 3:\r\n to_db = [(intersection[i][0], \r\n intersection[i][1], \r\n sql.Binary(array.array('L', intersection[i][2]).tostring())) \r\n for i, intersect in enumerate(intersection)]\r\n else:\r\n to_db = [(intersection[i][0], \r\n intersection[i][1], \r\n sql.Binary(array.array('L', intersection[i][2]).tobytes())) \r\n for i, intersect in enumerate(intersection)]\r\n \r\n return to_db", "title": "" }, { "docid": "a687dab9bb1c5c14d820b77aeba380ff", "score": "0.48750824", "text": "def _test(a0=None):\r\n import math\r\n if a0 is None:\r\n a0 = np.array([[10, 10], [10, 20], [20, 20], [10, 10]])\r\n x0, y0 = p0 = a0[-2]\r\n p1 = a0[-1]\r\n dx, dy = p1 - p0\r\n dist = math.hypot(dx, dy)\r\n xc, yc = pc = p0 + (p1 - p0)/2.0\r\n slope = math.atan2(dy, dx)\r\n step = 2.\r\n xn = x0 + math.cos(slope) * step # dist / fact\r\n yn = y0 + math.sin(slope) * step # dist / fact\r\n # better\r\n start = 0\r\n step = 2.0\r\n stop = 10.0 + step/2\r\n x2 = np.arange(start, stop + step, step)\r\n return a0, dist, xc, yc, pc, slope, xn, yn, x2", "title": "" }, { "docid": "a687dab9bb1c5c14d820b77aeba380ff", "score": "0.48750824", "text": "def _test(a0=None):\r\n import math\r\n if a0 is None:\r\n a0 = np.array([[10, 10], 
[10, 20], [20, 20], [10, 10]])\r\n x0, y0 = p0 = a0[-2]\r\n p1 = a0[-1]\r\n dx, dy = p1 - p0\r\n dist = math.hypot(dx, dy)\r\n xc, yc = pc = p0 + (p1 - p0)/2.0\r\n slope = math.atan2(dy, dx)\r\n step = 2.\r\n xn = x0 + math.cos(slope) * step # dist / fact\r\n yn = y0 + math.sin(slope) * step # dist / fact\r\n # better\r\n start = 0\r\n step = 2.0\r\n stop = 10.0 + step/2\r\n x2 = np.arange(start, stop + step, step)\r\n return a0, dist, xc, yc, pc, slope, xn, yn, x2", "title": "" }, { "docid": "c1087b0ffa26f642b1cc70c866ac12c4", "score": "0.48719808", "text": "def test_regression_and_recovery(self):\n # create an array filled with 50s' then set some ranges to 100\n series = np.full(45, 50, dtype=int)\n series[2] = 100\n series[15:30] = 100\n series[32] = 100\n\n with deterministic_numpy_random(1000):\n points = e_divisive(series, pvalue=0.01)\n assert points == [33, 15]", "title": "" }, { "docid": "bdb229fc37ac39f20770fb1839e86120", "score": "0.48698682", "text": "def line_position(spec, line_center, res = 0.1, mode = 'absorption'):\n\n lista = []\n for i in range(len(spec)):\n if spec[i, 0] > line_center and \\\n spec[i, 0] < line_center + res:\n lista.append(i)\n spec2 = spec[lista[0]:lista[-1]]\n #Now obtains the line_center and its flux\n if mode.lower() == 'absorption':\n line_pos = spec2[np.argsort(spec2[:, 1])[0]]\n elif mode.lower() == 'emission':\n line_pos = spec2[np.argsort(spec2[:, 1])[-1]]\n\n return line_pos", "title": "" }, { "docid": "6cc29fd65345c7a73a52bad933d2c091", "score": "0.48665413", "text": "def get_h2(points, function, h):\n max_val = 0\n max_set = False\n\n for i in range(len(points)):\n if i != h:\n val = function.calculate(tuple(points[i]))\n if not max_set or val > max_val:\n max_val = val\n h2 = i\n max_set = True\n\n return h2", "title": "" }, { "docid": "20b8b7f2d875adb72f5287c1863d94f1", "score": "0.48611313", "text": "def get_simple_reg_error_and_points(self, center_position):\n assert center_position.shape == (1,3)\n self.iteration += 1\n traj_matlab, difference = self.get_traj_matlabarray(center_position, self.traj, self.traj_center)\n errors = np.zeros((center_position.shape[0],1))\n idx = 0\n for trajectory in traj_matlab: #list comprehension wouldnt work on mlarray\n registered_points, _, regError = self.perform_reg(self.aorta, trajectory, nargout=3)\n errors[idx] = regError / trajectory.size[0]\n idx += 1\n return errors, registered_points", "title": "" }, { "docid": "0b899f3c20e078eca890d8b2d8131d62", "score": "0.48608646", "text": "def quick_check_line_components(line_bin,dpi):\n return 1.0", "title": "" }, { "docid": "07011b8ea993f86d9b0f3936d584b014", "score": "0.48536655", "text": "def line_bisect(p1, p2, div=fdiv):\n ((x1, y1), (x2, y2)) = (p1, p2)\n s = fdiv(x1 + x2 + y1 + y2, 2)\n return ((s - y1, s - x2), (s - y2, s - x1))", "title": "" }, { "docid": "677e3cb71d33083da02ecf5b12870b2c", "score": "0.48494026", "text": "def _check_line_steps(line: np.ndarray) -> np.ndarray:\n prev = line[0]\n for i in range(1, len(line)):\n curr_val = line[i]\n if curr_val > (prev + 1):\n line[i] = prev + 1\n elif curr_val < (prev - 1):\n line[i] = prev - 1\n prev = curr_val\n return line", "title": "" }, { "docid": "e3188684ca05ee7f9ad6819449628f54", "score": "0.48492503", "text": "def evaluate_line(self, pin_start, pin_end, logLevel=\"INFO\"):\n logeval = logging.getLogger(f\"{self.__class__.__name__}.console.evaluate_line\")\n logeval.setLevel(logLevel)\n # logeval.debug(f'Line from pin {pin_start} to {pin_end}')\n\n # line = np.zeros( drawn.shape, 
dtype=drawn.dtype)\n line = np.zeros(self.residual.shape, dtype=self.residual.dtype)\n x = tuple(self.pins[pin_start])\n y = tuple(self.pins[pin_end])\n # logeval.log(5, f'Punti {x} {y}')\n cv2.line(line, x, y, self.line_weight)\n # remove the last dot from the line WHAT the transposed hell\n line[y[1], y[0]] -= self.line_weight\n # logeval.log(5, f'the LINE\\n{line}')\n\n # line_int = line.astype(self.residual.dtype)\n # cv2.subtract(self.residual, line_int, residual)\n new_residual = cv2.subtract(self.residual, line)\n\n new_loss = np.sum(np.abs(new_residual))\n logeval.debug(\n f\"Line from pin {pin_start} to {pin_end} has loss {new_loss} and delta {self.loss-new_loss}\"\n )\n\n return new_loss", "title": "" }, { "docid": "6d27bdcfd836fe9f55395abd757452fa", "score": "0.4848484", "text": "def compute_lines(x, y, xl, yl, xr, yr, vx, vy, dens, eps, tracer,\n injec, sf0, tstep,\n nx, ny, lx, ly, dx, dy, gammaad, div, itemax, resamp,\n int_method, int_test, CGS_units, c, rho0, a, fB0, tr0,\n input_file, output_file):\n\n print '\\nComputing the current lines...'\n\n start_time = time.time()\n all_lines = []\n int_diff = []\n excluded_lines = []\n\n for i0, j0 in injec:\n line_values = []\n tc = 0.\n xc, yc = initial_position(x[i0], y[j0])\n ic, jc = initial_indices(i0, j0)\n densc, epsc, vxc, vyc, divc, tracerc = initial_variables(dens[ic, jc],\n eps[ic, jc], vx[ic, jc],\n vy[ic, jc], div[ic,jc],\n tracer[ic, jc])\n ic_aux, jc_aux = ic, jc\n buffer_present_line(xc, yc, ic, jc, densc, epsc,\n vxc, vyc, divc, tracerc, tc, line_values)\n\n ite = 1\n while True:\n tc += tstep\n xc, yc = update_position(xc, yc, vxc, vyc, tstep)\n\n if (xc > lx) or (xc < 0) or (yc > ly) or (yc < 0):\n break\n\n ic, jc = update_indices(xc, yc, xl, yl, xr, yr, ic, jc,\n vxc, vyc, nx, ny)\n\n tstep_test(ic, jc, ic_aux, jc_aux)\n ic_aux, jc_aux = ic, jc\n\n try:\n (densc, epsc, vxc,\n vyc, divc, tracerc, densc2) = interpolate(xc, yc, ic, jc, x, y,\n dens, eps, vx, vy, div,\n tracer, nx, ny, int_method,\n int_test)\n except Exception:\n print 'WARNING: Interpolation failed for line starting at [{}, {}]'.format(i0,j0)\n\n if int_test != 0:\n buffer_diff(densc, densc2, int_diff)\n\n buffer_present_line(xc, yc, ic, jc, densc, epsc,\n vxc, vyc, divc, tracerc, tc, line_values)\n ite += 1\n if ite == itemax:\n print 'WARNING: Line starting at [{}, {}] did not converged. Increase itemax parameter?'.format(i0,j0)\n excluded_lines += [(i0,j0)]\n break\n\n if ite != itemax:\n buffer_all_lines(all_lines, line_values)\n\n if resamp != 0:\n all_lines = resamp_line(all_lines, resamp, a, CGS_units)\n\n if CGS_units == 1:\n all_lines, sf0 = code_units_to_CGS(all_lines, sf0, c, rho0, a)\n\n save_all_lines(output_file, all_lines)\n\n if CGS_units == 1:\n all_lines = save_one_file_per_line(output_file, all_lines, gammaad, c, fB0,\n tr0, sf0, excluded_lines, input_file)\n else:\n print 'WARNING: current lines not saved. 
CGS unit conversion is off'\n\n if int_test != 0:\n print \"Density difference between the two interpolation methods: \" \\\n \"{:.2f} %\".format(np.average(np.array(int_diff)))\n print \"Done (elapsed time: {:.0f} seconds) \"\\\n .format(time.time() - start_time)\n return all_lines", "title": "" }, { "docid": "01da22be95dca54a4fdb936276e1691b", "score": "0.4846423", "text": "def return_proximity_segments(lines, index_segments):\n nb_lines = lines.shape[0]\n nb_matches = len(index_segments)\n reduce_lines = []\n distances = np.zeros((nb_lines, nb_lines))\n index_matching = []\n # compute distances between lines\n for i in tqdm(range(nb_lines)):\n for j in range(i+1, nb_lines):\n distances[i, j] = distance_line2line(lines[i], lines[j])\n distances[j, i] = distances[i, j]\n # compute matching index\n for i in tqdm(range(nb_lines)):\n L = []\n for j in range(nb_matches):\n if index_segments[j][0] == i:\n L.append(index_segments[j][1])\n elif index_segments[j][1] == i:\n L.append(index_segments[j][0])\n index_matching.append(L)\n for (index_1, index_2) in tqdm(index_segments):\n L = index_matching[index_1] + index_matching[index_2]\n if are_closest_line(index_1, index_2, L, distances):\n reduce_lines.append([index_1, index_2])\n\n return reduce_lines", "title": "" }, { "docid": "55239666bf25a5344bc504f4a7eccd8a", "score": "0.48379546", "text": "def min_of_pairs(number):\r\n \r\n # your code here\r", "title": "" }, { "docid": "63c9deb3bddd518a5ffc1e8f29051226", "score": "0.48315352", "text": "def parse_line(self):\n def add_element(value, index):\n self.out_lines.append(Line(value, int(index)))\n line = self.input_lines[self.current_line]\n splitted = line.split()\n indices = splitted[0]\n if len(splitted) == 2:\n value = splitted[1]\n else:\n value = None\n indices_extremes = indices.split('-')\n if len(indices_extremes) == 2:\n lower = int(indices_extremes[0])\n upper = int(indices_extremes[1])\n for i in range(lower, upper+1):\n add_element(value, i)\n else:\n add_element(value, indices_extremes[0])", "title": "" }, { "docid": "29742c48de092a38aaa37dab3748bb27", "score": "0.48186344", "text": "def unmatched_pair(self):\n input = UnitArray((1, 2, 3), units=meters / second)\n output = UnitArray((4, 5, 6), units=meters / second)\n return input, output", "title": "" }, { "docid": "395cc121480c4df1df2a951252459502", "score": "0.48064417", "text": "def are_closest_line(index_line_1, index_line_2, index_lines, distance_segment):\n distance_segment_1 = distance_segment[index_line_1, index_lines]\n distance_segment_2 = distance_segment[index_line_2, index_lines]\n distance_1_2 = distance_segment_1[index_lines == index_line_2]\n for index_line in range(len(index_lines)):\n if distance_1_2 > distance_segment_1[index_line] and distance_1_2 > distance_segment_2[index_line]:\n return False\n return True", "title": "" }, { "docid": "7be6b1375cc93101a8f1f8ff68bf0b8c", "score": "0.48028225", "text": "def getLine(p1, p2):\r\n try:\r\n slope = float((p1[1] - p2[1]) / (p1[0] - p2[0]))\r\n yint = float((-1 * (p1[0])) * slope + p1[1])\r\n return (slope, yint)\r\n except ZeroDivisionError:\r\n print('Divided by Zero Error.')", "title": "" }, { "docid": "21bc6ee093976f1dcf4862f5297b4d2d", "score": "0.48010075", "text": "def generate_satellite_output_vectorial(satrec, line2, error_list):\n\n if satrec.method == 'd':\n yield from generate_satellite_output(satrec, line2, error_list)\n return\n \n import numpy as np\n \n mu = satrec.whichconst.mu\n\n tstart, tend, tstep = (float(field) for field in 
line2[69:].split())\n times = np.arange(tstart, tend, tstep)\n \n if times[-1] - tend < tstep - 1e-6: # do not miss last line!\n times = np.append(times, tend)\n\n _r, _v = np.array(sgp4(satrec, times))\n \n if _r.shape == (3,):\n if isnan(_r[0]) and isnan(_r[1]) and isnan(_r[2]):\n error_list.append((satrec.error, satrec.error_message))\n print(error_list)\n return\n\n for i in range(_r.shape[1]):\n r = list(_r[:,i])\n v = list(_v[:,i])\n t = satrec.t[i]\n\n if i == 0:\n yield format_short_line(t, r, v)\n else:\n yield format_long_line(satrec, t, mu, r, v)", "title": "" }, { "docid": "9a00eb193f80ca7f60e7d0d430c79c91", "score": "0.48001173", "text": "def part2(input_lines):\n target = int(input_lines[0])\n g = SpiralGrid()\n return g.locate_target_sum(target)", "title": "" }, { "docid": "7f59de8a25a926550bfcc4c4dfdde1d5", "score": "0.4799166", "text": "def test_get_error_horizontal(self) -> None:\n test_model = line2d.Line2D(slope=0, y_int=5, x_int=math.nan)\n test_point = line2d.Point2D(2, 1)\n\n error = test_model.calc_error(point=test_point)\n\n self.assertEqual(error, 4)", "title": "" }, { "docid": "d8294227b23760806f61fee2dda0059c", "score": "0.47925478", "text": "def _distance_along_line(start, end, distance, dist_func, tol):\n initial_distance = dist_func(start, end)\n if initial_distance < distance:\n raise ValueError(\n f\"End is closer to start ({initial_distance}) than \"\n f\"given distance ({distance}).\"\n )\n\n if tol <= 0:\n raise ValueError(f\"Tolerance is not positive: {tol}\")\n\n # Binary search for a point at the given distance.\n left = start\n right = end\n\n while not np.isclose(dist_func(start, right), distance, rtol=tol):\n midpoint = (left + right) / 2\n\n # If midpoint is too close, search in second half.\n if dist_func(start, midpoint) < distance:\n left = midpoint\n # Otherwise the midpoint is too far, so search in first half.\n else:\n right = midpoint\n\n return right", "title": "" }, { "docid": "a7f3bc4fc121fba6de415c7cb382e8a4", "score": "0.47914663", "text": "def lineXYZ(i,data,ewk,qcd,py=None):\n assert type(data) != type([]), 'Data cannot be a tuple, since it has no systematic on it!'\n low = '%.2f'%(data.GetBinLowEdge(i))\n high = '%.2f'%(data.GetBinLowEdge(i)+data.GetBinWidth(i))\n maxd = max( [data.GetBinContent(ii) for ii in xrange(1,data.GetNbinsX()+1)] )\n maxd1 = max( [data.GetBinError(ii) for ii in xrange(1,data.GetNbinsX()+1)] )\n maxe = max( [ewk[0].GetBinContent(ii) for ii in xrange(1,ewk[0].GetNbinsX()+1)] )\n maxe1 = max( [ewk[0].GetBinError(ii) for ii in xrange(1,ewk[0].GetNbinsX()+1)] )\n maxe2 = max( [ewk[1].GetBinError(ii) for ii in xrange(1,ewk[0].GetNbinsX()+1)] )\n maxq = max( [qcd[0].GetBinContent(ii) for ii in xrange(1,qcd[0].GetNbinsX()+1)] )\n maxq1 = max( [qcd[0].GetBinError(ii) for ii in xrange(1,qcd[0].GetNbinsX()+1)] )\n maxq2 = max( [qcd[0].GetBinError(ii) for ii in xrange(1,qcd[0].GetNbinsX()+1)] )\n print '$%s..%s$'%(low,high),\n print ' & ',\n def d(v,m): return '%s%d'%(pho(v,m),v)\n def f(v,m): return '%s%.1f'%(pho(v,m),v)\n print '$%s'%d(data.GetBinContent(i),maxd),'\\pm','%s$'%(d(data.GetBinError(i),maxd1)),\n print ' & ',\n print '$%s'%f(ewk[0].GetBinContent(i),maxe),'\\pm','%s'%f(ewk[0].GetBinError(i),maxe1),'\\pm','%s$'%f(ewk[1].GetBinError(i),maxe2),\n print ' & ',\n print '$%s'%f(qcd[0].GetBinContent(i),maxq),'\\pm','%s'%f(qcd[0].GetBinError(i),maxq1),'\\pm','%s$'%f(qcd[1].GetBinError(i),maxq2),\n print '\\\\\\\\'", "title": "" }, { "docid": "663ab7b7746142f38629324f6de21d6b", "score": "0.47624433", "text": "def 
_line_int(l1, l2, precision=0):\n i = [0, 0] # point\n a1 = l1[1][1] - l1[0][1]\n b1 = l1[0][0] - l1[1][0]\n c1 = a1 * l1[0][0] + b1 * l1[0][1]\n a2 = l2[1][1] - l2[0][1]\n b2 = l2[0][0] - l2[1][0]\n c2 = a2 * l2[0][0] + b2 * l2[0][1]\n det = a1 * b2 - a2 * b1\n if not _scalar_eq(det, 0, precision): # lines are not parallel\n i[0] = (b2 * c1 - b1 * c2) / det\n i[1] = (a1 * c2 - a2 * c1) / det\n return i", "title": "" }, { "docid": "c6e26b5d27c8d2126740d1b60e7fbff7", "score": "0.47540274", "text": "def calcuC(Value):\n filterValue = Value\n\n # Find the minimum/maximun value of the curve by calculating the larger/smaller difference.\n diff, MaxIndx, MaxIndxValue, MinIndx, MinIndxValue = [], [], [], [], []\n for i in range(len(filterValue) - 1):\n diff.append(filterValue[i + 1] - filterValue[i])\n for i in range(thrd):\n maxIndx = np.argmax(diff)\n minIndx = np.argmin(diff)\n if maxIndx + int(point // 2) < len(filterValue):\n if filterValue[maxIndx + int(point // 2)] > filterValue[maxIndx + int((point // 2) // 2)]:\n if filterValue[maxIndx + 1] > filterValue[maxIndx] and filterValue[maxIndx - 1] > filterValue[maxIndx]:\n MaxIndx.append(maxIndx)\n MaxIndxValue.append(filterValue[maxIndx])\n if minIndx + int(point // 2) < len(filterValue):\n if filterValue[minIndx + int(point // 2)] < filterValue[minIndx + int((point // 2) // 2)]:\n if filterValue[minIndx + 1] < filterValue[minIndx] and filterValue[minIndx - 1] < filterValue[minIndx]:\n MinIndx.append(minIndx)\n MinIndxValue.append(filterValue[minIndx])\n diff[maxIndx] = 0\n diff[minIndx] = 0\n\n # Delete the error items of maximum/minimum value\n for i in range(len(MaxIndx)):\n for j in range(1, int(point // 2) - 1):\n if filterValue[MaxIndx[i] + j] < filterValue[(MaxIndx[i] + 1)]:\n MaxIndx[i] = 0\n MaxIndxValue[i] = 0\n if MaxIndx.count(0):\n if 0 in MaxIndx:\n MaxIndx.remove(0)\n # MaxIndxValue.remove(0)\n\n for i in range(len(MinIndx)):\n for j in range(1, int(point // 2) - 1):\n if filterValue[MinIndx[i] + j] > filterValue[(MinIndx[i] + 1)]:\n MinIndx[i] = 0\n MinIndxValue[i] = 0\n if MinIndx.count(0):\n if 0 in MinIndx:\n MinIndx.remove(0)\n # MinIndxValue.remove(0)\n\n ###\n # case 1: rise\n C_rise = []\n for p in range(len(MaxIndx)):\n V_tau = (filterValue[MaxIndx[p] + int(point // 2)] - filterValue[MaxIndx[p]]) * tauPercent + filterValue[\n MaxIndx[p]]\n for i in range(int(point // 2) + 1):\n if V_tau < filterValue[MaxIndx[p] + i]:\n tauIndex = i - 1\n break\n t_tau = float((V_tau - filterValue[MaxIndx[p] + tauIndex]) / (\n filterValue[MaxIndx[p] + tauIndex + 1] - filterValue[\n MaxIndx[p] + tauIndex])) * sampleRate + tauIndex * sampleRate\n C_rise.append(abs(t_tau) / R)\n\n\n # case 2: down\n C_down = []\n for p in range(len(MinIndx)):\n V_tau = (filterValue[MinIndx[p]] - (filterValue[MinIndx[p]] - filterValue[MinIndx[p] + int(point // 2)]) * tauPercent)\n for i in range(int(point // 2) + 1):\n if V_tau > filterValue[MinIndx[p] + i]:\n tauIndex = i - 1\n break\n t_tau = float((filterValue[MinIndx[p] + tauIndex] - V_tau) / (filterValue[MinIndx[p] + tauIndex] - filterValue[MinIndx[p] + tauIndex + 1])) * sampleRate + tauIndex * sampleRate\n C_down.append(abs(t_tau) / R)\n C_rise.append(abs(t_tau) / R)\n\n dic = {'time': time.ctime(), 'Length of C': len(C_rise), 'Average capacitance value(F)': np.average(C_rise)}\n print(dic)\n print()\n return len(C_rise), C_rise, dic", "title": "" }, { "docid": "a57a7410c7d2fc2921ae365fe229966f", "score": "0.4751851", "text": "def remove_pair_overlaps(hit_table, **params):\n yield 
iterable_to_stream(generate_nonoverlapping_lines(hit_table, **params))", "title": "" }, { "docid": "0f5f2486c2f493382fcd30d7dbb0b916", "score": "0.47487056", "text": "def test_get_error_vertical(self) -> None:\n test_model = line2d.Line2D(slope=math.nan, y_int=math.nan, x_int=3)\n test_point = line2d.Point2D(1, 2)\n\n error = test_model.calc_error(point=test_point)\n\n self.assertEqual(error, 2)", "title": "" }, { "docid": "548f2b715cfedd54cd37374cc7feaacb", "score": "0.47482628", "text": "def filter_lines2(lines):\n cross_pt = []\n length = len(lines)\n\n # find all cross if it's in MID_POINT_REGION\n for i in range(length):\n for j in range(i+1, length):\n line1 = lines[i]\n line2 = lines[j]\n x, y = calc_cross_point(line1[0], line1[1], line2[0], line2[1])\n if x is None:\n continue\n\n if not (MID_POINT_RERION[0] < x < MID_POINT_RERION[2]):\n continue\n\n if not (MID_POINT_RERION[1] < y < MID_POINT_RERION[2]):\n continue\n\n cross_pt.append([x, y])\n\n # find max clustering point\n length_pt = len(cross_pt)\n mid_point = []\n max_num = 0\n # TODO: preformance is bad\n for i in range(length_pt):\n x0, y0 = cross_pt[i][0], cross_pt[i][1]\n min_point_x = x0\n min_point_y = y0\n num = 1\n for j in range(length_pt):\n x1, y1 = cross_pt[j][0], cross_pt[j][1]\n if calc_distance2(x0, y0, x1, y1) < MID_POINT_R ** 2:\n mid_point_x = (min_point_x * num + x1) / (num + 1)\n mid_point_y = (min_point_y * num + y1) / (num + 1)\n num += 1\n\n if num > max_num:\n mid_point = [[mid_point_x, mid_point_y]]\n max_num = num\n elif num == max_num:\n mid_point.append([mid_point_x, mid_point_y])\n return mid_point, cross_pt", "title": "" }, { "docid": "ea0481992a05cb6a2935faa2ec5074cb", "score": "0.47480404", "text": "def generate_satellite_output(satrec, line2, error_list):\n\n mu = satrec.whichconst.mu\n\n r, v = sgp4(satrec, 0.0)\n if isnan(r[0]) and isnan(r[1]) and isnan(r[2]):\n error_list.append((satrec.error, satrec.error_message))\n yield '(Use previous data line)'\n return\n yield format_short_line(satrec.t , r, v)\n\n tstart, tend, tstep = (float(field) for field in line2[69:].split())\n\n tsince = tstart\n while tsince <= tend:\n if tsince == tstart == 0.0:\n tsince += tstep\n continue # avoid duplicating the first line\n\n r, v = sgp4(satrec, tsince)\n\n if isnan(r[0]) and isnan(r[1]) and isnan(r[2]):\n error_list.append((satrec.error, satrec.error_message))\n return\n yield format_long_line(satrec, satrec.t, mu, r, v)\n\n tsince += tstep\n\n if tsince - tend < tstep - 1e-6: # do not miss last line!\n r, v = sgp4(satrec, tend)\n if isnan(r[0]) and isnan(r[1]) and isnan(r[2]):\n error_list.append((satrec.error, satrec.error_message))\n return\n yield format_long_line(satrec, satrec.t, mu, r, v)", "title": "" }, { "docid": "976c48e2b6f302c3168386f9744d6294", "score": "0.47458017", "text": "def alignedToWhere(error,tolerance):\n\tif(math.fabs(error)>tolerance):\n\t\tif(error>0):\n\t\t\treturn -1\n\t\telse:\n\t\t\treturn 1\n\telse:\n\t\treturn 0;", "title": "" }, { "docid": "180a26f3a97cf09f8869f2d47477825a", "score": "0.4743842", "text": "def test_pairwise():\n n = np.random.randint(4, 9)\n iterable = np.random.randint(10, size=(n, 3))\n pair = list(tube_seg.pairwise(iterable))\n\n \"\"\"\n Verify:\n \n I. the number of pairs\n II. the first element of each pair\n III. 
the second elemnt of each pair\n \"\"\"\n assert len(pair) == n - 1\n assert (iterable[:-1, :] == [a[0] for a in pair]).all()\n assert (iterable[1:, :] == [b[1] for b in pair]).all()", "title": "" }, { "docid": "65bf24452749cbe619bafca1551b802a", "score": "0.47435492", "text": "def cointegration_mixed(df_integrated_pairs, viable_pairs, desired_num=20, confidence=0.05, show_progress_bar=True):\n integrated_pairs = df_integrated_pairs.index.unique(0)\n cointegrated = []\n\n for pair in tqdm(viable_pairs, desc ='Finding cointegrations across pairs', disable= not show_progress_bar):\n if pair[0] not in integrated_pairs or pair[1] not in integrated_pairs:\n continue\n\n x = df_integrated_pairs.loc[pair[0], \"logClose\"].fillna(method=\"ffill\").values\n x = x.reshape((x.shape[0], 1))\n y = df_integrated_pairs.loc[pair[1], \"logClose\"].fillna(method=\"ffill\").values\n y = y.reshape((y.shape[0], 1))\n if ts.coint(x, y)[1] <= confidence:\n model = sm.OLS(y, sm.add_constant(x))\n results = model.fit()\n # the model is like \"second(logClose) - coef*first(logClose) = mean(logClose)+epsilon\" in the pair\n cointegrated.append([pair, results.params])\n \n if len(cointegrated) >= desired_num:\n break\n \n return cointegrated", "title": "" }, { "docid": "29bbbeadc4c239e4ff5873b6e196473d", "score": "0.47423267", "text": "def get_target_array(inp: list, target_value: int):\n \n for i in inp:\n if i < target_value:\n pair = target_value - i\n if pair in inp:\n # print(f\"the first number= {i} the second number {pair}\")\n return[inp.index(i), inp.index(pair)]\n break", "title": "" }, { "docid": "3ae54f2eee30afb2de291e3f6fa5c487", "score": "0.47399426", "text": "def match_lines2(s, wvl, ref_line_list):\n\n # find centroids of s\n from libs.find_peak import find_peaks\n sol_list = find_peaks(s, sigma=3)\n cent_list = np.array([sol[0] for sol in sol_list])\n\n # define transform from lambda to pixel\n wvl2pix = interp1d(wvl, np.arange(len(wvl)))\n\n ref_pix_list = wvl2pix(ref_line_list)\n\n\n # find nearest matches\n\n kdtree = spatial.KDTree(ref_pix_list.reshape([-1,1]))\n dists, indices = kdtree.query(cent_list.reshape([-1,1]))\n\n # filter out multiple hits. 
Only the nearest one remains.\n filtered_indices = []\n for k, l in itertools.groupby(zip(indices,\n sol_list, dists),\n operator.itemgetter(0)):\n l = list(l)\n i = np.argmin([l1[-1] for l1 in l])\n filtered_indices.append(l[i])\n\n matched_indices = [s_[0] for s_ in filtered_indices]\n matched_fit_params = [s_[1] for s_ in filtered_indices]\n matched_distances = [s_[2] for s_ in filtered_indices]\n\n return matched_indices, matched_fit_params, matched_distances", "title": "" }, { "docid": "bb2a71b66bdbabb0dd7b4b35765b3500", "score": "0.47368088", "text": "def trim(linelist):\n\n firstpass = 0.3\n searchpass = 0.1\n\n sub = lambda x, y: ((x[0] - y[0]), (x[1] - y[1]))\n\n nonintersections = []\n intersections = []\n i = 0\n\n while i < len(linelist):\n line = linelist[i]\n last = linelist[i - 1]\n #print(\"Trimming\", line[1:], last[1:])\n\n #print(\"Intersect:\", line[1], sub(line[2], line[1]), last[1], sub(last[2], last[1]))\n intersection = intersect(line[1], sub(line[2], line[1]), last[2], sub(last[1], last[2]),\n \"abs \" + str(firstpass))\n\n #intersection = tuple(Decimal((a+b)/2) for (a, b) in zip(line[1], last[2]))\n\n #print(\"Intersection at\", intersection)\n\n if intersection:\n linelist[i][1] = intersection\n linelist[i - 1][2] = intersection\n intersections.append([i - 1, i])\n else:\n #print(\"Lines don't intersect:\", line[1:], last[1:])\n nonintersections.append([i - 1, i])\n i += 1\n\n\n # Group lines\n i = 0\n while i < len(intersections):\n pair = intersections[i]\n last = intersections[i - 1]\n\n if pair[0] == last[-1]:\n last.append(pair[1])\n intersections.pop(i)\n else:\n i += 1\n\n print(intersections)\n\n print(nonintersections)\n\n removals = []\n\n for i, pair in enumerate(nonintersections):\n fixed = False\n\n print(\"Trying to fix\", pair)\n\n # Find intersections with first point\n print(\"Searching with first point\")\n search = linelist[pair[0]]\n\n for j in range(pair[-1] + 1, len(linelist)):\n\n if j >= len(linelist):\n j -= (len(linelist) + 1)\n\n line = linelist[j]\n intersection = intersect(line[1], sub(line[2], line[1]), search[2], sub(search[1], search[2]),\n \"abs \" + str(searchpass))\n #print(j, intersection)\n if intersection:\n print(\"Fixing\", pair, intersection)\n linelist[pair[0]][2] = intersection\n linelist[j][1] = intersection\n\n removals.extend(range(pair[0] + 1, j))\n fixed = True\n break\n\n if not fixed:\n\n # Find intersections with second point\n print(\"Searching with second point\")\n search = linelist[pair[-1]]\n\n for j in range(0, pair[0]):\n\n if j >= len(linelist):\n j -= (len(linelist) + 1)\n\n line = linelist[j]\n intersection = intersect(line[1], sub(line[2], line[1]), search[2], sub(search[1], search[2]),\n \"abs \" + str(searchpass))\n #print(j, intersection)\n if intersection:\n print(\"Fixing\", pair, intersection)\n linelist[pair[-1]][1] = intersection\n linelist[j][2] = intersection\n\n removals.extend(range(j + 1, pair[-1]))\n fixed = True\n break\n\n if not fixed:\n print(\"Couldn't fix\", pair)\n\n removals = list(set(removals))\n removals.sort()\n print(\"Remove:\", removals)\n for i, r in enumerate(removals):\n if r - i < len(linelist):\n #linelist[r] = [(Decimal('0'), Decimal('0'), Decimal('0')), (Decimal('0'), Decimal('0')), (Decimal('0'), Decimal('0'))]\n linelist.pop(r - i)\n pass\n\n #print(\"Trimmed to:\", linelist)\n\n\n return linelist", "title": "" }, { "docid": "f36c2643975a97162e6d89ed1ccdb26a", "score": "0.47286493", "text": "def max_points_on_a_line_containing_point_i(i):\r\n def slope_coprime(x1, 
y1, x2, y2):\r\n \"\"\" to avoid the precision issue with the float/double number,\r\n using a pair of co-prime numbers to represent the slope.\r\n \"\"\"\r\n delta_x, delta_y = x1 - x2, y1 - y2\r\n if delta_x == 0: # vertical line\r\n return (0, 0)\r\n elif delta_y == 0: # horizontal line\r\n return (sys.maxsize, sys.maxsize)\r\n elif delta_x < 0:\r\n # to have a consistent representation,\r\n # keep the delta_x always positive.\r\n delta_x, delta_y = - delta_x, - delta_y\r\n gcd = math.gcd(delta_x, delta_y)\r\n slope = (delta_x / gcd, delta_y / gcd)\r\n return slope\r\n\r\n\r\n def add_line(i, j, count, duplicates):\r\n \"\"\"\r\n Add a line passing through i and j points.\r\n Update max number of points on a line containing point i.\r\n Update a number of duplicates of i point.\r\n \"\"\"\r\n # rewrite points as coordinates\r\n x1 = points[i][0]\r\n y1 = points[i][1]\r\n x2 = points[j][0]\r\n y2 = points[j][1]\r\n # add a duplicate point\r\n if x1 == x2 and y1 == y2: \r\n duplicates += 1\r\n # add a horisontal line : y = const\r\n elif y1 == y2:\r\n nonlocal horizontal_lines\r\n horizontal_lines += 1\r\n count = max(horizontal_lines, count)\r\n # add a line : x = slope * y + c\r\n # only slope is needed for a hash-map\r\n # since we always start from the same point\r\n else:\r\n slope = slope_coprime(x1, y1, x2, y2)\r\n lines[slope] = lines.get(slope, 1) + 1\r\n count = max(lines[slope], count)\r\n return count, duplicates\r\n \r\n # init lines passing through point i\r\n lines, horizontal_lines = {}, 1\r\n # One starts with just one point on a line : point i.\r\n count = 1\r\n # There is no duplicates of a point i so far.\r\n duplicates = 0\r\n # Compute lines passing through point i (fixed)\r\n # and point j (interation).\r\n # Update in a loop the number of points on a line\r\n # and the number of duplicates of point i.\r\n for j in range(i + 1, n):\r\n count, duplicates = add_line(i, j, count, duplicates)\r\n return count + duplicates", "title": "" }, { "docid": "a5b8c5629cc8881d6f4edfe1f682c990", "score": "0.47256804", "text": "def test_process_address_line_broken_pair_failure(address_line):\n result = load_data._process_address_line(address_line)\n assert result is None", "title": "" }, { "docid": "40af820517aec1215400bc369e9c8db9", "score": "0.472277", "text": "def validatelinesegments(self):\n drop = []\n for f, line in self.linelist.iteritems():\n low = line.getstart()\n hi = line.getend()\n flow = False\n fhigh = False\n for seg in self.segments:\n if not flow and seg[0] <= low <= seg[1]:\n flow = True\n if not fhigh and seg[0] <= hi <= seg[1]:\n fhigh = True\n if fhigh and flow:\n continue\n if not fhigh and not flow:\n found = False\n for seg in self.segments:\n if low <= seg[0] <= hi:\n found = True\n break\n if not found:\n drop.append(f)\n continue\n if not fhigh:\n distance = 1000000.\n current = -1\n # find the closest that is less than\n for i in range(len(self.segments)):\n dist = hi - self.segments[i][1]\n if 0 < dist < distance:\n distance = dist\n current = i\n if current >= 0:\n self.linelist[f].setend(self.spec.chans()[self.spec.getchanindex(self.segments[current][1])])\n self.linelist[f].setfend(max(self.spec.getfreq(self.linelist[f].getend()),\n self.spec.getfreq(self.linelist[f].getstart())))\n if not flow:\n distance = 1000000.\n current = -1\n # find the closest that is less than\n for i in range(len(self.segments)):\n dist = self.segments[i][0] - low\n if 0 < dist < distance:\n distance = dist\n current = i\n if current >= 0:\n 
self.linelist[f].setstart(self.spec.chans()[self.spec.getchanindex(self.segments[current][0])])\n self.linelist[f].setfstart(min(self.spec.getfreq(self.linelist[f].getend()),\n self.spec.getfreq(self.linelist[f].getstart())))\n\n for f in drop:\n del self.linelist[f]", "title": "" }, { "docid": "a2f61b67279393416cdf95fea7ce40f1", "score": "0.47199696", "text": "def _process_proximity_line(\n source_line,\n xs,\n ys,\n pan_near_x,\n pan_near_y,\n is_forward,\n line_id,\n width,\n max_distance,\n line_proximity,\n nearest_xs,\n nearest_ys,\n values,\n distance_metric,\n):\n start = width - 1\n end = -1\n step = -1\n if is_forward:\n start = 0\n end = width\n step = 1\n\n n_values = len(values)\n for pixel in prange(start, end, step):\n is_target = False\n # Is the current pixel a target pixel?\n if n_values == 0:\n if source_line[pixel] != 0 and np.isfinite(source_line[pixel]):\n is_target = True\n else:\n for i in prange(n_values):\n if source_line[pixel] == values[i]:\n is_target = True\n\n if is_target:\n line_proximity[pixel] = 0.0\n nearest_xs[pixel] = pixel\n nearest_ys[pixel] = line_id\n pan_near_x[pixel] = pixel\n pan_near_y[pixel] = line_id\n continue\n\n # Are we near(er) to the closest target to the above (below) pixel?\n near_distance_square = max_distance ** 2 * 2.0\n if pan_near_x[pixel] != -1:\n # distance_square\n x1 = xs[pan_near_y[pixel], pan_near_x[pixel]]\n y1 = ys[pan_near_y[pixel], pan_near_x[pixel]]\n x2 = xs[line_id, pixel]\n y2 = ys[line_id, pixel]\n\n dist = _distance(x1, x2, y1, y2, distance_metric)\n dist_sqr = dist ** 2\n if dist_sqr < near_distance_square:\n near_distance_square = dist_sqr\n else:\n pan_near_x[pixel] = -1\n pan_near_y[pixel] = -1\n\n # Are we near(er) to the closest target to the left (right) pixel?\n last = pixel - step\n if pixel != start and pan_near_x[last] != -1:\n x1 = xs[pan_near_y[last], pan_near_x[last]]\n y1 = ys[pan_near_y[last], pan_near_x[last]]\n x2 = xs[line_id, pixel]\n y2 = ys[line_id, pixel]\n\n dist = _distance(x1, x2, y1, y2, distance_metric)\n dist_sqr = dist ** 2\n if dist_sqr < near_distance_square:\n near_distance_square = dist_sqr\n pan_near_x[pixel] = pan_near_x[last]\n pan_near_y[pixel] = pan_near_y[last]\n\n # Are we near(er) to the closest target to the\n # topright (bottom left) pixel?\n tr = pixel + step\n if tr != end and pan_near_x[tr] != -1:\n x1 = xs[pan_near_y[tr], pan_near_x[tr]]\n y1 = ys[pan_near_y[tr], pan_near_x[tr]]\n x2 = xs[line_id, pixel]\n y2 = ys[line_id, pixel]\n\n dist = _distance(x1, x2, y1, y2, distance_metric)\n dist_sqr = dist ** 2\n if dist_sqr < near_distance_square:\n near_distance_square = dist_sqr\n pan_near_x[pixel] = pan_near_x[tr]\n pan_near_y[pixel] = pan_near_y[tr]\n\n # Update our proximity value.\n if (\n pan_near_x[pixel] != -1\n and max_distance * max_distance >= near_distance_square\n and (\n line_proximity[pixel] < 0\n or near_distance_square < line_proximity[pixel]\n * line_proximity[pixel]\n )\n ):\n line_proximity[pixel] = sqrt(near_distance_square)\n nearest_xs[pixel] = pan_near_x[pixel]\n nearest_ys[pixel] = pan_near_y[pixel]\n return", "title": "" }, { "docid": "99ba8d95db868ae8bbc34ab57c9fdc66", "score": "0.4719808", "text": "def thin_tabulated_values(x, f, rel_err=1.e-4, preserve_range=False):\n x = np.array(x)\n f = np.array(f)\n\n # Check for valid inputs\n if len(x) != len(f):\n raise ValueError(\"len(x) != len(f)\")\n if rel_err <= 0 or rel_err >= 1:\n raise ValueError(\"rel_err must be between 0 and 1\")\n if not (np.diff(x) >= 0).all():\n raise ValueError(\"input 
x is not sorted.\")\n\n # Check for trivial noop.\n if len(x) <= 2:\n # Nothing to do\n return\n\n # Start by calculating the complete integral of |f|\n total_integ = np.trapz(abs(f), x)\n if total_integ == 0:\n return np.array([x[0], x[-1]]), np.array([f[0], f[-1]])\n thresh = rel_err * total_integ\n\n if not preserve_range:\n # Remove values from the front that integrate to less than thresh.\n integ = 0.5 * (abs(f[0]) + abs(f[1])) * (x[1] - x[0])\n k0 = 0\n while k0 < len(x)-2 and integ < thresh:\n k0 = k0+1\n integ += 0.5 * (abs(f[k0]) + abs(f[k0+1])) * (x[k0+1] - x[k0])\n # Now the integral from 0 to k0+1 (inclusive) is a bit too large.\n # That means k0 is the largest value we can use that will work as the staring value.\n\n # Remove values from the back that integrate to less than thresh.\n k1 = len(x)-1\n integ = 0.5 * (abs(f[k1-1]) + abs(f[k1])) * (x[k1] - x[k1-1])\n while k1 > k0 and integ < thresh:\n k1 = k1-1\n integ += 0.5 * (abs(f[k1-1]) + abs(f[k1])) * (x[k1] - x[k1-1])\n # Now the integral from k1-1 to len(x)-1 (inclusive) is a bit too large.\n # That means k1 is the smallest value we can use that will work as the ending value.\n\n x = x[k0:k1+1] # +1 since end of range is given as one-past-the-end.\n f = f[k0:k1+1]\n\n # Start a new list with just the first item so far\n newx = [x[0]]\n newf = [f[0]]\n\n k0 = 0 # The last item currently in the new array\n k1 = 1 # The current item we are considering to skip or include\n while k1 < len(x)-1:\n # We are considering replacing all the true values between k0 and k1+1 (non-inclusive)\n # with a linear approxmation based on the points at k0 and k1+1.\n lin_f = f[k0] + (f[k1+1]-f[k0])/(x[k1+1]-x[k0]) * (x[k0:k1+2] - x[k0])\n # Integrate | f(x) - lin_f(x) | from k0 to k1+1, inclusive.\n integ = np.trapz(abs(f[k0:k1+2] - lin_f), x[k0:k1+2])\n # If the integral of the difference is < thresh, we can skip this item.\n if integ < thresh:\n # OK to skip item k1\n k1 = k1 + 1\n else:\n # Have to include this one.\n newx.append(x[k1])\n newf.append(f[k1])\n k0 = k1\n k1 = k1 + 1\n\n # Always include the last item\n newx.append(x[-1])\n newf.append(f[-1])\n\n return newx, newf", "title": "" }, { "docid": "2926caa519f2766acbe61dc9fa71244a", "score": "0.47181755", "text": "def duplicate_vertices_removed(line, tolerance = 0.001):\n\n assert line.ndim == 2\n if len(line) < 3: return line\n dims = line.shape[1]\n whittled = np.zeros(line.shape)\n whittled[0] = line[0]\n c_i = 0\n for i in range(1, len(line)):\n manhattan = 0.0\n for d in range(dims): manhattan += abs(line[i, d] - whittled[c_i, d])\n if manhattan > tolerance:\n c_i += 1\n whittled[c_i] = line[i]\n if c_i == len(line) - 1: return line\n whittled[c_i] = line[-1]\n return whittled[:c_i + 1]", "title": "" }, { "docid": "6835fb551c5d65bec116495d51be49c4", "score": "0.47098392", "text": "def lines2_intersect(p10, p11, p20, p21):\n t = (p20 - p10) / (p11 - p10 - p21 + p20)\n return p10 + t * (p11 - p10)", "title": "" }, { "docid": "f007a41f38e91a7d05d9a9577d00437c", "score": "0.47079766", "text": "def make_points_pairs(pcr_data, calculation_data, verbose=True):\n n_multi = 0\n point_pairs = []\n for event in pcr_data:\n if event not in calculation_data:\n continue\n elif len(calculation_data[event]) == 1:\n # skip if it not expressed\n if calculation_data[event][0] < 0:\n continue\n point_pairs.append((calculation_data[event][0], pcr_data[event]))\n else:\n n_multi += 1\n if verbose:\n print(('\\tPoints covered: {} ({:.4f} %; removed {}'\n ' point(s) duo to \"duplicated\" 
event)').format(len(point_pairs), len(point_pairs) * 100 / len(pcr_data),\n n_multi))\n return point_pairs", "title": "" }, { "docid": "f2447b337591c60fb9a7021de2b9a9df", "score": "0.47059524", "text": "def _line2line(line1):\n a=768\n b=1024\n import math\n #step1: cross prod the two lines to find common perp vector\n L1x1,L1y1,L1x2,L1y2 = line1\n L2x1,L2y1,L2x2,L2y2 =int(b/2) ,0 ,int(b/2) ,a \n L1dx,L1dy = L1x2-L1x1,L1y2-L1y1\n L2dx,L2dy = L2x2-L2x1,L2y2-L2y1\n commonperp_dx,commonperp_dy = (L1dy - L2dy, L2dx-L1dx)\n\n #step2: normalized_perp = perp vector / distance of common perp\n commonperp_length = math.hypot(commonperp_dx,commonperp_dy)\n commonperp_normalized_dx = commonperp_dx/float(commonperp_length)\n commonperp_normalized_dy = commonperp_dy/float(commonperp_length)\n\n #step3: length of (pointonline1-pointonline2 dotprod normalized_perp).\n # Note: According to the first link above, it's sufficient to\n # \"Take any point m on line 1 and any point n on line 2.\"\n # Here I chose the startpoint of both lines\n shortestvector_dx = (L1x1-L2x1)*commonperp_normalized_dx\n shortestvector_dy = (L1y1-L2y1)*commonperp_normalized_dy\n mindist = math.hypot(shortestvector_dx,shortestvector_dy)\n\n #return results\n result = mindist\n return result", "title": "" }, { "docid": "234c726ed485c9cceeff732bcc6182ba", "score": "0.4705493", "text": "def _line(the_line_segment):\n\n assert len(list(zip(*the_line_segment.coords.xy))) == 2, \"The line segment\" + str(the_line_segment) + \"is NOT defined by 2 points!\"\n\n p1 = list(zip(*the_line_segment.coords.xy))[0]\n p2 = list(zip(*the_line_segment.coords.xy))[1]\n A = (p1[1] - p2[1])\n B = (p2[0] - p1[0])\n C = (p1[0] * p2[1] - p2[0] * p1[1])\n\n return A, B, -C", "title": "" }, { "docid": "27296df7e34ae5213f54ec5c975a3edf", "score": "0.46977264", "text": "def func_line(x, a, b):\n return a * x + b", "title": "" }, { "docid": "edd917030c38fdcd9f950ff4258ff25a", "score": "0.46944255", "text": "def line_intersect(self, line):\r\n a, b, c = line\r\n d = self.a * b - self.b * a\r\n if d != 0:\r\n dx = -self.c * b + self.b * c\r\n dy = self.c * a - self.a * c\r\n x = dx / float(d)\r\n y = dy / float(d)\r\n return np.array([x, y])\r\n else:\r\n return np.array([])", "title": "" }, { "docid": "8587cda55a42b5da5fcdd1eaa910f030", "score": "0.4688707", "text": "def match_line(self,force=False,pad=2,padmid=0.725,lhm=2):\n #TODO: rework other preprocesses...\n try:\n if force:\n self.df[\"calc_line_idx\"] = -1\n if self.df.loc[self.df[\"calc_line_idx\"] == -1].empty: return False\n print(\"Start line matching\")\n tdf = self.df.reset_index()\n tdf[\"line_height\"] = tdf[\"line_y1\"] - tdf[\"line_y0\"]\n linedict = tdf.to_dict(orient=\"list\")\n pparam = PParam()\n pparam.max_row = max(linedict[\"line_idx\"])*3\n pparam.y1_max = max(linedict[\"line_y1\"])+1\n while True:\n print(f\"Match line: {pparam.lineIdx}\")\n pparam.y0 = min(linedict[\"line_y0\"])\n pparam.y1 = linedict[\"line_y1\"][linedict[\"line_y0\"].index(pparam.y0)]\n if pparam.y0 > pparam.y1:\n linedict[\"line_y1\"][linedict[\"line_y0\"].index(pparam.y0)] = pparam.y0+1\n pparam.y1 = pparam.y0+1\n if -1 not in linedict[\"calc_line_idx\"]:\n print(\"Match lines ✓\")\n break\n pparam.diff = (pparam.y1 - pparam.y0) * pad\n pparam.diffmid = pparam.diff\n if pad > padmid: pparam.diffmid = (pparam.y1 - pparam.y0) * padmid\n # Select all y0 which are smaller as y0+25%diff and greater as y0+25%diff\n con = ((pparam.y1-pparam.y0) < np.array([x*lhm for x in linedict['line_height']])) & \\\n ((pparam.y0 - 
pparam.diff) < np.array(linedict['line_y0'])) & \\\n ((pparam.y0 + pparam.diffmid) > np.array(linedict['line_y0'])) & \\\n ((pparam.y1 - pparam.diffmid) < np.array(linedict['line_y1'])) & \\\n ((pparam.y1 + pparam.diff) > (np.array(linedict['line_y1'])))\n offset = 0\n word_set = {}\n for idx in np.nonzero(con)[0].tolist():\n engine_stat = (linedict[\"ocr\"][idx], linedict[\"ocr_profile\"][idx])\n if word_set.get(engine_stat,None) == None: word_set[engine_stat] = {}\n word_set[engine_stat][idx] = linedict[\"word_x0\"][idx]\n old_idx = 0\n for (ocr,engine) in word_set:\n old_idx = 0\n for idx, x0 in sorted(word_set[(ocr,engine)].items(),key=lambda x: x[1]):\n linedict[\"calc_line_idx\"][idx] = pparam.lineIdx\n linedict[\"word_idx\"][idx] = old_idx+offset\n linedict[\"line_y0\"][idx] = pparam.y1_max\n old_idx += 1\n pparam.lineIdx += 1\n if pparam.lineIdx == pparam.max_row:\n print(\"Match lines ✗\")\n print(f\"The max of {pparam.max_row} col was reached. Maybe something went wrong?\")\n break\n self.df[\"calc_line_idx\"] = linedict[\"calc_line_idx\"]\n self.df[\"calc_word_idx\"] = linedict[\"word_idx\"]\n except Exception as e:\n print(f\"Exception: {e}\")\n pass\n return True", "title": "" }, { "docid": "c000a1197d3f3e3b393e1c28f5dc54a8", "score": "0.46848756", "text": "def _assert_results(self, lines, linter_name):\n for line in lines:\n self._assert_result(line, linter_name)", "title": "" }, { "docid": "22631ad79fb13233a1ca64dcef5a59a7", "score": "0.4679408", "text": "def ec_lines_pymol_script(ec_table, output_file, distance_cutoff=5,\n score_column=\"cn\", chain=None):\n t = ec_table.copy()\n\n # assign line styles\n for prop, val in [\n (\"dash_radius\", 0.345), (\"dash_gap\", 0.075), (\"dash_length\", 0.925)\n ]:\n t.loc[:, prop] = val\n\n # adjust line width/radius based on score, if selected\n if score_column is not None:\n scaling_factor = 0.5 / ec_table.loc[:, score_column].max()\n t.loc[:, \"dash_radius\"] = ec_table.loc[:, score_column] * scaling_factor\n # avoid negative values\n t.loc[t.dash_radius < 0, \"dash_radius\"] = 0\n\n if \"dist\" in ec_table and distance_cutoff is not None:\n t.loc[t.dist <= distance_cutoff, \"color\"] = \"green\"\n t.loc[t.dist > distance_cutoff, \"color\"] = \"red\"\n else:\n t.loc[:, \"color\"] = \"green\"\n\n if chain is not None:\n if isinstance(chain, dict):\n chain_sel = \", chain \" + \" or chain \".join([x for x in chain.values()])\n else:\n # otherwise just take the name of the chain as it is\n chain_sel = \", chain '{}'\".format(chain)\n else:\n chain_sel = \"\"\n\n with open(output_file, \"w\") as f:\n f.write(\"as cartoon{}\\n\".format(chain_sel))\n f.write(\"color grey80{}\\n\".format(chain_sel))\n pymol_pair_lines(t, f, chain)", "title": "" }, { "docid": "25fad7af89eaeffbe6cf993fd4f41063", "score": "0.46763787", "text": "def compute_error(training_data_indicies, results):\n\n x_data = []\n blank = np.zeros([1, 28, 28])\n\n # row 1\n x_data.append(boxify_center(np.copy(blank)))\n x_data.append(boxify_center_hollow(np.copy(blank)))\n x_data.append(lineify_center(np.copy(blank)))\n x_data.append(lineify_center_horizontal(np.copy(blank)))\n x_data.append(circleify_center(np.copy(blank)))\n x_data.append(circleify_center_hollow(np.copy(blank)))\n x_data.append(triangulify_center(np.copy(blank)))\n x_data.append(triangulify_center_hollow(np.copy(blank)))\n # row 2\n x_data.append(boxify_top_left(np.copy(blank)))\n x_data.append(boxify_bottom_right(np.copy(blank)))\n x_data.append(lineify_top_left(np.copy(blank)))\n 
x_data.append(lineify_bottom_right(np.copy(blank)))\n x_data.append(circleify_top_left(np.copy(blank)))\n x_data.append(circleify_bottom_right(np.copy(blank)))\n x_data.append(triangulify_top_left(np.copy(blank)))\n x_data.append(triangulify_bottom_right(np.copy(blank)))\n # row 3\n x_data.append(noiseify())\n x_data.append(noiseify_blur())\n # x_data.append(house(np.copy(blank)))\n\n training_data_indicies_nonzero = np.nonzero(training_data_indicies)[0]\n errors = []\n\n for i in range(results.shape[0]):\n\n # print(training_data_indicies)\n # print(training_data_indicies_nonzero)\n # print(training_data_indicies_nonzero[i])\n org = x_data[training_data_indicies_nonzero[i]].flatten()\n gen = results[i].flatten()\n\n error = pearsonr(org, gen)\n errors.append(error)\n\n errors = np.array(np.abs(errors))\n\n return errors[:, 0], training_data_indicies_nonzero", "title": "" }, { "docid": "872b70bdac80ae67494fd06e9c8a186b", "score": "0.4674242", "text": "def getVanishingPoints(lines, confidences, image):\n\n\n\t#initialize model\n\tmodel = ransac.crossProductModel(mindist,image.shape[1], image.shape[0], True, True)\n\tlines = np.array([normalizeLine(i) for i in lines])\n print \"lines total: \", len(lines)\n\t#run ransac\n\tprint \"ransacIterations: \" ,ransacIterations\n\tmodel1, ransac_data1 , ransac_error1= ransac.ransac(lines ,model, 2, ransacIterations, mindist, minNoOfInliers, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdebug=debug,return_all=True)\n\t#get the inlier set of lines\n\tlines1 = [lines[i] for i in ransac_data1] \n\t#get the line closest to the model\n\tbestLine1 = getBestLine(lines1, model1)\n\tlinesn = np.array([])\n\t#print 'lines1:' , lines1 \n\tprint \"lines taken by vp1: \", len(lines1)\n\t#take only lines with angle > pi/8\n\tfor line in lines:\n\t\tangle = getAngle(line, bestLine1)\n\t\t#select lines outside pi/8 range\n\t\tif abs(angle) > np.pi/8:\n\t\t\tif linesn.size > 0: \n\t\t\t\tlinesn= np.vstack((linesn, line))\n\t\t\telse:\n\t\t\t\tlinesn= np.array(line)\n\n\t#img = image.copy()\n\t#superImpressLines(img, lines1, 0,0,255)\n\t#cv.imwrite('vanishingLines.jpg', img)\n \n\tprint \"lines left for second direction: \", len(linesn)\n\t\t\n\tif len(linesn)<=4:\n\t linesn = np.array([])\n\t for c in range(1,5):\n\t if abs(bestLine1[1])>eps:\n\t line = np.array([1., -bestLine1[0]/bestLine1[1] ,c])\n\t else:\n\t line = np.array([0., 1. 
,c])\n\n\t if linesn.size > 0: \n\t\t linesn= np.vstack((linesn, line))\n\t else:\n\t\t linesn= np.array(line)\n\t \n\t\n\t#second round of ransac for the remaining lines\n\tmodel2, ransac_data2 , ransac_error2= ransac.ransac(linesn ,model, 2, ransacIterations, mindist, minNoOfInliers, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdebug=debug,return_all=True)\n\tlines2 = [linesn[i] for i in ransac_data2] \n\tbestLine2 = getBestLine(lines2, model2)\n\t \n\t \n\tif writeFile:\n\t\timg = image.copy()\n\t\tsuperImpressLines(img, lines1, 0,0,255)\n\t\tsuperImpressLines(img, lines2, 0,255, 0)\n\t\tcv.imwrite('vanishingLines.jpg', img)\n\n\tif debug:\n\t\tprint 'model1:', model1 \n\t\tprint 'No of inliers:', len(ransac_data1)\n\t\tprint 'model error:' , ransac_error1\n\t\tprint 'bestline1:' , bestLine1 \n\t\tprint 'model2:', model2 \n\t\tprint 'No of inliers:', len(ransac_data2)\n\t\tprint 'model error:' , ransac_error2\n\t\tprint 'bestline2:' , bestLine2 \n\n\n\tmodels= [model1, model2] \n\tinlierLines = [lines1, lines2]\n\tbestLines = [bestLine1, bestLine2]\n\treturn (models, bestLines, inlierLines)", "title": "" }, { "docid": "7489e24ce3b32c223430037d9f1e2631", "score": "0.46737075", "text": "def _compute_error(\n self, page, content, target_pixelmap, diff_weights, is_aux):\n delta_page = target_pixelmap.compute_delta_page(\n page, content, diff_weights[page, :], is_aux)\n cond = delta_page < 0\n candidate_offsets = self._OFFSETS[cond]\n priorities = delta_page[cond]\n\n # Don't use deterministic order for page, offset. Otherwise,\n # we get the \"venetian blind\" effect when filling large blocks of\n # colour.\n deltas = [\n (priorities[i], random.getrandbits(8), candidate_offsets[i])\n for i in range(len(candidate_offsets))\n ]\n heapq.heapify(deltas)\n\n while deltas:\n pri, _, offset = heapq.heappop(deltas)\n assert pri < 0\n assert 0 <= offset <= 255\n\n yield -pri, offset", "title": "" }, { "docid": "c4ceac4188855b5c65d1360e80241f2b", "score": "0.4671888", "text": "def get_slope_bucket(lines: List[List[tuple]]) -> Dict[int, List[List[tuple]]]:\n def get_slope(x1, y1, x2, y2): \n try:\n slope = (float)(y2-y1)/(float)(round(x2-x1))\n except ZeroDivisionError:\n print(f'Zerodivisionerror:: Variables are: {(x1, y1), (x2, y2)}')\n return math.inf\n return slope\n \n slopes = dict()\n #Finding slopes of every line:\n for line in lines:\n p1, p2 = line[0], line[1]\n slope = get_slope(p1[0], p1[1], p2[0], p2[1])# slope = find_slope(p1, p2)\n \n #Rounding slope for 3 decimal:\n if slope != math.inf:\n slope = round(slope, 1) #slope = round(slope, ndigits = 5)\n \n # Check if the slope exists in the slope_dict\n if slope in slopes.keys():\n slopes[slope].append(line)\n else:\n slopes[slope] = [line]\n \n return slopes", "title": "" }, { "docid": "8d579a978ffbd2f52897b8e22ad69f88", "score": "0.46673915", "text": "def traverse(high, low, points):\n j = 0\n price = points[j]\n hit = np.zeros(len(points))\n timesteps = np.zeros(len(points))\n for i in range(len(high)):\n if low[i] < price < high[i]:\n hit[j] = 1\n timesteps[j] = i\n j += 1\n if j == len(points):\n break\n else:\n price = points[j]\n return (hit, timesteps, high[i], low[i])", "title": "" } ]
53995d40d676c5765cc0c649e11ab981
Returns the current values of all weights in the network in a dictionary with the same keys as self.weight_keys.
[ { "docid": "92a8aac52d5eb688bc26546c1edf7d65", "score": "0.7493311", "text": "def get_weights(self, sess):\n keys = sorted(self.weight_keys)\n weight_dict = {}\n for i, k in enumerate(keys):\n weight_dict[k] = sess.run(self.parameters[i])\n\n return weight_dict", "title": "" } ]
[ { "docid": "eca0e62d23f2146e66f80ec97663dc41", "score": "0.8380025", "text": "def get_weights(self):\n weights = {}\n for key in self.weights:\n weights[key] = self.weights[key].get_value()\n return weights", "title": "" }, { "docid": "0c397626a40b260f6ee205b759902c55", "score": "0.7638268", "text": "def get_weights(self):\n return {}", "title": "" }, { "docid": "7661bdfa3cb93194b27dd75b6326d495", "score": "0.7507765", "text": "def _weight_values(self):\n return self.weights[~self.mask].ravel()", "title": "" }, { "docid": "a067fe18126c12e5e8602b8e52b34b57", "score": "0.7504422", "text": "def individual_weights(self):\n weights = self.weights\n feature_dims = self.feature_dims()\n indiv_weights = {}\n i1 = 0\n for i, v in enumerate(self.targets.keys()):\n i2 = i1 + feature_dims[i]\n indiv_weights[v] = weights[i1:i2]\n i1 = i2\n return indiv_weights", "title": "" }, { "docid": "15f403ac62c7a86f3fc2324d937d114d", "score": "0.7421016", "text": "def get_weights(self):\n _wts = self.weight.detach().cpu().numpy()\n if self.bias is not None:\n _bias = self.bias.detach().cpu().numpy()\n _wts = np.hstack([_wts, _bias])\n return _wts", "title": "" }, { "docid": "d8aa2531123031ad3233efb3c6c5fc52", "score": "0.73289925", "text": "def get_weights(self):\n _wts = self.weight.detach().cpu().numpy()\n if self.padding_idx is not None:\n _wts = _wts[:-1, :]\n if (self.bias is not None):\n _bias = self.bias.detach().cpu().numpy()\n if self.padding_idx is not None:\n _bias = _bias[:-1, :]\n _wts = np.hstack([_wts, _bias])\n return _wts", "title": "" }, { "docid": "52036bc33c76dfd5cb963d253f4d13d0", "score": "0.7307032", "text": "def weights(self):\n return self._weights", "title": "" }, { "docid": "7de6c9352c1cd9fc6f7b4a8a39adb59c", "score": "0.725716", "text": "def get_weights(self):\n return np.concatenate([wmat.flatten() for wmat in self.weights])", "title": "" }, { "docid": "af266e3f2c9660885c1218ff901cc60a", "score": "0.7234315", "text": "def __getAllWeightedDicts(self):\r\n\r\n # this will hold the all weighted dictionaries\r\n weightedDicts = []\r\n\r\n for key in self.__weightMultipliers:\r\n #print(f\"Creating a weighted dict for {key} tag...\")\r\n weightedDicts.append(self.__getWeightsForTagType(key))\r\n return weightedDicts", "title": "" }, { "docid": "3865f4c531cef416965f485b42bbcb1b", "score": "0.7228197", "text": "def weights(self):\n return (self.__weights)", "title": "" }, { "docid": "00cbda8dffa32392a8f76c5dac4ae4ee", "score": "0.72061104", "text": "def get_weights(self):\r\n return self.weights", "title": "" }, { "docid": "00cbda8dffa32392a8f76c5dac4ae4ee", "score": "0.72061104", "text": "def get_weights(self):\r\n return self.weights", "title": "" }, { "docid": "00cbda8dffa32392a8f76c5dac4ae4ee", "score": "0.72061104", "text": "def get_weights(self):\r\n return self.weights", "title": "" }, { "docid": "a840fe512b74483fb09f8ee2916df3d6", "score": "0.7187458", "text": "def get_weights(self):\n return self.weights", "title": "" }, { "docid": "10694d32d2848d906a6fee96b80b2165", "score": "0.71351576", "text": "def get_weights(self):\n flat_weights = np.hstack([arr.flatten() for arr in (self.weights+self.bias)])\n return flat_weights", "title": "" }, { "docid": "aa39b2ded23c7098d606c3bb4dfe1480", "score": "0.7126323", "text": "def get_weights(self):\n return np.concatenate((self.weight_set_1.ravel(), self.weight_set_2.ravel())) # concatenate, make 1D", "title": "" }, { "docid": "976b926dbd3b2b9664cf08b53a457027", "score": "0.71239495", "text": "def get_weights(self):\n pass", "title": "" }, { 
"docid": "976b926dbd3b2b9664cf08b53a457027", "score": "0.71239495", "text": "def get_weights(self):\n pass", "title": "" }, { "docid": "80bb73a474dc47408724fb4235f850a3", "score": "0.712209", "text": "def get_weights(self):\n\n return self.weights + self.biases", "title": "" }, { "docid": "51483ca6a1b049de368f1f8b3c62cdea", "score": "0.7095723", "text": "def get_weights(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "2a8283a4db6c4a19d0c0c97c8ed965b2", "score": "0.7090818", "text": "def get_weights(self):\n self.update_network_description()\n return self.network_weights", "title": "" }, { "docid": "1d5f2a55d83a500d31e5ce39c61c9989", "score": "0.7034696", "text": "def get_weights(self):\n out = [item.get_weights() for item in self.classifier]\n return np.vstack(out)", "title": "" }, { "docid": "04406e34ca7745acc17d940328e6d4a3", "score": "0.70157033", "text": "def getWeights(self):\n return self.W, self.b", "title": "" }, { "docid": "cbe3b71c6cff8372035d84132fbd1479", "score": "0.7004771", "text": "def get_weights(self):\n network_weights = deque()\n for layer in self.layers:\n layer.get_weights(network_weights)\n return network_weights", "title": "" }, { "docid": "dfb3e6680223489da54c2cdf2b95225b", "score": "0.7002672", "text": "def get_weights(self):\n\n params = {\n 'w': self.w,\n 'b': self.b,\n 'pdw': self.pdw,\n 'pdd': self.pdd,\n }\n\n return params", "title": "" }, { "docid": "d8847f40ae2fefce37857dabf2412216", "score": "0.6874815", "text": "def allweights(self):\n aw = list(self.weights())\n weights,derivs,names = zip(*aw)\n weights = [w.ravel() for w in weights]\n derivs = [d.ravel() for d in derivs]\n return concatenate(weights),concatenate(derivs)", "title": "" }, { "docid": "d8847f40ae2fefce37857dabf2412216", "score": "0.6874815", "text": "def allweights(self):\n aw = list(self.weights())\n weights,derivs,names = zip(*aw)\n weights = [w.ravel() for w in weights]\n derivs = [d.ravel() for d in derivs]\n return concatenate(weights),concatenate(derivs)", "title": "" }, { "docid": "e7d99b781e47f8dbaebf2639608511af", "score": "0.68285125", "text": "def compute_weight_caches(self):\n ew_cache = {}\n for u, v in self.G.edges_iter():\n val = self._edge_weight_func(u, v)\n ew_cache[(u, v)] = val\n ew_cache[(v, u)] = val\n\n nw_cache = {}\n for u in self.G.nodes_iter():\n nw_cache[u] = self._node_weight_func(u)\n\n self.ew_cache = ew_cache\n self.nw_cache = nw_cache\n return ew_cache, nw_cache", "title": "" }, { "docid": "a5bdf4ef613ca9d6f6bd0b61e1b131a0", "score": "0.6821242", "text": "def get_weights(self):\n return self.model.get_weights()", "title": "" }, { "docid": "7b64220f4d7381b8224ffe941b19a6f0", "score": "0.6777656", "text": "def weighted(self):\n return self._weighted.all()", "title": "" }, { "docid": "fa0a9d882b64c268654a80c6f927efae", "score": "0.67751026", "text": "def weights(self):\n return self.variables", "title": "" }, { "docid": "3b266dad409fba1f64b4b36748dbce83", "score": "0.67598426", "text": "def weights_dist(self):\n return np.hstack(list(map(lambda layer: layer.weight.data.numpy().flatten(), self.layers)))", "title": "" }, { "docid": "822dd606def3e5859e7b6a5315d3b030", "score": "0.6757973", "text": "def variables(self):\n return self.weights", "title": "" }, { "docid": "36b6958abff3d1d6e040a198da4da9ca", "score": "0.6741814", "text": "def weights(self):\n return self.trainable_weights + self.non_trainable_weights", "title": "" }, { "docid": "e55071d5d5561c251739456064eb85d2", "score": "0.67389846", "text": "def get_weights(self):\n weights = []\n 
for part in self.parts:\n weights += part.weights\n return [w.get_value() for w in weights]", "title": "" }, { "docid": "9f4e3856cc5116e32306a5eac762b728", "score": "0.6730947", "text": "def update_weights(self):\n \n \"\"\"\n weights=[]\n for idx in sorted(self.network.nodes):\n node_id = self.network.nodes[idx].key\n weights += [self.network.get_node_neighbors_states(node_id) * self.network.get_node_state(node_id)] #/ self.neurons\n self.network.update_edge_weights(weights)\n \"\"\"\n edges = self.network.get_edge_list()\n weights = self.network.get_edge_weights()\n for ci, adj_ci in enumerate(edges):\n weights[ci] += self.network.get_node_neighbors_states(ci) * self.network.get_node_state(ci) #/ self.neurons\n self.network.update_edge_weights(weights)", "title": "" }, { "docid": "da210f74bd6e583f38f7fc75d2c7cfbc", "score": "0.67249286", "text": "def get_weights_flat(self, sess):\n weight_dict = self.get_weights(sess)\n keys = sorted(weight_dict.keys())\n weight_vector = []\n\n for key in keys:\n weight_vector.append(weight_dict[key].flatten())\n\n return np.concatenate(weight_vector)", "title": "" }, { "docid": "da210f74bd6e583f38f7fc75d2c7cfbc", "score": "0.67249286", "text": "def get_weights_flat(self, sess):\n weight_dict = self.get_weights(sess)\n keys = sorted(weight_dict.keys())\n weight_vector = []\n\n for key in keys:\n weight_vector.append(weight_dict[key].flatten())\n\n return np.concatenate(weight_vector)", "title": "" }, { "docid": "f5305e4d3d9d46d6ada1feeea4819a27", "score": "0.66996276", "text": "def generate_weights(self):\n if not self.inner: return\n for node, neighbors in self.inner.items():\n for neighbor in neighbors:\n self.weights[(node,neighbor[0])] = neighbor[1]", "title": "" }, { "docid": "e3509f5ce939fc9ff61dd0649ea7f886", "score": "0.6688216", "text": "def get_weights(self):\n self.graph_executor.get_weights()", "title": "" }, { "docid": "69bbc8a07e776901bba52ac5df149d94", "score": "0.6683862", "text": "def get_weights(self):\n\t\treturn self.model_params, self.pi_k", "title": "" }, { "docid": "8ef7c41c0a6c88cc1bdd1fb886ab57b7", "score": "0.6676405", "text": "def weights(self):\n pass", "title": "" }, { "docid": "8ef7c41c0a6c88cc1bdd1fb886ab57b7", "score": "0.6676405", "text": "def weights(self):\n pass", "title": "" }, { "docid": "8d02b0b3f34605487666326841c0af91", "score": "0.6674354", "text": "def get_weights(self):\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.weights\n return K.batch_get_value(weights)", "title": "" }, { "docid": "f7c9fb876f4426f8af5868de4193f89e", "score": "0.66687995", "text": "def get_weights(self) -> np.ndarray:\n raise NotImplementedError(\"The layer protocol is not usable.\")", "title": "" }, { "docid": "1aa9aec86a4b3036ae44f3f332072b64", "score": "0.66614574", "text": "def weights(self):\n\t\treturn self.base_fit.weights", "title": "" }, { "docid": "4d03581ae26869d73c41103f64bf8d03", "score": "0.6624152", "text": "def get_weights(self):\n return self.sess.run(self.w), self.sess.run(self.vb), self.sess.run(self.hb)", "title": "" }, { "docid": "298ccd718ad77143f9bb282eb023e692", "score": "0.6605407", "text": "def get_weights(self):\n d1, d2, d3 = self.weights.shape\n winit = self.weights_init.reshape(d1*d2, d3)\n w = self.weights.reshape(d1*d2, d3)\n \n return winit, w", "title": "" }, { "docid": "58b6454b14040528262c8537736899c2", "score": "0.65795565", "text": "def variables(self):\n assert self.built\n return self.weights", "title": "" }, { "docid": "4ec7878f34f0b93cebe7b249c54f4d19", 
"score": "0.65686905", "text": "def weight_map(self):\n return self._wcube", "title": "" }, { "docid": "bbb4d632dc758ee2dc74685666a1ff75", "score": "0.6524381", "text": "def get_weights(self):\n return self.W[0]", "title": "" }, { "docid": "d098a1a518e5337e8d295273ed0313c9", "score": "0.65183586", "text": "def get_model_weights(self): \n for layer in self.model.layers:\n weights = layer.get_weights()\n return weights", "title": "" }, { "docid": "8cd8910eeb69b70f9a795c0fe1c40ea3", "score": "0.650336", "text": "def get_model_weights_list(self):\n weig = self.get_model_weights()\n \n result = []\n \n result.append(weig[1].tolist()) # get bias weights \n # get all other weights \n for i in weig[0]:\n result.append(i.tolist())\n\n return result", "title": "" }, { "docid": "f8db45bb37926a44b27400ecb6280522", "score": "0.64953744", "text": "def get_weights(self) -> Sequence[float]:", "title": "" }, { "docid": "ab107722867e77de2e73dfd6a7e773ef", "score": "0.64935225", "text": "def save_as_dict(self):\r\n return {'w': self.weights, 'idfs': self.idfs}", "title": "" }, { "docid": "9e8059715a302a573bf69758dc75cd05", "score": "0.6449336", "text": "def get_pruned_weights(self):\n pass", "title": "" }, { "docid": "29b5c53e5912c6d677b29dad1ab2d482", "score": "0.6443098", "text": "def trainable_weights(self):\n return self.trainable_variables", "title": "" }, { "docid": "d33a47ea81437675fd22f4daa93229e5", "score": "0.6418199", "text": "def getWeightsAndBias(self):\n return numpy.concatenate((self.weights, self.bias))", "title": "" }, { "docid": "52025ef04eaf229ed033fb00054cbaa2", "score": "0.6410406", "text": "def get_ordered_weights(self):\n weights = dict()\n for n in self.nodes():\n w = 0.0\n predecessors = self.predecessors(n)\n if len(predecessors) == 0:\n weights[n] = w\n continue\n \n for v in predecessors:\n w += self[v][n]['weight']\n \n weights[n] = w\n \n return sorted(weights.items(), key=itemgetter(1))", "title": "" }, { "docid": "99f053d8d9b0c869b3b7be46d62d46b2", "score": "0.6404128", "text": "def generator_weights(self):\n return self.generator.weights", "title": "" }, { "docid": "e8262aa57e25ef6441dad21024e0d8d3", "score": "0.63777685", "text": "def weights(self):\n if self._weights is not None:\n return self._weights\n else:\n if self.strategy.fixed_income:\n vals = pd.DataFrame(\n {x.full_name: x.notional_values for x in self.strategy.members}\n )\n vals = vals.div(self.strategy.notional_values, axis=0)\n else:\n vals = pd.DataFrame(\n {x.full_name: x.values for x in self.strategy.members}\n )\n vals = vals.div(self.strategy.values, axis=0)\n self._weights = vals\n return vals", "title": "" }, { "docid": "f3c9f22937bbc1427ba1f62078d4d106", "score": "0.6370188", "text": "def binary_weights(self):\n return self._binary_weights", "title": "" }, { "docid": "8afde32ba58abcb478bc7256dd139c13", "score": "0.63687474", "text": "def init_weight(self):\n self.W_dict = {}\n for step in xrange(1,self.rating_num+1):\n W = np.random.uniform(low=-1/(self.n_visible * self.n_hidden), high=1/(self.n_visible * self.n_hidden), size=(self.n_visible, self.n_hidden)).astype(\"float32\")\n self.W_dict.update({step:W})", "title": "" }, { "docid": "218bc4e7be12d965c6ca418cbcb0c2b1", "score": "0.6366157", "text": "def get_weights(self):\n return NotImplemented", "title": "" }, { "docid": "78733794ef3ddb67aabca5af0b3caa12", "score": "0.6363333", "text": "def _collect_weights(self):\n for layer in self._layers:\n if self.trainable:\n add_variable(\n layer._trainable_weights, self._trainable_weights)\n else:\n 
add_variable(\n layer._trainable_weights, self._non_trainable_weights)\n add_variable(\n layer._non_trainable_weights, self._non_trainable_weights)", "title": "" }, { "docid": "47de4448578112038751fe343a455d45", "score": "0.6312301", "text": "def get_pruned_cared_weights(self, weights_dict):\n pass", "title": "" }, { "docid": "6fbe83458662321aec0ce2626e93f1bc", "score": "0.6301323", "text": "def _init_weights(self):\n \n weights = {}\n shared_weights = {}\n \n # create the numpy weights\n weights['conv1_W'] = utils.norm_weight_4d(self.conv1_filter, self.input_channel, self.conv1_window_size, self.conv1_window_size)\n weights['conv1_b'] = utils.zero_bias(self.conv1_filter)\n weights['conv2_W'] = utils.norm_weight_4d(self.conv2_filter, self.conv1_filter, self.conv2_window_size, self.conv2_window_size)\n weights['conv2_b'] = utils.zero_bias(self.conv2_filter)\n weights['conv3_W'] = utils.norm_weight_4d(self.conv3_filter, self.conv2_filter, self.conv3_window_size, self.conv3_window_size)\n weights['conv3_b'] = utils.zero_bias(self.conv3_filter)\n weights['full_W'] = utils.norm_weight(self.full_in_dim, self.full_out_dim)\n weights['full_b'] = utils.zero_bias(self.full_out_dim)\n weights['output_W'] = utils.norm_weight(self.full_out_dim, self.output_dim)\n weights['output_b'] = utils.zero_bias(self.output_dim)\n \n # convert the numpy weights to theano shared variable\n for key, value in weights.iteritems():\n shared_weights[key] = theano.shared(value, name=key)\n \n return shared_weights", "title": "" }, { "docid": "0cd7c75c353901c65defc677d94aa239", "score": "0.6299324", "text": "def get_weights(self):\n\n return (self.w1, self.w2)", "title": "" }, { "docid": "64b0ff9aab8b220218ce12cbc3ba5bc2", "score": "0.62864506", "text": "def retrieve_weights_biases():\n\n # Retrieve initial network weights\n ga_weights_biases = {\n \"l1/weights\": policy.ga.net.weights[0],\n \"l1/bias\": policy.ga.net.weights[1],\n \"l2/weights\": policy.ga.net.weights[2],\n \"l2/bias\": policy.ga.net.weights[3],\n \"mu/weights\": policy.ga.mu.weights[0],\n \"mu/bias\": policy.ga.mu.weights[1],\n \"log_sigma/weights\": policy.ga.log_sigma.weights[0],\n \"log_sigma/bias\": policy.ga.log_sigma.weights[1],\n }\n ga_target_weights_biases = {\n \"l1/weights\": policy.ga_.net.weights[0],\n \"l1/bias\": policy.ga_.net.weights[1],\n \"l2/weights\": policy.ga_.net.weights[2],\n \"l2/bias\": policy.ga_.net.weights[3],\n \"mu/weights\": policy.ga_.mu.weights[0],\n \"mu/bias\": policy.ga_.mu.weights[1],\n \"log_sigma/weights\": policy.ga_.log_sigma.weights[0],\n \"log_sigma/bias\": policy.ga_.log_sigma.weights[1],\n }\n lc_weights_biases = {\n \"l1/w1_s\": policy.lc.w1_s,\n \"l1/w1_a\": policy.lc.w1_a,\n \"l1/b1\": policy.lc.b1,\n \"l2/weights\": policy.lc.net.weights[0],\n \"l2/bias\": policy.lc.net.weights[1],\n }\n lc_target_weights_biases = {\n \"l1/w1_s\": policy.lc.w1_s,\n \"l1/w1_a\": policy.lc.w1_a,\n \"l1/b1\": policy.lc.b1,\n \"l2/weights\": policy.lc.net.weights[0],\n \"l2/bias\": policy.lc.net.weights[1],\n }\n\n # Return weights and biases\n return (\n ga_weights_biases,\n ga_target_weights_biases,\n lc_weights_biases,\n lc_target_weights_biases,\n )", "title": "" }, { "docid": "e5f96d10f707cc46bd28c9a624b5476d", "score": "0.62640345", "text": "def get_weights(net):\n return [p.data for p in net.parameters()]", "title": "" }, { "docid": "e76f95fb496d7fa7c7a1f908ec5069e9", "score": "0.62379146", "text": "def get_class_weight_dict(self, y_train):\n self.y_train_unencoded = unencode(y_train)\n weights = 
compute_class_weight('balanced', np.arange(1, 6), self.y_train_unencoded)\n weights_dict = {i: weights[i] for i in np.arange(0, len(weights))}\n log.debug(f'class weights: {weights}')\n log.info(f'Computed class weight dictionary: {weights_dict}')\n return weights_dict", "title": "" }, { "docid": "48f4cec40043660e167ead79e1f18257", "score": "0.61971104", "text": "def unflatten_weights(self, weight_vector):\n keys = sorted(self.lenet_VAR_DIMS.keys())\n weight_dict = {}\n slice_index = 0\n\n for key in keys:\n dims = self.lenet_VAR_DIMS[key]\n size = np.prod(dims)\n values = weight_vector[slice_index: slice_index + size]\n slice_index += size\n\n weight_dict[key] = values.reshape(dims)\n\n return weight_dict", "title": "" }, { "docid": "89a758124fbc1bd23b1d9a562fc8e499", "score": "0.6177165", "text": "def print_weights(self):\n print \"Weights:\"\n for w in self.network.weights:\n print self.eval(w)", "title": "" }, { "docid": "d4cccabab8a09b7793f5c717f96d719e", "score": "0.61769783", "text": "def initialize_weights(self):\n # embedding weights\n weights = dict()\n weights_initializer = tf.glorot_normal_initializer()\n bias_initializer = tf.constant_initializer(0.0)\n weights[\"feature_embedding\"] = tf.get_variable(\n name='weights',\n dtype=tf.float32,\n initializer=weights_initializer,\n shape=[self._feature_size, self._embedding_size])\n # cross net weights\n for i in range(self._num_cross_layer):\n weights['w_cross_layer{}'.format(i)] = tf.get_variable(\n name='cross_layer{}_weights'.format(i),\n dtype=tf.float32,\n initializer=weights_initializer,\n shape=[self._field_size * self._embedding_size, 1])\n weights['b_cross_layer{}'.format(i)] = tf.get_variable(\n name='cross_layer{}_bias'.format(i),\n dtype=tf.float32,\n initializer=bias_initializer,\n shape=[self._field_size * self._embedding_size, 1])\n # fnn weights\n for i, v in enumerate(self._deep_units):\n if i == 0:\n weights['fnn_hidden_layer{}'.format(i)] = tf.get_variable(\n name='fnn_hidden_layer{}'.format(i),\n dtype=tf.float32,\n initializer=weights_initializer,\n shape=[self._field_size * self._embedding_size, v])\n else:\n weights['fnn_hidden_layer{}'.format(i)] = tf.get_variable(\n name='fnn_hidden_layer{}'.format(i),\n dtype=tf.float32,\n initializer=weights_initializer,\n shape=[self._deep_units[i-1], v])\n weights['combination_weights'] = tf.get_variable(\n name='combination_weights',\n dtype=tf.float32,\n initializer=weights_initializer,\n shape=[2, 1]\n )\n return weights", "title": "" }, { "docid": "67616443e92e779148907d137b6e3a06", "score": "0.6165107", "text": "def _collect_weights(self):\n if self._layers is None:\n pass\n for layer in self._layers:\n if self.trainable:\n add_variable(\n layer._trainable_weights, self._trainable_weights)\n else:\n add_variable(\n layer._trainable_weights, self._non_trainable_weights)\n add_variable(\n layer._non_trainable_weights, self._non_trainable_weights)", "title": "" }, { "docid": "515821db982900ad1543b77e46906e42", "score": "0.61644965", "text": "def update_weights(self): \n a = np.mean(self.network.get_states())\n A = a * (1-a)\n normalized_state = (self.network.get_states() - a) / np.sqrt(A)\n normalized_state_list=list(normalized_state)\n edges = self.network.get_edge_list()\n weights = self.network.get_edge_weights()\n for ci, adj_ci in enumerate(edges):\n weights[ci] += np.array([normalized_state_list[x.key] for x in adj_ci]) * normalized_state_list[ci]#self.network.get_node_neighbors_states(ci) * self.network.get_node_state(ci) #normalized_state[adj_ci] * 
normalized_state[ci] #/ self.neurons\n self.network.update_edge_weights(weights)", "title": "" }, { "docid": "0d1b54bd92cc7ef6b74380c0835e3c05", "score": "0.6132494", "text": "def get_weights(self):\n # User embedding parameters\n u_emb_params = get_params(self.user_embedding.parameters())\n # Item embedding parameters\n i_emb_params = get_params(self.item_embedding.parameters())\n # Recommendation model parameters\n rec_params = get_params(self.rec_model.parameters())\n return u_emb_params, i_emb_params, rec_params", "title": "" }, { "docid": "158d96538ce43b82878c6fd48b1ac5f5", "score": "0.6125365", "text": "def update_weights(self):\r\n for l in self.layers:\r\n v = self.momentum * l.velocity + self.lr * l.gradient\r\n l.weights -= v\r\n l.velocity = v\r\n #l.gradient *= 0\r\n return [k.weights for k in self.layers]", "title": "" }, { "docid": "8572245e36da3bb3fbc5ec926dd8aaa3", "score": "0.6117197", "text": "def with_weights(self):\n return _add_w(self._potential, self._weights, self._nodes)", "title": "" }, { "docid": "a98036eebdfc13b8fc8b89cb3375b621", "score": "0.6113021", "text": "def get_weights(net):\n return [p.data for p in net.parameters()]", "title": "" }, { "docid": "b72b5de93fcd78cfdb43cbde06c491e4", "score": "0.61021346", "text": "def get_class_weight(self) -> Dict[str, torch.Tensor]:\n raise NotImplementedError", "title": "" }, { "docid": "f422ac9aeae4f9a76ff3e6a0e81ed922", "score": "0.6093295", "text": "def highest_weight_dict_inv(self):\n hw = [x for x in self.hw_auxiliary() if x.epsilon(6) == 0]\n dic = dict( ( tuple( [self.affine_weight(x), len(x)] ), x ) for x in hw )\n assert len(hw) == len(dic)\n return dic", "title": "" }, { "docid": "f0f894c4cdb459ebc09bc46181f6daa7", "score": "0.60837966", "text": "def print_weights(self):\r\n return(self.weights)\r\n #raise Warning(\"You must implement print_weights\")\r", "title": "" }, { "docid": "d4a4aa162076b3e664da89f59791d9d1", "score": "0.6083286", "text": "def get_weights(model: torch.nn.ModuleList) -> fl.common.NDArrays:\n return [val.cpu().numpy() for _, val in model.state_dict().items()]", "title": "" }, { "docid": "3f6a4570a038aaa9ddcaa455bee02dfa", "score": "0.6076204", "text": "def get_weights(self):\n se = self.pb.format.network_format.network == pb.NetworkFormat.NETWORK_SE_WITH_HEADFORMAT\n if self.weights == []:\n self.denorm_layer(self.pb.weights.ip2_val_b, self.weights)\n self.denorm_layer(self.pb.weights.ip2_val_w, self.weights)\n self.denorm_layer(self.pb.weights.ip1_val_b, self.weights)\n self.denorm_layer(self.pb.weights.ip1_val_w, self.weights)\n self.denorm_conv_block(self.pb.weights.value, self.weights)\n\n if self.pb.format.network_format.policy == pb.NetworkFormat.POLICY_CONVOLUTION:\n self.denorm_plain_conv(self.pb.weights.policy, self.weights)\n self.denorm_conv_block(self.pb.weights.policy1, self.weights)\n else:\n self.denorm_layer(self.pb.weights.ip_pol_b, self.weights)\n self.denorm_layer(self.pb.weights.ip_pol_w, self.weights)\n self.denorm_conv_block(self.pb.weights.policy, self.weights)\n\n for res in reversed(self.pb.weights.residual):\n if se:\n self.denorm_se_unit(res.se, self.weights)\n self.denorm_conv_block(res.conv2, self.weights)\n self.denorm_conv_block(res.conv1, self.weights)\n\n self.denorm_conv_block(self.pb.weights.input, self.weights)\n\n return self.weights", "title": "" }, { "docid": "f1d5fdaee2a373d0f8c3b8b627e743e7", "score": "0.60727483", "text": "def as_vector(self):\n return self.weights", "title": "" }, { "docid": "378d7be95daeab1d73ea93085f73c0e4", "score": 
"0.6059989", "text": "def new_weights(self) -> Tuple[ISelectNewWeight, ...]:\n return self.__new_weights", "title": "" }, { "docid": "b7fdabc025cc09ff1e68c39b4ce008fb", "score": "0.60548896", "text": "def set_weights(self, **args):\n for k in args:\n assert k in self.values.__dict__.keys()\n self.values.__setattr__(k, args[k])\n return self.values.summary()", "title": "" }, { "docid": "454a1a742bb19e223251dac0a400f40b", "score": "0.60524505", "text": "def update_weights(self, weights, g):\n for i in range(len(weights)):\n for param in weights[i].keys():\n weights[i][param] += self.step_size * g[i][param]\n\n return weights", "title": "" }, { "docid": "bcb4217c8df093fff78ce79c490e2a70", "score": "0.6022603", "text": "def resetWeights(self):\n\n for key in self.dictionary:\n self.dictionary[key] = 50.0", "title": "" }, { "docid": "909e7c4649c28a16f7cae3b8ff3dd581", "score": "0.601884", "text": "def highest_weight_dict(self):\n hw = [x for x in self.hw_auxiliary() if x.epsilon(1) == 0]\n dic = dict( ( x, tuple( [self.affine_weight(x), len(x)] ) ) for x in hw )\n assert len(hw) == len(dic)\n return dic", "title": "" }, { "docid": "8dc5f56e949536c96cbebcd3973a1c0d", "score": "0.6016105", "text": "def data_weights(self) -> List[float]:\n if not hasattr(self._data[0], 'data_weight'):\n return [1. for d in self._data]\n\n return [d.data_weight for d in self._data]", "title": "" }, { "docid": "38ab7ebd3458dce8fbe23a4a6c156137", "score": "0.6008903", "text": "def to_native_weights(self):\n def reorder_weights(w):\n z, r, n = torch.chunk(w, 3, dim=-1)\n return torch.cat([r, z, n], dim=-1)\n\n kernel = reorder_weights(self.kernel).permute(1, 0).contiguous()\n recurrent_kernel = reorder_weights(self.recurrent_kernel).permute(1, 0).contiguous()\n bias1 = reorder_weights(self.bias).contiguous()\n bias2 = reorder_weights(self.recurrent_bias).contiguous()\n\n kernel = torch.nn.Parameter(kernel)\n recurrent_kernel = torch.nn.Parameter(recurrent_kernel)\n bias1 = torch.nn.Parameter(bias1)\n bias2 = torch.nn.Parameter(bias2)\n return kernel, recurrent_kernel, bias1, bias2", "title": "" }, { "docid": "3f4487b5c073ce889015e57584a293b7", "score": "0.6001952", "text": "def get_weights(self):\n if self.has_lfs():\n return self.label_model.get_weights()\n else:\n return []", "title": "" }, { "docid": "7bb3b08072070183d0dcc2c9cc65197d", "score": "0.5994701", "text": "def params(self):\n return [self.emb.weight]", "title": "" }, { "docid": "0497b352de39c50235d8f976c9ab0a4e", "score": "0.59915024", "text": "def layer_weight_tensors(self):\n warn('Class \"GaussianBNNWrapper\" didn\\'t modify the attribute ' +\n '\"layer_weight_tensors\", such that the contained weights only ' +\n 'represent mean parameters.')\n\n #return super(MainNetInterface, self).layer_weight_tensors\n return super().layer_weight_tensors", "title": "" }, { "docid": "2f8a73498cb7e7e0dd4f8399f1fe2dfd", "score": "0.59874994", "text": "def weights(self):\n return self.model.regressor_.coef_", "title": "" }, { "docid": "6069a867861ffbd3bb815ce1e19a9c5f", "score": "0.59836483", "text": "def new_weights(self):\n return np.zeros(self.num_features)", "title": "" }, { "docid": "13bd3cb0776b16ce4e322089d9296663", "score": "0.5982416", "text": "def trainable_weights(self):\n weights = []\n for part in self.parts:\n weights += part.trainable_weights\n return weights", "title": "" } ]
d767493699a07ddcd42621c67fd2e118
Given a positive integer n, generate the primes < n.
[ { "docid": "47565328f245af61b47771408283e044", "score": "0.0", "text": "def basicSieveSum(n):\n s = [1]*n\n for p in xrange(2, 1+int(math.sqrt(n-1))):\n if s[p]:\n a = p*p \n s[a::p] = [0] * -((a-n)//p)\n sm = 0\n for p in xrange(2, n): \n if s[p]:\n sm += p\n return sm", "title": "" } ]
[ { "docid": "bb38992d97c5b1bdca1c86ad4f61d2a4", "score": "0.8815054", "text": "def generate_primes(n):\n primes = []\n for i in range(2, n + 1):\n if is_prime(i):\n primes.append(i)\n return primes", "title": "" }, { "docid": "343d3818ff2bab1820a2586b19dd5c27", "score": "0.8634073", "text": "def generate_primes_less_than_n(n):\n\tif n <= 1:\n\t\treturn []\n\n\tlist_of_primes = [2]\n\tprime_candidate = 3\n\n\twhile(prime_candidate < n):\n\t\tis_prime = True\n\t\tsqrt_prime_candidate = math.sqrt(prime_candidate)\n\t\tfor i in list_of_primes:\n\t\t\tif prime_candidate%i == 0:\n\t\t\t\tis_prime = False\n\t\t\t\tbreak\n\t\t\tif i > sqrt_prime_candidate:\n\t\t\t\tbreak\n\t\tif is_prime:\n\t\t\tlist_of_primes.append(prime_candidate)\n\t\tprime_candidate += 2\t\n\n\treturn list_of_primes", "title": "" }, { "docid": "3c64559dc79a4f759dfbb9a6dc386581", "score": "0.86219674", "text": "def generate_primes(n):\n # pass\n result = []\n if n == 1:\n return False\n for i in range(2, n + 1):\n if prime(i):\n result.append(i)\n return result", "title": "" }, { "docid": "eabe9b5af4be302b019804dc95cd4028", "score": "0.8619999", "text": "def generate_primes(n):\n a = []\n for i in range(0,n + 1):\n if is_prime(i):\n a.append(i)\n return a", "title": "" }, { "docid": "fa48a0d4954c01d182ca9b0e511065b7", "score": "0.85146993", "text": "def generate_primes_before_n(n):\n p = primes()\n largest_so_far = next(p)\n while largest_so_far < n:\n yield largest_so_far\n largest_so_far = next(p)", "title": "" }, { "docid": "ac77deb74cd48f0dbc73a5751dd2f9e8", "score": "0.8481048", "text": "def primes(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "title": "" }, { "docid": "f78a662384f62bbd382fd9f989f109a2", "score": "0.84598637", "text": "def generate_primes(n):\n\n # check for invalid input\n if type(n) is not int:\n raise TypeError('n must be an integer')\n\n # there are no primes less than 2\n if n <= 2:\n return []\n\n # start by assuming all numbers are prime\n sieve = [True] * n\n\n # mark multiples of prime numbers as NOT prime (i = prime number, j = multiple of a prime number)\n limit = int(sqrt(n)) + 1\n for i in range(2, limit): # only need to mark multiples for primes up to sqrt(n)\n if sieve[i] is True:\n for j in range(i**2, n, i): # start marking multiples at i^2, because lower multiples are already marked\n sieve[j] = False\n\n # remaining numbers are prime\n return [k for k in range(2, len(sieve)) if sieve[k] is True]", "title": "" }, { "docid": "b0c37b309466f888712086acae30b5ba", "score": "0.84563875", "text": "def generate_primes(n):\n \n if type(n) != type(0) and type(n) != type(0.1):\n raise ValueError(\"invalid input, expecting a number\")\n \n primes = []\n\n for i in range(2, n+1):\n\n divisions = 0\n\n for j in range(2, i):\n\n if i % j == 0:\n divisions +=1\n\n if divisions == 0:\n primes.append(i)\n\n return primes", "title": "" }, { "docid": "baf1ab3d3713804d054349fb3fa1130b", "score": "0.8410762", "text": "def primeGen(n):\n primes= [2,3,5,7,11]\n if n in xrange(1,len(primes)+1):\n return primes[:n]\n else:\n banlist=[]\n count = 6\n while count <= n :\n Next = (primes[-2] + primes[-1]) - primes[-3]\n if not is_prime(Next):\n count -=1\n banlist.append(Next)\n count +=1\n primes.append(Next)\n filterout(banlist,primes)\n return primes", "title": "" }, { 
"docid": "a027bc44e5a9b4152931fef1d83b5d17", "score": "0.827716", "text": "def generate_primes_after_n(n):\n p = primes()\n largest_so_far = next(p)\n while largest_so_far < n:\n largest_so_far = next(p)\n\n yield largest_so_far\n while True:\n yield next(p)", "title": "" }, { "docid": "5d2bc663c47bad7d8ce335d3e0c71df6", "score": "0.82471675", "text": "def make_n_primes(n):\n\tprimes = []\n\tnum = 2\n\tprime_stat = \"maybe\"\n\twhile len(primes) < n:\n\t\tfor i in primes:\n\t\t\tif num % i == 0:\n\t\t\t\tnum = num + 1\n\t\t\t\tprime_stat = \"no\"\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tNone\n\t\tif prime_stat == \"no\":\n\t\t\tprime_stat = \"maybe\"\n\t\telse:\n\t\t\tprimes.append(num)\n\t\t\tnum = num + 1\n\t\t\tprime_stat = \"maybe\"\n\treturn primes", "title": "" }, { "docid": "9229598a0466d89a52fa8f5578cfada4", "score": "0.82395995", "text": "def generate_primes(n):\n if n < 2:\n return set()\n # Create a set of consecutive integers from 2 to n (inclusive)\n primes = set([i for i in range(2, n + 1)])\n # Initially, let p equal 2, the smallest prime number\n p = 2\n # When p * p is > n, stop the while loop because all the multiples,\n # of p which are also multiples of smaller primes, have been removed \n while p * p <= n:\n # If p hasn't been removed from primes in the below for loop, it is a prime\n if p in primes:\n # Enumerate the multiples of p by counting to n from 2p \n # in increments of p, and remove them from the primes set\n for i in range(p * 2, n + 1, p):\n if i in primes:\n primes.remove(i)\n # Increment p until the first number greater than p has not been \n # removed from the above for loop. If there was no such number, stop. \n # Otherwise, let p now equal this new number (which is the next prime)\n p += 1\n return primes", "title": "" }, { "docid": "c05891c8553af7bbdffa3d1022e0735b", "score": "0.8233458", "text": "def generate_primes(self, n):\n if n <= 0:\n raise ValueError('n_prime is int and > 0.')\n elif n == 1:\n return [1]\n elif n == 2:\n return [1,2]\n elif n == 3:\n return [1,2,3]\n else:\n primes_ = [1,2,3]\n itr = n - 3\n\n new = primes_[-1]\n\n while itr > 0:\n new += 1\n is_prime = False\n\n if new & 1 == 1:\n is_prime = True\n for k in range(3, int(math.floor(math.sqrt(new)))+1):\n if new % k == 0:\n is_prime = False\n break\n if is_prime:\n primes_.append(new)\n itr -= 1\n return np.array(primes_)", "title": "" }, { "docid": "3a4c9c3d92e19f15645afd72161eecda", "score": "0.8214206", "text": "def primes_list(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "title": "" }, { "docid": "4b2ed318b536388acb500b7e4f47c59c", "score": "0.81379503", "text": "def prime_numbers_generator(n): \r\n \r\n primes_list = []\r\n \r\n # return false if n is 0 or 1\r\n if type(n) == int and n == 0 or n ==1:\r\n return False\r\n \r\n # reject string arguments\r\n if type(n) == str:\r\n return \"String arguments are not allowed\"\r\n \r\n # reject negative numbers \r\n if type(n) == int and n < 0:\r\n return \"Negative numbers are not allowed\"\r\n \r\n # reject dictionary inputs \r\n if type(n) is dict:\r\n return \"Dictionary inputs are not allowed\"\r\n \r\n # reject list inputs \r\n if type(n) is list:\r\n return \"List inputs are not allowed\"\r\n \r\n \r\n # reject tuple inputs \r\n if type(n) is tuple:\r\n return \"Tuple inputs are not 
allowed\"\r\n \r\n # reject float inputs \r\n if type(n) is float:\r\n return \"Floats are not allowed\"\r\n \r\n # no excessive sizes\r\n if n > sys.maxsize:\r\n return \"Overflow\" \r\n \r\n i = 2\r\n while(i < n):\r\n \r\n j = 2\r\n while(j <= (i/j)):\r\n \r\n if not(i%j): \r\n break\r\n \r\n j = j + 1\r\n \r\n if (j > i/j) : \r\n primes_list.append(i)\r\n \r\n i = i + 1", "title": "" }, { "docid": "59b386305fee4c3e5107d8b1ccc88368", "score": "0.8090662", "text": "def get_primes(n):\n sieve = [True] * n\n for i in range(3, int(sqrt(n)) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * ((n - i * i - 1) // (2 * i) + 1)\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "title": "" }, { "docid": "3b9d1cf0765fadcce9dba2787610e85e", "score": "0.8081545", "text": "def primes_of(n):\n\tif n < 2:\n\t\treturn None\n\n\tsieve = {i:True for i in range(2,n+1)}\n\n\tfor j in range(2, n+1):\n\t\tif sieve[j] == True:\n\t\t\tm = 2\n\t\t\twhile j*m <= n:\n\t\t\t\tsieve[j*m] = False\n\t\t\t\tm+=1\n\n\treturn [x for x,y in sieve.items() if y == True]", "title": "" }, { "docid": "7d55b02d6716ad736487c7b10fdb153a", "score": "0.8068936", "text": "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * ((n - i * i - 1) // (2 * i) + 1)\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "title": "" }, { "docid": "971a28b38327d97146e10fa0580e44ed", "score": "0.80664337", "text": "def Generate_n_Pandigit_Number_Prime(n):\n digits = [str(i) for i in xrange(n, 0, -1)]\n\n for num in permutations(digits, len(digits)-1):\n ln = list(num)\n if('1' not in ln): ln.append('1')\n elif('3' not in ln): ln.append('3')\n elif('7' not in ln): ln.append('7')\n else: continue\n yield int(''.join(ln))", "title": "" }, { "docid": "03838c5864f83e09c974b5a7ceb3883e", "score": "0.80544895", "text": "def generate_prime_numbers(n):\n\n j = 2\n prime_numbers = []\n while j <= n:\n k = 2\n while not(k == j) and not(j%k == 0):\n k = k + 1\n if k == j:\n prime_numbers.append(j)\n j = j + 1\n return prime_numbers", "title": "" }, { "docid": "e573ab55421c86740e6fee9e36150244", "score": "0.80414903", "text": "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]: sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "title": "" }, { "docid": "97b2279c506c02ac6483e50c9fda1b2e", "score": "0.8023877", "text": "def rwh_primes(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "title": "" }, { "docid": "9fb16c1c08e1856de1b4e7c32c7a74d0", "score": "0.8009447", "text": "def primes(n):\n r = [i if i%2 != 0 else 0 for i in list(range(0,n+1)) ]\n r[1] = 0\n r[2] = 2\n bottom = 2\n top = n\n while(bottom * bottom <= top):\n while(bottom <= top):\n if(r[top] and bottom * top <= n):\n r[bottom*top] = 0\n top -= 1\n top=n\n bottom += 1\n return [x for x in r if x]", "title": "" }, { "docid": "f247bbe3ad11dbc4ea9d7fbe60593990", "score": "0.7995365", "text": "def generate_primes(n):\n numbers = list(range(2, n))\n for i in range(len(numbers)):\n if isinstance(numbers[i], int):\n for j in range(i + 1, len(numbers)):\n if isinstance(numbers[j], int):\n if numbers[j] % numbers[i] == 0 and numbers[j] != numbers[i]:\n numbers[j] = '-'\n\n numbers = 
[numbers[i] for i in range(len(numbers)) if isinstance(numbers[i], int)]\n return numbers", "title": "" }, { "docid": "eff64973af4527413c2310ff37f80d68", "score": "0.79842937", "text": "def get_prime_numbers(n: int) -> list:\n\n sieve = [True] * n\n for i in range(3, int(n**0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i :: 2 * i] = [False] * ((n - i * i - 1) // (2 * i) + 1)\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "title": "" }, { "docid": "a6fa09496dd0d481d7bda2e385194808", "score": "0.79710615", "text": "def prime_generator(n):\r\n cand = [i for i in range(3, n + 1, 2)]\r\n end = int(n ** 0.5) // 2\r\n\r\n # Loop over candidates (cand), marking out each multiple.\r\n for i in range(end):\r\n if cand[i]:\r\n cand[cand[i] + i::cand[i]] = [None] * (\r\n (n // cand[i]) - (n // (2 * cand[i])) - 1)\r\n\r\n # Filter out non-primes and return the list.\r\n return [2] + [i for i in cand if i]", "title": "" }, { "docid": "b41209835bcb3848474a43af56dae85e", "score": "0.79393405", "text": "def primes_to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[((k*k)//3)::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "title": "" }, { "docid": "42ff94caa5abf80cc0637c0a7a93cea5", "score": "0.7935812", "text": "def listprimes(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "title": "" }, { "docid": "323b796e802a16a660a7b70014015cf7", "score": "0.7934606", "text": "def rwh_primes1(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n/2)\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i/2]:\n sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)\n return [2] + [2*i+1 for i in xrange(1,n/2) if sieve[i]]", "title": "" }, { "docid": "f12d4190a6bdcf05367fe0c7563e0fce", "score": "0.7929728", "text": "def n_primes(n):\n\tprimes = sieve(estimate_primes(n))\n\treturn primes[:n]", "title": "" }, { "docid": "c99f30e1f687cbb3dc2980d140c02975", "score": "0.7919449", "text": "def primes(n):\n DOUBLE = 2\n divisors = [i for i in range(DOUBLE, math.ceil(n/DOUBLE))]\n non_primes = []\n for item in divisors:\n for item2 in divisors:\n non_primes += [item*item2]\n \n return [i for i in range(2, n) if i not in non_primes]", "title": "" }, { "docid": "ce1e271b07cd1ccb83a297c1ac39cfc0", "score": "0.7906036", "text": "def make_primes_up_to(n: int):\n\tsieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n\tsieve[0] = False\n\tfor i in range(int(n ** 0.5) // 3 + 1):\n\t\tif sieve[i]:\n\t\t\tk = 3 * i + 1 | 1\n\t\t\tsieve[((k * k) // 3)::2 * k] = False\n\t\t\tsieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False\n\tps = np.r_[2, 3, ((3 * np.nonzero(sieve)[0] + 1) | 1)]\n\tfor p in ps:\n\t\tPRIMES.append(p)", "title": "" }, { "docid": "7017f8ab6402490d284746e39caa4c4c", "score": "0.78825635", "text": "def primes(n):\n sieve = [1] * n\n for i in range(3, int(n**0.5) + 1, 2):\n if sieve[i]:\n for y in range(2, n):\n try:\n sieve[i * y] = 0\n except:\n break\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "title": "" }, { "docid": "ba1bf09d7e3d4d7f6740426e384c7b84", "score": "0.7881484", "text": "def 
generate_prime(n):\n\n\tprime_list = []\n\n if n == 0 or n == 1:\n return \"number is not prime.\"\n\n if n < 2:\n return \"number less than two are not prime.\"\n\n if not type(n) == int:\n return \"strings not allowed.\"\n\n for i in range(2, n + 1):\n if i > 1:\n for x in range(2, i):\n if (i % x) == 0:\n break\n else:\n prime_list.append(i)\n return prime_list", "title": "" }, { "docid": "112a3fb09f658d3bac0a7001acc745ab", "score": "0.7819165", "text": "def n_primes_fast(n):\n candidates = list(range(n + 1)) # candidates[i] == i\n for i in range(2, n + 1):\n if candidates[i] is not None:\n yield i\n\n # Remove all multiples of i\n j = 2\n while j * i <= n:\n candidates[j * i] = None\n j += 1", "title": "" }, { "docid": "23f280cfc394e5091096a6bd95d87944", "score": "0.78118724", "text": "def prime_number_generator(number_of_primes):", "title": "" }, { "docid": "4801c2610c2208cff7cc3223c1b09980", "score": "0.77806515", "text": "def prime_generator(n):\n\n if n > 1: # Making sure that n is always greater than 1\n # Create the prime numbers list with list comprehension\n prime_num_list = [a for a in range(2, n + 1) if all(a % b != 0 for b in range(2, a))]\n # Yield the prime number at the index,i, in the range of the length of the list\n for i in range(len(prime_num_list)):\n yield prime_num_list[i]\n else:\n raise ValueError(\"Please enter a number greater than 1\")", "title": "" }, { "docid": "fd7526bc4ddc9b436aac3265baa09a1b", "score": "0.77689284", "text": "def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3, int(n**0.5)+1, 2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1, n//2) if sieve[i]]", "title": "" }, { "docid": "9e03270a8f57e673b519a0b1e7abc745", "score": "0.776735", "text": "def primes1(n):\r\n sieve = [True] * (n//2)\r\n for i in range(3,int(n**0.5)+1,2):\r\n if sieve[i//2]:\r\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\r\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "title": "" }, { "docid": "74b257c7e83a6bdaa932255a26882439", "score": "0.7754811", "text": "def primes_to(n):\n sieve = numpy.ones(n // 2, dtype=numpy.bool)\n for i in range(3, int(n**0.5)+1, 2):\n if sieve[i // 2]:\n sieve[i * i // 2 :: i] = False\n return 2 * numpy.nonzero(sieve)[0][1::] + 1", "title": "" }, { "docid": "86d961445a0ef885eb87c25ee3758565", "score": "0.774154", "text": "def seive(n):\n potentials = {i: True for i in range(2, int(n) + 1)}\n bound = int(np.sqrt(n))\n for i in range(2, bound + 1):\n if potentials[i]:\n for j in range(i ** 2, int(n) + 1, i):\n potentials[j] = False\n primes = [f for f in potentials.keys() if potentials[f]]\n return primes", "title": "" }, { "docid": "4da957537c4a2ee786b34d1a3c8fea3d", "score": "0.7728052", "text": "def primes(n):\r\n plist = [3]\r\n num = 3\r\n while len(plist) < n:\r\n num += 1\r\n prime = True\r\n for j in range(len(plist)):\r\n if num % plist[j] == 0:\r\n prime = False\r\n break\r\n if prime:\r\n plist.append(num)\r\n\r\n return plist", "title": "" }, { "docid": "bf065022abc21e7cfc2e6d1743b5a12b", "score": "0.77234226", "text": "def list_primes(n):\n return [i for (i, isprime) in enumerate(list_primality(n)) if isprime]", "title": "" }, { "docid": "ccd728e150e8a995ec5fbe00520c794c", "score": "0.7710354", "text": "def compute_primes(n):\n primes = [True for i in range(n + 1)]\n p = 2\n while p * p <= n:\n # If primes[p] is not changed, then it is a prime\n if primes[p]:\n # Update all multiples of p too\n for i in range(p * 2, n + 1, p):\n primes[i] = 
False\n p += 1\n primes[0] = False\n primes[1] = False\n\n result = []\n for p in range(n + 1):\n if primes[p]:\n result.append(p)\n\n return result", "title": "" }, { "docid": "3b4773abf8428a1f7cf32671327fd6b0", "score": "0.770795", "text": "def sieve(n):\n primes, sieve = [], [True] * (n + 1)\n for p in range(2, n + 1):\n if sieve[p]:\n primes.append(p)\n for i in range(p * p, n + 1, p):\n sieve[i] = False\n return primes", "title": "" }, { "docid": "d9a246659bcdbd55f9fab8b0eb60495f", "score": "0.7702426", "text": "def primesfrom2to(n):\r\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in range(int(n**0.5)//3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)//3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "title": "" }, { "docid": "8c869530b16ec5d25f38098081a270b0", "score": "0.7700823", "text": "def primes(n):\n factors = set()\n for x in range(1, int(math.sqrt(n)) + 1):\n if n % x == 0:\n factors.add(int(x))\n factors.add(int(n // x))\n return sorted(factors)", "title": "" }, { "docid": "39d3df4cb22cc1aca91a458aa430658f", "score": "0.76918274", "text": "def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "title": "" }, { "docid": "c1a959979b634c0e5102fecc52656386", "score": "0.7685874", "text": "def _prime_generator(self, n):\n it = Iterator()\n primes = []\n\n for i in range(0, n):\n prime = it.next_prime()\n primes.append(prime)\n\n return primes", "title": "" }, { "docid": "aed61d71c8c6868249a8da625b1cf4f2", "score": "0.7673001", "text": "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "title": "" }, { "docid": "aed61d71c8c6868249a8da625b1cf4f2", "score": "0.7673001", "text": "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "title": "" }, { "docid": "9cab1556054bc0a7aa4611e8aea9b3a4", "score": "0.766858", "text": "def primes_to(n):\n size = n//2\n sieve = bitarray(size)\n sieve.setall(1)\n limit = int(n**0.5)\n for i in range(1,limit):\n if sieve[i]:\n val = 2*i+1\n sieve[(i+i*val)::val] = 0\n return [2] + [2*i+1 for i, v in enumerate(sieve) if v and i > 0]", "title": "" }, { "docid": "223dbbcdbb8e6b48cb545f52ae430ddc", "score": "0.75779474", "text": "def primes_via_sieve(n):\r\n a = list(True for i in range(n+1))\r\n first_prime = 2\r\n i = first_prime\r\n import math\r\n while i <= math.sqrt(n):\r\n if a[i]:\r\n j = i*i\r\n 
while j <= n:\r\n a[j] = False\r\n j += i\r\n i += 1\r\n return [i for i in range(first_prime, n + 1) if a[i]]", "title": "" }, { "docid": "220f043cd6341f06e2a53f7e3d687bfe", "score": "0.7571742", "text": "def sieve(n):\n primes = []\n array = [0, 0] # Mark 0 and 1 as non-prime.\n for i in range(2, n):\n array.append(1) # Mark all other numbers > 2 as possibly-prime.\n \n for i in range(2, n):\n if array[i] == 1:\n for j in range(i*i, n, i):\n array[j] = 0 # Point out the posers.\n primes.append(i)\n if len(primes) >= 10001: # Stop as soon as we reach prime 10,001\n return primes\n return primes", "title": "" }, { "docid": "c5e0dd6c29a6f0af37d6ad7df17334b0", "score": "0.75640994", "text": "def _primes(n):\n n = n + 1\n sieve = range(n)\n sieve[:2] = [0, 0]\n for i in xrange(2, int(math.sqrt(n)) + 1):\n if sieve[i]:\n for j in xrange(i ** 2, n, i):\n sieve[j] = 0\n # Filter out the composites, which have been replaced by 0's\n return [p for p in sieve if p]", "title": "" }, { "docid": "6f420a5eb185d5ae52cdbf57d353281c", "score": "0.7544949", "text": "def n_first_primes(n):\n nth_prime = sympy.prime(n)\n primes_gen = sympy.primerange(1, nth_prime + 1)\n primes_lst = [num for num in primes_gen]\n\n return primes_lst", "title": "" }, { "docid": "3cba3ad75a590f94bc5f9d3a95160a7b", "score": "0.7541103", "text": "def primes_below(self, n):\n for i,p in enumerate(PRIMES):\n if p > n:\n return PRIMES[:i]\n return PRIMES", "title": "" }, { "docid": "3a7136f20c0f8a84836e7a35db644ef5", "score": "0.7534739", "text": "def primes_npsieve1(n):\n if n < 2:\n return None\n\n sieve = numpy.ones(n//2, dtype=numpy.bool)\n for i in range(3, int(n**0.5)+1, 2):\n if sieve[i//2]:\n sieve[i*i//2::i] = False\n return [2]+list(2*numpy.nonzero(sieve)[0][1::]+1)", "title": "" }, { "docid": "a4bf986278ed7f129385269f06a81d93", "score": "0.7513265", "text": "def smallSieve(n):\n # a copy of Robert William Hanks' rwh1 used to get sieving primes for smaller ranges\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "title": "" }, { "docid": "412198abc8bb42110ba893ff9030df01", "score": "0.7512989", "text": "def list_primality(n):\n result = [True] * (n + 1)\n result[0] = result[1] = False\n for i in range(sqrt(n) + 1):\n if result[i] == True:\n for j in range(i * i, len(result), i): # Multiples\n result[j] = False\n return result", "title": "" }, { "docid": "86ce48f3693b1adf626dfc5848c5881d", "score": "0.7512424", "text": "def prime_list(n):\n n+=1\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "title": "" }, { "docid": "3aa1aafb18424e62fe7730ed9d6fa390", "score": "0.7451166", "text": "def Sieve_of_Eratosthenes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * ((n - i * i - 1) // (2 * i) + 1)\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "title": "" }, { "docid": "0f9d85f94fadb85e310229da88158271", "score": "0.74398977", "text": "def findPrimes(n):\r\n if n <= 1:\r\n raise ValueError (\"The input you have entered is not a prime number, a prime number is an interger that is didvisable only by itself and 1\")\r\n knownPrimes = []\r\n possibilities = list(range(2,n+1))\r\n 
while possibilities !=[]:\r\n p = possibilities[0]\r\n knownPrimes.append(p)\r\n x =len(possibilities)\r\n for i in range(x-1,-1,-1):\r\n if possibilities[i]%p == 0:\r\n del possibilities[i]\r\n return knownPrimes", "title": "" }, { "docid": "2f22b5e86e4a34e3e692bbb34a82c819", "score": "0.7430643", "text": "def primesToNumber(n):\r\n sieve = [True] * n\r\n for i in xrange(3,int(n**0.5)+1,2):\r\n if sieve[i]:\r\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\r\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "title": "" }, { "docid": "22f81b0a7fbe0f215cde9f7d997758b7", "score": "0.7417724", "text": "def sieve(n):\n\tp = 2\n\tintegers = list(range(p, n))\n\tfor i in integers: \n\t\tif i != 0: \n\t\t\tmultiples = list(range(i, n, i))[1:]\n\t\t\tfor m in multiples: \n\t\t\t\tintegers[m-p] = 0\n\n\tprimes = [i for i in integers if i != 0]\n\treturn primes", "title": "" }, { "docid": "6a5770a2b34312a0cb83c4f80d046376", "score": "0.74130094", "text": "def primesfrom3to(n):\n sieve = numpy.ones(n/2, dtype=numpy.bool)\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i/2]:\n sieve[i*i/2::i] = False\n return 2*numpy.nonzero(sieve)[0][1::]+1", "title": "" }, { "docid": "98e9325ab2a16d22cad4fdfb5e2d73d1", "score": "0.7380016", "text": "def prime_gen_wrapper(n):\n if n < CUTOFF:\n return smallSieve(n+1) # rwh1 returns primes < N. We need sieving primes <= sqrt(limit)\n else:\n return segmentedSieve(n)", "title": "" }, { "docid": "6a6722972ea4977597a780178449fa47", "score": "0.7364162", "text": "def sieve(n):\n a = [True] * n\n a[0] = a[1] = False\n for (num, is_prime) in enumerate(a):\n if is_prime:\n yield num\n for i in range(2*num, n, num):\n a[i] = False", "title": "" }, { "docid": "634df6e3cdaa5b9e8324d081ad723704", "score": "0.7348917", "text": "def primes_n(count: int)->list:\n _pl = primes_generator()\n prime = []\n for _ in range(count):\n prime.append(next(_pl))\n \n return prime", "title": "" }, { "docid": "1c39c2dc9ed3bc64d6d41e2606d49733", "score": "0.73415154", "text": "def primes(n, filename=None):\n\n _pl = primes_generator(2, n)\n _result = list(_pl)\n\n if filename is not None:\n with open(filename,'w') as outf:\n outf.write(\"Primes list (less than {p}) is below.\\n\".format(p=n))\n outf.write(str(_result))\n #outf.close()\n\n return _result", "title": "" }, { "docid": "b8ba443bc4e66149e2e6336bc1a8fe17", "score": "0.73396456", "text": "def sieve_of_Eratosthenes(n):\n \n table = np.ones(n+1, dtype=bool)\n table[0], table[1] = False, False\n \n upperlimit = int(n/2)\n for i in xrange(2, upperlimit+1):\n if table[i]:\n jupper = n//i\n ind = i\n for j in xrange(2, jupper+1):\n ind += i\n table[ind] = False\n \n return [p for p in xrange(n+1) if table[p] ]", "title": "" }, { "docid": "c885bd322b7ec4d1e4baa105ea66360d", "score": "0.73355746", "text": "def _sieveEratosthenes(self, n: int) -> List[int]:\n minPrimeFactors = [i for i in range(n + 1)]\n for i in range(2, int(n**0.5) + 1):\n if minPrimeFactors[i] == i: # `i` is prime.\n for j in range(i * i, n, i):\n minPrimeFactors[j] = min(minPrimeFactors[j], i)\n return minPrimeFactors", "title": "" }, { "docid": "92c322dd7dcfaa4ab33e5a9e9418af9a", "score": "0.73267174", "text": "def sieve_eratosthenes(n):\n is_prime = [True] * (n + 1)\n is_prime[0] = is_prime[1] = False\n for i in range(2, n + 1):\n for j in range(2 * i, n + 1, i):\n is_prime[j] = False\n return is_prime", "title": "" }, { "docid": "92fcefa06cb7a5afe5fc5b5c0eaf5fcb", "score": "0.7312215", "text": "def primeSieve(n):\n primes = []\n i = 2\n while len(primes) < n:\n 
isPrime = True\n for p in primes:\n if i%p == 0:\n isPrime = False\n break\n if isPrime:\n primes.append(i)\n i += 1\n return primes", "title": "" }, { "docid": "84aa4b612ab7a139dab2c8dafde2d2f7", "score": "0.7310138", "text": "def search_primes(n):\n num = [2]\n i = 3\n while i < n:\n for j in num[:]:\n if i % j == 0:\n i = i + 1\n break\n else:\n num.append(i)\n i = i + 1\n return num", "title": "" }, { "docid": "8f64f4348ab7855a25718ca19d475d48", "score": "0.73048687", "text": "def primesfrom2to(n):\n sieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "title": "" }, { "docid": "80d722125d9939da15238ab28b34fd24", "score": "0.72901326", "text": "def generate_prime_numbers(input_number):\n for number in range(2, input_number):\n if number > 1 and all(number % i for i in islice(count(2), int(sqrt(number)-1))):\n PRIME_NUMBERS.append(number)", "title": "" }, { "docid": "62ff8e26b7b7514f1bc502c08b723892", "score": "0.72751665", "text": "def primes_under(n):\n sieve = np.ones(n//3 + (n % 6 == 2), dtype=np.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k = 3*i+1 | 1\n sieve[k*k//3::2*k] = False\n sieve[k*(k-2*(i & 1)+4)//3::2*k] = False\n return np.r_[2, 3, ((3*np.nonzero(sieve)[0][1:]+1) | 1)]", "title": "" }, { "docid": "30e4ffd3bb9b893e4f4ebf3413ea0737", "score": "0.7269291", "text": "def first_primes(n):\n\tmy_list = [2, 3]\n\tnumber = 5\n\twhile len(my_list) <= n:\n\t\tif is_prime(number):\n\t\t\tmy_list.append(number)\n\t\tnumber += 2\n\treturn my_list", "title": "" }, { "docid": "e1febdb1a316563e14f8d363c99df140", "score": "0.7261867", "text": "def rwh_primes1v1(n):\n sieve = bytearray([True]) * (n//2)\n for i in range(3, int(n**0.5)+1, 2):\n if sieve[i//2]:\n sieve[i*i//2::i] = bytearray((n-i*i-1)//(2*i)+1)\n return [2, *compress(range(3, n, 2), sieve[1:])]", "title": "" }, { "docid": "95cdc3b4783b4dc073dd5a0a0b72649a", "score": "0.72487986", "text": "def rwh_primes2(n):\n # flake8: noqa\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n correction = (n%6>1)\n n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]\n sieve = [True] * (n/3)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]", "title": "" }, { "docid": "c6240b84f9f5634081f434e13c3790a0", "score": "0.72404283", "text": "def probably_prime(n, k):\n small_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] # etc.\n\n if n < 2: return False\n for p in small_primes:\n if n < p * p: return True\n if n % p == 0: return False\n r, s = 0, n - 1\n while s % 2 == 0:\n r += 1\n s //= 2\n\n for _ in range(k):\n a = randrange(2, n - 1)\n x = pow(a, s, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(r - 1):\n x = pow(x, 2, n)\n if x == n - 1:\n break\n else:\n return False\n return True", "title": "" }, { "docid": "53ef44986716e135694253a59291721b", "score": "0.7236124", "text": "def prime_factorization(n):\n if type(n) != int:\n raise Exception(\"n must be an integer\") \n \n lim = ceil(sqrt(n))+1\n L = []\n \n for p in _primes_():\n while n % p == 0:\n L.append(p)\n n = n // p\n \n if n == 1:\n break\n \n if p > lim:\n 
L.append(n)\n break\n return L", "title": "" }, { "docid": "59a9d2bf75bc9f9de816d099f24e0ae4", "score": "0.7217341", "text": "def primes_npsieve2(n):\n sieve = numpy.ones(n//3 + (n%6==2), dtype=numpy.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return list(numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)])", "title": "" }, { "docid": "89b0737b4bddfdfed835bc85ac2b8c5f", "score": "0.7214607", "text": "def factorize(n):\n for item in primes:\n if item > n:\n break\n yield from how_many_times_divides(n, item)", "title": "" }, { "docid": "f49d7fd0d1263392f718641b1f8af364", "score": "0.7207122", "text": "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]\n\n # Zwraca kolejną czwórkę elementów z przekątnych", "title": "" }, { "docid": "64e044407d2a3ab5e4f15ed3f3718366", "score": "0.7206997", "text": "def sieve(n):\n\n # Traditional method.\n prime = [True] * (n + 1)\n prime[0:2] = False, False\n for i in xrange(2, int(ceil(sqrt(n)))):\n if prime[i]:\n for j in xrange(i ** 2, n + 1, i):\n prime[j] = False\n return [i for i, primality in enumerate(prime) if primality]", "title": "" }, { "docid": "7b69a6a196dae3dff331728b3436cd82", "score": "0.71966827", "text": "def rwh_primes2(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n correction = (n%6>1)\n n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]\n sieve = [True] * (n/3)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]", "title": "" }, { "docid": "1c97868862798b18923f7a2c3f7e2c66", "score": "0.7194152", "text": "def primes_upto(n):\n result = []\n if is_prime(i):\n result.append(i)\n return result", "title": "" }, { "docid": "a2debc20389dbf18e0bb171b079de829", "score": "0.7191692", "text": "def primes(n):\n if n < 2: return []\n if n == 2: return [2]\n # do only odd numbers starting at 3\n s = range(3, n+1, 2)\n # n**0.5 may be slightly faster than math.sqrt(n)\n mroot = n ** 0.5\n half = len(s)\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3)//2\n print j\n print \"--\"+str(len(s))\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n # make exception for 2\n return [2]+[x for x in s if x]", "title": "" }, { "docid": "e57eea26935e141376c89d943b1c3fe6", "score": "0.7179454", "text": "def naive_enumeration(self, n: int) -> List[int]:\n # edge case\n if n < 2:\n raise ValueError('Please input a number greater then 2')\n\n primes = []\n i = 2\n while i < n:\n j = 2\n isPrime = True\n \"\"\"\n We only need to consider j < sqrt(i) for example if we consider 36 we know that if we consider\n 9 it has a factor less then 6 which is 4\n \"\"\"\n while j * j < i:\n if i % j == 0:\n isPrime = False\n j += 1\n if isPrime:\n primes.append(i)\n i += 1", "title": "" }, { "docid": "ab3b3044bfcc110a8c0b7f62ddeb168e", "score": "0.71602154", "text": "def factorize(n):\n def isPrime(n):\n return not [x for x in xrange(2,int(math.sqrt(n))) if n%x == 0]\n primes = []\n candidates = xrange(2,n+1)\n candidate = 2\n while 
not primes and candidate in candidates:\n if n%candidate == 0 and isPrime(candidate):\n primes = primes + [candidate] + factorize(n/candidate)\n candidate += 1 \n return primes", "title": "" }, { "docid": "19cb690792dd275ed5717f21477091c4", "score": "0.714573", "text": "def rwh_primes1v2(n):\n sieve = bytearray([True]) * (n//2+1)\n for i in range(1, int(n**0.5)//2 + 1):\n if sieve[i]:\n sieve[2*i*(i+1)::2*i+1] = bytearray((n//2-2*i*(i+1))//(2*i+1)+1)\n return [2, *compress(range(3, n, 2), sieve[1:])]", "title": "" }, { "docid": "1f9016245710a7f221eafe7f788d18d6", "score": "0.7134118", "text": "def sieve_eratosthenes(self, n: int) -> List[int]:\n isPrime = []\n for i in range(n):\n isPrime.append(True)\n\n # we know these are not primes\n isPrime[0] = False\n isPrime[1] = False\n\n for i in range(n):\n if isPrime[i]:\n j = i + i\n # this will be a harmonic series\n # n/2 + n/3 + n/4 ..... harmonic series\n while j < n:\n isPrime[j] = False\n j += i\n primes = []\n for i in range(n):\n if isPrime[i]:\n primes.append(i)\n\n return primes", "title": "" }, { "docid": "985e85000aa08834179ab21f5fb70805", "score": "0.71133", "text": "def primes():\n p = {}\n n = 2\n while True:\n l = p.get(n)\n if l is None:\n yield n\n l = [n]\n else:\n del p[n]\n for v in l:\n # add v to p[n+v] list\n k = n+v\n dest = p.get(k)\n if dest is None:\n p[k] = [v]\n else:\n dest.append(v)\n n += 1", "title": "" }, { "docid": "1203cddfad529ee183723a464d126024", "score": "0.71077627", "text": "def sieve(n):\n mark = [True for i in xrange(n+1)]\n p=2\n while(p*p <= n ):\n if (mark[p] == True):\n for i in xrange(2*p,n+1,p):\n mark[i] = False\n p +=1\n\n primes = []\n for i in range(2,len(mark)):\n if mark[i]:\n primes.append(i)\n \n return mark,primes", "title": "" }, { "docid": "dbcfdbf0de52a8a959ab296b2b539344", "score": "0.70948374", "text": "def countPrimes(self, n: int) -> int:\n # Create a list of consecutive integers from 2 through n: (2, 3, 4, ..., n).\n numbers = [i for i in range(n)]\n\n prime_marker = 2 # Set the marker to the value of the smallest prime number, 2\n while prime_marker is not None and prime_marker ** 2 < n:\n # Mark all of the multiples of prime_marker that have a coefficient > 1 (2p, 3p, ..., but not p itself)\n for i in range((n - prime_marker ** 2) // prime_marker + 1):\n try:\n numbers[prime_marker ** 2 + i * prime_marker] = False\n except IndexError:\n break\n\n try: # If a number greater than prime_marker exists, set prime_marker to be that value.\n prime_marker = next(filter(lambda x: bool(x) is True, numbers[prime_marker + 1:]))\n except StopIteration: # If there was no such number, stop.\n prime_marker = None\n\n # All unmarked numbers in the range [2, n] are primes. 
Return the length of a list of these numbers.\n return len([x for x in numbers[2:] if bool(x)])", "title": "" }, { "docid": "868d17301ae2256d11ccccdb36af6288", "score": "0.7089235", "text": "def find_primes(n):\n if n < 2: return []\n if n == 2: return [2]\n # do only odd numbers starting at 3\n s = range(3, n, 2)\n # n**0.5 may be slightly faster than math.sqrt(n)\n mroot = n ** 0.5\n half = len(s)\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3)//2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n # make exception for 2\n return [2]+[x for x in s if x]", "title": "" }, { "docid": "92c8f7adab1db7551c4894547ce0885a", "score": "0.7087147", "text": "def is_prime(n):\n if n < 2: \n return False\n \n for number in itertools.islice(itertools.count(2), int(math.sqrt(n) - 1)):\n if not n % number:\n return False\n\n return True", "title": "" }, { "docid": "bb8167f6a229d500ca1fe52804076b46", "score": "0.70748705", "text": "def primes2(n):\n correction = (n%6>1)\n n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]\n sieve = [True] * (n/3)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]", "title": "" }, { "docid": "2b36ee4ea9d7acfb8fb972f9e172b9e5", "score": "0.70680505", "text": "def rwh_primes1v2(n):\n sieve = bytearray([True]) * (n//2+1)\n for i in range(1,int(n**0.5)//2+1):\n if sieve[i]:\n sieve[2*i*(i+1)::2*i+1] = bytearray((n//2-2*i*(i+1))//(2*i+1)+1)\n return [2,*compress(range(3,n,2), sieve[1:])]", "title": "" } ]
4ac309522543ac6843994eb767a69abb
check: Checkpoint a mailbox on the server.
[ { "docid": "54457b720b105c250612e134d92c4731", "score": "0.5425262", "text": "def check(self, argv):\n self._print_msg(self._client.check())", "title": "" } ]
[ { "docid": "805dfc8005657b800fd5e4eba32ddf6e", "score": "0.64372647", "text": "def _check_boxes(connection):\r\n response, mailboxes = connection.list()\r\n if response == 'OK':\r\n print \"Mailboxes:\"\r\n print mailboxes", "title": "" }, { "docid": "911a56fcf17678afb8a2ed19a2e7a853", "score": "0.6334141", "text": "def check_mail(self):\r\n connection = imaplib.IMAP4_SSL('imap.gmail.com')\r\n _gmail_login(connection)\r\n #Runs if there are new messages. [None]returned = no new messages\r\n if (connection.recent() != ('OK',[None]) ):\r\n print \"~~checking messages, downloading new messages~~\"\n _check_boxes(connection)\r\n _open_box(connection,\"inbox\")\r\n #process_mailbox(host) #does not work\r", "title": "" }, { "docid": "de3b7be7824184aad05ccf5733de2523", "score": "0.60709715", "text": "def checkMailbox(self,mailboxSettings):\n M = self._connectPOP3SSLMailbox(mailboxSettings)\n numMessages = len(M.list()[1]) \n M.quit()\n return numMessages", "title": "" }, { "docid": "3309c660e922a3294ca4aaf5ea40d14c", "score": "0.5635092", "text": "def check_email(aemail):\n print \"Checking Email...\",\n do_check(aemail)", "title": "" }, { "docid": "c908f84fc37ae2d397a0fd5effc4b411", "score": "0.5552756", "text": "async def check(self, message: types.Message):\n ...", "title": "" }, { "docid": "5c7d0a9c7e782863a401020264b9897b", "score": "0.55473775", "text": "def checkBoxes(self):\n noErrors = True\n for server in self.mailboxes:\n log.info('Connecting to server: %s', server)\n s_vals = self.mailboxes[server]\n\n try:\n for user in s_vals['users']:\n u_vals = s_vals['users'][user]\n # TODO: As near as I can tell, you can only\n # login with 1 username for each connection to a server.\n protocol = s_vals['protocol'].lower()\n if protocol == 'imaps':\n serv = imaplib.IMAP4_SSL(server)\n elif protocol == 'imap':\n serv = imaplib.IMAP4(server)\n else:\n raise ValueError('Unknown protocol %s' % protocol)\n\n password = u_vals['password']\n\n try:\n log.info('Connecting as user: %s', user)\n serv.login(user, password)\n\n for mbox in u_vals['mailboxes']:\n dbhome = u_vals['mailboxes'][mbox]\n log.info('Using mailbox: %s, home: %s',\n mbox, dbhome)\n #access a specific mailbox\n if mbox:\n (t, data) = serv.select(mbox)\n else:\n # Select the default mailbox (INBOX)\n (t, data) = serv.select()\n try:\n nMessages = int(data[0])\n except ValueError:\n nMessages = 0\n\n log.info('Found %s messages', nMessages)\n\n if nMessages:\n self._getMessages(serv, nMessages, dbhome)\n serv.expunge()\n\n # We are done with this mailbox\n serv.close()\n except:\n log.exception('Exception with server %s user %s',\n server, user)\n noErrors = False\n\n serv.logout()\n serv.shutdown()\n del serv\n except:\n log.exception('Exception while connecting to %s', server)\n noErrors = False\n return noErrors", "title": "" }, { "docid": "e7614fa94f3044f10ab37e6ab94bede9", "score": "0.55317384", "text": "def check(self, lastentry, mails):", "title": "" }, { "docid": "6f2274326bd379fe99cef1a1c60b1556", "score": "0.5505692", "text": "def check(self,locator):\r\n self.do_command(\"check\", [locator,])", "title": "" }, { "docid": "6f2274326bd379fe99cef1a1c60b1556", "score": "0.5505692", "text": "def check(self,locator):\r\n self.do_command(\"check\", [locator,])", "title": "" }, { "docid": "36f2c5bda822a88437c91d06692b9897", "score": "0.54845816", "text": "def guard_mailbox(self):\n try:\n self.mail_control.init_control()\n mails = self.mail_control.read_messages(range=self.task.range)\n self.rule_interpreter.interpret(mails)\n 
except (\n MailControlException,\n NoRulesForTaskException,\n NoValidActionFoundException,\n ) as ex:\n self.task.state = \"ERROR\"\n self.task.message = ex.message\n self.task.save()\n\n if self.mail_control.mailbox_conn is not None:\n self.mail_control.close_mailbox()", "title": "" }, { "docid": "d2e1b45ebf9b0706f28673b0c8288a79", "score": "0.54503727", "text": "def testEmail(self):\n self._CreateTestViewpoint('vp1', self._user.user_id, [], delete_followed=True)\n\n options.options.email = 'kimball.andy@emailscrubbed.com'\n self._RunAsync(self._checker.CheckAllViewpoints)\n\n corruption_text = \\\n ' ---- viewpoint vp1 ----\\n' \\\n ' missing followed (1 instance)\\n' \\\n ' empty viewpoint (1 instance)\\n' \\\n '\\n' \\\n 'python dbchk.py --devbox --repair=True --viewpoints=vp1'\n\n self.assertEqual(self._checker._email_args,\n {'fromname': 'DB Checker',\n 'text': 'Found corruption(s) in database:\\n\\n%s' % corruption_text,\n 'subject': 'Database corruption',\n 'from': 'dbchk@emailscrubbed.com',\n 'to': 'kimball.andy@emailscrubbed.com'})", "title": "" }, { "docid": "9328a740bcd8347a16ddd92ae9e12a63", "score": "0.54373777", "text": "def _check_member(self, member_stack):\n\n member_ip = member_stack.server_stack.floating_ip_address\n member_port = member_stack.application_port\n member_protocol = self.listener_stack.pool_protocol\n\n self._wait_for_request_data(self.client_stack, member_ip,\n member_protocol, member_port)", "title": "" }, { "docid": "c8324aff5f55afa11f9e08d7fa1d8d62", "score": "0.5337182", "text": "def test_mail_member_accepted(self):\n # there should be two mails in the outbox from the application\n self.assertEqual(len(mail.outbox), 2)\n # delete them, they're not relevant for this test\n del mail.outbox[:2]\n\n # change the status from one member to accepted -> he should receive\n # an e-mail\n self.taskmember1.status = TaskMember.TaskMemberStatuses.accepted\n self.taskmember1.save()\n\n # test that the e-mail is indeed sent\n self.assertEqual(len(mail.outbox), 1)\n m = mail.outbox.pop(0)\n self.assertIn('assigned you to a task', m.subject)\n self.assertEqual(m.activated_language,\n self.taskmember1.member.primary_language)\n self.assertEqual(m.recipients()[0], self.some_user.email)", "title": "" }, { "docid": "9b71fc27e4b32f90024a3456a2ceb620", "score": "0.5282473", "text": "def check(self, irc, msg, args):\n (server, user, password) = self._checkServer(irc)\n pop = self._connect(server, user, password)\n n = len(pop.list()[1])\n irc.reply(format('I have %n waiting for me.', (n, 'message')))", "title": "" }, { "docid": "8e646eb0b5d0eec4ce41731390be5360", "score": "0.5264374", "text": "def check(email_to: Union[str, List[str]]):\n stores = get_stores(LOCATION_SEARCH_QUERY, MILES_RADIUS)\n locs = find_available_appts(stores)\n formatted = email_fmt(locs)\n if formatted:\n email(email_to, formatted, 'Rite Aid')\n sleep(3600)", "title": "" }, { "docid": "3a210f560f8675b03b6993041d3da706", "score": "0.52544796", "text": "def verify_account_available(email):\n\n #Run a query, use an ORM, use Twilio to call someone and ask them :-)\n return True", "title": "" }, { "docid": "6bc7d3911ea9d77d6cab5326e399b8e7", "score": "0.5253452", "text": "def Check(self, id, check):", "title": "" }, { "docid": "6bc7d3911ea9d77d6cab5326e399b8e7", "score": "0.5253452", "text": "def Check(self, id, check):", "title": "" }, { "docid": "fcf06ea3910d7fe5631889631f1ae46f", "score": "0.52251714", "text": "def do_check(checkemail):\n customheaders = {\"User-Agent\": \"haveibeenpwned-dot-py\"}\n 
checkurl = \"https://haveibeenpwned.com/api/v2/breachedaccount/\" + urllib.quote(checkemail)\n r = requests.get(checkurl, headers=customheaders, verify=False)\n if r.status_code == 404:\n print \"You're good, \" + checkemail + \" not found.\"\n elif r.status_code == 200:\n print \"Bad things happened: \" + checkemail + \" found on lists\",\n # parse this shit properly\n report = r.json()\n print report\n else:\n print \"Something else happened\"", "title": "" }, { "docid": "91035bea98c9ae7715a52e3855b69506", "score": "0.52122647", "text": "def SetCheck(self, root: Any, userdata: Any, obj: Any, lColumn: int, bCheck: bool, bcMsg: BaseContainer) -> None:\n ...", "title": "" }, { "docid": "c2b177f50c965a66321c4d27daba7ee4", "score": "0.52078825", "text": "def check_sms(self):\n if self.logged_in:\n sms_list_inbox = self.__get_page(SMSLIST_URL[0])\n sms_list_spam = self.__get_page(SMSLIST_URL[1])\n self.__find_conversations (sms_list_inbox)\n self.__check_conversations(sms_list_inbox)\n self.__find_conversations (sms_list_spam)\n self.__check_conversations(sms_list_spam, 'spam')\n else:\n raise NotLoggedIn(self.username)", "title": "" }, { "docid": "87ed5951fb58af97a16d92308fd027db", "score": "0.51863474", "text": "def perform_check(self, store=True):\n return self.check_instance.check(store=True)", "title": "" }, { "docid": "8d6ccd9fe852b3db5600c054cac6de51", "score": "0.5166097", "text": "async def check(self, host, port):\n return await self._get(f'check/{host}:{port}')", "title": "" }, { "docid": "29ec8d5218037a43d7cda4599709816b", "score": "0.51654845", "text": "def box_with_flag(self, flag):\n for box, flags in self.boxes_flags.iteritems():\n if flag in flags:\n return box\n raise MailboxNotFound(\n 'Mailbox with flag {} does not exist'.format(flag))", "title": "" }, { "docid": "6e2a80429f891cd49fe5d7afb866fde8", "score": "0.51542467", "text": "def test_03(self):\n inbox = tm11.InboxMessage(generate_message_id())\n msg = make_request('/services/test_inbox_1/', \n inbox.to_xml(), \n get_headers(VID_TAXII_SERVICES_11, False), \n MSG_STATUS_MESSAGE, \n st=ST_DESTINATION_COLLECTION_ERROR, \n sd_keys=[SD_ACCEPTABLE_DESTINATION])\n #msg = self.send_inbox_message('/services/test_inbox_1/', VID_TAXII_XML_11, inbox, st=ST_DESTINATION_COLLECTION_ERROR, sd_keys=[SD_ACCEPTABLE_DESTINATION])", "title": "" }, { "docid": "94bc1573857b1b957e16e69110e60ee0", "score": "0.5137699", "text": "def mailbox(self):\r\n return self._mailbox", "title": "" }, { "docid": "671fa0391f3330c85bf380b9da69b8db", "score": "0.5125189", "text": "def check(job):\r\n _core_check(job)\r\n try:\r\n sync.check()\r\n except Exception:\r\n raise click.ClickException(\"sync service is not reachable with current configuration\")\r\n click.echo(\"Everything is ready\")", "title": "" }, { "docid": "a4a6ffaaa6f019ade6eb07bf1183e92c", "score": "0.51157486", "text": "def test_send_email(self):\n self.assertTrue(mail.outbox)", "title": "" }, { "docid": "c7716808ce56a0ed926ea37c098d6217", "score": "0.5111034", "text": "def verify(self, args):\n print \"Verifying email address.\\nCut and paste the token received by email.\"\n token = raw_input(\"Token:\")\n token = os.path.basename(token)\n client = HRClient()\n tokendict = {'mailtoken': token}\n try:\n client.verify_user(tokendict)\n print \"User email %s verified\" % HRUtils.get_token_username_insecure(token)\n except RESTException as re:\n if re.code == 400:\n print \"Token expired or already verified.\"\n else:\n print re", "title": "" }, { "docid": "1aad01c63fdc7b1c6fe25071d290cf0e", 
"score": "0.5106036", "text": "def test_get_check_exist(self):\n self.valid_payload[\"email_receiver\"] = \"jefeti@pympack.com.pe\"\n response = client.get(reverse('client-email-check-operation'), self.valid_payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "fa57098a285982ba332d40f3bc08d98c", "score": "0.509412", "text": "def testMailBox(self):\r\n\r\n # Use the example from Google's documentation at\r\n # http://code.google.com/apis/talk/jep_extensions/gmail.html#notifications\r\n xml = ET.fromstring(\"\"\"\r\n <iq type=\"result\">\r\n <mailbox xmlns=\"google:mail:notify\"\r\n result-time='1118012394209'\r\n url='http://mail.google.com/mail'\r\n total-matched='95'\r\n total-estimate='0'>\r\n <mail-thread-info tid='1172320964060972012'\r\n participation='1'\r\n messages='28'\r\n date='1118012394209'\r\n url='http://mail.google.com/mail?view=cv'>\r\n <senders>\r\n <sender name='Me' address='romeo@gmail.com' originator='1' />\r\n <sender name='Benvolio' address='benvolio@gmail.com' />\r\n <sender name='Mercutio' address='mercutio@gmail.com' unread='1'/>\r\n </senders>\r\n <labels>act1scene3</labels>\r\n <subject>Put thy rapier up.</subject>\r\n <snippet>Ay, ay, a scratch, a scratch; marry, 'tis enough.</snippet>\r\n </mail-thread-info>\r\n </mailbox>\r\n </iq>\r\n \"\"\")\r\n\r\n iq = self.Iq(xml=xml)\r\n mailbox = iq['mailbox']\r\n self.failUnless(mailbox['result-time'] == '1118012394209', \"result-time doesn't match\")\r\n self.failUnless(mailbox['url'] == 'http://mail.google.com/mail', \"url doesn't match\")\r\n self.failUnless(mailbox['matched'] == '95', \"total-matched incorrect\")\r\n self.failUnless(mailbox['estimate'] == False, \"total-estimate incorrect\")\r\n self.failUnless(len(mailbox['threads']) == 1, \"could not extract message threads\")\r\n\r\n thread = mailbox['threads'][0]\r\n self.failUnless(thread['tid'] == '1172320964060972012', \"thread tid doesn't match\")\r\n self.failUnless(thread['participation'] == '1', \"thread participation incorrect\")\r\n self.failUnless(thread['messages'] == '28', \"thread message count incorrect\")\r\n self.failUnless(thread['date'] == '1118012394209', \"thread date doesn't match\")\r\n self.failUnless(thread['url'] == 'http://mail.google.com/mail?view=cv', \"thread url doesn't match\")\r\n self.failUnless(thread['labels'] == 'act1scene3', \"thread labels incorrect\")\r\n self.failUnless(thread['subject'] == 'Put thy rapier up.', \"thread subject doesn't match\")\r\n self.failUnless(thread['snippet'] == \"Ay, ay, a scratch, a scratch; marry, 'tis enough.\", \"snippet doesn't match\")\r\n self.failUnless(len(thread['senders']) == 3, \"could not extract senders\")\r\n\r\n sender1 = thread['senders'][0]\r\n self.failUnless(sender1['name'] == 'Me', \"sender name doesn't match\")\r\n self.failUnless(sender1['address'] == 'romeo@gmail.com', \"sender address doesn't match\")\r\n self.failUnless(sender1['originator'] == True, \"sender originator incorrect\")\r\n self.failUnless(sender1['unread'] == False, \"sender unread incorrectly True\")\r\n\r\n sender2 = thread['senders'][2]\r\n self.failUnless(sender2['unread'] == True, \"sender unread incorrectly False\")", "title": "" }, { "docid": "6f7d184791e61e1e4c107621856a749f", "score": "0.50803614", "text": "def run(self, **kwargs):\n try:\n # connect to server and login\n box = imaplib.IMAP4_SSL(settings.IMAP_SERVER)\n box.login(settings.IMAP_USERNAME, settings.IMAP_PASSWORD)\n box.select()\n\n # search for all mails in the mailbox\n result, mail_indices 
= box.search(None, 'ALL')\n\n # if everything was ok...\n if result == 'OK':\n\n # check number of mails\n mail_count = len(mail_indices[0].split())\n logger.info('found %(mail_count)d mails...' % {'mail_count': mail_count})\n\n # iterate the mail indices and fetch the mails\n ips_created = 0\n for mail_index in mail_indices[0].split():\n logger.info('fetching mail %(mail_index)s...' % {'mail_index': int(mail_index)})\n # mail data is a list with a tuple\n sub_result, mail_data = box.fetch(mail_index, '(BODY[TEXT])')\n if sub_result == 'OK':\n\n # fetch the ips\n ips = list_remove_duplicates(\n self.find_ips(''.join([str(data) for data in mail_data[0]]))\n )\n\n # if ips found, add them and delete the mail\n if len(ips) > 0:\n logger.info('found %(count)d IPs' % {'count': len(ips)})\n ips_created += IP.batch_add_ips(ips)\n box.store(mail_index, '+FLAGS', '\\\\Deleted')\n\n else:\n logger.error('fetching mail with index %(index)d failed' % {'index': mail_index})\n\n # finally, if ips were added, unify the IPs\n if ips_created > 0:\n logger.info('created %(count)d IPs' % {'count': ips_created})\n IP.unify_ips()\n\n else:\n logger.error('search returned not OK')\n\n box.close()\n box.logout()\n except:\n logger.exception('retrieving mail failed')", "title": "" }, { "docid": "5573b4e5ebdabffe2ea8a4ecffba6532", "score": "0.5068267", "text": "def checkmessage(self, message, zbase, pubkey=None):\n payload = {\n \"message\": message,\n \"zbase\": zbase,\n \"pubkey\": pubkey,\n }\n return self.call(\"checkmessage\", payload)", "title": "" }, { "docid": "437252f965afe6f16dbf3926383fe2db", "score": "0.50456256", "text": "def select_inbox(self):\n\n\t\tlogger.debug('Attempting to access the inbox.')\n\n\t\tok, mail_count_list = self.mail.select('INBOX')\n\t\tif ok != OK:\n\t\t\traise EmailCheckError('Failed selecting the inbox.')\n\n\t\ttry:\n\t\t\tmail_count = int(mail_count_list[0])\n\t\texcept ValueError as e:\n\t\t\traise EmailCheckError('Failed to get the message count.') from e\n\n\t\tlogger.info('Found %s items in the inbox.', mail_count)", "title": "" }, { "docid": "58ebb3ea6b23eb84c599f244d6e9ac1c", "score": "0.50365543", "text": "def test_member_applied_to_task_mail(self):\n self.task.status = \"in progress\"\n self.assertEquals(len(mail.outbox), 0)\n self.task.save()\n\n self.task_member = TaskMemberFactory.create(task=self.task,\n status='applied')\n\n # Task owner receives email about new task member\n self.assertEquals(len(mail.outbox), 1)\n self.assertNotEquals(mail.outbox[0].body.find(\"applied for your task\"),\n -1)\n self.assertEquals(mail.outbox[0].to[0], self.task.author.email)\n\n self.task_member.status = 'accepted'\n self.task_member.save()\n\n # Task member receives email that he is accepted\n self.assertEquals(len(mail.outbox), 2)\n self.assertNotEquals(mail.outbox[1].subject.find(\"assigned\"), -1)\n self.assertEquals(mail.outbox[1].to[0], self.task_member.member.email)", "title": "" }, { "docid": "f2b05300a202e4bb68d315c36568c464", "score": "0.5023368", "text": "def check_email(self):\n\n self.drag_down()\n gmailcom = self.get_value(\"gmail\")\n if search_text(gmailcom):\n click_textview_by_text(gmailcom)\n sleep(5)\n else:\n goback()", "title": "" }, { "docid": "bc67a7cb204583ae59a0b71b3578ec46", "score": "0.4993459", "text": "def in_box(self, status):\n #try:\n if not self.location: \n return True # no bounding box => anything is valid\n locationbox = [(float(self.location[0]), float(self.location[1])), (float(self.location[0]), float(self.location[3])),\\\n 
(float(self.location[2]), float(self.location[1])), (float(self.location[2]), float(self.location[3]))]\n if status['coordinates']:\n c = status['coordinates']\n if c['type'] == u'Point':\n coord = c['coordinates']\n if coord[0] <= self.location[0] and coord[0] >= self.location[2]\\\n and coord[1] >= self.location[1] and coord[1] <= self.location[3]:\n return True\n else:\n return False\n elif c['type'] == u'Polygon':\n loc = Polygon([tuple(point) for point in c['coordinates']])\n box = Polygon(locationbox)\n if loc.intersects(box):\n return True\n else:\n return False\n elif status['place']:\n if status['place']['bounding_box']['type'] == u'Point':\n coord = status['place']['bounding_box']['coordinates']\n if coord[0] <= self.location[0] and coord[0] >= self.location[2]\\\n and coord[1] >= self.location[1] and coord[1] <= self.location[3]:\n return True\n else:\n return False\n elif status['place']['bounding_box']['type'] == u'Polygon':\n loc = Polygon([tuple(point) for point in status['place']['bounding_box']['coordinates']][0])\n box = Polygon(locationbox)\n if loc.intersects(box):\n return True\n else:\n return False\n else:\n return False\n #except:\n # print \"ERROR IN_BOX\"\n # return False", "title": "" }, { "docid": "39bbf402c05861b2887257f9af518030", "score": "0.49635702", "text": "def checkMyWorkBox(self):\n self.util.waitForElementToBePresent(\"//div\")\n self.util.waitForElementToBePresent(elem.my_work_checkbox)\n my_objects_tab = self.util.isElementPresent((elem.my_work_checkbox))\n if my_objects_tab:\n self.util.clickOn(elem.my_work_checkbox)", "title": "" }, { "docid": "06aee8c13ad1736bb51b15f8e830f9b1", "score": "0.49429545", "text": "def Check(self, check):", "title": "" }, { "docid": "c4a1d34e7098b92cb537d4b6ada85a37", "score": "0.49391532", "text": "def test_email_contains_link_for_completing_registration(self):\n mbox_path = self.environ.get_config_for('mbox_file')\n def get_email_message():\n mbox = mailbox.mbox(mbox_path)\n assert len(mbox) == 1\n\n utils.try_until(1, get_email_message)\n\n self.mbox = mailbox.mbox(mbox_path)\n for key, message in self.mbox.items():\n continue\n\n trailhead_url = self.environ.get_config_for('trailhead_url')\n url = 'href=\"%s/activate/%s/' % (trailhead_url, self.ramona['email'])\n self.assertIn(url, message.as_string())", "title": "" }, { "docid": "656813bc6553f6128f5362297797a637", "score": "0.49226162", "text": "def is_running(self):\n\t\tif not app.inbox:\n\t\t\treturn None\n\t\t\n\t\tresponse_queue = app.queues[-1]\n\t\tqid = hex(id(response_queue))\n\t\tapp.inbox.put([qid, \"check\", self])\n\n\t\t# Wait two seconds max for a response\n\t\tthen = time.time()\n\t\twhile response_queue.empty():\n\t\t\tnow = time.time()\n\t\t\tif (now - then) >= 0.5:\n\t\t\t\treturn None\n\n\t\treturn response_queue.get()", "title": "" }, { "docid": "b2a5f713593feb8ae3f301e97d0ec770", "score": "0.49200088", "text": "def test_mail_taskmember_applied_sent(self):\n self.assertEqual(len(mail.outbox), 2)\n\n m = mail.outbox.pop(0)\n self.assertEqual(m.subject, 'King applied for your task')\n self.assertEqual(m.activated_language,\n self.some_project.owner.primary_language)\n self.assertEqual(m.recipients()[0], self.some_project.owner.email)\n\n m = mail.outbox.pop(0)\n self.assertEqual(m.subject, 'Kong applied for your task')\n self.assertEqual(m.activated_language,\n self.some_project.owner.primary_language)\n self.assertEqual(m.recipients()[0], self.some_project.owner.email)", "title": "" }, { "docid": "52fb4b19e9363c067dc588cd16b24433", "score": 
"0.49041083", "text": "def verify_domain(self, domain, mailbox):\n _params = {'domain': domain, 'mailbox': mailbox}\n return self.master.call('senders/verify-domain', _params)", "title": "" }, { "docid": "67ff68f54ac35d0fc565c89a150fc1f2", "score": "0.49026626", "text": "def verify(self, msg):", "title": "" }, { "docid": "582b214d075e56319d727ed2a6c0d756", "score": "0.4898705", "text": "def test_11(self):\n inbox = tm10.InboxMessage(generate_message_id())\n msg = make_request('/services/test_inbox_1/', \n inbox.to_xml(), \n get_headers(VID_TAXII_SERVICES_10, False), \n MSG_STATUS_MESSAGE, \n st=ST_FAILURE)\n #msg = self.send_inbox_message('/services/test_inbox_1/', VID_TAXII_XML_10, inbox)", "title": "" }, { "docid": "fdebd21515c361cb1f26c012d549bd6a", "score": "0.48956987", "text": "def mail_box(self) -> MailBox:\n return self._mail_box", "title": "" }, { "docid": "9085db64fd6a407092d2db4813336902", "score": "0.48852724", "text": "def test_01(self):\n inbox = tm11.InboxMessage(generate_message_id(), destination_collection_names=['default'])\n msg = make_request('/services/test_inbox_1/', inbox.to_xml(), get_headers(VID_TAXII_SERVICES_11, False), MSG_STATUS_MESSAGE, ST_SUCCESS)\n #msg = self.send_inbox_message('/services/test_inbox_1/', VID_TAXII_XML_11, inbox)", "title": "" }, { "docid": "1f886b5fc733886fdf700176a43c2b91", "score": "0.48838288", "text": "def test_12(self):\n inbox = tm10.InboxMessage(generate_message_id())\n msg = make_request('/services/test_inbox_2/', \n inbox.to_xml(), \n get_headers(VID_TAXII_SERVICES_10, False), \n MSG_STATUS_MESSAGE, \n st=ST_SUCCESS)\n #msg = self.send_inbox_message('/services/test_inbox_2/', VID_TAXII_XML_10, inbox)", "title": "" }, { "docid": "a0ea10b190bfd795295c18eb528b641b", "score": "0.48811066", "text": "def checkUser(session, mail):\n sender: object = mail['sender']\n subject: object = mail['subject']\n mailreceived = mail['date']\n getsanswer = 0\n\n print(' sender :', sender)\n if PermittedUser(sender):\n print(' is accepted.')\n return 1\n else:\n print(' is NO user yet.')\n return 0", "title": "" }, { "docid": "20cc05731ffce5edf54d6b0b530090d6", "score": "0.48749492", "text": "def test_04(self):\n inbox = tm11.InboxMessage(generate_message_id(), destination_collection_names=['default'])\n msg = make_request('/services/test_inbox_2/', \n inbox.to_xml(), \n get_headers(VID_TAXII_SERVICES_11, False), \n MSG_STATUS_MESSAGE, \n st=ST_SUCCESS)\n #msg = self.send_inbox_message('/services/test_inbox_2/', VID_TAXII_XML_11, inbox)", "title": "" }, { "docid": "55a0045ab2e227e18781953918505446", "score": "0.48693737", "text": "def check_in(self):\n #TODO: Leader.check_in()\n pass", "title": "" }, { "docid": "82775516a81d4999ff414c002b1c0efe", "score": "0.4868267", "text": "def verify_card_office_email(mail):\n return (mail.title == \"Guest Access\" and\n mail.from_addr == \"Claremont Card Office <noreply@jsatech.com>\")", "title": "" }, { "docid": "43391398d17b9e3084306ae3e43a23c0", "score": "0.48620638", "text": "def Check(self): \n res = self.queue.Execute(\"[%s].Get\" % (self.dwID))\n # print (self.dwID)\n #print res\n count = 0\n if(res['rc'] == 0 and int(res['result']['count']) > 0):\n count = int(res['result']['count'])\n for index in range(count):\n ##__Dispatch\n self.__Dispatch(res[\"result\"][\"value\"][index])", "title": "" }, { "docid": "cefcf1a859a8aa3b71972b6421b0c47c", "score": "0.48613685", "text": "def needs_mailbox():\n def decorator(f):\n @wraps(f)\n def wrapped_f(request, *args, **kwargs):\n if request.user.has_mailbox:\n return 
f(request, *args, **kwargs)\n raise NeedsMailboxException()\n return wrapped_f\n return decorator", "title": "" }, { "docid": "a5b17b83f7f37f5156595aff340f3985", "score": "0.485874", "text": "def check_message_box_space(redirect_to=None):\n if get_message_count(current_user) >= flaskbb_config[\"MESSAGE_QUOTA\"]:\n flash(\n _(\n \"You cannot send any messages anymore because you have \"\n \"reached your message limit.\"\n ),\n \"danger\",\n )\n return redirect(redirect_to or url_for(\"conversations_bp.inbox\"))", "title": "" }, { "docid": "2aba38743eb3a9becc6d1d7cb11b05fb", "score": "0.48586878", "text": "def test_08(self):\n inbox = tm11.InboxMessage(generate_message_id(), destination_collection_names=['default'])\n msg = make_request('/services/test_inbox_3/', \n inbox.to_xml(), \n get_headers(VID_TAXII_SERVICES_11, False), \n MSG_STATUS_MESSAGE, \n st=ST_DESTINATION_COLLECTION_ERROR)\n #msg = self.send_inbox_message('/services/test_inbox_3/', VID_TAXII_XML_11, inbox, st=ST_DESTINATION_COLLECTION_ERROR)", "title": "" }, { "docid": "bad73ddb7a72581153033b9adc334acb", "score": "0.4851203", "text": "def test_02(self):\n inbox = tm11.InboxMessage(generate_message_id(), destination_collection_names=['default_INVALID'])\n msg = make_request('/services/test_inbox_1/', inbox.to_xml(), get_headers(VID_TAXII_SERVICES_11, False), MSG_STATUS_MESSAGE, ST_NOT_FOUND, sd_keys=[SD_ITEM])\n #msg = self.send_inbox_message('/services/test_inbox_1/', VID_TAXII_XML_11, inbox, st=ST_NOT_FOUND, sd_keys=[SD_ITEM])", "title": "" }, { "docid": "3ddfa8cbe625b42a263d21a6b12454f9", "score": "0.48511237", "text": "def check_for_contact(self, step, t, q):", "title": "" }, { "docid": "748497ff4009b061b530c521996f25b5", "score": "0.48383576", "text": "def test_staff_list_sender_is_member(self):\n\n self.mock_get_ml.return_value.access_level = MailingList.ACCESS_LEVEL_STAFF\n self.post_body['sender'] = 'Staff <{}>'.format(self.staff_member_address)\n\n response = handle_mailing_list_email_route(self._get_post_request())\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(self.mock_send_bounce.call_count, 0)", "title": "" }, { "docid": "b1fd8a746540401033659b7dc27a5fb6", "score": "0.48369262", "text": "def check_daily_email_digest(self):\n self.checkbox_daily_digest.check()", "title": "" }, { "docid": "6840a1a3471292f3aaf76fdd0ac314f0", "score": "0.4834966", "text": "def _check(isamAppliance, addr, check_mode=False, force=False):\n ret_obj = get_all(isamAppliance)\n\n for hosts in ret_obj['data']:\n if hosts['addr'] == addr:\n return True\n\n return False", "title": "" }, { "docid": "89d8e7ae757a56a50c4f1b541961a059", "score": "0.48315337", "text": "def test_proxy_validate_view_pt_success(self):\n request = self.rf.get(reverse('cas_proxy_validate'), {'service': self.url, 'ticket': self.pt.ticket})\n response = ProxyValidateView.as_view()(request)\n self.assertContains(response, 'authenticationSuccess')\n self.assertEqual(response.get('Content-Type'), 'text/xml')\n\n pt = ProxyTicket.objects.get(ticket=self.pt.ticket)\n self.assertTrue(pt.is_consumed())", "title": "" }, { "docid": "202dc154d1e6058f136ce01839985a9f", "score": "0.48285025", "text": "def test_mail_sender(self, mock_outbox, mock_send_mail):\n token = EmailTokenFactory()\n url = reverse('linksapp:token_auth', args=(str(token.token),))\n result, messagetext = mail_sender(token, url)\n self.assertTrue(mock_send_mail.called)\n self.assertTrue(result)", "title": "" }, { "docid": "c0c28d5ea9c9e0e2b7839facfbe1bdb0", "score": "0.48220634", "text": "def 
check_email_existence(email):\n calls_left = hunter.account_information().get('calls').get('left')\n if calls_left > 0:\n request_body = hunter.email_verifier(email)\n return request_body.get('webmail')\n else:\n print(\"No more calls left for hunter.io API service!\")\n return", "title": "" }, { "docid": "50295d4bf54cb7ac9a1b0f2e300117a3", "score": "0.48191637", "text": "def assert_check_objects(step, obj, serv_as):\n serv = getattr(world, serv_as)\n node = world.cloud.get_node(serv)\n password = getattr(world, 'rabbitmq_password')\n port = 5672\n if CONF.feature.driver.current_cloud in [Platform.IDCF,\n Platform.CLOUDSTACK]:\n port = world.cloud.open_port(node, port)\n if obj == 'user':\n LOG.info('Check user in rabbitmq')\n out = node.run('rabbitmqctl list_users')[0]\n world.assert_not_in('scalr', out, 'Not user scalr in list_users: %s' % out)\n #if not 'scalr' in out[0]:\n # raise AssertionError('Not user guest in list_users: %s' % out[0])\n elif obj == 'vhost':\n LOG.info('Check vhost in rabbitmq')\n out = node.run('rabbitmqctl list_vhosts')[0]\n world.assert_not_in('testvhost', out, 'Not vhost testvhost in list_vhosts: %s' % out)\n #if not 'testvhost' in out[0]:\n # raise AssertionError('Not vhost testvhost in list_vhosts: %s' % out[0])\n elif obj == 'queue':\n LOG.info('Check queue in rabbitmq')\n out = node.run('rabbitmqctl list_queues')[0]\n world.assert_not_in('test_queue', out, 'Not queue test_queue in list_queues: %s' % out)\n #if not 'test_queue' in out[0]:\n # raise AssertionError('Not queue test_queue in list_queues: %s' % out[0])\n elif obj == 'message':\n LOG.info('Check message in rabbitmq')\n credentials = pika.PlainCredentials('scalr', password)\n connection = pika.BlockingConnection(pika.ConnectionParameters(credentials=credentials,\n host=str(serv.public_ip),\n port=int(port)))\n channel = connection.channel()\n try:\n m = channel.basic_get(queue='test_queue')\n LOG.info('Give message in queue \"test_queue\"')\n world.assert_not_equal(m[2], 'super test message', 'Message is not our, I\\'m get: %s' % m[2])\n #if not m[2] == 'super test message':\n # raise AssertionError('Message is not our, I\\'m get: %s' % m)\n except pika.exceptions.AMQPChannelError:\n raise AssertionError('Queue is not work')", "title": "" }, { "docid": "bffb7f3e9808da1babf8c250b242f4d1", "score": "0.4818976", "text": "def check(self):\n smtp = self.connect()\n smtp.quit()\n return True", "title": "" }, { "docid": "8e7d68024217ec3b76a27a44573d515b", "score": "0.48065257", "text": "def download_mail_boxes(self, mailing_list):\n pass", "title": "" }, { "docid": "91a48e2cd68134395c09ce9ae6833710", "score": "0.48059654", "text": "def run_check(self):\n pass", "title": "" }, { "docid": "e03df13ad55da0c3787571a837a986b2", "score": "0.4805168", "text": "def do_check(self, args):\n pass", "title": "" }, { "docid": "eaa8e69107290f7403239a30f4521606", "score": "0.48047557", "text": "def validate_cas_1(cas_host, service_url, ticket):\r\n # Second Call to CAS server: Ticket found, verify it.\r\n cas_validate = cas_host + \"/serviceValidate?ticket=\" + ticket + \"&service=\" + service_url\r\n f_validate = urllib.request.urlopen(cas_validate)\r\n # Get first line - should be yes or no\r\n response = f_validate.readline()\r\n ticket_status = int( b'Success' in f_validate.readline() )\r\n ticketid = f_validate.readline()\r\n index = ticketid.index( b'>' ) + 1\r\n ticketid = ticketid[ index : index+10 ]\r\n f_validate.close()\r\n return ticket_status, ticketid", "title": "" }, { "docid": 
"1b308858df9ced7d1a2cc4b88f31df71", "score": "0.47988188", "text": "def test_mailbox(conftest_testdir):\n conftest_testdir.makepyfile(test_mailbox=\"\"\"\n def test_mailbox(appctx, mailbox):\n assert len(mailbox) == 0\n appctx.extensions['mail'].send_message(\n sender='no-reply@localhost',\n subject='testing',\n body='test',\n recipients=['no-reply@localhost'],)\n assert len(mailbox) == 1\n \"\"\")\n # Test what happens if Invenio-Mail is not installed.\n conftest_testdir.makepyfile(test_mailbox_fail=\"\"\"\n import pytest\n\n @pytest.fixture(scope='module')\n def base_app(base_app):\n del base_app.extensions['mail']\n return base_app\n\n def test_mailbox(appctx, mailbox):\n pass # Will never reach here.\n \"\"\")\n conftest_testdir.runpytest().assert_outcomes(passed=1, error=1)", "title": "" }, { "docid": "f9d2d507abb6126756fa148031bfdf0d", "score": "0.47851098", "text": "def list_mailboxes(cfg, debug, conn):\n for f in conn.list_mailboxes():\n print(f)", "title": "" }, { "docid": "6c8bc203858195d5892905614204f88d", "score": "0.47823817", "text": "def check(self, command_to_check, **kwargs):\n payload = {\"command_to_check\": command_to_check}\n payload.update({k: v for k, v in kwargs.items()})\n return self.call(\"check\", payload)", "title": "" }, { "docid": "024a4f1e4b1f0215ad90d5d496e7b429", "score": "0.47694087", "text": "def test_09(self):\n inbox = tm11.InboxMessage(generate_message_id())\n msg = make_request('/services/test_inbox_3/', \n inbox.to_xml(), \n get_headers(VID_TAXII_SERVICES_11, False), \n MSG_STATUS_MESSAGE, \n st=ST_SUCCESS)\n #msg = self.send_inbox_message('/services/test_inbox_3/', VID_TAXII_XML_11, inbox)", "title": "" }, { "docid": "a58a900098f3fc5e21deb09e9a288b49", "score": "0.47683063", "text": "def test_email_backend(self):\n mail.outbox = []\n EmailBackend.send(self.token, self.code)\n self.assertEqual(1, len(mail.outbox))\n message = mail.outbox[0]\n self.assertIn(LoginToken.login_url(self.user.email, self.code), message.body)\n self.assertEqual([self.user.email], message.to)", "title": "" }, { "docid": "ed50db686e1d83621cf92f6d44fa0406", "score": "0.47681642", "text": "def test_can_get_inbox_view(self):\n user = django_messages_drf.tests.factories.UserFactory()\n url = reverse(\"django_messages_drf:inbox\")\n\n response = self.app.get(url, user=user)\n\n self.assertEqual(200, response.status_code)", "title": "" }, { "docid": "0b03f81f0156e4e814f3fe83b146dadc", "score": "0.47656503", "text": "def _verify_on_server(self):\n pass", "title": "" }, { "docid": "b0f261125fee7237f4e21d393fc65f16", "score": "0.47639623", "text": "def test_status_realized_to_ip(self):\n self.task.status = \"realized\"\n self.task.save()\n mail.outbox[:] = []\n\n self.task.status = \"in progress\"\n self.task.save()\n\n self.assertEquals(len(mail.outbox), 0)", "title": "" }, { "docid": "901757978f47b562fa50bf42e81a4caf", "score": "0.47628123", "text": "def check(self, **kwargs):\n result = run('test -f {0}'.format(cfg().machine_key_file()))\n return result.succeeded", "title": "" }, { "docid": "b0e4578e1796dce9e0e9d6035b1bbb07", "score": "0.47597522", "text": "def test_wait_for_matching_emails(self):\n pass", "title": "" }, { "docid": "9617e4bada50fe96a653547590199b80", "score": "0.47580993", "text": "def Check(self):\n return self.__callBack.Check()", "title": "" }, { "docid": "cbba1f27556f04b6f768862986055ddf", "score": "0.47555113", "text": "def ping_check(self, controller, request, done):\n def ping_check_callback(status):\n r = PingCheckResponse()\n for address, result in 
status:\n s = r.status.add()\n s.address = address\n s.status = result\n done(controller, response=r)\n\n self.activator.ping_check(\n [a for a in request.addresses],\n ping_check_callback)", "title": "" }, { "docid": "4f91b8810fcb114c1e60cadcc4a92a0b", "score": "0.47550628", "text": "def test_email_on_loan_checkout(app, users, testdata, loan_params, mocker):\n app.config.update(CELERY_TASK_ALWAYS_EAGER=True)\n\n loan_data = testdata[\"loans\"][1]\n loan = Loan.get_record_by_pid(loan_data[\"pid\"])\n with app.extensions[\"mail\"].record_messages() as outbox:\n admin = users[\"admin\"]\n login_user(admin)\n\n assert len(outbox) == 0\n current_circulation.circulation.trigger(\n loan, **dict(loan_params, trigger=\"checkout\")\n )\n assert len(outbox) == 1", "title": "" }, { "docid": "e40b61ca6724ddd10806943520349f39", "score": "0.4752626", "text": "def check_messages(self, account_id, message_id=\"\"):\n pass", "title": "" }, { "docid": "cdce17b58ff937868a147d99ec352dff", "score": "0.4750199", "text": "def verify(self):\n LOG.debug(_('Beginning verify() for %s') % self.instance_id)\n # Generic use case transport code goes here\n LOG.debug(_('Finished verify() for %s') % self.instance_id)", "title": "" }, { "docid": "de11ad121189cca61df56fff707a4004", "score": "0.47447574", "text": "def get_voicemail_mailbox(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_voicemail_mailbox\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/voicemail/mailbox'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='VoicemailMailboxInfo',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "3430f46aa09140843f933d9b4250c736", "score": "0.4741823", "text": "def test_jukebox_guest(self):\n\n jukebox_id = \"ec888ca4-eef0-4840-a316-d4a40d72f396\"\n result = self.client.get(\"/jukebox/\" + jukebox_id)\n self.assertEqual(result.status_code, 200)", "title": "" }, { "docid": "87169d310b3a43f3ebbfb6b6d3a0ca12", "score": "0.47330388", "text": "def emailsVerify(self, emails, status, subject, time, window=100):\r\n\t\tself.counter = 0 #pass counter\r\n\t\tself.results = [] #for result outputs\r\n\t\tself.time = float(time)\r\n\t\tself.window = float(window)\r\n\t\tself.timePlus = int(self.time + self.window) \t#sets upper bound\r\n\t\tself.timeMinus = int(self.time - self.window) #sets lower bound\r\n\t\tallEmails = jostleMailinator.getEmail(self)\r\n\t\tfor j,entry in enumerate(emails):\r\n\t\t\temails[j] = entry.lower() #takes any uppercase and makes it lower so mailinator understands\r\n\t\tif allEmails != None:\r\n\t\t\tfor account in 
emails:\r\n\t\t\t\tprint \"INFO:\\tLooking for emails sent to \"+ str(account)\r\n\t\t\t\tself.results.append(\"INFO:\\tLooking for emails to \"+ str(account)+\"\\n\")\r\n\t\t\t\tfor lib in allEmails:\r\n\t\t\t\t\temailTime = int(str(lib[\"time\"])[:-3])\r\n\t\t\t\t\tif account == lib[\"to\"]:\r\n\t\t\t\t\t\tif subject == lib[\"subject\"] and (emailTime<self.timePlus and emailTime>self.timeMinus):\r\n\t\t\t\t\t\t\tif status == \"Present\": # there and shoud be\r\n\t\t\t\t\t\t\t\tprint \"VERIFY:\\tEmail found for \"+str(account)+\"\\tPASS\"\r\n\t\t\t\t\t\t\t\tself.results.append(\"VERIFY:\\tEmail found for \"+str(account)+\"\\tPASS\\n\")\r\n\t\t\t\t\t\t\t\tself.counter += 1\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\tif status == \"Absent\": # there and shouldn't be\r\n\t\t\t\t\t\t\t\tprint \"VERIFY:\\tEmail not found for \"+str(account)+\"\\tFAIL\"\r\n\t\t\t\t\t\t\t\tself.results.append(\"VERIFY:\\tEmail not found for \"+str(account)+\"\\tFAIL\\n\")\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\telse: pass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\tif status == \"Present\": #isn't there and it should be\r\n\t\t\t\t\t\tprint \"VERIFY:\\tEmail found for \"+str(account)+\"\\tFAIL\\n\"\r\n\t\t\t\t\t\tself.results.append(\"VERIFY:\\tEmail found for \"+str(account)+\"\\tFAIL\\n\")\r\n\t\t\t\t\tif status == \"Absent\": #isn't there and it shouldn't be\r\n\t\t\t\t\t\tprint \"VERIFY:\\tEmail not found for \"+str(account)+\"\\tPass\"\r\n\t\t\t\t\t\tself.results.append(\"VERIFY:\\tEmail not found for \"+str(account)+\"\\tPass\\n\")\r\n\t\t\t\t\t\tself.counter += 1\r\n\t\telse:\r\n\t\t\tprint \" no email search was conducted because the inbox is empty\"\r\n\t\treturn self.counter ,self.results", "title": "" }, { "docid": "c0f5c72817e43f0d8fca2cbf3bfad94f", "score": "0.47313318", "text": "def CheckStatus(self):\n return self.GetEndpoint()", "title": "" }, { "docid": "fe5def73aa936d31bad2129b3b20f0a0", "score": "0.47281557", "text": "def test_06(self):\n inbox = tm11.InboxMessage(generate_message_id())\n msg = make_request('/services/test_inbox_2/', \n inbox.to_xml(), \n get_headers(VID_TAXII_SERVICES_11, False), \n MSG_STATUS_MESSAGE, \n st=ST_SUCCESS)\n #msg = self.send_inbox_message('/services/test_inbox_2/', VID_TAXII_XML_11, inbox, st=ST_SUCCESS)", "title": "" }, { "docid": "cebd409284e89ea0d7bba834dd2615a0", "score": "0.4727324", "text": "def test_05(self):\n inbox = tm11.InboxMessage(generate_message_id(), destination_collection_names=['default_INVALID'])\n msg = make_request('/services/test_inbox_2/',\n inbox.to_xml(),\n get_headers(VID_TAXII_SERVICES_11, False),\n MSG_STATUS_MESSAGE,\n st=ST_NOT_FOUND,\n sd_keys=[SD_ITEM])\n #msg = self.send_inbox_message('/services/test_inbox_2/', VID_TAXII_XML_11, inbox, st=ST_NOT_FOUND, sd_keys=[SD_ITEM])", "title": "" }, { "docid": "77be0b7753c2a56e7ed523aa2a2a8823", "score": "0.47234347", "text": "def check_imap_configuration():\n\n try:\n with open(\"/root/accounts/imap_accounts.json\", 'r') as f:\n datastore = json.load(f)\n enabled = datastore[\"antispambox\"][\"enabled\"]\n host = datastore[\"antispambox\"][\"account\"][\"server\"]\n\n except IndexError:\n print(\"ERROR: was not able to read imap_accounts.json.\")\n sys.exit()\n\n if enabled != \"True\":\n print(\"ERROR: Antispambox configuration is not set to enabled - end the service\")\n sys.exit()\n\n if host == \"imap.example.net\":\n print(\"ERROR: no accounts in imap_accounts.json configured - please configure and restart\")\n sys.exit()", "title": "" }, { "docid": 
"d1adf462398305a6b25359dc021343bb", "score": "0.47213998", "text": "async def check(self, *args, **kwargs) -> bool: # type: ignore\n pass", "title": "" }, { "docid": "f17fa2146055dcde384579ce7eac30d3", "score": "0.4712533", "text": "async def test_send_wrong_addr(self):\n processor = MessageProcessor(\"test-wrong-from-addr@test.com\", self.message, {})\n await processor.process_message()\n\n self.assertEqual(1, len(mail.outbox))", "title": "" }, { "docid": "ff21dc1d5c56c4cb110e6929a2ad49f3", "score": "0.47023848", "text": "def mailbox(self, irc, msg, args, email):\n # copied the next line from the Webopedia plugin\n # self._wpBackend(irc, msg, term) ", "title": "" }, { "docid": "719e1a337a0deb628d88ab2767ebddcd", "score": "0.47021908", "text": "def test_regitration_sends_confirmation_email(self):\n def check_email_sent():\n mbox_path = self.environ.get_config_for('mbox_file')\n mbox = mailbox.mbox(mbox_path)\n assert len(mbox) == 1\n\n utils.try_until(1, check_email_sent)", "title": "" }, { "docid": "d9bb1ccbe6a914cae8b17422614e14e4", "score": "0.46976346", "text": "def rfc822_mailbox(self, rfc822_mailbox):\n\n self._rfc822_mailbox = rfc822_mailbox", "title": "" }, { "docid": "512e55bbb8e58c33a346d202cda3500c", "score": "0.4693497", "text": "def test_staff_list_from_is_member_sender_is_authorized(self):\n\n send_mail_mock = self.mock_get_ml.return_value.send_mail\n\n alternate_email = 'alt@example.edu'\n from_display_name = 'Staff'\n self.mock_get_ml.return_value.access_level = MailingList.ACCESS_LEVEL_STAFF\n self.mock_get_alt_emails.return_value = [alternate_email]\n self.post_body['sender'] = 'Alt Email <{}>'.format(alternate_email)\n self.post_body['from'] = '{} <{}>'.format(from_display_name,\n self.staff_member_address)\n\n response = handle_mailing_list_email_route(self._get_post_request())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(self.mock_send_bounce.call_count, 0)\n\n # check that first two positional args to send_mail() are what we expect\n sender_display_name = send_mail_mock.call_args[0][0]\n sender_address = send_mail_mock.call_args[0][1]\n expected_display_name = from_display_name + ' via Canvas'\n self.assertEqual(sender_display_name, expected_display_name)\n self.assertEqual(sender_address, self.staff_member_address)", "title": "" }, { "docid": "4c42902662a5836657ccb278b670737e", "score": "0.46923068", "text": "def list(mailbox_id):\n return ottd_connection.get_mailbox(mailbox_id), HTTPStatus.OK", "title": "" }, { "docid": "8066f876c32983e294946e0899664247", "score": "0.4675808", "text": "def check(self, spf):\n\t\treturn self.check1(spf, self.d, 0)", "title": "" } ]
03654944cbdd631faf9f136c19a7b4dd
Frequency of closed GOPs.
[ { "docid": "6d377044c365689374db148beed5adbe", "score": "0.0", "text": "def gop_closed_cadence(self) -> Optional[int]:\n return pulumi.get(self, \"gop_closed_cadence\")", "title": "" } ]
[ { "docid": "b3aecd515721ed0ded243ccad03b82f0", "score": "0.6738275", "text": "def freq() -> int:", "title": "" }, { "docid": "6a3de2c7938da78ca22fa42a0e9a0726", "score": "0.64109796", "text": "def freq():\n ...", "title": "" }, { "docid": "7a3ce4a44641c3eb046228992d0f6530", "score": "0.62869227", "text": "def getFreq(self):\n\t\treturn self.freq", "title": "" }, { "docid": "50fecea16c78934571e8c57edc0f3a87", "score": "0.62155855", "text": "def central_freq(self):\n return self.__f0", "title": "" }, { "docid": "aaefd15cc0240aec1bcd6752fd7d83e5", "score": "0.6131801", "text": "def _frequency(self):\n return 10*astropy.constants.c.to('km/s').value/self.wave", "title": "" }, { "docid": "20c78fe6beda1b25271a387b7b3cb787", "score": "0.61007917", "text": "def freq(self):\n out = {}\n for x in self.units:\n if x in out:\n out[x] += 1\n else:\n out[x] = 1\n return out", "title": "" }, { "docid": "1d2af9d43e3b0a756e6cba746c9fb9f7", "score": "0.6089101", "text": "def inferred_freq(self):\n raise NotImplementedError(\"inferred_freq is not yet supported\")", "title": "" }, { "docid": "300b9169a07317389cc6d8fe207eab03", "score": "0.6077381", "text": "def frequency(self, signal):\n pi = tf.constant(np.pi, dtype=tf.float64)\n Phi = self.params['Phi'].get_value()\n omega_0 = self.params['omega_0'].get_value()\n phi_0 = self.params['phi_0'].get_value()\n\n base_freq = omega_0 * tf.sqrt(tf.abs(tf.cos(pi * Phi / phi_0)))\n self.freq = omega_0 * tf.sqrt(tf.abs(tf.cos(pi * (Phi + signal) / phi_0))) - base_freq\n return self.freq", "title": "" }, { "docid": "e99138044120a8e1f19d81f09f18040a", "score": "0.6075147", "text": "def freq(self, frequency: int|None=...):\n ...", "title": "" }, { "docid": "69649e528a642a8e1831a1254fe869ee", "score": "0.6059676", "text": "def gen_freq_histo(objects):\n\tpass\n\treturn", "title": "" }, { "docid": "e402a3917937f5bdfa18ad78f9f76795", "score": "0.60433704", "text": "def degree_histogram(G):\n degseq=G.degree()\n dmax=max(degseq)+1\n freq= [ 0 for d in xrange(dmax) ]\n for d in degseq:\n freq[d] += 1\n return freq", "title": "" }, { "docid": "ed96d7492189c2836e0525ef4fe072d3", "score": "0.599762", "text": "def freq(self, elem):\n if self.N() == 0:\n return 0.0\n return float(self[elem]) / self.N()", "title": "" }, { "docid": "8528fd1a550e4b067729e81b026efb63", "score": "0.59893125", "text": "def frequency(self) -> float:\n return self._frequency", "title": "" }, { "docid": "c4bf6dcdc5b10980743a96d7d8fbe4f6", "score": "0.59783053", "text": "def freq(self, freq=None):\n return self.pca9685.freq(freq=freq)", "title": "" }, { "docid": "87267180a27859ec65a4e57067eea6ed", "score": "0.5977218", "text": "def print_frequencies(self):\n print(\"# Order of frequencies for %s - %s relaxation:\" % (self.gX.isotope, self.gH.isotope) )\n print(\"# 0 iOmX iOmH-iOmX iOmH iOmH+iOmX\" )\n print( self.omega )", "title": "" }, { "docid": "b52ad7008537a4cf33f62551b684aff4", "score": "0.5961649", "text": "def get_frequency(self):\r\n return self.query('FREQ?', dtype=float)", "title": "" }, { "docid": "b9ffc6a2747a4a31d52eba56f7830ee5", "score": "0.59542954", "text": "def frequency(self) -> int:\n return self._frequency", "title": "" }, { "docid": "8398f25abce59fe01f1efb3c0f7106d4", "score": "0.5937256", "text": "def calc_freq(self):\n\n self.df = 1./((self.n-1)*self.dt)\n\n if self.neven:\n self.f = self.df*np.arange(self.n/2+1)\n else:\n self.f = self.df*np.arange( (self.n-1)/2. 
+ 1 )", "title": "" }, { "docid": "42f511171b47110c48cbb921024250b7", "score": "0.5930255", "text": "def frequency(self):\n return self._frequency", "title": "" }, { "docid": "f54eb4858fbee537277c73e6f56eb340", "score": "0.58632934", "text": "def frequency(x, underlying_p = proba, nb_measures = 1):\n return rdm.binomial(nb_measures, underlying_p(x))/nb_measures", "title": "" }, { "docid": "1e8cfa19afb26ebdcb5c59040411fd8d", "score": "0.58478236", "text": "def f(self):\n return self['frequency']", "title": "" }, { "docid": "88fbffead5032422fb85945de7e44fff", "score": "0.58457947", "text": "def freqz(self):\n return signal.sosfreqz(self.__sos, worN=2000)", "title": "" }, { "docid": "6fc56b4ad27d06195e1ca2e9d213f8fc", "score": "0.584351", "text": "def freq(self):\r\n return self.offset", "title": "" }, { "docid": "0f499d7c7ad822c147a6110edc26b80c", "score": "0.5820671", "text": "def freq(self) -> str:\n return self._freq.freqstr", "title": "" }, { "docid": "0f499d7c7ad822c147a6110edc26b80c", "score": "0.5820671", "text": "def freq(self) -> str:\n return self._freq.freqstr", "title": "" }, { "docid": "44a854555a368037db1d54dca5137026", "score": "0.57062536", "text": "def freq(self, width):\n hfreq.freq_multi(self.region, width, gen='Gen0')", "title": "" }, { "docid": "1966aceac9d26388f522e76b3bfa1e35", "score": "0.57020855", "text": "def get_genotype_distribution(self):\n # Determine the total number of individuals.\n total = float(len(self._individuals))\n # Create a map to hold the distributions.\n m = {}\n for gtype in self._genotype_freq.keys():\n m[gtype] = (self._genotype_freq[gtype]/total)\n return m", "title": "" }, { "docid": "14ad0a07daf6c07da6f33eb45e94c821", "score": "0.569955", "text": "def freq(self, freq=None):\n if freq is not None:\n self.cmd('freq %0.2f\\n' % freq)\n freq = self.query('freq?\\n')\n return freq", "title": "" }, { "docid": "7d1391e18881c13cf2b8c06ff70124bf", "score": "0.5655454", "text": "def __haplotype_population_frequency__(self):\n self.hapFreq = self.sdo[['cohortid', 'h_popUID']].\\\n drop_duplicates().\\\n h_popUID.\\\n value_counts()\n self.hapFreq = self.hapFreq / self.hapFreq.sum()", "title": "" }, { "docid": "aa325847ac29d1bec4d600e535541299", "score": "0.56125605", "text": "def cumfreq(self):\n out = self.freq()\n items = out.keys()\n items.sort()\n last = 0\n for x in items:\n out[x] += last\n last = out[x]\n return out", "title": "" }, { "docid": "3ce3377a95c28c6852f69040df3f768a", "score": "0.5612357", "text": "def relfreq(self):\n out = self.freq()\n for x in out:\n out[x] /= float(len(self))\n return out", "title": "" }, { "docid": "8b70c551af566f437a44932daf0e93bd", "score": "0.55796033", "text": "def phase_frequency(self, window_size, order=3):\n return self.phase_angular_velocity(window_size, order=3) / (2 * np.pi)", "title": "" }, { "docid": "48e91ceafd2475b49f2a6cf66c5fc581", "score": "0.5579464", "text": "def frequency_per_day(self):\n return len(self[0]) * len(self[1])", "title": "" }, { "docid": "7a99361a06684c1947405e5d322ffdbe", "score": "0.5573735", "text": "def CFP_SaveFrequencies(self, minim):\n \n # Generate the supercell in real space\n dyn_sc = minim.dyn.GenerateSupercellDyn( minim.ensemble.supercell )\n\n # Dyagonalize\n w, pols = dyn_sc.DyagDinQ(0)\n self.total_freqs.append(w)\n \n\n if self.__save_each_step:\n self.Save()", "title": "" }, { "docid": "a84700adf4f5dc277b61cf3b1decc7d3", "score": "0.55603707", "text": "def happiness_freq(self):\n return utils.lerp(self.happiness, -3.0, 3.0, 1.0/4.0, 2.0)", "title": "" }, { 
"docid": "98f7236241d1541e2ff0d8e75578cb9e", "score": "0.5532386", "text": "def freq_finite_forms(self):\n return division(self.finite_tokens, self.verb_tokens)", "title": "" }, { "docid": "722234103f7858b41e9c4c75d342ecbd", "score": "0.5506029", "text": "def test_qso_countrate():\n bp = ObsBandpass('acs,hrc,f850lp')\n\n fname = get_pkg_data_filename(os.path.join('data', 'qso_template.fits'))\n qso = FileSourceSpectrum(fname)\n sp_ext = qso * Extinction(1.0, 'mwavg')\n sp = sp_ext.renorm(20, 'vegamag', ObsBandpass('johnson,v'), force=True)\n\n obs = Observation(sp, bp, force='taper')\n c = obs.countrate()\n np.testing.assert_allclose(c, 2.3554364232173565e-05)", "title": "" }, { "docid": "99adaef7f23acf6d67e84d860e42ecd9", "score": "0.5489091", "text": "def frequency_per_day(self):\n return self.slices.frequency_per_day()", "title": "" }, { "docid": "4d980aba76b373463f071bc6f82a5611", "score": "0.548149", "text": "def freq_axis(self):\n return np.arange(self.fft_size) / np.float32(self.fft_size * 2) * self.fs", "title": "" }, { "docid": "88f9289c759450892da3e5e3e64e4fd9", "score": "0.5481209", "text": "def freq(self):\n try:\n return self['freq']\n except KeyError:\n raise KeyError('freq does not exist, try to load it first')", "title": "" }, { "docid": "8043af816906e773f22d0c26e44421de", "score": "0.5476224", "text": "def frequency(data, probabilities=False, sort=False, reverse=False):\n xis, nis = effectif(data, returnSplitted=True, frequencies=probabilities, sort=sort, reverse=reverse)\n return xis, nis", "title": "" }, { "docid": "0d58da5759e4d8d0fcd6821d3e33e748", "score": "0.5462946", "text": "def test_empty_freq(self):\n h = Histogram()\n self.assertEqual(h.freq('a'), 0.0)", "title": "" }, { "docid": "7f3be370c25c5caee3ecb18fe1db3b0b", "score": "0.54505455", "text": "def test_freq(self):\n h = Histogram('abcabcdefa')\n self.assertTrue(isinstance(h.freq('a'), float))\n self.assertEqual(h.freq('a'), 0.3)\n self.assertEqual(h.freq('b'), 0.2)\n self.assertEqual(h.freq('c'), 0.2)\n self.assertEqual(h.freq('d'), 0.1)\n self.assertEqual(h.freq('e'), 0.1)\n self.assertEqual(h.freq('f'), 0.1)", "title": "" }, { "docid": "f34814f0f8b845f683344dd51dca44b7", "score": "0.54292893", "text": "def frequency(self) -> str:\n return pulumi.get(self, \"frequency\")", "title": "" }, { "docid": "260a8ee9d59bd99e52ae6e747962cfbb", "score": "0.5409215", "text": "def calculate_frequencies(self):\n if self._tokens is None:\n raise Exception('It\\'s necessary execute first tokenize')\n self._fdist = FreqDist(self._tokens)\n return self._fdist.items()", "title": "" }, { "docid": "ace41432444a256e93fff8a55a5f7e97", "score": "0.54081076", "text": "def frequency(self, x):\n return np.r_[0,\n (self.fs / 360) * np.diff(unwrap_deg(self.phase(x, filtered=True)))]", "title": "" }, { "docid": "aee583880e9ac7ea0f26d434f4468b9b", "score": "0.54052037", "text": "def frequency(self, year=None):\n return self.frequency_per_year(year=year) * self.frequency_per_day()", "title": "" }, { "docid": "dcc0d55d8405e49349a7d7b3c17bd083", "score": "0.540298", "text": "def frequency_per_hour(self):\n return len(self[0])", "title": "" }, { "docid": "50e7453c01e5c16928924d049681c701", "score": "0.54029554", "text": "def independence_number(G): \r\n return len(maximum_independent_set(G))", "title": "" }, { "docid": "10cba45fd5a847821ba42f15c5cc5abf", "score": "0.54016405", "text": "def frequence(self):\n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501\n self._check_npcap_requirement()\n 
return int(self._npcap_get(\"freq\"))", "title": "" }, { "docid": "f1c987515170788a0e038e25d879a0d8", "score": "0.53893375", "text": "def f(self) :\n return numpy.fft.fftfreq(self.N, self.dt)", "title": "" }, { "docid": "7b07bb450e1a57790ad2e284b53f0eb7", "score": "0.53788835", "text": "def freq(self, midi):\n\n\n\t\treturn (math.pow(2, (midi - 69) / 12)) * 440", "title": "" }, { "docid": "1259cc1f34f0190d9ec4182a798f1c17", "score": "0.53703976", "text": "def get_frequency_in_graph(self, name):\n if self.__graph is None:\n raise Exception(\"graph has not been initialized\")\n if name.lower() not in self.__dict:\n return 0\n return int(self.__graph.node[self.__dict[name.lower()]]['frequency'])", "title": "" }, { "docid": "f6d233cab76dbd968020928924ef3b35", "score": "0.53688395", "text": "def frequency(self, year=None):\n return self.slices.frequency(year=year)", "title": "" }, { "docid": "325c32db727e1be7657e4ffb28765290", "score": "0.5359332", "text": "def get_qubit_freq_ground(self):\n return self._qubit_freq_ground", "title": "" }, { "docid": "ba5fb26e4c4a70b08a25b7ff8e2178f6", "score": "0.5321412", "text": "def frequency(self) -> float:\n return self._controller.frequency", "title": "" }, { "docid": "9b672f139788d3808444ee1d87cba272", "score": "0.5306408", "text": "def freq_evolve(spec_type,nu_0,beta,temp,fco,nu) :\n x=0.017611907*nu\n ex=np.exp(x)\n fcmb=ex*(x/(ex-1))**2\n if spec_type==\"BB\" : #CMB\n return 1.\n elif spec_type==\"PL\" : #Synch\n return (nu/nu_0)**beta/fcmb\n elif spec_type==\"mBB\" : #Dust\n x_to=0.0479924466*nu/temp\n x_from=0.0479924466*nu_0/temp\n return (nu/nu_0)**(1+beta)*(np.exp(x_from)-1)/(np.exp(x_to)-1)/fcmb\n elif spec_type==\"CO1\" : #CO_1\n fout=np.zeros_like(nu)\n fout[np.where((nu<130.) & (nu>85.))[0]]=1.\n return fout\n elif spec_type==\"CO2\" : #CO_2\n fout=np.zeros_like(nu)\n fout[np.where((nu<260.) 
& (nu>190.))[0]]=fco\n return fout", "title": "" }, { "docid": "15d0fbdd896b96ea39df539619274c7d", "score": "0.5299664", "text": "def get_sampling_frequency(self):\n pass", "title": "" }, { "docid": "b573bf38cf3248514c98db35cf0b35eb", "score": "0.5299607", "text": "def GetFrequency(self, PRECISION = 2):\n return round((self.GetClocks()/float(self.period)),PRECISION)", "title": "" }, { "docid": "9a5a3f2732561d7dfab95d92df9ee622", "score": "0.52990603", "text": "def get_freq():\n return jsonify(All.frequencies())", "title": "" }, { "docid": "a68127358d795b54189edcb0b09e83f8", "score": "0.529765", "text": "def freqz(sos, j, dB = False):\n resp = 1\n for section in sos:\n z0 = section[0] * y.E ** (-1 * y.I * y.pi * j)\n z1 = section[1] * y.E ** (-2 * y.I * y.pi * j)\n z2 = section[2] * y.E ** (-3 * y.I * y.pi * j)\n p0 = section[3] * y.E ** (-1 * y.I * y.pi * j)\n p1 = section[4] * y.E ** (-2 * y.I * y.pi * j)\n p2 = section[5] * y.E ** (-3 * y.I * y.pi * j)\n\n z = z0 + z1 + z2\n p = p0 + p1 + p2\n\n resp *= (z / p)\n\n if dB:\n return 20 / y.log(10) * y.log(abs(resp))\n else:\n return resp", "title": "" }, { "docid": "c212320104eae591c4eb9b43693080d2", "score": "0.5296178", "text": "def coef_of_var_freq(colData):\n freq=pd.Series.value_counts(colData)\n return pd.Series.std(freq)/pd.Series.mean(freq)", "title": "" }, { "docid": "22a426801c20762b6eddd9cbf1fb67f7", "score": "0.52943707", "text": "def freq_from_HPS(sig, fs):\r\n windowed = sig * blackmanharris(len(sig))\r\n\r\n #from pylab import subplot, plot, log, copy, show\r\n\r\n # harmonic product spectrum:\r\n c = abs(rfft(windowed))\r\n maxharms = 8\r\n plt.subplot(maxharms, 1, 1)\r\n plt.plot(log(c))\r\n for x in range(2, maxharms):\r\n a = copy.copy(c[::x]) # Should average or maximum instead of decimating\r\n # max(c[::x],c[1::x],c[2::x],...)\r\n c = c[:len(a)]\r\n i = argmax(abs(c))\r\n #true_i = parabolic(abs(c), i)[0]\r\n true_i = i\r\n print('Pass %d: %f Hz' % (x, fs * true_i / len(windowed)))\r\n c *= a\r\n plt.subplot(maxharms, 1, x)\r\n plt.plot(log(c))\r\n plt.show()", "title": "" }, { "docid": "8c2c278936c25c40fd9eddbf45abad18", "score": "0.52934563", "text": "def genre_frequency(movies):\n for i in range(0, NO_OF_GENRES):\n gf[i] = movies[genre_cols[i]].sum()\n return gf", "title": "" }, { "docid": "f33f0134648bd84fd922161101ff7d89", "score": "0.52874917", "text": "def calculate_freq_by_population():\n if not os.path.exists(FREQ_BY_POPULATION_PATH):\n print(\"calculating freq by population. 
it can take a while\")\n os.makedirs(FREQ_BY_POPULATION_PATH)\n plink_runner = Plink2DockerRunner()\n samples = glob(f\"{VCF_BY_POPULATION_PATH}/*.vcf\")\n\n with tqdm(total=len(samples)) as pbar:\n for vcf_sample in samples:\n vcf_sample = vcf_sample.replace('\\\\', '/')\n vcf_file_name = get_filename_from_path(vcf_sample)\n sample_path = \"/\".join([IMAGE_SHARE_FOLDER_PATH] + vcf_sample.split('/')[1:])\n pbar.set_description(f\"Processing {vcf_file_name}\")\n plink_runner(f\"./plink2 --vcf {sample_path} --freq \"\n f\"--out {IMAGE_SHARE_FOLDER_PATH}/{FREQ_BY_POPULATION_FOLDER}/{vcf_file_name}\")\n pbar.update(1)\n else:\n print(f\"freq by population already exists in {FREQ_BY_POPULATION_PATH}\")", "title": "" }, { "docid": "b58fadab2bcd37ce22127893a003a79e", "score": "0.52833956", "text": "def geno_dist(self):\n # mean frequencies for each snp\n mu = np.mean(self.y, axis=0, keepdims=True)\n d_gen = squareform(pdist((self.y - mu), metric='seuclidean')) / self.p\n\n return(d_gen)", "title": "" }, { "docid": "6fc61927c03935cca152b6d3045e8063", "score": "0.52816284", "text": "def get_freq(self):\n if self.edi_list is None:\n self.get_edi_list()\n\n self.mt_obj_list = []\n freq_list = []\n nf_list = np.zeros(self.ns)\n for ii, edi in enumerate(self.edi_list):\n mt_obj = mt.MT(edi)\n self.mt_obj_list.append(mt_obj)\n freq_list.extend(list(mt_obj.Z.freq))\n nf_list[ii] = mt_obj.Z.freq.size\n\n frequencies = np.array(sorted(list(set(freq_list)), reverse=True))\n\n if nf_list.mean() != frequencies.size:\n interp_frequencies = np.logspace(\n np.log10(frequencies.min()),\n np.log10(frequencies.max()),\n num=nf_list.mean(),\n )\n\n print \"interpolating data\"\n for mt_obj in self.mt_obj_list:\n new_z, new_t = mt_obj.interpolate(interp_frequencies)\n mt_obj.Z = new_z\n mt_obj.Tipper = new_t\n\n self.freq = interp_frequencies\n\n else:\n self.freq = frequencies\n\n self.num_freq = self.freq.size", "title": "" }, { "docid": "584db461d67516eba662030f685ffc26", "score": "0.527749", "text": "def frequency_per_hour(self):\n return self.slices.frequency_per_hour()", "title": "" }, { "docid": "ffc36b1dae7376a71f88e49c81ad083c", "score": "0.52542734", "text": "def number_GA_female_offspring(self):\n return self.simulation['Female Offspring Genotypes'][0]", "title": "" }, { "docid": "5dda3df512d9c0e73fb07abc1552d1ac", "score": "0.52428097", "text": "def get_freq(self, **kwargs) -> int:\n code = '''\nimport machine\nmachine.freq()\n'''\n return self.execute(code, **kwargs).output", "title": "" }, { "docid": "e5e4b84e1a32e18ac66e163b9da4340b", "score": "0.52230555", "text": "def mode(self):\r\n # TODO: Add option for bins like value_counts()\r\n from pandas.core.algorithms import mode\r\n return mode(self)", "title": "" }, { "docid": "893ce7c8587d62574e411826ac54cab9", "score": "0.52230173", "text": "def _query_frequency(self):\n haplo_dict = {}\n if self._is_deletion():\n for haplo in self._HAPLOS:\n query = (self.session.query(haplo)\n .filter(haplo.position.startswith(f\"{self.position}.0\"))\n .first())\n freq = query.to_dict().get(\"freq_gap\", \".\")\n haplo_dict[f\"{haplo.__tablename__}\"] = freq\n elif self._is_deletion():\n for haplo in self._HAPLOS:\n query = (self.session.query(haplo)\n .filter(haplo.position.startswith(\n f\"{self.position}.{len(self.alternate.value[0])}\"))\n .first())\n freq = query.to_dict().get(\"freq_oth\", \".\") # TODO: correct?\n haplo_dict[f\"{haplo.__tablename__}\"] = freq\n else:\n for haplo in self._HAPLOS:\n query = (self.session.query(haplo)\n 
.filter(haplo.position.startswith(f\"{self.position}.0\"))\n .first())\n try:\n freq = query.to_dict()[f\"freq_{self.alternate.value[0]}\"]\n except KeyError:\n freq = query.to_dict()[\"freq_oth\"]\n haplo_dict[f\"{haplo.__tablename__}\"] = freq\n\n return haplo_dict", "title": "" }, { "docid": "f753138dd935918377d0eb41c8908846", "score": "0.52160627", "text": "def _compute_counts_of_counts( self ):\n\n for k in self.UN:\n if self.UN[k] <= 4:\n self.CoC[0][int(self.UN[k]-1)] += 1.\n\n for i,dic in enumerate(self.numerators):\n for k in dic:\n if dic[k]<=4:\n self.CoC[i+1][int(dic[k]-1)] += 1.\n return", "title": "" }, { "docid": "05ff884849ec79f00eb5cb6840ff8e16", "score": "0.5213581", "text": "def compute_frequency(self):\n self.word_frequencies = []\n for w in self.words:\n self.word_frequencies.append(self.word_counts[w])\n self.word_frequencies = np.array(self.word_frequencies, np.float32)\n self.word_frequencies /= np.sum(self.word_frequencies)\n self.word_frequencies = np.log(self.word_frequencies)\n self.word_frequencies -= np.max(self.word_frequencies)", "title": "" }, { "docid": "af4faaa484baabeb61755da6d020d2de", "score": "0.52128404", "text": "def compute_complexity_score(self):\n return math.exp(-self.__frequency)", "title": "" }, { "docid": "5bb12660fa4b7dccade0717ce21ac308", "score": "0.52118754", "text": "def frequency_type(self):\n return self._frequency_type", "title": "" }, { "docid": "5bb12660fa4b7dccade0717ce21ac308", "score": "0.52118754", "text": "def frequency_type(self):\n return self._frequency_type", "title": "" }, { "docid": "5bb12660fa4b7dccade0717ce21ac308", "score": "0.52118754", "text": "def frequency_type(self):\n return self._frequency_type", "title": "" }, { "docid": "5bb12660fa4b7dccade0717ce21ac308", "score": "0.52118754", "text": "def frequency_type(self):\n return self._frequency_type", "title": "" }, { "docid": "2d86fd7439c34b70e0ddb01fa40847d6", "score": "0.5205007", "text": "def find_interaction_frequencies(qubits, couplings, gates, method='optimize'):\n # connected subsets of qubits\n not_assigned_to_group = {qn for g in gates for qn in g}\n connected_groups = set()\n while len(not_assigned_to_group) != 0:\n # find a set of connected qubits (group)\n to_visit = {(next(iter(not_assigned_to_group)), None)}\n group = [] # all active qubits involved in the step and their neighbors\n while len(to_visit) != 0:\n qni, qn_from = to_visit.pop()\n group.append((qni, qn_from))\n for qnj in qubits[qni]['neighbors']:\n if qnj in not_assigned_to_group and qnj != qn_from:\n if qnj in group:\n raise InvalidParallelGatesError(f'Cycle: {group}')\n to_visit.add((qnj, qni))\n not_assigned_to_group -= {g[0] for g in group}\n connected_groups.add(tuple(group))\n\n # neighboring gate graphs\n gate_graphs = []\n inv_gate_graphs = []\n for group in connected_groups:\n unordered_gates = {tuple(sorted(g, key=lambda qn: qubits[qn]['wq'])) for\n g in group if frozenset(g) in gates}\n unordered_connections = {frozenset(g) for g in group if\n frozenset(g) not in gates and None not in g}\n gate_graph = {} # from low int. freq gates to high int. freq gates\n inv_gate_graph = {} # from high int. freq gates to low int. 
freq gates\n for g in unordered_gates:\n gate_graph[g] = set()\n inv_gate_graph[g] = set()\n for c in unordered_connections:\n if g[0] in c:\n qn = [qn for qn in c if qn != g[0]][0]\n for g2 in unordered_gates:\n if g2[0] == qn:\n raise InvalidParallelGatesError(\n f'Opposing gate directions: {g}, {g2}')\n if g2[1] == qn:\n gate_graph[g].add(g2)\n if g[1] in c:\n qn = [qn for qn in c if qn != g[1]][0]\n for g2 in unordered_gates:\n if g2[1] == qn:\n raise InvalidParallelGatesError(\n f'Opposing gate directions: {g}, {g2}')\n if g2[0] == qn:\n inv_gate_graph[g].add(g2)\n gate_graphs.append(gate_graph)\n inv_gate_graphs.append(inv_gate_graph)\n\n # optimize each gate graph\n gate_wints = {}\n if method == 'equal_spacing':\n # interactions to avoid:\n # for high-freq int. qubits: 2x anh below (1x anh above)\n # for low-freq int. qubits: 1x anh above (1x anh below)\n\n # strategy: start traversing gate graph from low to high, placing each\n # interaction frequency as low as possible and remember, how much\n # higher it could be placed, and in which layer this gate is\n # finally distribute the minimal slack equally\n for gate_graph, inv_gate_graph in zip(gate_graphs, inv_gate_graphs):\n layer = 0\n gate_layers_wints = {}\n next_gates = {g for g, conns in inv_gate_graph.items() if\n len(conns) == 0}\n while len(next_gates) != 0:\n new_next_gates = set()\n for g in next_gates:\n wintmin = qubits[g[0]]['wq']\n wintmax = qubits[g[1]]['wq'] + qubits[g[1]]['anh']\n # low qubit interactions with parked spectator qubits\n for qn in qubits[g[0]]['neighbors']:\n if qn != g[1]:\n wintmax = min(wintmax,\n qubits[qn]['wq'] + qubits[qn]['anh'])\n # high qubit interactions with parked spectator qubits\n for qn in qubits[g[1]]['neighbors']:\n if qn != g[0]:\n wintmin = max(wintmin,\n qubits[qn]['wq'] + qubits[g[1]][\n 'anh'])\n # interactions with neighboring gates\n for g2 in inv_gate_graph[g]:\n wintmin = max(wintmin,\n gate_layers_wints[g2][1] - qubits[g[1]][\n 'anh'])\n gate_layers_wints[g] = (layer, wintmin, wintmax)\n new_next_gates |= gate_graph[g]\n next_gates = new_next_gates\n layer += 1\n slack = min([(wintmax - wintmin) / (layer + 2) for\n g, (layer, wintmin, wintmax) in\n gate_layers_wints.items()])\n for g, (layer, wintmin, wintmax) in gate_layers_wints.items():\n gate_wints[frozenset(g)] = wintmin + (layer + 1) * slack\n elif method == 'optimize':\n gate_wints = find_interaction_frequencies(qubits, couplings, gates,\n method='equal_spacing')\n x0 = [gate_wints[g] for g in gates]\n\n def cost_func(x):\n fid = calculate_step_fidelity(qubits, couplings,\n dict(zip(gates, x)))\n return -np.sum(np.log([f for f in fid.values()]))\n\n x1 = sp.optimize.minimize(cost_func, x0).x\n x2 = sp.optimize.minimize(cost_func, x1).x\n gate_wints = dict(zip(gates, x2))\n else:\n raise ValueError(\n f\"Invalid method: {method}. 
Valid options are: \"\n \"['equal_spacing', 'optimize']\")\n return gate_wints", "title": "" }, { "docid": "6c45a0ae63b1d61acbd892a946fbc4bd", "score": "0.5200192", "text": "def get_freq_inst(self):\r\n return self._read('FREQ?')", "title": "" }, { "docid": "37a8341fce74b2152f685b5b819730d8", "score": "0.5199713", "text": "def _frequency(self,dt):\n Nt = self.TimeAxis.length\n return numpy.pi*numpy.fft.fftshift(\n numpy.fft.fftfreq(Nt,d=dt))", "title": "" }, { "docid": "7a4b19cb52714cc2beff727aa6c37b7d", "score": "0.51994985", "text": "def mode(dataset):\n modeSoFar = dataset[0]\n freqSoFar = dataset.count(modeSoFar)\n for item in dataset[1:]: #outer loop -> n\n # calling freq each time is n\n # if freq(dataset,item) > freqSoFar:\n if dataset.count(item) > freqSoFar:\n modeSoFar = item\n freqSoFar = dataset.count(item)\n return modeSoFar", "title": "" }, { "docid": "3a38798a190bb966884cf6d093886e44", "score": "0.5197774", "text": "def show_abolute_frequency(G):\n\n degree_list = nx.degree_histogram(G)\n max_degree = len(degree_list)\n x_axis = [i for i in range(max_degree)]\n plt.scatter(x_axis, degree_list)\n plt.show()", "title": "" }, { "docid": "5a3b034263abeb3d1fd814a1b4b0e7dc", "score": "0.51971436", "text": "def InitializeOscillatorFrequencyCalibration(self):\n return pynifgen.niFgen_InitializeOscillatorFrequencyCalibration(self.vi)", "title": "" }, { "docid": "3e81672deda066d8d8ac924eac50c3e6", "score": "0.51961803", "text": "def _sampler_frequency_dep(self):\n pass", "title": "" }, { "docid": "2f0d5c3179a63c4784f0ac4e209aac1e", "score": "0.51931715", "text": "def share_GA_female_offspring(self):\n return self.number_GA_female_offspring / self.number_female_offspring", "title": "" }, { "docid": "a39e510e4fba519ead1de764a92a5a6e", "score": "0.5184226", "text": "def frequency_axis(self,Mode=''):\r\n \r\n if Mode=='m' or Mode=='M':\r\n return self.np.round_(self.np.linspace(-self.sampling()/2,self.sampling()/2,self.total_width(Type='s')) ,9)\r\n else:\r\n return self.np.round_(self.np.linspace(0,self.sampling(),self.total_width(Type='s')) ,9)", "title": "" }, { "docid": "1ac8380d26195ead9fd6ffadb7946e48", "score": "0.5175958", "text": "def frequency_sampling(self):\n return self.__freq_sampling", "title": "" }, { "docid": "a651abe80b6ea2d6209d193f9b7b953e", "score": "0.5175829", "text": "def number_Ga_female_offspring(self):\n return self.simulation['Female Offspring Genotypes'][1]", "title": "" }, { "docid": "e5acc13f48c2f782c534ae563a0f04b7", "score": "0.51702577", "text": "def get_population_size_frequencies(self):\n self.population_size_frequencies = {\n key: {\n k: float(v) / float(len(val))\n for k, v in Counter(val).items()\n }\n for key, val in self.binned_population_sizes.items()\n }", "title": "" }, { "docid": "b4aa913e79a12810697864788f5c3130", "score": "0.5169322", "text": "def get_frequency(self, **kwargs):\n #self.resource.clear()\n channel = kwargs.get(\"channel\", self.active_channel)\n use_log = \"LOG\" in self.scpi.query_sweep_type(channel).upper()\n f_start = self.scpi.query_f_start(channel)\n f_stop = self.scpi.query_f_stop(channel)\n f_npoints = self.scpi.query_sweep_n_points(channel)\n if use_log:\n freq = np.logspace(np.log10(f_start), np.log10(f_stop), f_npoints)\n else:\n freq = np.linspace(f_start, f_stop, f_npoints)\n\n frequency = skrf.Frequency.from_f(freq, unit=\"Hz\")\n frequency.unit = kwargs.get(\"f_unit\", \"Hz\")\n return frequency", "title": "" }, { "docid": "40d83f8a8af46c770c904586059ada07", "score": "0.5163306", "text": "def freqstr(self):\r\n 
return self.offset.freqstr", "title": "" }, { "docid": "1b956344a033778d05532b4a669cd99a", "score": "0.5161708", "text": "def maxfreq(self):\n return self[-1][0]", "title": "" }, { "docid": "8d2e6f4709f618ecd7bd0fbd539b0e29", "score": "0.51587546", "text": "def outlier_c_freq(colData):\n freq = pd.Series.value_counts(colData)\n return len(outliers_freq(colData))", "title": "" }, { "docid": "4712b40a8de5557d17e06c8a620b1acf", "score": "0.5156295", "text": "def proxima_flare_freq(flare_energy):\n return 10**(-0.68 * np.log10(flare_energy) + 20.9) * u.d**-1", "title": "" }, { "docid": "b3d51bab6876cea7ac5ce53f533d057a", "score": "0.51534164", "text": "def get_frequencies(table):\n return get_field(table, 2)", "title": "" }, { "docid": "5a660a537fffe398813b34474b71cf78", "score": "0.5144785", "text": "def vec_evalfreq(self,times):\n return self._vec_eval(times,PolycoEntry.evalfreq)", "title": "" }, { "docid": "f44d786c4d3da2b24cd6a7c0438a722a", "score": "0.5136554", "text": "def get_ftype_counts(self):\r\n from pandas import Series\r\n return Series(self._data.get_ftype_counts())", "title": "" }, { "docid": "ea94ebf34df917c5d65ab8adbab27a42", "score": "0.5125892", "text": "def compute_freq(self, word_sent):\n freq = defaultdict(int)\n for item in word_sent:\n for word in item:\n if word not in self._stopwords:\n freq[word] += 1\n m = float(max(freq.values()))\n freq2 = freq.copy()\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq2[w]\n return freq2", "title": "" }, { "docid": "b040651f83811fdc3fb5f7a261cf7294", "score": "0.51256675", "text": "def FGRayCount(self) -> int:", "title": "" } ]
e71a262dc65258038ea4ad2436938f07
There may be only one possible choice for a particular unit
[ { "docid": "25b3f3d41f0543a51f51798fbfed6c97", "score": "0.63899994", "text": "def only_choice(grid):\n for unit in all_units:\n for d in '123456789':\n choices = [n for n in unit if d in grid[n]]\n if len(choices) == 1:\n grid = assign_value(grid, choices[0], d)\n return grid", "title": "" } ]
[ { "docid": "1982c5e15c66bae15bf1b7fa7414a2dc", "score": "0.6371527", "text": "def only_choice(values):\n # unitlist = self.rowUnits() + self.colUnits() + self.squareUnits()\n\n fillUpUnits(rowUnits(), values)\n fillUpUnits(colUnits(), values)\n fillUpUnits(squareUnits(), values)\n return values", "title": "" }, { "docid": "bc95d1b23cdfe1d754f75dd6189471ac", "score": "0.6233969", "text": "def validate_coin_choice(selection, unique_cans):\n if 0 < selection <= len(unique_cans):\n return True, unique_cans[selection - 1].name\n else:\n print(\"Not a valid selection\\n\")\n return False, None", "title": "" }, { "docid": "a9f2be1225a33cbd82873f3296349102", "score": "0.61921674", "text": "def test_choice(self):\n choice, target = self.hero.choice([self.hero], [self.hero])\n self.assertEqual(choice, \"ATK\")\n self.assertEqual(target, [self.hero])", "title": "" }, { "docid": "5453142ace7dedab208346fdd24b6863", "score": "0.6173981", "text": "def select_ability():\n abil = game.edit_state()[4]\n ability = game.edit_state()[1].abil_methods()[abil]\n squares = game.edit_state()[3]\n if game.edit_state()[1].get_side() == game.get_cur_side():\n Selections.error = ability(squares)\n game.edit_state()[0] = \"unit_selected\"\n game.edit_state()[2] = 0\n game.edit_state()[4] = -1\n game.edit_state()[3].clear()\n else:\n Selections.error = \"It is \" + game.get_cur_side().get_name() + \" to move.\"", "title": "" }, { "docid": "561be8573823fa8255d48ffe76e77c98", "score": "0.6172164", "text": "def unit_type_is_selected(self, obs, unit_type):\n if (len(obs.observation.single_select) > 0 and\n obs.observation.single_select[0].unit_type == unit_type):\n return True\n \n if (len(obs.observation.multi_select) > 0 and \n obs.observation.multi_select[0].unit_type == unit_type):\n return True\n \n return False", "title": "" }, { "docid": "526709cbaeaf111e742636f04b4de306", "score": "0.6065717", "text": "def select_unit(x, y):\n loc = Utils.loc_to_square(x, y)\n if loc is not None:\n unit = game.get_board().get_unit(loc[0], loc[1])\n if unit is not None:\n game.edit_state()[0] = \"unit_selected\"\n game.edit_state()[1] = unit\n game.edit_cur_abils().clear()\n for x in unit.abilities():\n game.edit_cur_abils().append(x)", "title": "" }, { "docid": "01fdba494462eddb905b05e20588b2c7", "score": "0.60414165", "text": "def only_choice(values):\n \n \n for unit in unitlist:\n for digit in '123456789':\n inboxlist=[box for box in unit if digit in values[box]]\n if len(inboxlist) == 1:\n assign_value(values, inboxlist[0], digit)\n \n return values", "title": "" }, { "docid": "5c9071cde73ca03a3334999a831db5f0", "score": "0.60342914", "text": "def only_choice(values):\n for unit in unitlist:\n for i in '123456789':\n choices = [key for key in unit if i in values[key]]\n if len(choices) == 1:\n values[choices[0]] = i\n return values", "title": "" }, { "docid": "8a83994de597b3f313fbadcb4b78681a", "score": "0.6021099", "text": "def only_choice(values):\n for unit in unit_list:\n for d in '123456789':\n possible_boxes = [b for b in unit if d in values[b]]\n if len(possible_boxes) == 1:\n print(possible_boxes)\n values[possible_boxes[0]] = d\n return values", "title": "" }, { "docid": "a427ccacc5791261308ab4ee2f70e943", "score": "0.59260786", "text": "def only_choice(values):\n # TODO: Copy your code from the classroom to complete this function\n \n # loop units, the complete rows, cols, 3x3 boxes, and diagonal\n for unit in unitlist:\n # loop 1-9, which is cols\n for d in cols:\n # get the boxes with the next digit d\n # d = 1, 2, 
3 etc\n # box = A1, A2, B1 etc\n # each unit have the nine boxes from 1-9 e.g. [A1, A2, A3, A4, A5, A6, A7, A8, A9]\n boxes_with_d = [box for box in unit if d in values[box]]\n \n # if the digit d is only appears inside one of the nine boxes in the current unit\n if(len(boxes_with_d) == 1):\n # then the only choice is to assign that box with the digit d\n box_target = boxes_with_d[0]\n original_value = values[box_target] + '' # +'' to make a copy of the str, not using ref\n values[box_target] = d\n \n # if the changed result duplicate digit on another unit, revert to original\n box_changed_units = units[box_target]\n if is_duplicate_digit_in_units(box_changed_units, values) == True:\n values[box_target] = original_value\n \n return values", "title": "" }, { "docid": "2c719d999970edc6c407785dc53b8dec", "score": "0.5907359", "text": "def only_choice(values):\n\n vals = [str(i) for i in range(1, 10)]\n\n for unit in unitlist:\n for v in vals:\n temp = []\n for u in unit:\n if v in values[u]:\n temp.append(u)\n if len(temp) == 1:\n values[temp[0]] = v\n\n return values", "title": "" }, { "docid": "446c818d0aa4b62fdfcb9cec4ff44c63", "score": "0.5854285", "text": "def sampling_question():\r\n # TODO: assign value to choice and factor\r\n choice = 1\r\n options = ['Gibbs','Metropolis-Hastings']\r\n factor = 1\r\n return options[choice], factor", "title": "" }, { "docid": "f84107e0187b354d4d4c07706a9333ff", "score": "0.58417183", "text": "def only_choice(values):\n for unit in unitlist:\n # For each unit, we loop all the boxes with a specific digit,\n # if there's only one box to contain this digit, then\n # we directly assign it to this box.\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n values = assign_value(values, dplaces[0], digit)\n return values", "title": "" }, { "docid": "68c16ee64ad3588cc27a80f0f0645008", "score": "0.583746", "text": "def only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n assign_value(values, dplaces[0], digit)\n return values", "title": "" }, { "docid": "a0c77bda58dddf46b11482f561a75d24", "score": "0.5819334", "text": "def only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n nboxes = [box for box in unit if digit in values[box]]\n if len(nboxes) == 1:\n values = assign_value(values, nboxes[0], digit)\n return values", "title": "" }, { "docid": "30bd4386b2e47ab622612d4c955d8815", "score": "0.5790909", "text": "def test_single_choice_with_a_weight_of_100(self):\n choice = \"mychoice\"\n result = Choice([100], [choice]).next()\n\n self.assertEqual(result, choice)", "title": "" }, { "docid": "e9415a31619ddbc6711c55014fb3dc88", "score": "0.5788229", "text": "def test_choice(self):\n\t\tchoice=random.choice(self.liste)\n\t\tself.assertIn('choice',self.liste)", "title": "" }, { "docid": "70818a550a4801ebd5d82d2e9ad2a0b0", "score": "0.57772964", "text": "def only_choice(values):\n for unit in unitlist:\n for digit in range(1,10):\n dplaces= [ box for box in unit if str(digit) in values[box]]\n if len(dplaces)==1:\n #values[dplaces[0]]=str(digit)\n values=assign_value(values,dplaces[0],str(digit))\n return values", "title": "" }, { "docid": "e9d0d339023d56479af77437d6a20a86", "score": "0.576694", "text": "def check_unit(number):\r\n # check if the values is greater than one.\r\n if(number > 1):\r\n # return the unit with \"s\"\r\n return \"square meters\"\r\n else:\r\n # otherwise return 
unit without \"s\"\r\n return \"square meter\"", "title": "" }, { "docid": "c1e5bbb774db70b03a13b642aa15e0be", "score": "0.576416", "text": "def test_single_choice(self):\n lookup = build_choice_weights_lookup([('one', 'One')], max_weight=3)\n\n assert lookup['one'] == 3", "title": "" }, { "docid": "86a6b7640c13f6f76d6515184721c8aa", "score": "0.5763516", "text": "def only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n boxes=[]\n for box in unit:\n if digit in values[box]:\n boxes.append(box)\n if len(boxes)==1:\n assign_value(values,boxes[0], digit) \n return values", "title": "" }, { "docid": "b62e9349113d7dd0cfaa438cf78d4bed", "score": "0.5733472", "text": "def validate_coin_selection(selection):\n switcher = {\n 1: (True, \"Quarter\"),\n 2: (True, \"Dime\"),\n 3: (True, \"Nickel\"),\n 4: (True, \"Penny\"),\n 5: (True, \"Done\")\n }\n return switcher.get(selection, (False, None))", "title": "" }, { "docid": "1a535c1a42c81b690c3036dc937c1939", "score": "0.57244915", "text": "def only_choice(values):\n # TODO: Implement only choice strategy here\n for unit in unitlist:\n for digit in '123456789':\n matchedKeys = [box for box in unit if digit in values[box]]\n if len(matchedKeys) == 1:\n values[matchedKeys[0]] = digit\n\n return values", "title": "" }, { "docid": "c3f61c5891295a26692461a9b27470e9", "score": "0.5715259", "text": "def only_choice(values):\n for unit in puzzle.unitlist:\n candidates_count = {}\n # count occurence of all candidate values in all boxes in a particular unit\n for candidate in ''.join([values[box] for box in unit]):\n if candidate in candidates_count:\n candidates_count[candidate] += 1\n else:\n candidates_count[candidate] = 1\n for box in unit:\n for candidate in values[box]:\n # if a particular candidate value occurs only once in an unit, assign it to the box\n if candidates_count[candidate] == 1:\n assign_value(values, box, candidate)\n break\n return values", "title": "" }, { "docid": "29ee2e8fdd2708634b7ef21031ad3538", "score": "0.57063806", "text": "def test_no_choices(self):\n result = Choice([5], []).next()\n\n self.assertIsNone(result)", "title": "" }, { "docid": "afef459f73c14388337361af0864fcca", "score": "0.5688323", "text": "def getValidChoice(self):\n choice = input('')\n while True:\n if choice in ['0', '1']:\n return choice\n else: \n choice = input(\"'{}' is an invalid input. 
Please enter a '0' or a '1': \".format(choice))", "title": "" }, { "docid": "25ae97c38877f322760fabc41440f703", "score": "0.56880724", "text": "def _single_choice(self):\n chosen_license_plate = self._get_license_plate()\n \n rent_type = self._get_rent_type()\n factor = self._get_rent_factor(rent_type)\n \n rent_cost = self.system.rent(\n chosen_license_plate, self.rent_types_dict[rent_type], factor\n )\n\n return chosen_license_plate, rent_cost", "title": "" }, { "docid": "fbfe22b0cbec5bb6b5b3ddc6e8b7879e", "score": "0.56495225", "text": "def choose(self):\n raise NotImplemented", "title": "" }, { "docid": "f561b402597b0695e138f34f494567c4", "score": "0.56405264", "text": "def there_is_no_choice(self):\n if self.min >= len(self.options):\n return True\n if not self.options:\n return True\n return False", "title": "" }, { "docid": "8fe0a3c6c2f18c9288895cbadc188e61", "score": "0.56161743", "text": "def only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n values[dplaces[0]] = digit\n\n return values", "title": "" }, { "docid": "cef182d1fe251690af61863e5debec79", "score": "0.5610618", "text": "def only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n values[dplaces[0]] = digit\n return values", "title": "" }, { "docid": "20fb0f90cce777f657181ee60cdfd28f", "score": "0.55756027", "text": "def refresh_unit_selection(self):\n selection = self._lb_units.GetSelection()\n if selection != -1:\n unit_id, unit_symbol = self._base_unit_data[selection]\n\n if unit_symbol in self._unit_table:\n self._spn_power.SetValue(self._unit_table[unit_symbol])\n else:\n self._spn_power.SetValue(0)\n\n else:\n self._spn_power.SetValue(0)", "title": "" }, { "docid": "b782ed5f998249bb2bb4a93e4e6a5451", "score": "0.5540143", "text": "def only_choice(values):\n \n n_unsolved_boxes_before = len([box for box in values if len(values[box]) > 1])\n \n # loop through each unit and see if any possible solution is unique to the box\n for units in unitlist:\n for digit in '123456789':\n # For each digit, save the boxes that contain it as possible solution\n choices = [box for box in units if digit in values[box]]\n\n # If there is only one box in the space that contain the digit, assign it as solution\n if len(choices) == 1:\n values = assign_value(values, choices[0], digit)\n \n n_unsolved_boxes_after = len([box for box in values if len(values[box]) > 1])\n print('only_choice solved {0} boxes'.format(n_unsolved_boxes_before - n_unsolved_boxes_after))\n \n return values", "title": "" }, { "docid": "bb26677be43746178f9608170ec8e9bb", "score": "0.5538482", "text": "def test_single_choice(self):\n\n inq = add_inquiry(\"test inq\")\n q1 = add_quest(\"test?\", inq=inq)\n\n choice = add_choice(text=\"Are you even test?\", quest=q1)\n with self.client:\n response = self.client.get(f'choice/{choice.id}')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue('created_at' in data['data'])\n self.assertIn('Are you even test?', data['data']['text'])\n self.assertIn('success', data['status'])", "title": "" }, { "docid": "5012a7a6304c1776f0a5a4533e999bd0", "score": "0.55328214", "text": "def chk_units(self):\n print(\"chk_units :: \",)\n dct = self.var_dict\n for key in dct.keys():\n x = dct.get(key)\n if not is_number(x[-1]):\n print(\"WARNING: Units used for\", key, \"are not default, 
but\", x[-1])\n print(\"Done.\")", "title": "" }, { "docid": "95a53431bacc762eeb07e0d5e1488634", "score": "0.55263394", "text": "def chk_units(self):\n print(\"chk_units :: \")\n dct = self.var_dict\n for key in dct.keys():\n x = dct.get(key)\n if not is_number(x[-1]):\n print(\"WARNING: Units used for\", key, \"are not default, but\", x[-1])\n print(\"Done.\")", "title": "" }, { "docid": "b41c08d7b118d71e9ae818f65aa27e7c", "score": "0.55107486", "text": "def test_no_weights(self):\n result = Choice([], [\"a\"]).next()\n\n self.assertIsNone(result)", "title": "" }, { "docid": "0caa2af4ac2a4ed3c2de696a003eac4c", "score": "0.54773456", "text": "def only_choice(values):\n new_values = values.copy()\n for unit in diag_unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in new_values[box]]\n if len(dplaces) == 1:\n new_values[dplaces[0]] = digit\n assign_value(new_values, dplaces[0], digit)\n return new_values", "title": "" }, { "docid": "6cde15ed5ae62ad5da2add80612670f0", "score": "0.5460747", "text": "def choose(multichoice, question, correct):\n counter = 1\n ncorrect = 0\n allowed = '12345'\n print(\"choose a synonym for \"+question)\n for option in multichoice.values():\n print(str(counter)+\")\"+option)\n if option == correct:\n ncorrect = counter\n counter = counter + 1\n res = raw_input(\">\")\n while (len(res) != 1 or res not in allowed):\n \tres = raw_input(\">\")\n #return res\n if int(res) == ncorrect:\n print(\"CORRECT!\")\n return 1\n else:\n print(\"\\n >>>>>> The answer is actually -- \" + correct)\n\tprint \n return 0", "title": "" }, { "docid": "6cde15ed5ae62ad5da2add80612670f0", "score": "0.5460747", "text": "def choose(multichoice, question, correct):\n counter = 1\n ncorrect = 0\n allowed = '12345'\n print(\"choose a synonym for \"+question)\n for option in multichoice.values():\n print(str(counter)+\")\"+option)\n if option == correct:\n ncorrect = counter\n counter = counter + 1\n res = raw_input(\">\")\n while (len(res) != 1 or res not in allowed):\n \tres = raw_input(\">\")\n #return res\n if int(res) == ncorrect:\n print(\"CORRECT!\")\n return 1\n else:\n print(\"\\n >>>>>> The answer is actually -- \" + correct)\n\tprint \n return 0", "title": "" }, { "docid": "8e821071940105ab11a3b7d794a3247f", "score": "0.54104745", "text": "def verify_unit(quantity, unit):\n if not isinstance(unit, UnitBase):\n unit = Unit(unit)\n\n q = quantity * u.one\n if unit.is_equivalent(q.unit):\n return q\n else:\n raise ValueError(\n \"Unit '{}' not equivalent to quantity '{}'.\".format(unit, quantity))", "title": "" }, { "docid": "931f15df5309513bd979fc8ff59a3e12", "score": "0.5391006", "text": "def sample_choice(self) -> Any:\n return random.choice(self.choices)", "title": "" }, { "docid": "7584631171d48081f16c8a5f1ffb6577", "score": "0.537412", "text": "def get_option(self):\n while True:\n choice = input(\"> \")\n for option in self.options:\n if choice == option[0]:\n return choice\n print(\"Sorry, you must choose a valid option\")", "title": "" }, { "docid": "06acfba8202580a40fcc2042ba87c118", "score": "0.5372937", "text": "def option_one():\n typing(\"You exit the room through the large wooden door...\\n\", 0.01)\n typing(\"You notice the cave goes in two different directions!\\n\", 0.01)\n # call two_choice function.\n two_choice_option(\n \"Do you go left or right? 
(left/right): \\n\",\n \"left\", \"right\", option_two, option_three)", "title": "" }, { "docid": "869f88a321094abdd422b9630f98ae7c", "score": "0.53381836", "text": "def upgrade_unit(type_of_improvement):\n\n if type_of_improvement == regeneration :\n\n # Check conditions\n if condition_upgrade() == True :\n\n # Upgrade regeneration rate of the hub\n info_player_1['hub']['regeneration_rate'] += 5\n\n # Payement\n info_player_1['hub']['energy'] -= 750\n\n else :\n print('upgrade of regeneration is not possible bc not enough ressources')\n\n elif type_of_improvement == storage :\n\n # Check conditions\n if condition_upgrade() == True :\n\n # Upgrade storage of tankers\n for tanker in info_player_1['tankers'] :\n\n info_player_1['tankers'][tanker]['capacity'] += 100\n \n # Payement\n info_player_1['hub']['energy'] -= 600\n\n else :\n print('upgrade of regeneration is not possible bc not enough ressources')\n\n\n elif type_of_improvement == range :\n\n # Check conditions\n if condition_upgrade() == True :\n\n # Upgrade shooting range of cruisers\n for cruiser in info_player_1['cruisers'] :\n\n info_player_1['cruisers'][cruiser]['shooting_range'] += 1 \n\n # Payement\n info_player_1['hub']['energy'] -= 400\n\n else :\n print('upgrade of regeneration is not possible bc not enough ressources')\n\n\n elif type_of_improvement == move :\n\n # Check conditions\n if condition_upgrade() == True :\n\n # Diminuates move price of cruisers\n for cruiser in info_player_1['cruisers'] :\n\n info_player_1['cruisers'][cruiser]['move_price'] -= 1 \n\n # Payement\n info_player_1['hub']['energy'] -= 500\n\n else :\n print('upgrade of regeneration is not possible bc not enough ressources')\n\n else :\n print('there must be an error when you typed your orders')\n\n return info_player_1", "title": "" }, { "docid": "dd07d97ad7d0435d45ac1c76dc94ec00", "score": "0.53185123", "text": "def arm_choice(self, statistics, context):\n raise NotImplementedError()", "title": "" }, { "docid": "897b191ebf2658ba7eb601a654457ace", "score": "0.52935493", "text": "def min_choice(self) -> Any:\n return self.choices[0]", "title": "" }, { "docid": "8f3bb3f0e724a6eaae73493571b2ec2a", "score": "0.5287061", "text": "async def choose(self, ctx, *choices: commands.clean_content):\n if len(choices) < 2:\n return await ctx.send('Not enough choices to pick from.')\n\n await ctx.send(rng.choice(choices))", "title": "" }, { "docid": "eead36d03e1f9c094f8902f684c589ae", "score": "0.5286079", "text": "def user_selection():\n error_code: int = 0\n choice_text = \"\"\n for x in choices:\n if x.lower() != \"off\" and x.lower() != \"report\":\n if choice_text == \"\":\n choice_text += x\n else:\n choice_text += \" / \" + x\n choice = input(f\"What would you like? 
({choice_text}): \").lower()\n if choice in choices:\n return choices.index(choice)\n else:\n error_code = 1\n print_error(error_code)\n return \"\"", "title": "" }, { "docid": "6bcf7a363413f268e9a60e85cab2c713", "score": "0.5283423", "text": "def test_building_built_from_tech_has_no_cost(self):\n player = Player.objects.first()\n initial_um = player.resources.money\n tech = player.technologies.select_subclasses().get(class_index=1)\n tech.develop()\n self.assertEqual(Player.objects.first().resources.money, initial_um)", "title": "" }, { "docid": "5010fe10ad1f33fca09928e02056b718", "score": "0.52726614", "text": "def testIllegal(self):\n\n\t\tsg = self.obtain_device()\n\n\t\tsg.write('unit:power v')\n\n\t\ttry:\n\t\t\tsg.power = Quantity(9.001, 'V')\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tassert False, 'Expected ValueError'\n\n\t\ttry:\n\t\t\tsg.frequency = Quantity(0, 'Hz')\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tassert False, 'Expected ValueError'", "title": "" }, { "docid": "cd8dc92b3b1406193dc3a1172d1b31ff", "score": "0.52655613", "text": "def test_choice():\n assert isinstance(random.choice(Deck()), Card)", "title": "" }, { "docid": "478ac409579218c2ff4286346b88199a", "score": "0.5262635", "text": "def menu_choice():\r\n choice = input('Which operation would you like to perform? [l, a, i, d, s or x]: ').lower().strip()\r\n while choice not in ['l', 'a', 'i', 'd', 's', 'x']:\r\n print(\"'{} is not a valid operation\".format(choice))\r\n choice = input('Which operation would you like to perform? [l, a, i, d, s or x]: ').lower().strip()\r\n print() # Add extra line for layout\r\n return choice", "title": "" }, { "docid": "30d2db059b780982d6e2af435dda1094", "score": "0.5248767", "text": "def unitchanger(unit):\n if unit is None:\n return None\n lowerunit = unit.lower()\n if lowerunit in unitdict:\n return unitdict[lowerunit]\n return unit", "title": "" }, { "docid": "28f5b1424a76eecf353e8adda06b57d2", "score": "0.52473396", "text": "def check_for_cpu_player(self): \n if self.player_2.choice == None:\n self.generate_cpu_player(self.player_2)", "title": "" }, { "docid": "b7cc1dc5a5b2b0219d30dddeab9d6f3a", "score": "0.5245918", "text": "def question6():\n\n return NOT_POSSIBLE", "title": "" }, { "docid": "8dddbdaae42abcee03faaf417dc16846", "score": "0.5242147", "text": "def one_try():\n\n items = [0, 1, 2]\n random.shuffle(items)\n\n correctitem = random.choice(items)\n selection = random.choice(items)\n\n return selection == correctitem", "title": "" }, { "docid": "52bbfeb7d2fa7a5816100c6b50bbcbe8", "score": "0.523978", "text": "def select_general(method, x, y, error, success):\n if game.edit_state()[1].get_side() == game.get_cur_side():\n worked = method(game.edit_state()[1], x, y)\n if worked:\n if method == game.get_board().move_unit:\n game.next_turn()\n game.edit_state()[0] = \"select_unit\"\n game.edit_state()[1] = None\n game.edit_cur_abils().clear()\n Selections.error = success\n else:\n Selections.error = \"That \" + error + \" is invalid.\"\n else:\n Selections.error = \"It is \" + game.get_cur_side().get_name() + \" to move.\"", "title": "" }, { "docid": "fd2aaf4436a08157276455e9ef220f08", "score": "0.52372605", "text": "def test_no_choices(self):\n self.chatbot.storage.create(text='Random')\n\n statement = Statement(text='What is your quest?')\n response = self.adapter.process(statement)\n\n self.assertEqual(response.text, 'Random')\n self.assertEqual(response.confidence, 0)", "title": "" }, { "docid": "62af4cb994f01038cff17ece05523ef6", "score": 
"0.522309", "text": "def get_game_menu_option_from_strem(self):\n is_incorrect = True\n while is_incorrect:\n game_option_number = input('Select option [1-3]: ')\n if game_option_number.isdigit():\n game_option_number = int(game_option_number)\n if game_option_number in [1, 2, 3]:\n is_incorrect = False\n else:\n print('Option number must by from 1 to 3.')\n self.print_line()\n else:\n print('Option number must by an integer number.')\n self.print_line()\n self.print_line()\n return game_option_number", "title": "" }, { "docid": "b2c916a562c7d7948fab48a5e0bc7a85", "score": "0.5221634", "text": "def starter_level():\n choices=['easy','medium','hard']\n level_choice=raw_input('''Please select a level of difficulty \n (easy, medium or hard): ''')\n level_choice=level_choice.lower().strip(' ')\n while level_choice not in choices:\n level_choice=raw_input('''Sorry you have to input easy, \n medium or hard: ''')\n print \"You've chosen \"+level_choice\n if level_choice=='easy':\n level=0\n elif level_choice=='medium':\n level=1\n elif level_choice=='hard':\n level=2\n return level", "title": "" }, { "docid": "e55845335ce4954624c953927a9629dd", "score": "0.5221544", "text": "def is_busy():\n return random.choice(choices)", "title": "" }, { "docid": "e55845335ce4954624c953927a9629dd", "score": "0.5221544", "text": "def is_busy():\n return random.choice(choices)", "title": "" }, { "docid": "37ece3ada99d7819fd2c97bc86c4a4f5", "score": "0.5219427", "text": "def __checkMultiChoice( self, varName, choices ):\n initialVal = False\n if varName not in self.__manifest:\n return S_OK()\n else:\n varValue = self.__manifest[ varName ]\n initialVal = varValue\n choices = self.__getCSValue( \"Choices%s\" % varName , choices )\n for v in List.fromChar( varValue ):\n if v not in choices:\n return S_ERROR( \"%s is not a valid value for %s\" % ( v, varName ) )\n if initialVal != varValue:\n self.__manifest.setOption( varName, varValue )\n return S_OK( varValue )", "title": "" }, { "docid": "91a10b87bd12dd7a0467ea5de2d0ce7b", "score": "0.5216008", "text": "def testCombinedSpec(self):\n self.assertEqual(self.combo.rate,25*units['Bq/kg'])", "title": "" }, { "docid": "d6bf19c0df2787c170b2c2b60e14b7c0", "score": "0.5214284", "text": "def test_blank(self):\n\n unit = Unit()\n\n assert unit.desiredState == 'launched'\n assert unit.options == []", "title": "" }, { "docid": "80a816ffb2f42eeda39d4f4c1d260a5b", "score": "0.5209848", "text": "def check_ressources_ok(choice):\n if resources[\"water\"] <= MENU[choice][\"ingredients\"][\"water\"]:\n print(\"Sorry, there is not enought water\")\n return False\n elif resources[\"coffee\"] <= MENU[choice][\"ingredients\"][\"coffee\"]:\n print(\"Sorry, there is not enought coffee\")\n return False\n elif choice != \"espresso\":\n if resources[\"milk\"] <= MENU[choice][\"ingredients\"][\"milk\"]:\n print(\"Sorry, there is not enought milk\")\n return False\n else:\n return True\n else:\n return True", "title": "" }, { "docid": "40edb8bba004814fc18f3003daeb62c1", "score": "0.52086824", "text": "def get_setting(question, variants, error):\n while True:\n try:\n result = int(input(question))\n except ValueError:\n result = -1\n if variants + 1 > result > 0:\n return result\n else:\n print(error)", "title": "" }, { "docid": "0d0d5da4494fb77faa0bc3fa12c980ff", "score": "0.52070814", "text": "def time_draw(self, unit_type):\n #First draw a station\n station_num = np.random.choice(self.station_list, p=self.first_due_probs[unit_type])\n\n #Give time a default value of nan\n time = np.nan\n\n 
#Then draw how many units are currently required in area\n num_req = np.array(self.station_dict[station_num].num_required[unit_type].index)\n p = np.array(self.station_dict[station_num].num_required[unit_type])\n num = int(np.random.choice(num_req, p=p))\n\n #If the first due has a unit not responding to an incident, draw whether it is sent\n num_units = self.unit_counts[self.unit_counts['station'] == station_num][unit_type].values[0]\n if num <= num_units:\n p = self.station_dict[station_num].send_given_available[unit_type]\n sends_unit = np.random.binomial(1,p)\n\n #If it sends a unit, draw from relevant distribution\n if sends_unit == 1:\n time = np.random.choice(self.station_dict[station_num].sent_full[unit_type])\n #If a time has not yet been assigned, that means a unit wasn't sent\n if np.isnan(time):\n time = np.random.choice(self.station_dict[station_num].not_sent_full[unit_type])\n\n return time", "title": "" }, { "docid": "1dd0f8c1370323ba1c05fe233f4ed3af", "score": "0.52015364", "text": "def _set_in_units(self, units, no_act=False):\n # First check we have something to do\n current_units = self.get_units()\n if units is current_units:\n return\n # Next, check if the required units can be achieved by suitable choices for operand units\n done = False\n if units in current_units:\n # They can!\n if not no_act:\n self._cml_units = units\n for src_units_set, src_units in current_units._get_sources(units):\n expr = src_units_set.get_expression()\n self._set_element_in_units(expr, src_units, no_act)\n done = True\n if not done and not no_act:\n # Some operators need this to be a UnitsSet\n self._cml_units = UnitsSet([units], self)\n if not done:\n # The behaviour now depends on the operator\n op = self.operator()\n if hasattr(op, '_set_in_units') and callable(op._set_in_units):\n op._set_in_units(units, no_act)\n else:\n raise UnitsError(self, u' '.join([\n \"Don't know how to select units for operands of operator\",\n op.localName, \"when its units are\", units.description()]))", "title": "" }, { "docid": "a9e82961cc0062840103f820161f30ff", "score": "0.5197733", "text": "def choose_difficulty(self):\n print(\"Enter difficulty: (any/easy/medium/hard)\")\n self.difficulty = input(\">> \")\n if self.difficulty in [\"any\", \"easy\", \"medium\", \"hard\"]:\n print(\"Difficulty saved.\")\n else:\n print(\"Invalid difficulty.\")\n exit()", "title": "" }, { "docid": "0976690fa0f9b5ad49d73342fba10190", "score": "0.51873386", "text": "def choosing_categorie(self):\n cntnue = True\n while cntnue:\n z.allquestions[1].play_question()\n choice = input()\n if z.allquestions[1].check_if_choice_is_valable(choice):\n cntnue = False\n if int(choice) < len(z.allquestions[1].propositions):\n c.choosing_food(z.allquestions[1].propositions[int(choice) - 1])", "title": "" }, { "docid": "a8fedf2727e6ae74bef6c29371bb9425", "score": "0.5182728", "text": "def validator(current, pick, choices):\n while True:\n val = pick(choices)\n if not val == current:\n return val", "title": "" }, { "docid": "83b19eb1b5a9ce61c3f9afad000ead89", "score": "0.518005", "text": "def choose_unit(array):\n\n max_abs = np.max(np.abs(array))\n\n if 2e0 < max_abs <= 2e3:\n factor = 1.0\n unit = ''\n elif 2e-12 < max_abs <= 2e-9:\n factor = 1.0e12\n unit = 'p'\n elif 2e-9 < max_abs <= 2e-6:\n factor = 1.0e9\n unit = 'n'\n elif 2e-6 < max_abs <= 2e-3:\n factor = 1.0e6\n unit = r'\\mu'\n elif 2e-3 < max_abs <= 2e0:\n factor = 1.0e3\n unit = 'm'\n elif 2e3 < max_abs <= 2e6:\n factor = 1.0e-3\n unit = 'k'\n elif 2e6 < max_abs <= 2e9:\n 
factor = 1.0e-6\n unit = 'M'\n elif 2e9 < max_abs <= 2e12:\n factor = 1.0e-6\n unit = 'G'\n else:\n factor = 1.0\n unit = ' '\n\n return factor, unit", "title": "" }, { "docid": "0b55f526b4aa50545b0dfc2c8e1151d0", "score": "0.51766765", "text": "def _set_in_units(self, desired_units, no_act=False):\n app = self.xml_parent\n min_factor, best_factor = None, None\n least_units, best_units = None, None\n desired_factor = desired_units.expand().get_multiplicative_factor()\n DEBUG('validator', '>',self.localName,':',desired_factor,\n desired_units.description())\n units_set = app.get_units().get_consistent_set(desired_units)\n for possible_units in units_set:\n f = possible_units.expand().get_multiplicative_factor()\n if min_factor is None or f<min_factor:\n least_units, min_factor = possible_units, f\n if f >= desired_factor and (best_factor is None or f < best_factor):\n best_units, best_factor = possible_units, f\n if best_units is None:\n # All factors were less than that desired, so just go with the least\n units = least_units\n else:\n units = best_units\n DEBUG('validator', '\\t<-',\n units.expand().get_multiplicative_factor(),\n units.description())\n # Add units conversion code\n app._add_units_conversion(app, units, desired_units, no_act)\n # Set the operand units\n for src_units_set, src_units in app.get_units()._get_sources(units):\n expr = src_units_set.get_expression()\n DEBUG('validator', '#',self.localName,':',\n src_units.description(),expr.localName)\n self._set_element_in_units(expr, src_units, no_act)\n # Record which units we used\n if not no_act:\n app._cml_units = units\n return", "title": "" }, { "docid": "9fb12930a87e0a3e8efad1bf4f44ff57", "score": "0.5176574", "text": "def change_choice(choice, hint):\r\n return 3 - choice - hint", "title": "" }, { "docid": "e7298f4176c29e054a69fd1a1b7feb7a", "score": "0.51755935", "text": "def is_valid_unit(unit: str, unit_type: str) -> bool:\n if unit_type == LENGTH:\n units = LENGTH_UNITS\n elif unit_type == TEMPERATURE:\n units = TEMPERATURE_UNITS\n elif unit_type == MASS:\n units = MASS_UNITS\n elif unit_type == VOLUME:\n units = VOLUME_UNITS\n elif unit_type == PRESSURE:\n units = PRESSURE_UNITS\n else:\n return False\n\n return unit in units", "title": "" }, { "docid": "c0274db5320f882cb66d544b41c11c16", "score": "0.51711714", "text": "def can_buy_unit(self):\n if self.get_current_population() < self.population_limit:\n return True\n return False", "title": "" }, { "docid": "d8ae842d315a52cec623dc71ee18ebc3", "score": "0.5164196", "text": "def test_multiple_choices_with_weights_of_50(self):\n choices = [\"mychoice\", \"mychoice2\"]\n\n done = {}\n\n for _ in xrange(1000):\n result = Choice([50, 50], choices).next()\n\n self.assertIn(result, choices)\n\n done[result] = True\n\n if len(done) == 2:\n return\n\n print done\n raise Exception(\"Never hit 2 choices. 
This really shouldn't happen.\")", "title": "" }, { "docid": "0c7ff64c75cac4a570f3d4f36313d0fe", "score": "0.51630443", "text": "def validateChoice(self, choice):\n self.okbutton.setEnabled(not choice == \"\")", "title": "" }, { "docid": "c679ecd1e3d7d1cb706d2172fbc8c936", "score": "0.51578987", "text": "def any():\n all_answers = Answer.POSITIVE + Answer.NEGATIVE\n return choices(all_answers)[0]", "title": "" }, { "docid": "1016187718f07d4ff959355c87b47e1a", "score": "0.51530206", "text": "def test_type1(self):\n result = TEST.ask(question=\"Convert 12 inch to feet!\")\n self.assertEqual(result, 1)", "title": "" }, { "docid": "0023f23be7aa0b4a8b9300b2b105d942", "score": "0.51503915", "text": "def check_avail_menu():\n print('AVAILABILITY MENU:')\n print(\"----------------------------------------\")\n print(\"Choose one of the following options?\")\n print(\" i) Individual\")\n print(\" l) List\")\n print(\" m) Main menu\")\n print(\"----------------------------------------\")\n choice = input(\"Choice: \")\n print()\n if choice.lower() in ['i','l','m']:\n return choice.lower()\n else:\n print(choice +\"?\")\n print(\"Invalid option\")\n Time.sleep(3)\n return None", "title": "" }, { "docid": "3fb3cbc9e0ae93495085a926ebf91f08", "score": "0.51406455", "text": "def choose(self):\n prob = self.generator.num\n if prob < 0.833:\n return 'disk'\n else:\n return 'user'", "title": "" }, { "docid": "ee18cc03705c6cf3de1fa8740c114aca", "score": "0.513973", "text": "def Weighted_Choice(choices): #random selecting from a ictionary of {choice, weight} \r\n total = sum(w for c, w in choices.iteritems()) #get teh total value for all the choices\r\n \r\n if total != 1: #check that the total choice is 1\r\n msg = 'The total value for all the choices equals %.2f not one'%total\r\n print msg, logger.critical(msg)\r\n \r\n seed = random.uniform(0, total) #pick a seed between teh total of all choices and zero\r\n upto = 0 #start at zero\r\n for c, w in choices.iteritems(): #icnrement through each choice\r\n if upto + w >= seed: #pick this choice if the seed falls within the increment\r\n return c\r\n else:\r\n upto += w #add this weight to the increment\r\n assert False, \"Shouldn't get here\"", "title": "" }, { "docid": "7599c82bcbf9f11db6c32fde01a8ca5f", "score": "0.51381826", "text": "def test_more_than_one_turn(self):\n actual = self.controller.turn_validator(\"2t\")\n self.assertFalse(actual)", "title": "" }, { "docid": "24400f09c6e306c8e5249cf4679e9f24", "score": "0.513745", "text": "def any_of(self, *choices):\n assert self._random is not None\n return self._random.choice(choices)", "title": "" }, { "docid": "3ae1be6c99e1f285fc4d512b10cb4bc0", "score": "0.5126859", "text": "def substitu_input(self):\n print(Color.HEADER)\n choice_product = input(\"Selectionner l'id d'un produit : \")\n try:\n choice_product = int(choice_product)\n is_exist = self.substitu_exist(choice_product)\n if is_exist is True:\n self.substitu_item(choice_product)\n else:\n self.substitu_menu()\n except ValueError:\n self.substitu_menu()", "title": "" }, { "docid": "fa6c9992bfdcf1dddeb658333ae9bb79", "score": "0.51163626", "text": "def yes(specifier=0):\n if specifier:\n if isinstance(specifier, int):\n choice = random.randint(1, specifier)\n\n else:\n if specifier < 1.0:\n specifier *= 100.0\n\n specifier = int(specifier)\n x = random.randint(0, 100)\n choice = 1 if x <= specifier else 0\n\n else:\n choice = random.choice([0, 1])\n\n return choice", "title": "" }, { "docid": "52561cacd5282a16ca02623c593ca706", "score": "0.5113365", 
"text": "def getPolicy(self, state):\n \"*** YOUR CODE HERE ***\"\n best_value = self.getValue(state)\n best_actions = [action for action in self.getLegalActions(state) \\\n if self.getQValue(state, action) == best_value]\n \n if not len(best_actions): return None\n else: return random.choice(best_actions)", "title": "" }, { "docid": "452860b0bfe0f66b41b30dcec9c0acbe", "score": "0.5113203", "text": "def get_menu_option():\n while True:\n try:\n option = int(input('Sua opção: '))\n except ValueError:\n print('Valor inválido, tente novamente.')\n else:\n if not 1 <= option <= 3:\n print('Opção inválida. Utilize apenas 1, 2 ou 3.')\n else:\n return option", "title": "" }, { "docid": "24bfc26eb23ceedd4db60bc1a1dd9624", "score": "0.51127285", "text": "def aiturn():\r\n global aichoice\r\n ch = [\"S\", \"W\", \"G\"]\r\n aichoice = (random.choice(ch))\r\n return aichoice", "title": "" }, { "docid": "fbe677d614516bbba915392a8cea2082", "score": "0.51046425", "text": "def show_unit(value):\r\n if value in Stock.UNITS:\r\n return Stock.UNITS[value]\r\n else:\r\n return ''", "title": "" }, { "docid": "89f0b6d1883da21c952f6ef7d8d1bfb7", "score": "0.51026624", "text": "def soda_selection(inventory):\n validated_user_selection = (None, None)\n soda_options = get_unique_can_names(inventory)\n while validated_user_selection[0] is False:\n print(\"Please choose from the following options:\")\n i = 1\n for can in soda_options:\n # Bug: Missing f at the beginning of the quotation\n print(f\"\\n\\tEnter -{i}- for {can} : ${can.price}\")\n # Had i++ instead of i += 1, probably confused languages\n i += 1\n user_selection = try_parse_int(input(\"Selection:\"))\n validated_user_selection = validate_coin_choice(user_selection, soda_options)\n return validated_user_selection[1]", "title": "" }, { "docid": "bef983227c1073c38160702f7013d060", "score": "0.510131", "text": "def is_choice_proper(which: str) -> bool:\n if not which.isdigit():\n print(f\"{which} need to be number of record!\")\n return False\n which = int(which)\n if which < 1 or which > len(phones_list):\n print(f\"{which} need to be number of record!\")\n return False\n return True", "title": "" }, { "docid": "ac9d8dc119ff1d203a76af06834964dc", "score": "0.50999993", "text": "def chooseAction(self, gameState):\n # Pick Action\n legalActions = gameState.getLegalActions(self.index)\n action = None\n \"*** YOUR CODE HERE ***\"\n if(len(legalActions)<1):\n return None\n #Flip a coin with probability epsilon, do some random action if it comes up true\n if(util.flipCoin(self.explorationRate)):\n #print('random')\n return random.choice(legalActions)\n #else return the action with the highest q value\n choiceaction =self.computeActionFromQValues(gameState)\n return choiceaction", "title": "" }, { "docid": "aa0acf81e4e29128123e17aa1d4edfff", "score": "0.50988185", "text": "def select_subtool():\n\tprint(\"\\n Choose a tool:\")\n\tprint(\" 1 - fanqie rhyme builder for English words\")\n\tprint(\" 2 - fanqie finder for Chinese characters\")\n\tprint(\" 3 - fanqie reverser for English\")\n\tprint(\" q - quit\")\n\tselected = input(\"\\n 1, 2, 3 or q? 
\")\n\tif not re.match(kw_variants['#'], selected) and selected not in kw_variants['quit']:\n\t\tprint(\"\\n I did not recognize your input.\")\n\t\treturn select_subtool()\n\treturn selected", "title": "" }, { "docid": "6ab7b93fbebb3df84d023cfc50c32724", "score": "0.50953925", "text": "def forward_choice(self, x, choice: str):", "title": "" }, { "docid": "247ce9206f41de6630f27f4739277d27", "score": "0.50878495", "text": "def test_chip_flavour_twice(self):\r\n self.choice = \"c\"\r\n with patch('builtins.input', side_effect=[\"d\", \"c\", \"q\"]):\r\n self.assertIn(validate_flavour(self.choice), Data().chips)", "title": "" }, { "docid": "ff8d28aaed4c3381843d3a08d912d5c7", "score": "0.5084694", "text": "def test_get_legal_hold(self):\n pass", "title": "" } ]
6955cbd8ff7a599105390c16094f3506
Sets the reference of this Body25.
[ { "docid": "fbac8f63485bd82c00ccf9b0610ddedc", "score": "0.66401124", "text": "def reference(self, reference: str):\n\n self._reference = reference", "title": "" } ]
[ { "docid": "94e7ab98a43027ed7c456278e641c382", "score": "0.73035944", "text": "def setreference(self, ref):\n self._reference = ref", "title": "" }, { "docid": "cbdf1e730b08f4160e8b55c02f955762", "score": "0.6978299", "text": "def set_reference(self, reference):\n\n self.reference = reference", "title": "" }, { "docid": "2ef976c582721fbef0960613eb72fb51", "score": "0.6958892", "text": "def reference(self, reference):\n\n self._reference = reference", "title": "" }, { "docid": "2ef976c582721fbef0960613eb72fb51", "score": "0.6958892", "text": "def reference(self, reference):\n\n self._reference = reference", "title": "" }, { "docid": "af711e49b34e21704216f07a36546510", "score": "0.68402326", "text": "def ref(self, ref):\n\n self._ref = ref", "title": "" }, { "docid": "af711e49b34e21704216f07a36546510", "score": "0.68402326", "text": "def ref(self, ref):\n\n self._ref = ref", "title": "" }, { "docid": "af711e49b34e21704216f07a36546510", "score": "0.68402326", "text": "def ref(self, ref):\n\n self._ref = ref", "title": "" }, { "docid": "ac589562971143c59e4cd887a9b82f6b", "score": "0.66585565", "text": "def set_reference(self, reference):\n self._reference = reference\n return self", "title": "" }, { "docid": "c76d7f0106a5e74e86e4f699574fb975", "score": "0.6651977", "text": "def reference(self, value):\n self.set_reference(value)", "title": "" }, { "docid": "1de780dd0bc02bb2e031eeae58560bf3", "score": "0.65498924", "text": "def set_reference(self, reference):\n self.mesh.reference = reference\n self.fix_density(self.mesh)\n return self", "title": "" }, { "docid": "64dbe82da23fd19cb3e8a05b857f0bd8", "score": "0.6479356", "text": "def set_ref(self, ref):\n self.ref_old = self.ref\n self.ref = ref", "title": "" }, { "docid": "020230e5e75dfc194d8686b601fed43d", "score": "0.64364845", "text": "def set_reference(self, reference):\n super().set_reference(reference)\n self.set_coordinate_system(reference.coordinate_system)", "title": "" }, { "docid": "5ac3d78046d71ddc339dec786670f482", "score": "0.63817173", "text": "def SetReferenceObject(self, refObj: BaseObject) -> None:\n ...", "title": "" }, { "docid": "fd3eb1a27ef709f4fb4505d1e76c6def", "score": "0.6364741", "text": "def set_stencil_reference(self, reference):\n raise NotImplementedError()", "title": "" }, { "docid": "3c31b732e77047a85abdc4abf93d0b9b", "score": "0.6233581", "text": "def tbref(self, tbref):\n\n self._tbref = tbref", "title": "" }, { "docid": "1d2f4bbe6e4ff6528ad229f3867cecdc", "score": "0.6096397", "text": "def setT(self, value):\n self.tRef = value", "title": "" }, { "docid": "90bc05d42730663a2d33c3bf926eade8", "score": "0.6076433", "text": "def set_reference(self):\n print(\"set ref\")\n self.xrefLabel.setText(str(self.xLabel.text()))\n self.yrefLabel.setText(str(self.yLabel.text()))\n self.zrefLabel.setText(str(self.zLabel.text()))", "title": "" }, { "docid": "c4868ec17153e76bc89a1edb059810c5", "score": "0.599132", "text": "def _set_reference(self):\n if len(self.sources) > 0:\n self.brightest.is_reference = True", "title": "" }, { "docid": "51cde50e225eae8d6889df7c1f0c9ce0", "score": "0.5793951", "text": "def set_references(self, references: IReferences):\n self._references = references", "title": "" }, { "docid": "4b0b491ea11871b53226c4bdb00907e4", "score": "0.5750434", "text": "def set_ref_node(self, ref: int = 0) -> None:\n self.ref_node = self._check_ref_node(ref)", "title": "" }, { "docid": "71014350db992de2ab4cb4abe7ca475c", "score": "0.574088", "text": "def references(self, references):\n\n self._references = 
references", "title": "" }, { "docid": "e5acdefee431563cb271db3a425a2848", "score": "0.573403", "text": "def reference_id(self, reference_id):\n\n self._reference_id = reference_id", "title": "" }, { "docid": "4273b6cae20d57a2bd7e4a430426e145", "score": "0.5723998", "text": "def _get_reference(self):\n super()._get_reference()\n\n # Additional object references from this env\n self.hole_body_id = self.sim.model.body_name2id(\"hole\")\n self.cyl_body_id = self.sim.model.body_name2id(\"cylinder\")", "title": "" }, { "docid": "63b3cbcb20f6f0a32a02edd8b1cb8891", "score": "0.57216007", "text": "def set_reference_point(self, reference_point=None):\n\n if reference_point is None:\n if (self.sicd.RadarCollection.Area is None or self.sicd.RadarCollection.Area.Plane is None):\n reference_point = self.sicd.GeoData.SCP.ECF.get_array()\n else:\n reference_point = self.sicd.RadarCollection.Area.Plane.RefPt.ECF.get_array()\n\n if not (isinstance(reference_point, numpy.ndarray) and reference_point.ndim == 1\n and reference_point.size == 3):\n raise ValueError('reference_point must be a vector of size 3.')\n self._reference_point = reference_point\n # set the reference hae\n ref_llh = ecf_to_geodetic(reference_point)\n self._reference_hae = ref_llh[2]", "title": "" }, { "docid": "f9013c65a62adfae002b3c14143f306a", "score": "0.5689195", "text": "def set_refl( self, refl):\n self.refl = refl", "title": "" }, { "docid": "bff487162357ed8d97c5a7f63bcc502b", "score": "0.56687725", "text": "def reference(self):\n\n raise NotImplementedError(\"reference() is not defined!\")", "title": "" }, { "docid": "b33b9718f4851bbb54b1e61beda7a19a", "score": "0.5639341", "text": "def grid_reference(self, grid_reference):\n\n self._grid_reference = grid_reference", "title": "" }, { "docid": "5dc75ff31b982a2d75c8cda0dd7a09a0", "score": "0.5629901", "text": "def reference(self, reference):\n if (self.local_vars_configuration.client_side_validation and\n reference is not None and len(reference) > 255):\n raise ValueError(\"Invalid value for `reference`, length must be less than or equal to `255`\") # noqa: E501\n\n self._reference = reference", "title": "" }, { "docid": "b7de3ac834ebb718a93077f9626dbc68", "score": "0.56265634", "text": "def setReference(self, origin: tuple) -> None:\n\t\tself.body = [(origin[0] - self.head[0] + segment[0], origin[1] - self.head[1] + segment[1]) for segment in self.body]\n\t\tself.head = origin", "title": "" }, { "docid": "d4fcdd7b74bd20b3e6c7db47ae7e32bb", "score": "0.5592067", "text": "def reference_code(self, reference_code):\n\n self._reference_code = reference_code", "title": "" }, { "docid": "9b2de9073e389735edf67cd05705b3aa", "score": "0.55788696", "text": "def refseq(self, refseq):\n\n self._refseq = refseq", "title": "" }, { "docid": "926e55e5a9a53d013725b01086c1ca34", "score": "0.55605197", "text": "def reference(self, reference: str):\n if reference is None:\n raise ValueError(\"Invalid value for `reference`, must not be `None`\") # noqa: E501\n\n self._reference = reference", "title": "" }, { "docid": "926e55e5a9a53d013725b01086c1ca34", "score": "0.55605197", "text": "def reference(self, reference: str):\n if reference is None:\n raise ValueError(\"Invalid value for `reference`, must not be `None`\") # noqa: E501\n\n self._reference = reference", "title": "" }, { "docid": "4efe8d9d093e2efc4c52d0911e7feb53", "score": "0.55602807", "text": "def set_refh(self, refh):\n self.refh = refh", "title": "" }, { "docid": "ca8c95feff88979596027452214fdd70", "score": "0.5557447", "text": "def 
ref_id(self, ref_id):\n\n self._ref_id = ref_id", "title": "" }, { "docid": "41b000804b6a0b75d42acfbfa20fe964", "score": "0.5542829", "text": "def reference(self, name):\n pass", "title": "" }, { "docid": "ac281b2401a8625c41ae9f4ce9635722", "score": "0.5518589", "text": "def __set__(self, instance, value):\n\n self.set(value)", "title": "" }, { "docid": "70a784d10cb4479d4ed538d2ea2bc06d", "score": "0.5503777", "text": "def reference(self, reference):\n if self.local_vars_configuration.client_side_validation and reference is None: # noqa: E501\n raise ValueError(\"Invalid value for `reference`, must not be `None`\") # noqa: E501\n\n self._reference = reference", "title": "" }, { "docid": "96fedb5255a598214450927ff0ab7a02", "score": "0.5487575", "text": "def RigidBody(\n self,\n referenceNode: str,\n position: str = INPUT,\n isothermal: Boolean = OFF,\n elset: str = \"\",\n pinNodes: str = \"\",\n tieNodes: str = \"\",\n analyticSurface: str = \"\",\n ):\n pass", "title": "" }, { "docid": "2d52ddcfc85db243114ea725baa6c723", "score": "0.5481416", "text": "def bounce(self, bounce):\n\n self._bounce = bounce", "title": "" }, { "docid": "361e129911f2375244b00a4657e51224", "score": "0.54599863", "text": "def setBrain(self, brain):\n\t\tself.brain = brain\n\t\tself.brain.agent = self", "title": "" }, { "docid": "23093639264a59ee3ee5a2dbc29e1e19", "score": "0.5453582", "text": "def class_ref(self, class_ref):\n\n self._class_ref = class_ref", "title": "" }, { "docid": "5205a40de4f9f98b9890c5f842058863", "score": "0.5449569", "text": "def set(self, value: ArrayOrTensor):\n self._internal_rewards.assign(value)", "title": "" }, { "docid": "82f7982a173d4e47c8c2548d64259e79", "score": "0.54483145", "text": "def set(self, reference_coords, coords): # -> None:\n ...", "title": "" }, { "docid": "7fbc5b3819615707f3ffd22343705111", "score": "0.5443945", "text": "def object(self,o):\n self.set('reference.target',o)\n if o is not None:\n self.description(\"A reference to: \"+o.getDescription())", "title": "" }, { "docid": "c9ddc270b0bd57e9eaf180310332005a", "score": "0.5431804", "text": "def set_ref(self, refp, refn):\n\n with AutoUpdater._lock:\n AutoUpdater.remove_link(self.ref)\n AutoUpdater.add_link(\n refp,\n self.ref[0])\n AutoUpdater.add_link(\n refn,\n self.ref[1])", "title": "" }, { "docid": "caf39c9c87999700695b24dfdfe23749", "score": "0.5428546", "text": "def set_ref(self, refp, refn):\n with AutoUpdater._lock:\n AutoUpdater.remove_link(self.ref)\n AutoUpdater.add_link(\n refp,\n self.ref[0])\n AutoUpdater.add_link(\n refn,\n self.ref[1])", "title": "" }, { "docid": "e05059c1807466d2d5c837b72745081c", "score": "0.54264915", "text": "def ref_name(self, ref_name):\n\n self._ref_name = ref_name", "title": "" }, { "docid": "07e448ae99bff7548455be7e96366ca7", "score": "0.54217213", "text": "def table_ref(self, table_ref):\n self._table_ref = table_ref", "title": "" }, { "docid": "0825aba64cf9dd6b02e151fbd84b7ab3", "score": "0.5413178", "text": "def view_ref(self, view_ref):\n\n self._view_ref = view_ref", "title": "" }, { "docid": "c9b7fa44b3b8cb21a560e728450f41d4", "score": "0.54067", "text": "def set_reference(self, x_sp):\n self.x_sp = x_sp", "title": "" }, { "docid": "9529c7b42aa0109f26e9098e04f74c86", "score": "0.53986084", "text": "def secret_ref(self, secret_ref):\n\n self._secret_ref = secret_ref", "title": "" }, { "docid": "035fab22d7d9cf13f728dedba48b9d3e", "score": "0.53938466", "text": "def update_value_reference(self, value, reference):\n if value is not None and (self.value != value or 
self.reference):\n self.value = value\n self.reference = None\n self.selection = []\n elif reference:\n self.reference = reference\n self.value = None\n self._is_reference = True", "title": "" }, { "docid": "6de6da1f5ff96f2cf40cbd6c8b526cc5", "score": "0.5383047", "text": "def setValues(\n self,\n bodyRegion: str = None,\n tieRegion: str = None,\n pinRegion: str = None,\n surfaceRegion: str = None,\n refPointAtCOM: Boolean = OFF,\n isothermal: Boolean = OFF,\n ):\n pass", "title": "" }, { "docid": "9a75e640f7cd1a6c7aabdba933a28966", "score": "0.5359877", "text": "def __set__(self, instance, value):\n self.__set_assigned__(instance, value)", "title": "" }, { "docid": "2b86d4f1e1185fd981c31bd7de42ce81", "score": "0.5355697", "text": "def set(self, value):\n\n return ref.Put(uri(self), None, value)", "title": "" }, { "docid": "761a5013904b28428db9622ed350b227", "score": "0.5345764", "text": "def tower_face_b(self, tower_face_b):\n\n self._tower_face_b = tower_face_b", "title": "" }, { "docid": "aa5b2712354fa55d9e341308b9404932", "score": "0.53414804", "text": "def set_mod(self, value):\n self._ref.value = self._pytype(value)", "title": "" }, { "docid": "ffe52885801ab8a7f30d5c7536b25780", "score": "0.53400224", "text": "def set_light_reference(self):\n if not self.abstr.connected:\n self.connect_to_device()\n if not self.abstr.connected:\n return\n cont_play = False\n if self.active_threads:\n msg = \"You'll need to exit Continuous Measurement mode to take a \" \\\n \"\\nnew light reference. \"\n self.prsnt.confirmation_message(\"Light Reference\", msg)\n return\n device = self.active_device\n if not device:\n return\n msg = \"Illuminate the sensor head with reference lamp, then press \\n\" \\\n \"'OK' to save the light reference\"\n proceed = self.prsnt.ok_cancel(\"Light Reference\", msg)\n if not proceed:\n return\n busy = self.prsnt.busy(\"Taking Light Reference Scan\")\n try:\n data = device.get_pixel_data()\n temp = device.get_internal_temp()\n temperature_compensation = AT * temp ** 3 + \\\n BT * temp ** 2 + CT * temp\n device.light_reference = [i - 1500 - temperature_compensation for i in data]\n except DeviceCommunicationError, data:\n busy = None\n del(busy)\n evt = event_error(title=\"Connection Error\", msg=data.message)\n PostEvent(self.prsnt.frame, evt)\n return\n busy = None\n del(busy)\n self.prsnt.confirmation_message(\"Light Reference Saved\",\n \"Success!\")", "title": "" }, { "docid": "d261df8c5af18bd4db0392a8fb1af520", "score": "0.53271884", "text": "def body(self, value):\r\n self.logger.warn(\"Setting values on body will NOT update the remote Canvas instance.\")\r\n self._body = value", "title": "" }, { "docid": "d261df8c5af18bd4db0392a8fb1af520", "score": "0.53271884", "text": "def body(self, value):\r\n self.logger.warn(\"Setting values on body will NOT update the remote Canvas instance.\")\r\n self._body = value", "title": "" }, { "docid": "c0ee8e76832486083ce4546ee0b163fa", "score": "0.5320214", "text": "def body(self, value):\n self.logger.warn(\n \"Setting values on body will NOT update the remote Canvas instance.\"\n )\n self._body = value", "title": "" }, { "docid": "f429e556d71fcc524f57b4233ecb2d6c", "score": "0.53182083", "text": "def refgeo(self, refgeo):\n\n self._refgeo = refgeo", "title": "" }, { "docid": "379db8e1e01bf22afbdd6a4f4c7bc676", "score": "0.5317862", "text": "def external_ref(self, external_ref):\n\n self._external_ref = external_ref", "title": "" }, { "docid": "13cc0420239affa8a812360ff3dc1438", "score": "0.5314341", "text": "def setFrame(self, 
frame):\r\n self.link.check_connection()\r\n if isinstance(frame,Item):\r\n command = 'S_Frame_ptr'\r\n self.link.send_line(command)\r\n self.link.send_item(frame)\r\n else:\r\n command = 'S_Frame'\r\n self.link.send_line(command)\r\n self.link.send_pose(frame)\r\n self.link.send_item(self)\r\n self.link.check_status()", "title": "" }, { "docid": "71a468cec1718cdf30b91811bf413c4c", "score": "0.53062135", "text": "def set_point_reference(self):\n self._envoi(\"R\")", "title": "" }, { "docid": "f6386caacec467d6855d30d6f30548a0", "score": "0.5304835", "text": "def set(self, value):\n pass", "title": "" }, { "docid": "f6386caacec467d6855d30d6f30548a0", "score": "0.5304835", "text": "def set(self, value):\n pass", "title": "" }, { "docid": "7cdcbbcbd3dfa38468d9ab69f269c738", "score": "0.53014565", "text": "def setBody(self, body):\r\n self.body = body", "title": "" }, { "docid": "9884914106bd74a3295311279cbd96fe", "score": "0.53008896", "text": "def references(self, references: List[OmimReferences]):\n\n self._references = references", "title": "" }, { "docid": "6b2d28f94414785a6fc5a40b8b4db7fb", "score": "0.5299543", "text": "def enable_reference(self, reference=1):\n reference = self._interpret_reference(reference)\n self.write(\":{0}:ENAB 1\".format(reference))", "title": "" }, { "docid": "087e0c26549c77a330ba37ed4df358f3", "score": "0.52990216", "text": "def set_references(self, references):\n self._references = references\n self._logger.set_references(references)\n self._counters.set_references(references)\n self._dependency_resolver.set_references(references)\n self._endpoint = self._dependency_resolver.get_one_optional('endpoint')\n\n if self._endpoint is None:\n self._endpoint = self.create_endpoint()\n self._local_endpoint = True\n else:\n self._local_endpoint = False\n\n self._endpoint.register(self)", "title": "" }, { "docid": "9862bfd1ad5c3a6cb7ca23921ab8e848", "score": "0.5293022", "text": "def Set(self, position, angle):\r\n return _Box2D.b2Transform_Set(self, position, angle)", "title": "" }, { "docid": "816b3b4781da97712a2ee1c474a2e48f", "score": "0.52772343", "text": "def setvel(self):", "title": "" }, { "docid": "524af1b5639a0cd869de0bd78ac571f3", "score": "0.5276718", "text": "def reference_frame(self, new_reference_frame):\n if not isinstance(new_reference_frame, ReferenceFrame):\n raise TypeError('''reference_frame should be a valid\n ReferenceFrame object.''')\n else:\n self._reference_frame = new_reference_frame", "title": "" }, { "docid": "333f092f8f925a5b2ca58db2ed62b294", "score": "0.5268586", "text": "def set_reference_biomass(name, comp):\r\n biomass.TPY = 300, 101325, comp\r\n return polimi.set_reference_coal(\r\n name,\r\n atoms={el: biomass.elemental_mass_fraction(el)\r\n for el in ('C', 'H', 'O')})", "title": "" }, { "docid": "052921feddf08b4465a5910c62439853", "score": "0.5252094", "text": "def setLaser(self, le):\n self.le = le", "title": "" }, { "docid": "9e9ca2d6f8be9e01a111429d67aee95d", "score": "0.5249896", "text": "def __SetTransform(self, position, angle):\r\n return _Box2D.b2Body___SetTransform(self, position, angle)", "title": "" }, { "docid": "4fabccc3de107aaa290d5765fb9d0ed1", "score": "0.5244177", "text": "def attachment(self, value):\r\n self.logger.warn(\"Setting values on attachment will NOT update the remote Canvas instance.\")\r\n self._attachment = value", "title": "" }, { "docid": "b33c85419ec2a8170cb69a96bf728763", "score": "0.52434146", "text": "def set(self, value):\n _set_or_connect_a_to_b(self, value)", "title": "" }, { "docid": 
"8bbf00a3f063f610ceb1b267c4a1f03e", "score": "0.5241623", "text": "def __SetType(self, type):\r\n return _Box2D.b2Body___SetType(self, type)", "title": "" }, { "docid": "98a0e64a4f1d5dab418f2039a70fbb89", "score": "0.5214857", "text": "def ref_tf(self, tf_ref):\n self._tf_ref = QuatTrans(tf_ref)", "title": "" }, { "docid": "8a589988e8aef052f7f9fe04fb8f78be", "score": "0.520979", "text": "def attachment(self, value):\n self.logger.warn(\n \"Setting values on attachment will NOT update the remote Canvas instance.\"\n )\n self._attachment = value", "title": "" }, { "docid": "4b6d082ff7df8e54e6a39145d3518aff", "score": "0.52088666", "text": "def set_reference(self, structure):\n self.refcoord = self.get_guide_coord_from_structure(structure)\n\n if len(self.refcoord) < self.window_size * 2:\n n_atoms = len(self.refcoord)\n msg = (\n f\"Too few atoms in the reference structure ({n_atoms}). \"\n \"Try reducing the window_size parameter.\"\n )\n raise PDBException(msg)", "title": "" }, { "docid": "cfd1eb52bd5e47ecee8c3d6600fd86dd", "score": "0.5206038", "text": "def reference(self):\n return super().reference", "title": "" }, { "docid": "ead6846b436d7733abb1e4c6216fdd3b", "score": "0.52057415", "text": "def studyref(self, studyref):\n\n self._studyref = studyref", "title": "" }, { "docid": "aaed12b96a60798bce289da030287d69", "score": "0.5205127", "text": "def glref(self, glref):\n\n self._glref = glref", "title": "" }, { "docid": "f3b373cec2faed4d67f05e42508707e8", "score": "0.52014875", "text": "def set_ref(self, ref, attr=None):\n ref_attr = '%s_refs' % ref.type.replace('-', '_')\n ref = {\n 'to': ref.fq_name,\n 'uuid': ref.uuid,\n }\n if ref_attr in self:\n self[ref_attr].append(ref)\n else:\n self[ref_attr] = [ref]\n return self", "title": "" }, { "docid": "815ae913773c4469415aeec0c8c00894", "score": "0.5198433", "text": "def set(self):\n pass", "title": "" }, { "docid": "de3d0e1dc86a2e862def00fd629f48db", "score": "0.51972324", "text": "def set_value(self, reference_path, value):\n\n if (reference_path.start_protocol is not None and\n reference_path.start_protocol != self.id):\n\n raise ValueError('The reference path does not target this protocol.')\n\n if reference_path.property_name is None or reference_path.property_name == '':\n raise ValueError('The reference path does specify a property to set.')\n\n if reference_path in self.provided_outputs:\n raise ValueError('Output values cannot be set by this method.')\n\n set_nested_attribute(self, reference_path.property_name, value)", "title": "" }, { "docid": "de3d0e1dc86a2e862def00fd629f48db", "score": "0.51972324", "text": "def set_value(self, reference_path, value):\n\n if (reference_path.start_protocol is not None and\n reference_path.start_protocol != self.id):\n\n raise ValueError('The reference path does not target this protocol.')\n\n if reference_path.property_name is None or reference_path.property_name == '':\n raise ValueError('The reference path does specify a property to set.')\n\n if reference_path in self.provided_outputs:\n raise ValueError('Output values cannot be set by this method.')\n\n set_nested_attribute(self, reference_path.property_name, value)", "title": "" }, { "docid": "6891606ec33764582430ac598c2a2567", "score": "0.5195247", "text": "def set(self, *args):\n val = _libsbol.AttachmentProperty_set(self, *args)\n\n try:\n sbol_obj.thisown = False\n except NameError:\n try:\n if not type(args[0]) == str:\n args[0].thisown = False\n except NameError:\n pass\n\n\n return val", "title": "" }, { "docid": 
"957579448582a451b4993f8f1b77f404", "score": "0.5190835", "text": "def set_reference_offset(self, offset, reference=1):\n reference = self._interpret_reference(reference)\n self.write(\":{0}:VOFF {1}\".format(reference, offset))", "title": "" }, { "docid": "c473cf4a38e7302d2064d2968dc24728", "score": "0.5189796", "text": "def setPosition(self, position):\n self.v = position\n for face in self.references:\n face.calculateTextureVertices()\n face.verticesChanged()", "title": "" }, { "docid": "0779cf7070c5b98d787aeaf4912b7430", "score": "0.5186851", "text": "def _target_setter(self, new_target):\n self._target = new_target", "title": "" }, { "docid": "274aaacdda6ad8c28acf948a1707c53f", "score": "0.5185953", "text": "def set_target(self, target):\n\n\n\t\tself.target = target", "title": "" }, { "docid": "b4b66453d645458a67163120823a9d93", "score": "0.51780933", "text": "def set(self, *args):\n val = _libsbol.ReferencedObject_set(self, *args)\n\n try:\n sbol_obj.thisown = False\n except NameError:\n try:\n if not type(args[0]) == str:\n args[0].thisown = False\n except NameError:\n pass\n\n\n return val", "title": "" }, { "docid": "e02558b976a5919a6eee3551f1a2465d", "score": "0.5166757", "text": "def b(self, b):\n\n self._b = b", "title": "" }, { "docid": "fea56731320026be0a517731b5a5db24", "score": "0.51641244", "text": "def _self(self, _self: RecambiosLinksParent):\n\n self.__self = _self", "title": "" }, { "docid": "4ee3cadcd164809e33c625f835a7117e", "score": "0.5161735", "text": "def entity(self, entity):\n\n self._entity = entity", "title": "" } ]
062e53385be12da3262258379b8d138a
Helper method to keep our tests DRY
[ { "docid": "c1fefd5f8cbadcc4e37eec35121f22ba", "score": "0.0", "text": "def given_a_series_of_prices(self, prices):\n timestamps = [datetime(2015, 5, 28), datetime(2015, 5, 29),\n datetime(2015, 5, 30)]\n for timestamp, price in zip(timestamps, prices):\n self.goog.update(timestamp, price)", "title": "" } ]
[ { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.7571753", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.7571753", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.7571753", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.7571753", "text": "def _test(self):", "title": "" }, { "docid": "f9c82fcce3672cf836716181212cb178", "score": "0.7571753", "text": "def _test(self):", "title": "" }, { "docid": "89929534a4962c10a690a537d8396063", "score": "0.7355026", "text": "def unitary_test():", "title": "" }, { "docid": "51262b0e50f19bfeabbd206ecbc400f8", "score": "0.7285585", "text": "def _test(self):\n pass", "title": "" }, { "docid": "51262b0e50f19bfeabbd206ecbc400f8", "score": "0.7285585", "text": "def _test(self):\n pass", "title": "" }, { "docid": "51262b0e50f19bfeabbd206ecbc400f8", "score": "0.7285585", "text": "def _test(self):\n pass", "title": "" }, { "docid": "179e1fb3c1ce4fe978463bb8382a0fe0", "score": "0.7222582", "text": "def test_4_4_1_1(self):\n pass", "title": "" }, { "docid": "93d634925ca4cd42ab67364dab3dcb40", "score": "0.7149709", "text": "def tests():", "title": "" }, { "docid": "343d0cce8356ac3e92c0de489f4efcd1", "score": "0.70468533", "text": "def test(self):\n pass", "title": "" }, { "docid": "72067ca59af5e18113e3033c3b13c9a6", "score": "0.70047855", "text": "def testApi(self):", "title": "" }, { "docid": "a04d6fd50caa15c6a78a21a546013996", "score": "0.6999811", "text": "def test_alien_data(self):", "title": "" }, { "docid": "0447207dd4d65759fd7650c47ffbff4c", "score": "0.69940233", "text": "def test_get(self):\n pass", "title": "" }, { "docid": "437c97d442c7b0856d38747992b2c2cc", "score": "0.6982173", "text": "def test_required_methods(self):", "title": "" }, { "docid": "23bc033e1545fc18a03fcc60c92193ca", "score": "0.69452095", "text": "def test_basic_execution(self):", "title": "" }, { "docid": "64aeca6de00fe557e92e1fd76c4592d4", "score": "0.6942366", "text": "def test_apply_endorsements(self):", "title": "" }, { "docid": "8b1ef8345fdadfe8b8084c72c011341f", "score": "0.693542", "text": "def test_get2(self):\n pass", "title": "" }, { "docid": "f173953fe5dc9184fa29be2024c6ff03", "score": "0.69144267", "text": "def test_get_insumo(self):", "title": "" }, { "docid": "2a94c0cbec5994843966a13293efd88d", "score": "0.6898607", "text": "def test_if(self):", "title": "" }, { "docid": "a5c013bdd1962d473bc29d8ef9f1594c", "score": "0.68982375", "text": "def test(self):", "title": "" }, { "docid": "a5c013bdd1962d473bc29d8ef9f1594c", "score": "0.68982375", "text": "def test(self):", "title": "" }, { "docid": "f804228df328d299e59a65132d53aae3", "score": "0.6878471", "text": "def test_get_details7(self):\n pass", "title": "" }, { "docid": "6964a72fb027c35bbd5eb2c3c13e0d1d", "score": "0.6870291", "text": "def test_let(self):", "title": "" }, { "docid": "b968dd89e057c6e652a0baab457fe523", "score": "0.6865871", "text": "def test_require():", "title": "" }, { "docid": "81734121a21ab2e4eeb1f0f4dee9ca9e", "score": "0.68588555", "text": "def test(self):\n raise NotImplementedError", "title": "" }, { "docid": "c9f6d7137a2bd92b35b0c210bf0eda65", "score": "0.6856133", "text": "def test_T01():", "title": "" }, { "docid": "95fb7f90a509dafd1d9555bb95ff4bcd", "score": "0.6844019", "text": "def test_something():", "title": "" }, { "docid": "f9d3aadf020b94c846a9223d9bdbbbad", "score": "0.68333006", "text": "def test_get1(self):\n pass", 
"title": "" }, { "docid": "e54528a4f8620fa5a931c9264ddf2641", "score": "0.6795452", "text": "def test_for_client():", "title": "" }, { "docid": "0af3c7b1332f788ae5ca4c9e330f4453", "score": "0.67777425", "text": "def test_process_data(self):\n pass", "title": "" }, { "docid": "0a1d761ae8534b15b1819eaa0ba70698", "score": "0.6754029", "text": "def test_data_in_param(self):", "title": "" }, { "docid": "5966aee4cbc34d0266517d18b9669658", "score": "0.67398494", "text": "def test_compare(self):", "title": "" }, { "docid": "fcafb07835deeb463e5bda11509ea028", "score": "0.67271703", "text": "def test_method(self):", "title": "" }, { "docid": "32b1f4929b9b1fea633d29b6b2af02f9", "score": "0.6714886", "text": "def test_create_unexpected_problem(self):\n pass", "title": "" }, { "docid": "13d211f882237877b3726b59741636d0", "score": "0.6707117", "text": "def test_T2():", "title": "" }, { "docid": "13d211f882237877b3726b59741636d0", "score": "0.6707117", "text": "def test_T2():", "title": "" }, { "docid": "81925e5909755795a36a2ab1ebcc7a1c", "score": "0.67052203", "text": "def test_meme_get(self):\n pass", "title": "" }, { "docid": "c157f8661fb864a9926dae73af36de3d", "score": "0.67023325", "text": "def test_dummy():", "title": "" }, { "docid": "bacf2150772bba3a65d64749a4f17d00", "score": "0.6682762", "text": "def test_get_by_name2(self):\n pass", "title": "" }, { "docid": "af4352903bae7b9a79bc98efe6142ce0", "score": "0.66823655", "text": "def test_households_in_admin_unit(self):", "title": "" }, { "docid": "b068febdcb7ea0a7ca80b7171e732916", "score": "0.6664078", "text": "def test_03_visit_special(self):", "title": "" }, { "docid": "9c1d3837657baad532e6b13c0c4dffea", "score": "0.66543055", "text": "def test_get_run(self):\n pass", "title": "" }, { "docid": "dc6862f137fd4ca09646792a4a03211f", "score": "0.66539514", "text": "def inner_test():\n pass", "title": "" }, { "docid": "dc6862f137fd4ca09646792a4a03211f", "score": "0.66539514", "text": "def inner_test():\n pass", "title": "" }, { "docid": "3d4fbb4aec93b60106d036cd58b1bfcd", "score": "0.6639962", "text": "def test():", "title": "" }, { "docid": "3d4fbb4aec93b60106d036cd58b1bfcd", "score": "0.6639962", "text": "def test():", "title": "" }, { "docid": "141d291ed9b4c6281ad2f0e3f3bf6319", "score": "0.663654", "text": "def test_4():", "title": "" }, { "docid": "5d62c107c16f4fb478e8b466f86c7d34", "score": "0.6613261", "text": "def test_bed(self):\n #TODO write bed tests", "title": "" }, { "docid": "22c1aa096d1762d693996e374dbe1529", "score": "0.6613247", "text": "def test_uparforvarg(self):", "title": "" }, { "docid": "a513cd37e1d801d3ab61835e5e4ad338", "score": "0.66124606", "text": "def test_untar(self):", "title": "" }, { "docid": "7107bbddd4e601ef0e4046896aea824c", "score": "0.6609799", "text": "def setUp(self):\n self", "title": "" }, { "docid": "7107bbddd4e601ef0e4046896aea824c", "score": "0.6609799", "text": "def setUp(self):\n self", "title": "" }, { "docid": "5bd1925bbe4b4775fd8f55758ae411a6", "score": "0.6604512", "text": "def test_api_response_data(self):", "title": "" }, { "docid": "e16ed92fa5636ce9c166f7de53b0a58d", "score": "0.66030294", "text": "def test_composition(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": 
"82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "82e87dcd64b46f00aeaa6ffcb1fb38cb", "score": "0.6602845", "text": "def setUp(self):", "title": "" }, { "docid": "5bf2de1626d7ad28c7eddac243727bb5", "score": "0.659598", "text": "def test_get_info(self):\n pass", "title": "" }, { "docid": "82f56d0826759fead27129b7040a2a10", "score": "0.65929276", "text": "def test_document_retrieval(self):", "title": "" }, { "docid": "4eaf3d898286280a6823956bea302ed3", "score": "0.6592165", "text": "def test_T3():", "title": "" }, { "docid": "4eaf3d898286280a6823956bea302ed3", "score": "0.6592165", "text": "def test_T3():", "title": "" }, { "docid": "2c20fe4adbf35af2f0f49391226126a0", "score": "0.6588572", "text": "def test_get_by_name1(self):\n pass", "title": "" }, { "docid": "19c904b4f2d6e275deed4c59a15a13c3", "score": "0.6580314", "text": "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "title": "" }, { "docid": "459e33d1568d359e378abef05fc9cf27", "score": "0.65699553", "text": "def test_nothing(self):", "title": "" }, { "docid": "b736060b9cd1969562c8c2c7d7550595", "score": "0.6569431", "text": "def test_get_results(self):\n pass", "title": "" }, { "docid": "03f8aab518d1536c9e808d69e62745c1", "score": 
"0.6560355", "text": "def test_T4():", "title": "" }, { "docid": "03f8aab518d1536c9e808d69e62745c1", "score": "0.6560355", "text": "def test_T4():", "title": "" }, { "docid": "3f5970ea7d4ed2a342e17d7c33e78b2c", "score": "0.6558351", "text": "def testBeliefs1sk(self):", "title": "" }, { "docid": "51ebbf17c74e9451feecf8765ce5207b", "score": "0.6551665", "text": "def test_get_scenarios(self):\n pass", "title": "" }, { "docid": "1fe897749af6f281491929effc2113eb", "score": "0.65403396", "text": "def test_T1():", "title": "" }, { "docid": "1fe897749af6f281491929effc2113eb", "score": "0.65403396", "text": "def test_T1():", "title": "" }, { "docid": "4dbdfbc151bc43b6c4c21c520227614b", "score": "0.6518431", "text": "def test_create_from_pear(self):\n pass", "title": "" }, { "docid": "d7612bc1e44ed157f385d4d50b08c373", "score": "0.6514903", "text": "def test_create_run(self):\n pass", "title": "" }, { "docid": "6020ca462e59ce758beacefa3f8b96d9", "score": "0.6509473", "text": "def test_gettem_using_get(self):\n pass", "title": "" }, { "docid": "a6e8678913f133c7dea92537caa2e47f", "score": "0.6504651", "text": "def test_3():", "title": "" }, { "docid": "d78830d3ac98b954d5480960042e6fd5", "score": "0.6489452", "text": "def test_index(self):", "title": "" }, { "docid": "b43541209a12440d69cc0ca040b9407e", "score": "0.6484511", "text": "def test_subsystems(self):\n pass", "title": "" } ]
b677ca61c75260738aaf0ddec5ce2b65
Normalizes string, converts to lowercase, removes nonalpha characters,
[ { "docid": "9f8e7ce7916a2afefdd81b4bb23bb675", "score": "0.0", "text": "def sanitize(value):\n import re\n re.sub('[^\\w\\-_\\. ]', '_', value)\n value = value.replace(\" \", \"_\")\n logger.debug(f'Saving file with sanitized name: {value}')\n return value", "title": "" } ]
[ { "docid": "21d05d24e2e0b083869daab4c2577f97", "score": "0.79351604", "text": "def normalizeText(s):\n s = s.lower()\n s = REMSPACE.sub(' ', s)\n s = ALPHAONLY.sub('', s)\n s = s.strip()\n return s", "title": "" }, { "docid": "8163d4336b7bb30bc7f5f8acab4d5c46", "score": "0.78814095", "text": "def normalizeString(self, s):\n s = self.unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?\\(\\)\\\"])\", r\"\", s)\n s = re.sub(r\"[^0-9a-zA-Z]+\", r\" \", s)\n return s", "title": "" }, { "docid": "b568ddc02f410010b31bb7ac36f2bd28", "score": "0.78429973", "text": "def normalize(self , string):\n\t\t\n\t\t# If the string is empty, just return it\n\t\tif len(string) is 0:\n\t\t\treturn string\n\t\t\n\t\t# Setting all words to lowercase\n\t\tstring = string.lower()\n\t\t\n\t\t# Removing punctuation\n\t\tif not string[ -1 ].isalnum():\n\t\t\tstring = string[ :-1 ]\n\t\t\n\t\t# Removing words\n\t\tstring = self.substitute_words(string)\n\t\t\n\t\t# Returning normalized text\n\t\treturn string", "title": "" }, { "docid": "65649d13b6ca94c72d29a248e30d35a2", "score": "0.780416", "text": "def normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s", "title": "" }, { "docid": "65649d13b6ca94c72d29a248e30d35a2", "score": "0.780416", "text": "def normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s", "title": "" }, { "docid": "46b5b9f8c1186a651b7226aba04ae137", "score": "0.77940845", "text": "def _scrub(cls, s):\n if not s:\n return s\n return s.lower().replace(\" \", \"\")", "title": "" }, { "docid": "41fd8d74298cbf7d57eb175d7addcd02", "score": "0.7763746", "text": "def _normalize(s):\n # Remove repeated whitespace characters.\n s = re.sub(r'\\s+', ' ', s)\n # Remove non-whitespace or word characters.\n s = re.sub(r'[^\\w\\s]', '', s)\n # Lowercase the string.\n s = s.lower()\n # Strip leading and trailing whitespace.\n s = s.strip()\n\n return s", "title": "" }, { "docid": "3996e7898b944a108b96091018c23144", "score": "0.7725168", "text": "def _normalize_sentence(cls, sentence):\n if sentence:\n sentence = re.sub(cls._non_alnum_exp, '', sentence)\n sentence = sentence.lower()\n return sentence", "title": "" }, { "docid": "ac9a8d68262549d3c4d94cac3aaef399", "score": "0.770046", "text": "def normalise(s):\n # TODO: Handle unicode\n return s.lower().replace('(', '').replace(')', '').replace('- ', '')", "title": "" }, { "docid": "cb8834bdc132baec898be75aa002d686", "score": "0.7691802", "text": "def _normalize(string):\n return re.sub(r'\\W', '', re.sub(r'[\\s-]+', '_', string)).lower()", "title": "" }, { "docid": "c36271f8fd3972b73348f262bdf5b2b8", "score": "0.76567686", "text": "def normalizeText(text:str)-> str:\n\n # Variable declaration.\n normalizedText:str\n \n # Remove number, punctuation, and space.\n normalizedText = \"\".join(filter(str.isalpha, text)).lower()\n return normalizedText", "title": "" }, { "docid": "d9465648993b56508138cb79fd4c7444", "score": "0.76530695", "text": "def normalize(s):\r\n result = ''\r\n for c in s.lower():\r\n if c in keep:\r\n result += c\r\n return result", "title": "" }, { "docid": "659198bbc076186b37797d4724c54a78", "score": "0.76037025", "text": "def normalize_string(string):\n filtered_string = []\n if isinstance(string, str):\n string = unicode(string, 'utf-8')\n for i in unicodedata.normalize('NFKC', string):\n cat = unicodedata.category(i)[0]\n # filter out 
all the non letter and non number characters from the\n # input (L is letter and N is number)\n if cat in 'LN' or i in '-_':\n filtered_string.append(i)\n elif cat in 'Z':\n filtered_string.append(' ')\n return re.sub('\\s+', '-', ''.join(filtered_string)).lower()", "title": "" }, { "docid": "484cb62b4f00722948b390f1b775d6aa", "score": "0.75858015", "text": "def normalize_text(text):\n text = text.lower()\n valid_chars = string.ascii_letters + string.whitespace + string.digits", "title": "" }, { "docid": "736fa98e47f17fd480fa371781caa46b", "score": "0.75788116", "text": "def normalize(s):\r\n # remove invalid chars\r\n first = s[0].upper()\r\n s = re.sub(invalid_re, \"\", s.upper()[1:])\r\n # remove repeated chars\r\n char = None\r\n \r\n s_clean = first\r\n\r\n for c in s:\r\n if char != c:\r\n s_clean += c\r\n char = c\r\n\r\n return s_clean", "title": "" }, { "docid": "cffbe865e782599e22b322e49e4b9753", "score": "0.7553414", "text": "def normalize(s):\n\n s = stripper(s).upper() \n return s", "title": "" }, { "docid": "a81a29d19e8f0330693e2a54590ce1d8", "score": "0.7541853", "text": "def standardize(text: str) -> str:\n return text.strip().lower()", "title": "" }, { "docid": "1d7bc22efca73e93f306da22263d27e4", "score": "0.7512885", "text": "def normalize_text(s: str) -> str:\n\n def white_space_fix(text: str) -> str:\n return ' '.join(text.split())\n\n def lower(text: str) -> str:\n return text.lower()\n\n return white_space_fix(lower(s))", "title": "" }, { "docid": "7867d9f79baf268850151e16c89dbd2c", "score": "0.7507214", "text": "def _cleanse(s):\n return ''.join([c for c in s if c.isalnum()]).lower()", "title": "" }, { "docid": "0569f99e0841391771ff021051b9daa0", "score": "0.7478427", "text": "def _normalise(s):\n camel = \"\".join([w[0].upper() + w[1:] for w in s.lower().split(\" \") if w != \"\"])\n return camel[0].lower() + camel[1:]", "title": "" }, { "docid": "e83674f36d724c5409e9c875584ea006", "score": "0.7478345", "text": "def normalize(text, norm=[]):\n if 'upper' in norm and text.isupper() == False:\n print \"WARN: Converting to uppercase...\"\n text=text.upper()\n if 'alpha' in norm and text.isalpha() == False:\n print \"WARN: Stripping non-alpha...\"\n text=filter(str.isalpha, text)\n return text", "title": "" }, { "docid": "62296e6f7031481a93740f557fa06320", "score": "0.74697775", "text": "def normalize_string(in_string):\n lowered = in_string.lower()\n no_symbols = remove_symbols(lowered)\n printable_string = remove_other(no_symbols)\n lemmad_string = lemma(printable_string)\n clean_string = remove_black_listed(lemmad_string)\n\n return clean_string", "title": "" }, { "docid": "d60bd05ad7eaf956a144bd0833c819d2", "score": "0.7469675", "text": "def normalize(s):\n #s = re.sub(r'\\ROAD ', '', s, flags=re.IGNORECASE)\n first = s[0].upper()\n s = re.sub(invalid_re, \"\", s.upper()[1:])\n # remove repeated chars\n char = None\n s_clean = first\n for c in s:\n if char != c:\n s_clean += c\n char = c\n return s_clean", "title": "" }, { "docid": "99d0c4f5a811b2c741ff0c0c33bf07ec", "score": "0.7460326", "text": "def normalize_text(s):\n import string, re\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" 
}, { "docid": "55c22bc92594f337e51a0bf54e992290", "score": "0.7452759", "text": "def strip_lowered(string):\n return str(string).lower().strip()", "title": "" }, { "docid": "51c7cc266df79a785b58a61d863ec055", "score": "0.74413854", "text": "def normalize(self,s):\n s=s.rstrip().lstrip()\n return s[0].upper()+s[1:]", "title": "" }, { "docid": "1250dde0607a476d5f2348718dff7b61", "score": "0.74258524", "text": "def normalize_name(s):\n s = strip_diacritics(s)\n return s.lower()", "title": "" }, { "docid": "9b1a32bfcb25e828722be180d7dc6d9b", "score": "0.7386776", "text": "def clean(string):\n if string:\n return string.strip().lower()\n return ''", "title": "" }, { "docid": "dd3820c127e8fd09973c7cccc517496d", "score": "0.73821557", "text": "def __filter_chars_normalize(self):\n pattern = re.compile('[\\W_]+')\n self._data = pattern.sub(' ', self._data).lower()", "title": "" }, { "docid": "fa6ffdd676a803e673bb260cd5a05569", "score": "0.7365663", "text": "def toLowerCase(s):\n return s.lower()", "title": "" }, { "docid": "85894ad25a8afb4207957c8dda0a4bf9", "score": "0.7362882", "text": "def safelower(s):\n try:\n return s.lower()\n except AttributeError:\n return s", "title": "" }, { "docid": "d5ff4ea4806479d45708235736a94602", "score": "0.7344375", "text": "def _norm(s: str) -> str:\n rv = s.casefold().lower()\n for x in ' .-_./':\n rv = rv.replace(x, '')\n return rv", "title": "" }, { "docid": "a0848fbb9223d14c340a0c937c713b0b", "score": "0.73226666", "text": "def normalize_text(s: str) -> str:\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "title": "" }, { "docid": "53db12cecf70653c6b4f22fcd471bcc6", "score": "0.72795695", "text": "def norm(s: str) -> str:\n rv = s.lower()\n for x in \" .-\":\n rv = rv.replace(x, \"\")\n return rv", "title": "" }, { "docid": "c917355879a09910cd7692420269e29f", "score": "0.7247678", "text": "def normalize(string):\n string = unicode(string or '').strip().upper()\n letters = []\n \"\"\"TODO(eyalf): we need to have a better list of types we are keeping\n one that will work for non latin languages\"\"\"\n for ch in unicodedata.normalize('NFD', string):\n category = unicodedata.category(ch)\n if category.startswith('L'):\n letters.append(ch)\n elif category != 'Mn' and ch != \"'\": # Treat O'Hearn as OHEARN\n letters.append(' ')\n return ''.join(letters)", "title": "" }, { "docid": "2ce892e7d06618c0ce0fffbf999cc99c", "score": "0.72054666", "text": "def lower(s):\n return s.lower()", "title": "" }, { "docid": "d29b55bd7ed163f9634abc5e2d6487a6", "score": "0.7186572", "text": "def clean_string(input_string):\n input_string = input_string if isinstance(input_string, unicode) else unicode(input_string, 'utf-8')\n return unicodedata.normalize('NFKD', input_string.strip().lower()).encode('ASCII', 'ignore')", "title": "" }, { "docid": "e36cede06f1783251a95824b09cb5677", "score": "0.71743286", "text": "def only_alpha(s):\r\n mod = \"\" # variable to hold modified string\r\n for char in s: # for each character in the string\r\n if char != \" \" and char not in string.punctuation: # if the character is not a punctuation and not a space\r\n mod = mod + char # add the character to the modified string\r\n return mod.lower() # return a lower case version of 
the modified string\r", "title": "" }, { "docid": "c91b4006b28304339e9b7e5cf5359458", "score": "0.71721804", "text": "def normalize(s):\n try:\n return (' '.join(re.split(npatt, s))).strip().lower()\n except TypeError:\n return s", "title": "" }, { "docid": "fd739e8e7dc83a8050a41938fec4f9c7", "score": "0.71289754", "text": "def _norm(s: str) -> str:\n rv = s.casefold().lower()\n for x in \" -_./\":\n rv = rv.replace(x, \"\")\n return rv", "title": "" }, { "docid": "9eff193316eeac3b8bfa809b01dda4a9", "score": "0.70967937", "text": "def normalize_title_word(word):\n word = strip_diacritics(word)\n word = word.lower()\n good_characters = string.ascii_lowercase + string.digits\n return ''.join(filter(lambda x: x in good_characters, word))", "title": "" }, { "docid": "a338cf49d996261ed35c62d188b013c3", "score": "0.70840955", "text": "def my_lower(s):\n try:\n return s.lower()\n except:\n return s", "title": "" }, { "docid": "2310df0b43d98d90583ab3266065b955", "score": "0.7055734", "text": "def preprocess_string(self, string: str) -> str:\n if not self.preserve_case:\n string = string.lower()\n if self.remove_urls:\n string = remove_urls(string)\n if self.enforce_ascii:\n string = string_printable(string)\n if self.strip_handles:\n string = remove_handles(string)\n if self.reduce_length:\n string = reduce_lengthening(string)\n string = _replace_html_entities(string)\n return normalize_whitespace(unicode_to_ascii(string))", "title": "" }, { "docid": "c9cd39d76fe8f387771218449198801a", "score": "0.7005047", "text": "def clean_string(s):\r\n s = s.replace(\"`\", \"\")\r\n s = s.replace(\"(\", \"\")\r\n s = s.replace(\")\", \"\")\r\n if s.startswith(\"-\"):\r\n s = s[1:]\r\n s = s.lower()\r\n return s", "title": "" }, { "docid": "be330537c56c9f97d60eb90a3a7cd1d7", "score": "0.69987607", "text": "def transform_string(self, text):\n return text.lower()", "title": "" }, { "docid": "fca3247f4154b80e1282d3bac65e77af", "score": "0.6984439", "text": "def lowercase(self, s):\n return s.lower()", "title": "" }, { "docid": "322a2f99982d58d0b044539435af2264", "score": "0.6975473", "text": "def clean_text(text):\n new_text = \"\"\n text = text.lower()\n # new_text = \"\".join([char for char in text if char.isalpha()])\n for char in text:\n if char.isalpha():\n new_text = new_text + char\n return new_text", "title": "" }, { "docid": "19f35dd70dced235627487b984638238", "score": "0.69436115", "text": "def normalize_text(text):\n regex = \"(?<!\\w)[!\\\"#$%&'()*-+/:;<=>?@[\\]^_`{|}~](?!\\w)\"\n\n #remove punctuation\n result = re.sub(regex, \"\", text, 0)\n\n #trim to remove excessive whitespace\n result = re.sub(' +', ' ',(result.replace('\\n',' '))).strip().lower()\n\n return result", "title": "" }, { "docid": "243bdca3507cde5d1a2566e87a76c48d", "score": "0.69422436", "text": "def normalize_name(name):\n return name.lower().strip().replace(\" \", \"\")", "title": "" }, { "docid": "5102f1293d860a588d657f21d7d05b74", "score": "0.69182134", "text": "def preprocess_text(s: str) -> str:\n s = s.translate(str.maketrans('', '', string.punctuation))\n s = re.sub('\\s+', ' ', s)\n return s.lower()", "title": "" }, { "docid": "99f7d6f102b24bdf9166ab918ffc52f0", "score": "0.6918051", "text": "def slugfy(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = re.sub(r'[^\\w\\s-]', '', value.decode('utf-8', 'ignore'))\n value = value.strip().lower()\n value = re.sub(r'[-\\s]+', '-', value)\n return value", "title": "" }, { "docid": "0081bf0977620d17a4f170547b152ab2", "score": "0.69130826", 
"text": "def to_lower(string):\n for letter in string:\n if (letter in ASCII_UPPERCASE):\n index = find_chr(ASCII_UPPERCASE, letter)\n string = replace_chr(string, letter, ASCII_LOWERCASE[index])\n \n return string", "title": "" }, { "docid": "72e81e0ec6b5af57c6a499420cc0888a", "score": "0.6912139", "text": "def clean_cases(text: str) -> str:\n return text.lower()", "title": "" }, { "docid": "3b4d59e193f401b3c1128fe0b12732e1", "score": "0.69086564", "text": "def clean_string(self, raw_string, sub_string):\n cleans = re.sub(\"[^0-9a-zA-Z]\", sub_string, raw_string)\n return cleans.lower()", "title": "" }, { "docid": "03ca703dbe7ab09e893587b0fe292b49", "score": "0.69072425", "text": "def clean_up_text(text):\n text = text.lower() # to lower case\n text = re.sub(r'[^a-z]', ' ', text) # replace other characters than a-z with ' '\n return text", "title": "" }, { "docid": "ba892c4e4034398ad1d74089d63ee238", "score": "0.6884004", "text": "def _remove_non_alpha_characters(self, string):\n return string.translate(self.translate_table)", "title": "" }, { "docid": "349ecd7411341efe261326b731099dc5", "score": "0.6864322", "text": "def preprocess_text(inputs, remove_space=True, lower=False):\n outputs = inputs\n if remove_space:\n outputs = \" \".join(inputs.strip().split())\n\n outputs = unicodedata.normalize(\"NFKD\", outputs)\n outputs = \"\".join([c for c in outputs if not unicodedata.combining(c)])\n if lower:\n outputs = outputs.lower()\n\n return outputs", "title": "" }, { "docid": "2b9f4759430de75e72db2ca3d5b6ca64", "score": "0.6862743", "text": "def clean( s ):\n for c in non_alpha:\n s = s.replace( c, \" \" )\n return s", "title": "" }, { "docid": "1c4f700d2dd89d95731bf9c1ff50dfe9", "score": "0.68560207", "text": "def clean_text(text):\n new_text = \"\"\n text = text.lower()\n for character in text:\n if character.isalpha():\n new_text = new_text + character\n return new_text", "title": "" }, { "docid": "ac136b9c6c0fd2f0882babd844715d89", "score": "0.6853917", "text": "def normalize(c: str) -> str:\n if c.isalnum():\n return c\n elif c == '-':\n return '_'\n else:\n return ''", "title": "" }, { "docid": "3c16af6faca4359e5485266a5a355c84", "score": "0.68476504", "text": "def preprocess_sentence(text):\n\n text = text.lower()\n text = text.strip()\n return text", "title": "" }, { "docid": "38727801bff8141210a808420206db3c", "score": "0.68418217", "text": "def lower(string):\n return str(string).lower()", "title": "" }, { "docid": "80b3157f2eb5b2ddec12e1eca2ee0fe8", "score": "0.68017226", "text": "def normalize_email(cls, email):\n return email.strip().lower()", "title": "" }, { "docid": "36fbf49ae9408c86dbcd1586a821854a", "score": "0.6796459", "text": "def _clean(s):\n s = _clean_regex.sub('', s.lower().strip())\n return \" \".join(s.split())", "title": "" }, { "docid": "2e93b5e99c3f81e5c10f751150696996", "score": "0.67787904", "text": "def cleanup_title(s):\n\n # s = s.lower()\n # s = s.capitalize()\n\n return s", "title": "" }, { "docid": "617d27a8ddeb32ad6a8e5a0bb29c47b9", "score": "0.67770684", "text": "def clean_cases(text):\n return text.lower()", "title": "" }, { "docid": "46313b4c1132c3435cbefaf965261bf6", "score": "0.67654353", "text": "def _slugify(self, text: str):\n return ''.join(text.split()).lower()", "title": "" }, { "docid": "b29a17949cf9e5a5913e4f0e0df94a74", "score": "0.67565435", "text": "def str2algo(s):\n return re.sub('[- ]', '_', s).lower()", "title": "" }, { "docid": "ef994af59071adeaed6a1aca99255188", "score": "0.6731338", "text": "def __normalize(self,text):\n return 
self._normalizer_regex.sub(lambda mo:\n self._normalizer[mo.string[mo.start():mo.end()]],\n text.lower())", "title": "" }, { "docid": "0a6e5512a71b3fcc49bcb81cf4b136e5", "score": "0.67280006", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()", "title": "" }, { "docid": "021fc1745c68956193d98894068a7740", "score": "0.67221874", "text": "def preprocess(sentence):\n\n sentence = sentence.lower() # Change sentence to lowercase\n sentence = nltk.word_tokenize(sentence) # Tokenize each word with nltk function\n\n for word in sentence.copy(): # For each word\n if not word.isalnum(): # If the word is not alphanumeric\n sentence.remove(word) # Remove the word\n\n return sentence # Return the sentence", "title": "" }, { "docid": "f6aafcf5690a113e1812f4dc24a3082d", "score": "0.670956", "text": "def strip_whitespace_and_punctuation_and_make_lowercase(phrase):\n phrase = re.sub(r'[^A-Za-z]', '', phrase)\n return phrase.lower()", "title": "" }, { "docid": "f967aba23dd11e5021986beae983f63f", "score": "0.66970986", "text": "def ascii_lower(string):\n return string.encode('utf8').lower().decode('utf8')", "title": "" }, { "docid": "15bc50ba8cf76d3d26d249f4ef49066f", "score": "0.6694016", "text": "def uncamel(s):\n\n return re.sub('(?!^)([A-Z])(?=[^A-Z])', r'_\\1', s).lower()", "title": "" }, { "docid": "692323465ac133cac1cc5f61bfd5eaca", "score": "0.66848874", "text": "def slugify(value):\n\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = re.sub('[^\\w\\s-]', '', value.decode()).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n\n return value", "title": "" }, { "docid": "8d16ec431cfe64f6ff9fc95bd42dcfe3", "score": "0.6681406", "text": "def sanitize(word):\n return re.sub('[^A-Za-z0-9]', '', word)", "title": "" }, { "docid": "15b2dd7996c2d415593057385fd74f6a", "score": "0.6669161", "text": "def _cleanName(self, name):\n return ''.join(c for c in name.lower() if c in string.ascii_lowercase)", "title": "" }, { "docid": "71120b104e5f2eb6d697ff48337af967", "score": "0.6660758", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "c1ed0be4ed9dcde5d6a5ed8d55f1c96e", "score": "0.6655272", "text": "def normalize(string: str) -> str:\n return ''.join([c for c in unicodedata.normalize('NFD', re.sub(\"\"\"[\\u0300-\\u036f`~!#$-@%^&*()|+=÷¿?;.:'\"\\\\s,<>{}]\"\"\"\n , \"\", string)) if not unicodedata.combining(c)])", "title": "" }, { "docid": "11d4623883b879972e5a7cd6576144c5", "score": "0.66498643", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`\\-]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n #return string.strip().lower()\n return string.strip()", "title": "" }, { "docid": "25c259c086ce7a0f84139d594d0516d8", "score": "0.6647857", "text": "def _normalize_string(string):\n return string.replace(\".\", \"_\").replace(\"-\", \"_\")", "title": "" }, { "docid": "c5a46a86b9ff9224cff102247a161f42", "score": "0.6646958", "text": "def normalise(self, word):\n word = word.lower()\n # word = self.stemmer.stem_word(word)\n # word = self.lemmatizer.lemmatize(word)\n return word", "title": "" }, { "docid": "abea12f0f8166a3e8c4ec27cd7634be0", "score": "0.6645301", "text": "def clean(string):\n \n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" ( \", string)\n string = re.sub(r\"\\)\", \" ) \", string)\n string = re.sub(r\"\\?\", \" ? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "c1b027e742c92e2b2a1fc960bf0647ce", "score": "0.6641397", "text": "def to_lower(text):\n return text.lower()", "title": "" }, { "docid": "ecbf424f5ce9c746e4289da0c2a920f0", "score": "0.6641386", "text": "def text_normalization(text_narration):\n text_narration = text_narration.lower()\n return text_narration", "title": "" }, { "docid": "4fb5d364b9765b31903a3c3281bd3390", "score": "0.66380787", "text": "def slugify(s):\n slug = s.lower().strip()\n slug = re.sub(r'[^a-zA-Z0-9\\s]', '', slug)\n return slug", "title": "" }, { "docid": "39f33f50bed2252ef041e13f5f822532", "score": "0.6624704", "text": "def normalize(string):\n # Since we can't use space on the kernel cmdline, Ironic will\n # urlencode the values.\n return parse.unquote(string).lower().strip()", "title": "" }, { "docid": "6ce717889e7aa308e4d08a3898f1281e", "score": "0.66232526", "text": "def normalize_name(self, name):\n if re.match(\"^[A-Za-z0-9_-]*$\", name):\n no_space_name = name.replace(' ', '')\n if str.isalpha(no_space_name):\n return no_space_name\n raise ValueError('There are non alphabetic characters that I can not recognize! <{}>'.format(name))\n\n else:\n raise TypeError('Not String! The input is supposed to be a string type! <{}>'.format(name))\n\n return name", "title": "" }, { "docid": "ea354657cdfcd2ccf6cfe5ad59b6cdb8", "score": "0.66210204", "text": "def remove_accents(str_input):\n if not str_input:\n return ''\n\n return ''.join((c for c in normalize('NFD', str_input) if category(c) != 'Mn'))", "title": "" }, { "docid": "bd47e63de5c6b27888273f00002b9a33", "score": "0.6612585", "text": "def clean_str(string):\n #string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n #return string.strip().lower()\n return string.strip()", "title": "" }, { "docid": "ad6e98dd065bf71a72421a6c74463517", "score": "0.6605993", "text": "def _prepare_input(text: str) -> str:\n special_characters_regex = \"[^a-z|^0-9|^ ]\"\n text = text.lower()\n text = re.sub(special_characters_regex, \"\", text)\n return text", "title": "" }, { "docid": "2e9fc24644b1ed4ee87ff012b165493c", "score": "0.6603961", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "2e9fc24644b1ed4ee87ff012b165493c", "score": "0.6603961", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "86f0b4880a2e18c1201e98736a24818c", "score": "0.65925", "text": "def str_cleaner(s):\n if s is None:\n return ''\n upp = s.upper()\n clean_str = re.sub('[^ก-ฮะ-ูเ-์A-Z0-9]', ' ', upp)\n clean_str = re.sub('\\s+', ' ', clean_str).strip()\n return clean_str", "title": "" }, { "docid": "d3a409803b3aeb5b4fbee53fe90189af", "score": "0.65861684", "text": "def clean_str(string):\t\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "54029f5bb2990ee14f526960d16ee819", "score": "0.6581639", "text": "def clean_str(string):\r\n string = re.sub(r\"[^A-Za-z0-9()_,!?\\'\\`]\", \" \", string) \r\n string = re.sub(r\"\\'s\", \" \\'s\", string) \r\n string = re.sub(r\"\\'ve\", \" \\'ve\", string) \r\n string = re.sub(r\"n\\'t\", \" n\\'t\", string) \r\n string = re.sub(r\"\\'re\", \" \\'re\", string) \r\n string = re.sub(r\"\\'d\", \" \\'d\", string) \r\n string = re.sub(r\"\\'ll\", \" \\'ll\", string) \r\n string = re.sub(r\",\", \" , \", string) \r\n string = re.sub(r\"!\", \" ! \", string) \r\n string = re.sub(r\"\\(\", \" ( \", string) \r\n string = re.sub(r\"\\)\", \" ) \", string) \r\n string = re.sub(r\"\\?\", \" ? 
\", string) \r\n string = re.sub(r\"\\s{2,}\", \" \", string) \r\n return string.strip().lower()", "title": "" }, { "docid": "eabf0b16602d95aa231f8692ea129280", "score": "0.65799594", "text": "def fuzzyprep(x):\n x = remove_non_ascii(x)\n if not isinstance(x, str):\n x = str(x)\n \n return ''.join(re.split(r'\\s+', x\\\n .translate(None, string.punctuation).lower()))", "title": "" }, { "docid": "72d2991245604341443671cce31b38d6", "score": "0.6578759", "text": "def normalize(text):\r\n # Remove periods\r\n text = re.sub(RE_PERIOD, '', text)\r\n # Replace anything that isn't a letter or number with a space\r\n text = re.sub(RE_NOT_ALPHA_NUM, ' ', text.lower())\r\n # Replace 2 or more spaces with a single space\r\n text = re.sub(RE_EXTRA_SPACES, ' ', text.strip())\r\n # Separate words by spaces\r\n return text.split(' ')", "title": "" }, { "docid": "1ea9e16a511cc920a1e175dfb88624a6", "score": "0.65779364", "text": "def apply_lower(s):\n if s is not None and hasattr(s, \"lower\"):\n return s.lower()\n else:\n return s", "title": "" }, { "docid": "5810b6a0e94e955cf875af619374c74a", "score": "0.6559998", "text": "def normalize ( self , s ):\n\n# print ( 'ZH normalize' )\n n = len(s)\n ns = [ ]\n for i in range(n):\n x = s[i]\n# print ( ' x=' , x )\n if ellyChar.isLetter(x):\n x = '_'\n elif ellyChar.isWhiteSpace(x):\n continue\n# print ( 'norm x=' , x )\n ns.append(x)\n# print ( 'norm=' , ns )\n return ns", "title": "" }, { "docid": "550bd76a845e70f8a950e536cc2e8d16", "score": "0.6556252", "text": "def normalize_answer(s):\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(lower(s))", "title": "" } ]
06c6dd3d6b60a452da31149b7cd48f4c
Determine if version number needs to be updated
[ { "docid": "4fd3c5010c81efb4c2891cf13d5dbb7c", "score": "0.0", "text": "def main():\n local_version = get_version(\"../../vertical_multi_columns/__init__.py\")\n print(f\"{local_version=}\")\n\n resp = requests.get(sys.argv[1])\n pypi_version = resp.json()[\"info\"][\"version\"]\n print(f\"{pypi_version=}\")\n if local_version == pypi_version:\n sys.exit(\"Version number NOT changed ... cannot upload to Pypi\")\n else:\n sys.exit(0) # ok - version number was changed", "title": "" } ]
[ { "docid": "edd9ba140dee6b58f1594ab78eb66623", "score": "0.747243", "text": "def latestVersionValid():", "title": "" }, { "docid": "edd9ba140dee6b58f1594ab78eb66623", "score": "0.747243", "text": "def latestVersionValid():", "title": "" }, { "docid": "e08f90db58eb696c5e4e239a6162f3a0", "score": "0.74231666", "text": "def need_update(self) -> bool:\n try:\n return self.version < self.latest_version\n except (AwesomeVersionException, TypeError):\n return False", "title": "" }, { "docid": "e08f90db58eb696c5e4e239a6162f3a0", "score": "0.74231666", "text": "def need_update(self) -> bool:\n try:\n return self.version < self.latest_version\n except (AwesomeVersionException, TypeError):\n return False", "title": "" }, { "docid": "11aaec04044af1a8a1dd92fc4b4a44cc", "score": "0.7158259", "text": "def check_version(self):\n version_number = self._get_current_version()\n if not version_number:\n # presume 3.1\n return 3.1\n else:\n return version_number", "title": "" }, { "docid": "b0e6e1f9ef82f49e1754748699e26854", "score": "0.7155626", "text": "def check_version(self):\n res = requests.get(f\"{self.apiurl}​/version​/\")\n return(False)", "title": "" }, { "docid": "cc619f1dab8988c5612911fbdb32ab84", "score": "0.7143757", "text": "def needs_update(version, upstream):\n if \"+git\" in version:\n # strip +git and see if this is a post-release snapshot\n version = version.replace(\"+git\", \"\")\n return version != upstream", "title": "" }, { "docid": "8d05c5582815bd34b6e54a987fbd7e11", "score": "0.7072748", "text": "def check_update(self):\r\n \r\n try:\r\n u = urllib2.urlopen('https://api.github.com/repos/jcumby/PIEFACE/releases/latest').read()\r\n ujson = json.loads(u)\r\n \r\n except:\r\n # Problem reading url (perhaps no internet)?\r\n tkMessageBox.showerror(\"Update Error\", \"Failed to check for updates\")\r\n return False\r\n \r\n newversion = ujson['tag_name'][1:].split('.')\r\n #currversion = pkg_resources.get_distribution('pieface').version.split('.')\r\n currversion = pieface.__version__.split('.')\r\n assert len(newversion) == len(currversion)\r\n \r\n \r\n for i, num in enumerate(currversion):\r\n if int(newversion[i]) > int(num):\r\n return True\r\n return False", "title": "" }, { "docid": "f4d8fc2b1635308cee760bd3f800132d", "score": "0.70591205", "text": "def check_update():\n try:\n r = requests.get(\"https://api.github.com/repos/TomWis97/urenlog/releases/latest\")\n data = r.json()\n if data['tag_name'] != CURRENT_VERSION:\n return True\n else:\n return False\n except:\n print(\"oops\")\n return False", "title": "" }, { "docid": "a050192090fff58b8ef416b009977264", "score": "0.7050883", "text": "def version_check(self, app_version):\n return True", "title": "" }, { "docid": "b934be6e5d64d103b2ad6f74631a215b", "score": "0.704495", "text": "def isBadVersion():\n pass", "title": "" }, { "docid": "358ded6efd5a54cae4f9248f78e021a4", "score": "0.70417607", "text": "def version_ok(self,version):\r\n return self.attribute is None or self.format is None or \\\r\n str(version)!=\"unknown\" and version >= self.requested_version", "title": "" }, { "docid": "1df3373a256e05798f3c540f65b412bf", "score": "0.689812", "text": "def test_versionned(self, result):\n return re.search(INFO_PARSE_REVISION, result, re.M) is not None", "title": "" }, { "docid": "14f1446fc75d92c3a9b321389bbc7476", "score": "0.6796797", "text": "def _version_check(self):\n if libvirt.getVersion() >= 8000:\n return True\n else:\n return False", "title": "" }, { "docid": "d155c74fd6c23bf4d8ed8505a5e13841", "score": "0.6734569", 
"text": "def isStableReleaseVersion(version=None):\n version = version or __version__\n return \"-\" not in version", "title": "" }, { "docid": "7108743c516c95fd30bde9c56db4c11c", "score": "0.6692768", "text": "def is_release():\n return VERSION[-1]", "title": "" }, { "docid": "ba77fee6c3062a90b391a76bc898f094", "score": "0.6690668", "text": "def version_bumped(prev_version, new_version):\n x0, y0, z0 = map(int, prev_version.split(\".\"))\n x, y, z = map(int, new_version.split(\".\"))\n return z0 != z", "title": "" }, { "docid": "9771bd46f8d812a6f0d3f146fd602d71", "score": "0.6685592", "text": "def get_version_counter_unrecoverable():", "title": "" }, { "docid": "da9db39413d30347d3839c7e48d4a3e8", "score": "0.6684148", "text": "def require_min_version_flag(self):\n return self.__min_version > StrictVersion('1.3')", "title": "" }, { "docid": "ce3ec83e553abfdc4d44f0757e49b8be", "score": "0.6668815", "text": "def get_version():", "title": "" }, { "docid": "b45196af2ca37406657fefb25178cb2f", "score": "0.66624266", "text": "def get_version(self):\n return \"N/A\"", "title": "" }, { "docid": "5337ef9fa17e3062bba8c1072bde98ed", "score": "0.6559681", "text": "def is_valid_version(self):\n return self._is_valid_version()", "title": "" }, { "docid": "1628a98b9e8ed19b6fd05ffe99ffe638", "score": "0.6528207", "text": "def allow_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_version_upgrade\")", "title": "" }, { "docid": "c37b2d9126053579387f8484a1fa8fce", "score": "0.65234", "text": "def hasVersionAtTime(a_time):", "title": "" }, { "docid": "c37b2d9126053579387f8484a1fa8fce", "score": "0.65234", "text": "def hasVersionAtTime(a_time):", "title": "" }, { "docid": "48e9299f1f1db661ee3032e9c656657f", "score": "0.65220445", "text": "def check_update():\n \n connection = httplib.HTTPConnection(\"hexsec.com\")\n connection.request(\"GET\", \"/VersionCheck/MonkeyFist/current\")\n response = connection.getresponse()\n \n current = response.read()\n \n print(version)\n \n if version < current.rstrip():\n print(\"There is an update available, visit http://hexsec.com/labs for \\\n more details\")\n else:\n print(\"Your Version is Current\")", "title": "" }, { "docid": "12f15a97e772c3326b9c5e80a8dd6c7a", "score": "0.65085655", "text": "def pd_version_23() -> bool:\n return _PD_INSTALLED_VERSION >= _PD_VER_23", "title": "" }, { "docid": "1c6dcae0d9d6d8fc909748144af9bed3", "score": "0.646796", "text": "def version():\n\n return get_version()\n\n # We set_version in update_schedule.py", "title": "" }, { "docid": "7bdab4483a472c94ed4bbb82e58c2cb4", "score": "0.64651984", "text": "def get_version(self):\n return 42", "title": "" }, { "docid": "6d613608749d3d55abbffb93411b6880", "score": "0.6444255", "text": "def version_check():\n print(Fore.BLUE + \"Version 0.1\")", "title": "" }, { "docid": "de722b932d7c59031ed98e51897394cc", "score": "0.64401233", "text": "def check_build_version(self, env):\n \n srcDirTmp = self._name\n if self._source.attribute('module_directory').value :\n srcDirTmp = self._source.attribute('module_directory').value\n \n env.start_build(self._name, srcDirTmp,\n self._build.supports_objdir)\n \n retval = self._build.check_version(env)\n env.end_build()\n return retval", "title": "" }, { "docid": "8633a9a64a0d27cd5ff82f08f72d7812", "score": "0.6437044", "text": "def check_version():\n app_version = parse_version(get_system_spec()['raiden'])\n while True:\n try:\n content = requests.get(LATEST).json()\n # getting the latest release version\n latest_release = 
parse_version(content['tag_name'])\n # comparing it to the user's application\n if app_version < latest_release:\n msg = \"You're running version {}. The latest version is {}\".format(\n app_version,\n latest_release,\n )\n click.secho(msg, fg='red')\n click.secho(\"It's time to update! Releases: {}\".format(RELEASE_PAGE), fg='red')\n except requests.exceptions.HTTPError as herr:\n click.secho('Error while checking for version', fg='red')\n print(herr)\n except ValueError as verr:\n click.secho('Error while checking the version', fg='red')\n print(verr)\n finally:\n # repeat the process once every 3h\n gevent.sleep(CHECK_VERSION_INTERVAL)", "title": "" }, { "docid": "f09aa4d9901d4bf1d74a283d36031a10", "score": "0.6435579", "text": "def _is_right_ver(cls):\r\n version = cls.get_version()\r\n if version is not None:\r\n int_ver = cls.convert_version_to_int(version)\r\n if int_ver >= 140:\r\n return True\r\n return False", "title": "" }, { "docid": "f09aa4d9901d4bf1d74a283d36031a10", "score": "0.6435579", "text": "def _is_right_ver(cls):\r\n version = cls.get_version()\r\n if version is not None:\r\n int_ver = cls.convert_version_to_int(version)\r\n if int_ver >= 140:\r\n return True\r\n return False", "title": "" }, { "docid": "8dd204eab338aa23ebcac3e61f782ec9", "score": "0.64300394", "text": "def testVersion(self):\n self._testDoCommand(self.ctx, version=3,\n headers=['-h', 'x-goog-if-generation-match:3'])", "title": "" }, { "docid": "c2b2f9eb2e4fc0f53af395826a836cb1", "score": "0.64280474", "text": "def latestVersion():", "title": "" }, { "docid": "c2b2f9eb2e4fc0f53af395826a836cb1", "score": "0.64280474", "text": "def latestVersion():", "title": "" }, { "docid": "27c25b72645707d952b2ce0351819ac5", "score": "0.64207256", "text": "def versionCount():", "title": "" }, { "docid": "27c25b72645707d952b2ce0351819ac5", "score": "0.64207256", "text": "def versionCount():", "title": "" }, { "docid": "9273a488588746ff3da1f82167ff8bd8", "score": "0.64198583", "text": "def _check_nb_check_ver():\n nb_check_path = \"utils/nb_check.py\"\n gh_file = \"\"\n curr_file = \"\"\n try:\n # Bandit warning - fixed https URL\n with request.urlopen(NB_CHECK_URI) as gh_fh: # nosec\n gh_file = gh_fh.read().decode(\"utf-8\")\n except Exception:\n _disp_html(f\"Warning could not check version of {NB_CHECK_URI}\")\n return True\n nbc_path = get_aml_user_folder().joinpath(nb_check_path)\n if nbc_path.is_file():\n try:\n curr_file = nbc_path.read_text()\n except Exception:\n _disp_html(f\"Warning could not check version local {nb_check_path}\")\n\n if _get_file_ver(gh_file) == _get_file_ver(curr_file):\n return True\n\n _disp_html(\"Updating local {nb_check_path}...\")\n bk_up = get_aml_user_folder().joinpath(f\"{nb_check_path}._save_\")\n if bk_up.is_file():\n bk_up.unlink()\n nbc_path.replace(bk_up)\n try:\n with open(nbc_path, \"w\") as repl_fh:\n repl_fh.write(gh_file)\n except Exception:\n bk_up.replace(nbc_path)\n\n _disp_html(\n \"<h4><font color='orange'>\"\n f\"Important: The version of {nb_check_path} has been updated.<br>\"\n \"Please re-run this cell to run the new version.\"\n \"</font></h4>\"\n )\n return False", "title": "" }, { "docid": "df796bb3047b8b9d1635518006fc5b10", "score": "0.64184", "text": "def _check_version(self):\n versionDeferred = self.conn.get_info(\"version\")\n \n #add callbacks\n def on_learned_version(data):\n \"\"\"Handles version response from Tor\"\"\"\n \n #parse the version\n versionLine = data[\"version\"]\n regex = re.compile(\"^(.*)_for_BitBlinder_([0-9\\\\.]+).*$\")\n matches = 
regex.match(versionLine)\n \n #this means that innomitor is an outdated version\n if not matches:\n GUIController.get().show_msgbox(\"Your version of innomitor is out of date. Please go update it: %s/download/\" % (ProgramState.Conf.BASE_HTTP))\n return\n \n #set the current versions\n self.torVersion = matches.group(1)\n self.innomitorVersion = matches.group(2)\n log_msg(\"InnomiTor version: %s\" % (self.innomitorVersion), 2)\n return self.innomitorVersion\n versionDeferred.addCallback(on_learned_version)\n \n #add errback\n versionDeferred.addErrback(self._silent_tor_errback, \"Failed to get version from Tor\")\n \n return versionDeferred", "title": "" }, { "docid": "fa34079c34fd969cadabd7e6cb67b0ab", "score": "0.6402562", "text": "def can_run_bump_version(self, new_version: str, project_dir: str) -> bool:\n # ensure that the entered version number matches correct format like 1.1.0 or 1.1.0-SNAPSHOT but not 1.2 or 1.2.3.4\n if not re.match(r'(?<!\\.)\\d+(?:\\.\\d+){2}((?!.)|-SNAPSHOT)(?!.)', new_version):\n print('[bold red]Invalid version specified!\\nEnsure your version number has the form '\n 'of 0.0.0 or 15.100.239-SNAPSHOT')\n return False\n\n # ensure the version is bumped within a project created by mlf-core\n elif not Path(f'{project_dir}/mlf_core.cfg').is_file():\n print('[bold red]Did not find a mlf_core.cfg file. Make sure you are in the right directory '\n 'or specify the path to your projects bump_version.cfg file')\n return False\n\n # equal versions won't be accepted for bump-version\n elif new_version == self.CURRENT_VERSION:\n print(f'[bold red]The new version {new_version} cannot be equal to the current version {self.CURRENT_VERSION}.')\n return False\n\n # only allow bump from a SNAPSHOT version to its correspondence with -SNAPSHOT removed (like 1.0.0-SNAPSHOT to 1.0.0 but not 2.0.0)\n elif self.CURRENT_VERSION.endswith('-SNAPSHOT') and not self.CURRENT_VERSION.split('-')[0] == new_version:\n print(f'[bold red]Cannot bump {self.CURRENT_VERSION} to {new_version}.' +\n f'[blue]\\n{self.CURRENT_VERSION} as a SNAPSHOT version can only be bumped to its non-snapshot equivalent '\n f'{self.CURRENT_VERSION.split(\"-\")[0]}.')\n return False\n\n # ensure the new version is greater than the current one, if not the user wants to explicitly downgrade it\n elif not self.downgrade_mode:\n current_version_r = self.CURRENT_VERSION.replace('-SNAPSHOT', '')\n new_version_r = new_version.replace('-SNAPSHOT', '')\n\n # bump from x.x.x to x.x.x-SNAPSHOT should be only allowed when using the downgrade flag\n if new_version.endswith('-SNAPSHOT') and self.CURRENT_VERSION == new_version.split('-')[0]:\n print(f'[bold red]Cannot downgrade {self.CURRENT_VERSION} to its version SNAPSHOT {new_version}.' 
+\n f'[blue]\\nUse the -d flag if you want to downgrade {self.CURRENT_VERSION} to its SNAPSHOT version.')\n return False\n\n # when the current version and the new version are equal, but one is a -SNAPSHOT version return true\n elif version.parse(current_version_r) == version.parse(new_version_r) and ('-SNAPSHOT' in self.CURRENT_VERSION or '-SNAPSHOT' in new_version):\n return True\n\n # else check if the new version is greater than the current version\n elif version.parse(current_version_r) < version.parse(new_version_r):\n return True\n\n # the new version is not greater than the current one\n print(f'[bold red]The new version {new_version} is not greater than the current version {self.CURRENT_VERSION}.'\n f'\\nThe new version must be greater than the old one.')\n return False\n\n return True", "title": "" }, { "docid": "fdfa2d0bb014ec3f1605a1d2057200b4", "score": "0.6392716", "text": "def get_version(self):", "title": "" }, { "docid": "6b201a32de32fd96e3dda67bae457c20", "score": "0.6388986", "text": "def allow_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_version_upgrade\")", "title": "" }, { "docid": "cff40e080cda221bdb68c8e2795d3ef7", "score": "0.63815033", "text": "def environment_needs_upgrade(self, db):\n version = self.version()\n self.log.debug(\"Version is %s\" % version)\n return version < len(self.upgrade_steps)", "title": "" }, { "docid": "c468b08f5673f81c3dc3009b48c5807a", "score": "0.63737756", "text": "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "title": "" }, { "docid": "7b891a6b4f24ba84867d7560a5617a85", "score": "0.63717574", "text": "def check_nmcli_version(self):\n exit_code, response = self._send_command([\"--version\"])\n \n if exit_code == 0:\n parts = response.split()\n ver = parts[-1]\n compare = self.vercmp(ver, \"0.9.9.0\")\n if compare >= 0:\n return True\n else: \n raise ValueError(ver)\n return False\n else:\n return False", "title": "" }, { "docid": "234b5d3cc81bc86dd9c7177d4be5804b", "score": "0.63581496", "text": "def is_valid_VERSION(self,x):\n if x is None or x == \"None\":\n return False\n x = str(x).strip()\n try:\n x = int(x)\n except ValueError:\n return False\n return True", "title": "" }, { "docid": "dd26f3d6481c82447150b2bd24015673", "score": "0.6356", "text": "def check_latest_version():\n check = True\n\n with timestamp_file() as f:\n timestamp = float(f.read() or 0)\n delta = time.time() - timestamp\n check = delta > 3600\n\n if check:\n try:\n latest_version = get_latest_final_version()\n except requests.exceptions.RequestException as e:\n click.echo(\"Error checking cci version:\", err=True)\n click.echo(str(e), err=True)\n return\n\n result = latest_version > get_installed_version()\n if result:\n click.echo(\n f\"\"\"An update to CumulusCI is available. To install the update, run this command: {get_cci_upgrade_command()}\"\"\",\n err=True,\n )\n\n if sys.version_info < LOWEST_SUPPORTED_VERSION:\n click.echo(\n \"Sorry! Your Python version is not supported. 
Please upgrade to Python 3.9.\",\n err=True,\n )", "title": "" }, { "docid": "28eebe5ee19991e0411013a86d4642f5", "score": "0.6351137", "text": "def is_update_needed(self):\n try:\n latest = IndexHistory.objects.filter(\n index__symbol__iexact='.IUX').order_by('-date')[0]\n except IndexError:\n return True\n return latest.date.year < datetime.date.today().year", "title": "" }, { "docid": "0333eb53bb9055f79824a8852ea98a7c", "score": "0.6346155", "text": "def is_valid_version(data):\n return []", "title": "" }, { "docid": "5188f304c529d9f718cb15bc53d1892c", "score": "0.6338518", "text": "def upgrade_available(name, **kwargs):\n return latest_version(name) != \"\"", "title": "" }, { "docid": "452cd3f68ad73f7b89a8bb3d450ee5d6", "score": "0.6333832", "text": "def is_version_min(self, required_version):\r\n return self.version.split('.') >= required_version.split('.')", "title": "" }, { "docid": "3b0174d5004300cb16eefa29f1b9e944", "score": "0.63260585", "text": "def __check_version(self):\n response = misc.server_status(self)\n\n json_response = response.json()\n self.iserver_version = json_response[\"iServerVersion\"][:9]\n self.web_version = json_response[\"webVersion\"][:9]\n\n iserver_version_ok = version.parse(self.iserver_version) >= version.parse(self.__VRCH)\n web_version_ok = version.parse(self.web_version) >= version.parse(self.__VRCH)\n\n return iserver_version_ok and web_version_ok", "title": "" }, { "docid": "b055d31d5028f15c5f7fa8d8f3d933c5", "score": "0.6315993", "text": "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "title": "" }, { "docid": "01d45990c9d021111d9382de7009f816", "score": "0.63042074", "text": "def get_db_outdated(self):\n self.cursor.execute('PRAGMA user_version;')\n return self.cursor.fetchall()[0][0] != pr.pcap_processor.get_db_version()", "title": "" }, { "docid": "ac3384dc04c80b1ea550c6d8ec545802", "score": "0.63037986", "text": "def test_setup_version_too_old(self):\n nx584_client.Client.return_value.get_version.return_value = \"1.0\"\n self._test_assert_graceful_fail({})", "title": "" }, { "docid": "7dc0257ac128aa1e0ccfc532d7a2f8e1", "score": "0.6294026", "text": "def isOriginalVersion(an_id):", "title": "" }, { "docid": "7dc0257ac128aa1e0ccfc532d7a2f8e1", "score": "0.6294026", "text": "def isOriginalVersion(an_id):", "title": "" }, { "docid": "fd24e01ec145d382236d933fbba00be5", "score": "0.6279365", "text": "def test_version_check(self):\n\n options = self._options\n options.version = 99\n self._run_http_fallback_test(options, 400)", "title": "" }, { "docid": "0cc1765954e0aa29e710a855572bbaac", "score": "0.62534076", "text": "def is_prerelease(self) -> bool: # noqa: D400\n m = re.fullmatch(VERSION_REGEX, self.vtext)\n assert m # the version must be a valid version\n return bool(m[\"pre\"])", "title": "" }, { "docid": "cb9a7ed8642c82f7b49093e0d0b1982f", "score": "0.6251545", "text": "def version_check(self):\n\n self.log.debug(\"version_check()\")\n full_url = App.version_url + \\\n \"?s=\" + str(self.defaults['serial']) + \\\n \"&v=\" + str(self.version) + \\\n \"&os=\" + str(self.os) + \\\n \"&\" + urllib.urlencode(self.defaults[\"stats\"])\n App.log.debug(\"Checking for updates @ %s\" % full_url)\n\n ### Get the data\n try:\n f = urllib.urlopen(full_url)\n except:\n # App.log.warning(\"Failed checking for latest version. Could not connect.\")\n self.log.warning(\"Failed checking for latest version. 
Could not connect.\")\n self.inform.emit(\"[warning] Failed checking for latest version. Could not connect.\")\n return\n\n try:\n data = json.load(f)\n except Exception, e:\n App.log.error(\"Could not parse information about latest version.\")\n self.inform.emit(\"[error] Could not parse information about latest version.\")\n App.log.debug(\"json.load(): %s\" % str(e))\n f.close()\n return\n\n f.close()\n\n ### Latest version?\n if self.version >= data[\"version\"]:\n App.log.debug(\"FlatCAM is up to date!\")\n self.inform.emit(\"[success] FlatCAM is up to date!\")\n return\n\n App.log.debug(\"Newer version available.\")\n self.message.emit(\n \"Newer Version Available\",\n QtCore.QString(\"There is a newer version of FlatCAM \" +\n \"available for download:<br><br>\" +\n \"<B>\" + data[\"name\"] + \"</b><br>\" +\n data[\"message\"].replace(\"\\n\", \"<br>\")),\n \"info\"\n )", "title": "" }, { "docid": "d667deeb96db9cce9022bf5f71b85b7e", "score": "0.62436634", "text": "def e_version(self) -> int:\n ...", "title": "" }, { "docid": "e0448c2aa1ea4e95f8717a906a622bf5", "score": "0.62391645", "text": "def module_version(self, c, e, msg, match):\n if not len(match.group(1)): # Ug.\n return\n mod = match.group(1)\n if not mod in isla.bot.isla.mods:\n self.reply(c,e,\"No module by that name found.\")\n return\n module = isla.bot.isla.mods[mod]\n if '__version__' in dir(module):\n self.reply(c,e,\"{module} {version}\".format(module=mod, version=module.__version__))\n else:\n self.reply(c,e,\"{module} has no version information.\".format(module=mod))", "title": "" }, { "docid": "7d48a13f8c0c1db612d6e97007b72432", "score": "0.62355757", "text": "def is_valid_VERSION(self,x):\n if x is None or x == \"None\":\n return False\n x = str(x).strip()\n try:\n x = int(x)\n except ValueError:\n return False\n return (00000 <= x <= 99999)", "title": "" }, { "docid": "7d48a13f8c0c1db612d6e97007b72432", "score": "0.62355757", "text": "def is_valid_VERSION(self,x):\n if x is None or x == \"None\":\n return False\n x = str(x).strip()\n try:\n x = int(x)\n except ValueError:\n return False\n return (00000 <= x <= 99999)", "title": "" }, { "docid": "095dd2aff2350973c54ffdcce28dd641", "score": "0.62338436", "text": "def _check_version_string(self, jira):\n if not self._filters[\"version\"]:\n return False\n\n field = jira.parent.field_id_map['Fix Version/s']\n for ver in jira.fields[field]:\n found = re.match(r'^((\\d+)(\\.\\d+)*).*$|^(\\w+\\-\\d+)$', ver['name'])\n if not found:\n return True\n return False", "title": "" }, { "docid": "54ede75cd0929729418e76b95feeaf3e", "score": "0.62333727", "text": "def needs_update_or_installation(cls) -> bool:\n return False", "title": "" }, { "docid": "c710e259d4697f89c6e7d7ff67ba177d", "score": "0.6228911", "text": "def _check_build_version(self, version: str = __version__, build_version: str = None) -> None:\r\n\r\n # Override the build version for testing.\r\n if build_version is not None:\r\n self._tdw_version = build_version\r\n print(f\"Build version {self._tdw_version}\\nUnity Engine {self._unity_version}\\n\"\r\n f\"Python tdw module version {version}\")", "title": "" }, { "docid": "16f6c16e5a937d1c7edcc1c3a27d8ad5", "score": "0.6228408", "text": "def check_updates():\n url = 'http://joaomatosf.com/rnp/releases.txt'\n print(BLUE + \" * Checking for updates in: %s **\" % url + ENDC)\n header = {\"User-Agent\": \"Checking for updates\"}\n r = pool.request('GET', url, redirect=False, headers=header)\n\n if r.status != 200:\n print(RED + \" * Error: could not check for 
updates ...\" + ENDC)\n return False\n else:\n current_version = __version\n link = 'https://github.com/joaomatosf/jexboss/archive/master.zip'\n date_last_version = ''\n notes = []\n # search for new versions\n resp = str(r.data).replace('\\\\n','\\n')\n for line in resp.split('\\n'):\n if \"#\" in line:\n continue\n if 'last_version' in line:\n last_version = line.split()[1]\n elif 'date:' in line:\n date_last_version = line.split()[1]\n elif 'link:' in line:\n link = line\n elif '* ' in line:\n notes.append(line)\n elif 'version:' in line and 'last_' not in line:\n break\n # compare last_version with current version\n tup = lambda x: [int(y) for y in (x + '.0.0.0').split('.')][:3]\n if tup(last_version) > tup(current_version):\n print (\n GREEN + BOLD + \"\\n * NEW VERSION AVAILABLE: JexBoss v%s (%s)\\n\" % (last_version, date_last_version) + ENDC +\n GREEN + \" * Link: %s\\n\" % link +\n GREEN + \" * Release notes:\")\n for note in notes:\n print (\" %s\" % note)\n return True\n else:\n return False", "title": "" }, { "docid": "10652fc7de144ddfaef804e0d1e24f9e", "score": "0.6223557", "text": "def _exceeds_version(self, major, minor, v_major, v_minor):\r\n return (major > v_major or (major == v_major and minor > v_minor))", "title": "" }, { "docid": "f4338a8b2fd7b0cab2469411926bb8e1", "score": "0.62192154", "text": "def available_version(self):\r\n return self._data.get(\"version\")", "title": "" }, { "docid": "2f008bb7d60a4c360d9e0012377f1249", "score": "0.62178296", "text": "def check_version_increment(old_version, new_version):\n old_version_tuple = _get_version_tuple(old_version)\n new_version_tuple = _get_version_tuple(new_version)\n\n if new_version_tuple is None:\n print_warning(\"The format of version '%s' is not valid. It should be\"\n \" in the form vX.Y.Z or vX.Y.Z-ABCD\" % new_version)\n return None\n\n old_major, old_minor, old_patch, old_name = old_version_tuple\n new_major, new_minor, new_patch, new_name = new_version_tuple\n\n if (new_major == old_major + 1 and\n new_minor == 0 and\n new_patch == 0):\n return \"Major version increment\"\n\n if (new_major == old_major and\n new_minor == old_minor + 1 and\n new_patch == 0):\n return \"Minor version increment\"\n\n if (new_major == old_major and\n new_minor == old_minor and\n new_patch == old_patch + 1):\n return \"Patch update\"\n\n if (new_major == old_major and\n new_minor == old_minor and\n new_patch == old_patch and\n new_name != old_name):\n return \"Development update\"\n\n print_warning(\"The version increment is not valid. 
Expecting a single \"\n \"increment of major, minor or patch.\")\n return None", "title": "" }, { "docid": "c035b95131d2e4746b13df6938265ad4", "score": "0.6210707", "text": "def check_update_available(self):\n # Don't perform any check for development versions\n if 'dev' in self.version:\n return (False, self.latest_release)\n\n # Filter releases\n if is_stable_version(self.version):\n releases = [r for r in self.releases if is_stable_version(r)]\n else:\n releases = [r for r in self.releases\n if not is_stable_version(r) or r in self.version]\n\n latest_release = releases[-1]\n\n return (check_version(self.version, latest_release, '<'),\n latest_release)", "title": "" }, { "docid": "e0dd865888a5cf4e3050f709967bd1d4", "score": "0.62005496", "text": "def version(self, now):\n self.last_version = now\n\n version_key = f'version:{self.node[0]}-{self.node[1]}'\n version_data = self.redis_conn.get(version_key)\n\n if version_data is None:\n return True\n\n version, user_agent, services = eval(version_data)\n if all([version, user_agent, services]):\n data = self.node + (\n version,\n user_agent,\n self.start_time,\n services)\n\n if self.data != data:\n self.redis_conn.srem('opendata', str(self.data))\n self.redis_conn.sadd('opendata', str(data))\n self.data = data\n\n return True", "title": "" }, { "docid": "530eb6aa532962fef67c71f232a27cd9", "score": "0.61976093", "text": "def test_get_version(self):\n pass", "title": "" }, { "docid": "530eb6aa532962fef67c71f232a27cd9", "score": "0.61976093", "text": "def test_get_version(self):\n pass", "title": "" }, { "docid": "587e096508eb9b48e6057da34f776f84", "score": "0.6186247", "text": "def is_usable_version(confbytes: bytes) -> bool:\n version = decode_response_version_from_config(confbytes)\n v_parts = version.split('.')\n ver = list(map(int, v_parts))\n if ver[0] != 1 or ver[1] < 2 or ver[2] < 4:\n return False\n return True", "title": "" }, { "docid": "81915723a7830a94fc6f81cbbd2f260b", "score": "0.61855143", "text": "def checkVersion(data):\n\n # Determine URL\n if data['release']:\n url_pattern = \"http://packages.osrfoundation.org/gazebo/$os_name-$release/dists/$os_code_name/main/binary-$arch/Packages\"\n else:\n url_pattern = \"http://packages.osrfoundation.org/gazebo/$os_name/dists/$os_code_name/main/binary-$arch/Packages\"\n urlTemplate = string.Template(url_pattern)\n url = urlTemplate.substitute(data)\n\n # Download package index\n req = urllib.request.Request(url)\n with urllib.request.urlopen(req) as response:\n package_index = response.read().decode('utf-8')\n\n # Determine seach pattern\n patternTemplate = string.Template(r'(\\bPackage: gazebo$version\\n)(.*\\n)')\n pattern_raw = patternTemplate.substitute(data)\n pattern = re.compile(pattern_raw)\n\n # Parse for version_number\n matchs = re.search(pattern, package_index)\n version_line = matchs.groups(0)[1] # Grab the second line of the first match\n version_number = re.search(r'\\d(?!Version\\:\\s)(.+)(?=(~\\w+\\n))', version_line).group(0) # extract version_number\n\n # Update the version_number\n data['package_version'] = version_number", "title": "" }, { "docid": "1f780f3856acb08d5ae3964d7bb86d12", "score": "0.6176475", "text": "def replace_version(self, other, logger):\n\n if other.library_name != self.library_name:\n logger.debug(\n 'not replaceable: {} != {} ()'\n .format(other.library_name, self.library_name, other.filename)\n )\n return False\n elif int(other.major_version) != int(self.major_version):\n logger.debug(\n 'not replaceable: {} != {} ({})'\n .format(\n 
int(self.major_version),\n int(other.major_version),\n other.filename,\n )\n )\n return False\n elif float(other.minor_version) > float(self.minor_version):\n logger.debug(\n 'not replaceable: {} > {} ({})'\n .format(\n other.minor_version,\n self.minor_version,\n other.filename,\n )\n )\n return False\n elif float(other.minor_version) == float(self.minor_version):\n if other.is_dev and self.is_dev:\n if int(other.dev_version) >= int(self.dev_version):\n # do not replace 1.0.0.dev1 with 1.0.0.dev0 or 1.0.0.dev1\n return False\n else:\n return True\n elif other.is_dev and not self.is_dev:\n # replace 1.0.0.dev1 with 1.0.0\n return True\n elif not other.is_dev and self.is_dev:\n # do not replace 1.0.0 with 1.0.0.dev1\n return False\n else: # both are not dev\n # do not replace 1.0.0 with 1.0.0\n return False\n else:\n return True", "title": "" }, { "docid": "8b8253c6b4d2055f532cf0c6ad4864ef", "score": "0.6170752", "text": "def get_version_counter_recoverable():", "title": "" }, { "docid": "5308a939d9122fb5e96378bc55ecba10", "score": "0.6158673", "text": "def version(self):", "title": "" }, { "docid": "5308a939d9122fb5e96378bc55ecba10", "score": "0.6158673", "text": "def version(self):", "title": "" }, { "docid": "5308a939d9122fb5e96378bc55ecba10", "score": "0.6158673", "text": "def version(self):", "title": "" }, { "docid": "65d2dc8d149c395ef4d00c51193c050b", "score": "0.6130099", "text": "def _CheckFormatVersion(self, line):\n return line.endswith(' 10.00')", "title": "" }, { "docid": "30a5da3b9ab46716cb16bc5fd1582feb", "score": "0.6125782", "text": "def _CheckFormatVersion(self, line):\n return line.endswith(' 11.00')", "title": "" }, { "docid": "163b19dae68f38d0d0a5fdb7a4b590db", "score": "0.6115315", "text": "def check_version():\n # log player versio\n logger.info(\"Dakara feeder %s (%s)\", __version__, __date__)\n\n # check version is a release\n version = parse_version(__version__)\n if version.is_prerelease:\n logger.warning(\"You are running a dev version, use it at your own risks!\")", "title": "" }, { "docid": "5e44889225aebb8f037b71fa4951a1e5", "score": "0.6109636", "text": "def version_meet_req(version, minimum_version):\n checking = packaging.version.parse(version)\n minimum = packaging.version.parse(minimum_version)\n return checking >= minimum", "title": "" }, { "docid": "a8078ee9d01d3a6e6074db6cc046d0a1", "score": "0.6105087", "text": "def check_version(version: str):\n code_version = parse_version(__version__)\n given_version = parse_version(version)\n if given_version[0] == '3' and given_version[1] == '0':\n logger.info(f\"Code version: {__version__}\")\n logger.warning(f\"Given release version ({version}) does not match code version ({__version__}). 
\"\n f\"Models with version {version} should be compatible though.\")\n return\n check_condition(code_version[0] == given_version[0],\n \"Given release version (%s) does not match release code version (%s)\" % (version, __version__))\n check_condition(code_version[1] == given_version[1],\n \"Given major version (%s) does not match major code version (%s)\" % (version, __version__))", "title": "" }, { "docid": "bd6e39ba4dc2a781ec677310b364a1d7", "score": "0.6103702", "text": "async def check_node_version(self):\n protocol_version = self.connection_status.meta.get(\n \"protocol_version\", None\n )\n\n if protocol_version:\n return True\n\n result = await self.do_json_post(\n self.rpc_url,\n params={\n \"action\": \"version\"\n }\n )\n\n protocol_version = int(result[\"protocol_version\"])\n\n # Cache the version so that later calls don't cause RPC requests\n self.connection_status.meta[\"protocol_version\"] = protocol_version\n\n if protocol_version < self.REQUIRED_PROTOCOL_VERSION:\n raise UnsupportedProtocolVersion(\n required_version=self.REQUIRED_PROTOCOL_VERSION,\n current_version=protocol_version\n )", "title": "" }, { "docid": "084c83d8a1a9798b21996bc40cb26831", "score": "0.6103308", "text": "def _is_known(version):\n return version in iati.version.STANDARD_VERSIONS", "title": "" }, { "docid": "c8b0d9bab92ac93bb1a382002e7288e0", "score": "0.61032885", "text": "def check_sw_version(self):\n namespace_ = {'sw': 'http://siklu.com/yang/tg/system'}\n xpath_ = 'sw:state/sw:banks-info/sw:banks/sw:software-version/text()'\n\n software_info = self.connection.get_command('<filter xmlns:tu=\"http://siklu.com/yang/tg/system\" '\n 'select=\"/tu:system/tu:state/tu:banks-info/tu:banks\" '\n 'type=\"xpath\"/>')\n software_versions = software_info.data_ele.getchildren()[0]\n\n versions = software_versions.xpath(xpath_, namespaces=namespace_)\n\n xpath_running = 'sw:state/sw:banks-info/sw:banks/sw:status/text()'\n running = software_versions.xpath(xpath_running, namespaces=namespace_)\n\n running_software = zip(versions, running)\n\n for version, running_ in running_software:\n version = version.replace('\"', '')\n if version > '1.0.1-1699-0240b7b6' and running_ == 'active':\n return True\n elif version > '1.0.1-1699-0240b7b6':\n self.tu_logs.emit('Version 1.0.2 detected but not active. 
Please activate in cli using:\\n'\n 'software activate scheduling immediate')\n return False\n else:\n self.tu_logs.emit('Please upgrade to at least version 1.0.2')\n return False", "title": "" }, { "docid": "8b4ae79f78e66dc1714d713ba9d64477", "score": "0.6093249", "text": "def test_version_number_match_with_changelog():\n changelog = open(os.path.join(_REPO_DIR, 'CHANGELOG.md')).read()\n version_in_changelog = (\n re.search(r'##\\s+(\\d+\\.\\d+\\.\\d+)', changelog).groups()[0])\n assert civis.__version__ == version_in_changelog, (\n 'Make sure both __version__ and CHANGELOG are updated to match the '\n 'latest version number')", "title": "" }, { "docid": "3eeca728c8f7674cebffc4dbf7a316c3", "score": "0.6091208", "text": "def validate_version():\n import camacq # pylint: disable=import-outside-toplevel\n\n version_string = camacq.__version__\n versions = version_string.split(\".\", 3)\n try:\n for ver in versions:\n int(ver)\n except ValueError:\n print(\n \"Only integers are allowed in release version, \"\n f\"please adjust current version {version_string}\"\n )\n return None\n return version_string", "title": "" }, { "docid": "1587e3e8786060400aaf0a3ec23b17eb", "score": "0.6086964", "text": "def versioning(self):\n\n if not hasattr(self, \"_versioning\"):\n self._versioning = self.__opts.get(\"versioning\")\n if not self._versioning:\n if self.id:\n query = Query(\"doc_type\", \"versioning\")\n query.where(query.Condition(\"id\", self.id))\n rows = query.execute(self.cursor).fetchall()\n self._versioning = rows[0].versioning if rows else \"Y\"\n else:\n self._versioning = \"Y\"\n assert self._versioning in \"YN\", \"invalid doctype versioning value\"\n return self._versioning", "title": "" }, { "docid": "f6c75634206bb1b3fd7e88ba35bc9ba8", "score": "0.60670376", "text": "def upgrade_available(name):\n version_num = None\n cmd = \"/opt/csw/bin/pkgutil -c --parse --single {}\".format(name)\n out = __salt__[\"cmd.run_stdout\"](cmd)\n if out:\n version_num = out.split()[2].strip()\n if version_num:\n if version_num == \"SAME\":\n return \"\"\n else:\n return version_num\n return \"\"", "title": "" }, { "docid": "325d9c23a6c7d6df57ccd825f320e169", "score": "0.6064693", "text": "def version(self, version):\n pass", "title": "" }, { "docid": "6d1919088ccf89edd1bf7b53740acaba", "score": "0.6052918", "text": "def has_recent_glibc() -> bool:\n try:\n out = subprocess.check_output([\"ldd\", \"--version\"]).decode(\"ascii\")\n version_str = re.search(\" (\\d\\.\\d+)\\n\", out).group(1)\n major, minor = version_str.split(\".\")\n except (OSError, AttributeError):\n pass\n else:\n if int(major) == 2 and int(minor) >= 17:\n return True\n\n return False", "title": "" }, { "docid": "34ea6de306ec2071449ba5f2692841bb", "score": "0.60501397", "text": "def test_version(self) -> None:\n regex = re.compile(r\"\\d+\\.\\d+(\\.\\d+)?\")\n self.assertTrue(regex.match(dcor.__version__))\n self.assertNotEqual(dcor.__version__, \"0.0\")", "title": "" }, { "docid": "01bd60b4c71199689101ecc6c3574c5a", "score": "0.60497224", "text": "def version_status(self):\n return self._version_status", "title": "" }, { "docid": "e41f121314c24f02701d067a85fb0b92", "score": "0.6046596", "text": "def is_final_release(version: str) -> bool:\n return bool(FINAL_VERSION_RE.match(version))", "title": "" } ]
c09532df2a7eb5de34c61599cbdfff3a
The ARN of the bucket.
[ { "docid": "0833f505a89d3e8f24c57e43b09f9b4b", "score": "0.85899574", "text": "def bucket_arn(self) -> str:\n return jsii.get(self, \"bucketArn\")", "title": "" } ]
[ { "docid": "cd16b59e48c62bc36c6a993f3486202c", "score": "0.8556041", "text": "def bucket_arn(self) -> str:\n ...", "title": "" }, { "docid": "1f02024d39e89f53e5c0393567499f43", "score": "0.8067578", "text": "def bucket_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_arn\")", "title": "" }, { "docid": "1f02024d39e89f53e5c0393567499f43", "score": "0.8067578", "text": "def bucket_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_arn\")", "title": "" }, { "docid": "c23b78ab9a1dc481f2854412dc40b3bd", "score": "0.7861779", "text": "def bucket_arn(self) -> typing.Optional[str]:\n return self._values.get('bucket_arn')", "title": "" }, { "docid": "39f0985c5a6fb74849ea8ca7353ffbbb", "score": "0.6985468", "text": "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "39f0985c5a6fb74849ea8ca7353ffbbb", "score": "0.6985468", "text": "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "39f0985c5a6fb74849ea8ca7353ffbbb", "score": "0.6985468", "text": "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "39f0985c5a6fb74849ea8ca7353ffbbb", "score": "0.6985468", "text": "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "39f0985c5a6fb74849ea8ca7353ffbbb", "score": "0.6985468", "text": "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "610fa01540cf9817dff4277a5da7db6e", "score": "0.68515044", "text": "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "fb8f62cc10478e17f239e2292cbcb2ff", "score": "0.6779947", "text": "def s3_bucket_name(self):\n return self._get_param(\"ResourcesS3Bucket\")", "title": "" }, { "docid": "6fde1709a8581da24cb595e8d9586d7f", "score": "0.67742294", "text": "def arn(self) -> str:\n return self._values.get('arn')", "title": "" }, { "docid": "6fde1709a8581da24cb595e8d9586d7f", "score": "0.67742294", "text": "def arn(self) -> str:\n return self._values.get('arn')", "title": "" }, { "docid": "ee0f01fc889019281dfa0d51d7b3712d", "score": "0.67332757", "text": "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ee0f01fc889019281dfa0d51d7b3712d", "score": "0.67332757", "text": "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ee0f01fc889019281dfa0d51d7b3712d", "score": "0.67332757", "text": "def arn(self) -> Optional[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "854c5101a726b2f52e51e4cab8c0acb0", "score": "0.67066497", "text": "def bucket_name(self) -> str:\n ...", "title": "" }, { "docid": "2be1a6d31733bb861f483f0cf491d086", "score": "0.6564975", "text": "def bucket_name(self) -> str:\n return jsii.get(self, \"bucketName\")", "title": "" }, { "docid": "2be1a6d31733bb861f483f0cf491d086", "score": "0.6564975", "text": "def bucket_name(self) -> str:\n return jsii.get(self, \"bucketName\")", "title": "" }, { "docid": 
"7b8900ba1ac980a2af44d8d3d733cb14", "score": "0.6561011", "text": "def bucket_name(self) -> str:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": 
"ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "ebea490968e10316c089364634e9821f", "score": "0.65415645", "text": "def arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "0f6885dc67b00d39a7a2a8a8035836f3", "score": "0.65278906", "text": "def bucket(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "0f6885dc67b00d39a7a2a8a8035836f3", "score": "0.65278906", "text": "def bucket(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "ffb349be189a89c523d8066d0c1be56b", "score": "0.64859205", "text": "def arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"arn\")", "title": "" }, { "docid": "decb006fa6f9db2943ef1284ca3d8da2", "score": "0.647265", "text": "def bucket_name(self) -> str:\n return self._values.get('bucket_name')", "title": "" }, { "docid": "15d18ffd79ec58bb5e2d2fcdcb35a594", "score": "0.6394353", "text": "def s3_bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"s3_bucket\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "6ad79040d7f6d3389bd079c73a901ddf", "score": "0.62917197", "text": "def bucket_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "61cd46590113596ddfae3f6bc06686ca", "score": "0.62346625", "text": "def bucket_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "61cd46590113596ddfae3f6bc06686ca", "score": 
"0.62346625", "text": "def bucket_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "61cd46590113596ddfae3f6bc06686ca", "score": "0.62346625", "text": "def bucket_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "61cd46590113596ddfae3f6bc06686ca", "score": "0.62346625", "text": "def bucket_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "61cd46590113596ddfae3f6bc06686ca", "score": "0.62346625", "text": "def bucket_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_name\")", "title": "" }, { "docid": "7927ab6d1343bdcc94343f477724b6b3", "score": "0.6232426", "text": "def bucket_name(self) -> typing.Optional[str]:\n return self._values.get('bucket_name')", "title": "" }, { "docid": "7927ab6d1343bdcc94343f477724b6b3", "score": "0.6232426", "text": "def bucket_name(self) -> typing.Optional[str]:\n return self._values.get('bucket_name')", "title": "" }, { "docid": "172e06dc5a517eecb40c54225ee0ddfe", "score": "0.61402607", "text": "def account_role_arn(self, role, partition='aws'):\n if not role or role.startswith(\"arn:aws\"):\n return role\n if not role.startswith(\"role/\"):\n role = \"role/\" + role\n return \"arn:{0}:iam::{1}:{2}\".format(partition, self.account_id, role)", "title": "" }, { "docid": "7a2568ba84cd22dd82b537b3834c8132", "score": "0.6125123", "text": "def get_bucket_acl(Bucket=None):\n pass", "title": "" }, { "docid": "049eeaf7616ad89a6dbd84cc1fc14b16", "score": "0.61024684", "text": "def resource_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_arn\")", "title": "" }, { "docid": "a0f1d4c4db411c752446bfdce5e197f3", "score": "0.60895294", "text": "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "title": "" }, { "docid": "f065718e60c8a0efd26fd81ff89deac3", "score": "0.60258895", "text": "def bucket(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "f065718e60c8a0efd26fd81ff89deac3", "score": "0.60258895", "text": "def bucket(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "32bcdb92495502b2da0402bf53e972ae", "score": "0.60143524", "text": "def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "32bcdb92495502b2da0402bf53e972ae", "score": "0.60143524", "text": "def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "32bcdb92495502b2da0402bf53e972ae", "score": "0.60143524", "text": "def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "32bcdb92495502b2da0402bf53e972ae", "score": "0.60143524", "text": "def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")", "title": "" }, { "docid": "32bcdb92495502b2da0402bf53e972ae", "score": "0.60143524", "text": "def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")", "title": "" } ]
4382f219b527dab17a0eac5e599326ef
get the url link
[ { "docid": "921636792c7324ff28a8f31af78eba2f", "score": "0.0", "text": "async def message_url(self, message_id: str) -> UrlLinkPayload:", "title": "" } ]
[ { "docid": "411716a5d825a4ca3911c7c270c84f0c", "score": "0.8282871", "text": "def get_link (self):\n if self.relative_url != '':\n return self.relative_url\n else:\n return self.url", "title": "" }, { "docid": "97b221b6714784f61d6886850fdb85dc", "score": "0.8211174", "text": "def url_link(self) -> str:\n return self._url_link", "title": "" }, { "docid": "abbac5c06fa844a83bff30a7d22b5d51", "score": "0.78782916", "text": "def getUrl():", "title": "" }, { "docid": "b7d2bbc0bbebbb6adfe616fed5299d15", "score": "0.7833394", "text": "def geturl(self, ):\n\t\tpass", "title": "" }, { "docid": "296b0e1ae7908a871649ba45072f6b5c", "score": "0.7622855", "text": "def geturl(self):\n return self.url", "title": "" }, { "docid": "cb3f72d1e87f4d0c335839efd5a561d1", "score": "0.7615803", "text": "def get_link_to(self):\n return self.get(\"link_to\")", "title": "" }, { "docid": "07881524ead321fd3f27b3b74354a2fb", "score": "0.75388443", "text": "def get_url(self):\n return self.url", "title": "" }, { "docid": "07881524ead321fd3f27b3b74354a2fb", "score": "0.75388443", "text": "def get_url(self):\n return self.url", "title": "" }, { "docid": "07881524ead321fd3f27b3b74354a2fb", "score": "0.75388443", "text": "def get_url(self):\n return self.url", "title": "" }, { "docid": "2c0a6688b0235c7823f7b5d802aa482b", "score": "0.75372213", "text": "def getLink(self):\n return self._getLink()", "title": "" }, { "docid": "3a14076594a8f278bf1b026e2d4e928a", "score": "0.74549115", "text": "def url(self) -> str:", "title": "" }, { "docid": "52ecccc98a6f3329114ec11e858c88ed", "score": "0.7413172", "text": "def getUrl():\n return URL", "title": "" }, { "docid": "d49a6728b768ce72901b40dbf7a6d4f2", "score": "0.740025", "text": "def url(self):\n if not 'self' in self.links:\n return None\n\n self_link = self.links['self']\n\n if isinstance(self_link, list):\n for link in self_link:\n return link.url()\n\n return self_link.url()", "title": "" }, { "docid": "f0b0c645f3b5e73b9cd9f3a749222e7f", "score": "0.7373527", "text": "def getUrl(self):\n\t\treturn self.url", "title": "" }, { "docid": "34b2284b84f259f6ec3f8f29c5a8bb4b", "score": "0.73657715", "text": "def url(self):\n return self.getattr('url')", "title": "" }, { "docid": "34b2284b84f259f6ec3f8f29c5a8bb4b", "score": "0.73657715", "text": "def url(self):\n return self.getattr('url')", "title": "" }, { "docid": "83e938fe79194a02da6d30922d2237e0", "score": "0.7310797", "text": "def url(self):\n return self.urlparts.geturl()", "title": "" }, { "docid": "35a0a056e90e31a217d59281c4d89f29", "score": "0.7277632", "text": "def get_url(self):\n return self.__url", "title": "" }, { "docid": "35a0a056e90e31a217d59281c4d89f29", "score": "0.7277632", "text": "def get_url(self):\n return self.__url", "title": "" }, { "docid": "35a0a056e90e31a217d59281c4d89f29", "score": "0.7277632", "text": "def get_url(self):\n return self.__url", "title": "" }, { "docid": "a6bfc392c33611f86549dcf937d20c8a", "score": "0.7247263", "text": "def url():", "title": "" }, { "docid": "a6bfc392c33611f86549dcf937d20c8a", "score": "0.7247263", "text": "def url():", "title": "" }, { "docid": "2472163b2faf58570fcbf2d184b168eb", "score": "0.72461814", "text": "def get_url(self):\n return 'https://' + self.api_host + self.HREF", "title": "" }, { "docid": "05410e85e69aed3da586b31aaa2caadf", "score": "0.72266996", "text": "def link(self):\n return self._link", "title": "" }, { "docid": "05410e85e69aed3da586b31aaa2caadf", "score": "0.72266996", "text": "def link(self):\n return self._link", "title": "" }, { "docid": 
"78a742ed5c6fdcb0bdefb26a7ad926f3", "score": "0.7219878", "text": "def url(self):\r\n return self._url", "title": "" }, { "docid": "d81ff220eb737b8033dea10115cea03f", "score": "0.72180957", "text": "def get_url(self):\n return self.obj.get_absolute_url()", "title": "" }, { "docid": "52e4bb850f2d46027fa3daa5a1ac047a", "score": "0.72178674", "text": "def get_href(self):\n return self._href", "title": "" }, { "docid": "9d6e48827dfb6ed9ac660da9d3108203", "score": "0.71994", "text": "def get_url(self):\n return self._url", "title": "" }, { "docid": "17213de4c798df70db63322862e7f516", "score": "0.7187638", "text": "def geturl(self):\n return self.unsplit()", "title": "" }, { "docid": "a1d91428c7599bde5818329eb8b4f440", "score": "0.7180146", "text": "def getURL(self):\n return self._url", "title": "" }, { "docid": "807fc3b31d05a07455a7f7212bd1924e", "score": "0.7163618", "text": "def getUrl(self):\n pass", "title": "" }, { "docid": "ef1f61bc783d2b23f1ded8044b393cfa", "score": "0.7157163", "text": "def href(self):\n return self._href", "title": "" }, { "docid": "ef1f61bc783d2b23f1ded8044b393cfa", "score": "0.7157163", "text": "def href(self):\n return self._href", "title": "" }, { "docid": "ef1f61bc783d2b23f1ded8044b393cfa", "score": "0.7157163", "text": "def href(self):\n return self._href", "title": "" }, { "docid": "c42ae67d259ebc372befeb4de395f530", "score": "0.7115357", "text": "def get_link(self, parent, base_url):\n pass", "title": "" }, { "docid": "7a0a22d819650d5ab1030cec91e98153", "score": "0.70792526", "text": "def get_url(self, news_type):", "title": "" }, { "docid": "f0c727cbcd566517e58f7b91d776d08b", "score": "0.70661455", "text": "def get_main_url(self):", "title": "" }, { "docid": "c1d2dbbaca704ef2d6eb9b3f807a24b9", "score": "0.7064892", "text": "def get_url(self):\n\n return self.__url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": 
"3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7058147", "text": "def url(self):\n return self._url", "title": "" }, { "docid": "50cf4efb052f7fa3738a65c13e76927a", "score": "0.704991", "text": "def URL( self ):\r\n return self.obj.URL", "title": "" }, { "docid": "a48072ced93e968334f66311de42c056", "score": "0.7032368", "text": "def mpd_url(self):\n return self.sorted_by_priority(https=True)[0][\"href\"]", "title": "" }, { "docid": "d696196a7708910479c2021dbb58e6c9", "score": "0.7017913", "text": "def get_advaned_link(self):\n pass", "title": "" }, { "docid": "fcce9ec7e1839a6f9ce52843365e1d61", "score": "0.7014895", "text": "def url(self):\n return self.response.url", "title": "" }, { "docid": "e757da7671a69bd273872804e9c16dfa", "score": "0.7007661", "text": "def _get_href_link(self, request, identifier, collection_name):\n prefix = self._update_masakari_link_prefix(request.application_url)\n return url_join(prefix,\n self._get_project_id(request),\n collection_name,\n str(identifier))", "title": "" }, { "docid": "43084fddb650b218f491559dcb672cff", "score": "0.69860256", "text": "def item_link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.url", "title": "" }, { "docid": "d2ffd01c84909acb00dca51e1935ea0a", "score": "0.6980353", "text": "def get_url_from_resp(resp):\n return resp.json()['link']", "title": "" }, { "docid": "094f63531c40929670fa62da14281625", "score": "0.6980318", "text": "def get_url(args, config):\n return get_param_value('url', args, config)", "title": "" }, { "docid": "961b06099eb8b9b4a1b2350758d73bed", "score": "0.697802", "text": "def getURL(relative=0):", "title": "" }, { "docid": "74a9b8031bb587202cc1bce3b0dc631c", "score": "0.6977312", "text": "def href(self):\n return self.entry.self_link", "title": "" }, { "docid": "209154631eb765bd5e7de85a38658d60", "score": "0.6972179", "text": "def link(url):\r\n m = textedit_match(url)\r\n if m:\r\n return readurl(m)", "title": "" }, { "docid": "e29f2680132deb470b3536132d240125", "score": "0.69714314", "text": "def url(self):\n return self.__url", "title": "" }, { "docid": "767efa1e4a35fb19399b814851c45b11", "score": "0.69701445", "text": "def _get_link(li):\n try:\n a = li.find(\"a\")\n link = a[\"href\"]\n except:\n return None\n\n if link.startswith(\"/url?\"):\n m = match('/url\\?(url|q)=(.+?)&', link)\n if m and len(m.groups()) == 2:\n return urllib2.unquote(m.group(2))\n\n return link", "title": "" }, { "docid": "e096a71cb0064c08bb75f52075befc01", "score": "0.6963821", "text": "def getUrl(self):\n \n raise NotImplementedError", "title": "" }, { "docid": "8c1c6089f9a93655d9dc1d530b1a24b1", "score": "0.6948995", "text": "def link(self) -> str:\n return self._link #Return the hidden link attribute", "title": "" }, { "docid": "93fa00015a1395122d5ca31e31981499", "score": "0.6944073", "text": "def _abs_url_from_link(self, link_tag: Tag) -> str:\n return self._abs_url_from_relative(link_tag.get(\"href\"))", "title": "" }, { "docid": "02682596d041b35453642dfa4f909486", "score": "0.6928831", "text": "def url(self):\r\n if self.__url is None:\r\n self._fillInfo()\r\n return self.__url", "title": "" }, { "docid": "81ddfa8d6096948b4903154c48103cf0", "score": "0.692703", "text": "def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()", "title": "" }, { "docid": "5e24dc8247e554779d8866f6e0cf4fff", "score": "0.692327", "text": "def link(self) -> str:\n if 
self.url:\n return self.url\n return urljoin(settings.VIDEO_URL, self.filename)", "title": "" }, { "docid": "ce1c5d7c45da331a8dbc5fa11bcc9f94", "score": "0.69127804", "text": "def link(self,name):\n return self._canonical[\"links\"][name]", "title": "" }, { "docid": "7c7870f0ee66172972efb29deff29aa3", "score": "0.6908027", "text": "def url(self):", "title": "" }, { "docid": "7c7870f0ee66172972efb29deff29aa3", "score": "0.6908027", "text": "def url(self):", "title": "" }, { "docid": "905f1907e664c2c4409bd4d57cc2a603", "score": "0.68970317", "text": "def href(self,name):\n return self._data[name]['href']", "title": "" }, { "docid": "66be05c8fcd183f3809019ed41aedb6e", "score": "0.6895595", "text": "def url(self):\n if self._url is False:\n return None\n\n return self._url", "title": "" }, { "docid": "107cfa0af237d4771b898ef18369e8e4", "score": "0.68849254", "text": "def get_href(self, ):\n return self.attrs.get(self.AttributeNames.HREF, None)", "title": "" }, { "docid": "f480b09634ac05d6ad7dc868bd08849c", "score": "0.6872348", "text": "def _parse_link(self, doc):\n if 'link' in doc.keys():\n return doc['link']\n elif self._parse_doi(doc):\n return 'https://doi.org/' + self._parse_doi(doc)\n return", "title": "" }, { "docid": "079cf9a56224a881f3d0c385ba7fe891", "score": "0.6865341", "text": "def _get_url(self):\n return '{base}{url}'.format(base=self.base, url=self.url).replace('//', '/')", "title": "" }, { "docid": "c5eb604fd53b07dadc3ab39e97769b8b", "score": "0.68636894", "text": "def get_url(self):\n return self.options['url']", "title": "" }, { "docid": "54902d22539f2783d0de75492c18344c", "score": "0.6841278", "text": "def parse_url(self, text):\n anchor = text.find('a')\n if anchor:\n return anchor['href']\n else:\n return \"\"", "title": "" }, { "docid": "43cec9ec82114cb6a3e2e84fd36be570", "score": "0.6835174", "text": "def getUrl(self): #$NON-NLS-1$\r\n pass", "title": "" }, { "docid": "ce10ef9aea9aad83b3418c710018dd79", "score": "0.68320286", "text": "def link(self):\n return self.container['link']", "title": "" }, { "docid": "d16b22091b097753f9b6027411f700a8", "score": "0.6824143", "text": "def url(self):\n return self._review_dict[\"url\"]", "title": "" }, { "docid": "581f7fd500108f617c785f9e21671d72", "score": "0.68102103", "text": "def url(self):\n\n return self._url", "title": "" }, { "docid": "7cd367a7b2c0f2c2245c902e4f4a3cd5", "score": "0.67992896", "text": "def url(self):\n return self.data.url", "title": "" }, { "docid": "539d83db94878169161eeba7ad9c69a9", "score": "0.679155", "text": "def url(self) -> str:\n return self._url", "title": "" }, { "docid": "539d83db94878169161eeba7ad9c69a9", "score": "0.679155", "text": "def url(self) -> str:\n return self._url", "title": "" }, { "docid": "539d83db94878169161eeba7ad9c69a9", "score": "0.679155", "text": "def url(self) -> str:\n return self._url", "title": "" }, { "docid": "eabf59c8247de39b4a95debf12e5a71b", "score": "0.6778748", "text": "def get_url(self, *args, **kwargs) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "2208cf4080927b8ad48fa14018ef545e", "score": "0.67783356", "text": "def url(self):\n return self.full()", "title": "" }, { "docid": "2208cf4080927b8ad48fa14018ef545e", "score": "0.67783356", "text": "def url(self):\n return self.full()", "title": "" }, { "docid": "0460b4132453a7390452e7eb09754c15", "score": "0.67741346", "text": "def url(self):\r\n if self.page:\r\n return reverse(self.page.name)\r\n elif self.children:\r\n return self.children[0].url\r\n else:\r\n return None", "title": "" }, { 
"docid": "dd1af9fb104b1b7384367384f264d891", "score": "0.6765452", "text": "def get_feed_url(self):\n return self.resolve_uri(self.href)", "title": "" }, { "docid": "277b0834b5e560334e7aa8074353eb8c", "score": "0.6752162", "text": "def url(self):\n return \"\"", "title": "" }, { "docid": "9ddf6373ba68016c973a753be66eb0d7", "score": "0.67351705", "text": "def _get_google_link(li):\n try:\n a = li.find(\"a\")\n link = a[\"href\"]\n except:\n return None\n\n if link.startswith(\"/url?\") or link.startswith(\"/search?\"):\n return urljoin(\"http://www.google.com\", link)\n\n else:\n return None", "title": "" }, { "docid": "258d12cecb9c343b0bf081335c645268", "score": "0.6731994", "text": "def get_agent_url(self):\n try:\n agent = self._data.find('ul', {'class': 'links'})\n links = agent.find_all('a')\n return links[1]['href']\n except Exception as e:\n if self._verbose:\n print(e.message)\n return", "title": "" }, { "docid": "07fce6fd7d6b7e47d5946210e2bd2d60", "score": "0.67199904", "text": "def getLink(html):\n #encontra a primeira ancora e joga fora tudo que vem antes dela, incluindo o comeco da tag\n link = html.split(\"<A HREF=\")[1]\n\n link = link[:link.find(' ')]\n\n return link", "title": "" }, { "docid": "5ed5bb5eec5f4048c86803ceed8d2a54", "score": "0.6718438", "text": "def get_download_link(self, url: str) -> str:\n logger.debug(\"Getting url with driver\")\n self.driver.get(url)\n\n logger.debug(\"Returning redirected url\")\n return self.driver.current_url", "title": "" } ]
fbe39aef838317b089316438cad11925
The only meaning of this function is to trigger HighlimePauseCommandListener
[ { "docid": "019a4a108508ad49cf7201fe72c86c88", "score": "0.72343063", "text": "def run():\n print('Highlime paused')", "title": "" } ]
[ { "docid": "898f0e506e7f8c034b4be4605b68abcc", "score": "0.6303478", "text": "def pause(self):\r\n self._command_queue.put('PAUSE')", "title": "" }, { "docid": "a2fbfa52839871c05016db68f6602804", "score": "0.6262444", "text": "def pause():\r\n command(\"M0\")", "title": "" }, { "docid": "1ea2ea62bc59f0b4e51fb97c2e8c9664", "score": "0.62126005", "text": "def on_pause(self): # pylint: disable=no-self-use\n return True", "title": "" }, { "docid": "67635aa23fb4f5a12fc71ef70d9b599d", "score": "0.6166679", "text": "def do_pause(self):\n logger.debug('pause: nothing to do')", "title": "" }, { "docid": "b38dacb5d462bf471a7c56fe5e6e2c71", "score": "0.6118634", "text": "def pause(self):\n raise NotImplementedError()", "title": "" }, { "docid": "a4af7d5122bcfffc493b9f7e1fa944df", "score": "0.6036492", "text": "def test_callback_when_paused(self):\n # Checks that callback hasn't been called.\n self.assertFalse(self.callback_called)\n\n self.prueba.start()\n\n for i in range(10):\n print (\"Python\")\n time.sleep(0.2)\n\n # Checks that callback has been called.\n self.assertTrue(self.callback_called)", "title": "" }, { "docid": "4176900318512c99e01557dae5b2ef12", "score": "0.59930396", "text": "def Pause(self):\n pass", "title": "" }, { "docid": "980925d7d94046c6f40a5b2774fb99c6", "score": "0.59914106", "text": "def __after__(self, command):", "title": "" }, { "docid": "b4062ef917d2b132d02a9d8e7219ac5f", "score": "0.5989671", "text": "def pause(self):\n # TODO: Implement this", "title": "" }, { "docid": "c1f5f1d7076d0388b60a6d967c76b8a5", "score": "0.59425277", "text": "def pause(self):\n return self.set_status('paused')", "title": "" }, { "docid": "e68f20fd35f23db3b6ea824f8fc17f28", "score": "0.5927341", "text": "def MyHotKeyCallback(self, inRefcon):\r\n\t\tXPLMSpeakString(\"Hello World 1!\")\r\n\t\tpass", "title": "" }, { "docid": "d1ec85ead656f87c76fe5f791788b92a", "score": "0.5914783", "text": "def pause(self):\r\n self.paused = True", "title": "" }, { "docid": "8e0c438f586360f0a44cd38e679aa1b8", "score": "0.58862203", "text": "def pause(self) :\n\tself._pause = True\t\t\t\t\t\t# postpone run() loop", "title": "" }, { "docid": "5b71b121c6109208ade919e34e2f6027", "score": "0.58775747", "text": "def pause(self):\n self.request(\"Pause\")", "title": "" }, { "docid": "55de7a92f56c5c71cd69bf45e54c3231", "score": "0.5873952", "text": "def run(self):\n\n sublime.set_timeout(lambda: self.update(0), 100)", "title": "" }, { "docid": "cab7dea60b4daca5fc13cc05386bd4c1", "score": "0.5864492", "text": "def cmd_pause(self):\n return 'pause', []", "title": "" }, { "docid": "c3c21c48622d4708470a2b9267021dae", "score": "0.58641315", "text": "def pause_writing(self):", "title": "" }, { "docid": "498002089a915206024637f3b2549e27", "score": "0.5783404", "text": "def on_playback_pause(cls, *args, **kwargs):\n cls.debugself(*args, **kwargs)", "title": "" }, { "docid": "a56203cd647c63d1b88e3cde10a8b2dd", "score": "0.57688814", "text": "def _listener(self):\n while True:\n with open(\"commands\", \"r\") as f:\n for line in f.readlines():\n exec(line)\n with open(\"commands\", \"w\") as f:\n f.flush()\n hub.sleep(2)", "title": "" }, { "docid": "17f26d16147393d07a50aa8b8c548102", "score": "0.5753141", "text": "def run_command(self):\n self.qaction.trigger()", "title": "" }, { "docid": "4e8f1851f7afa78c712cd9db194152f2", "score": "0.5746878", "text": "def pause(self):\n self.running = False\n self.root.control_panel.play_pause_button.update_config()", "title": "" }, { "docid": "a37910c9a95310ea183cf18b58b93838", "score": 
"0.57083726", "text": "def pause(self):\n self._sendCommand(\"p\")", "title": "" }, { "docid": "8f275a7368fd9fbe199e7552721c6bda", "score": "0.56969446", "text": "def pause(self):\n self._sendCommand(\"stop\\n\")", "title": "" }, { "docid": "57c8b834752a8d428c64bd909c1eac99", "score": "0.56960887", "text": "def pause():\n schedule(compat.getcurrent())\n state.mainloop.switch()", "title": "" }, { "docid": "33238670b6d963aeb910ecb395548f5e", "score": "0.5685608", "text": "async def _pause(self, ctx: commands.Context):\n\n if ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "title": "" }, { "docid": "247326a512e72db8bc8b03598f45ab67", "score": "0.5643068", "text": "def pause_and_restart(self):\r\n self._command_queue.put('PAUSE_AND_RESTART')", "title": "" }, { "docid": "582ed762a88b9259e04bc5cac24ad1de", "score": "0.56402516", "text": "def on_stop(self):", "title": "" }, { "docid": "45129819eedf9c77eb7f81ec78d15520", "score": "0.56309694", "text": "def pause(self, msg='Pausing, hit \"t\" inside axes to continue'):\n print(msg)\n self.on(callbacks=\"key_only\", reset=False)\n self._in_loop = True\n self._figs[0].canvas.start_event_loop(timeout=-1)", "title": "" }, { "docid": "b258dc491368b424b0feb07beae537a3", "score": "0.5612761", "text": "def disconnect(self):\r\n self._command_queue.put('PAUSE')", "title": "" }, { "docid": "821ba2a0697566b0d1ab2d47107b3cdd", "score": "0.5603054", "text": "def __call__(self):\n while self.context.open:\n self.events()\n if not self.pause:\n self.update()\n self.show()", "title": "" }, { "docid": "5c170c6b07c3e741a2ee9d0de40dcbc0", "score": "0.5602029", "text": "def on_start(self):", "title": "" }, { "docid": "5c170c6b07c3e741a2ee9d0de40dcbc0", "score": "0.5602029", "text": "def on_start(self):", "title": "" }, { "docid": "4dbf41f55024e79e640267b93a3f8c7a", "score": "0.5596638", "text": "def process_host_command(self, command):\n command = command.lstrip()\n if command.startswith(\";@pause\"):\n self.pause_sig.emit()", "title": "" }, { "docid": "8aa4f1698c8f40b90884370522dbba4f", "score": "0.55852735", "text": "async def paused(self, ctx, *, moons = None):\r\n\t\t\r\n\t\tawait ctx.invoke(self.playing,moons=moons)", "title": "" }, { "docid": "79a49d8883c282807c97341cbbfff31d", "score": "0.5571689", "text": "def Pause(self):\n self.Call(\"set_paused\", True)", "title": "" }, { "docid": "636b7d6abdc07f2a466018ebf324c185", "score": "0.55703264", "text": "def on_lifecycle_pause(self, ch, method, properties, message):\n LOG.debug(\"Received lifecycle.pause event.\")\n self.state = \"PAUSED\"", "title": "" }, { "docid": "a2c8eece73c9be719a48172e8cc89a74", "score": "0.55540305", "text": "def commandStarted(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "253ef1ba7690f3cd5c8cf9901872b85c", "score": "0.5539409", "text": "def notify_pause(self, handle):\n ext = [struct.pack(\"I\", handle)]\n return _u2i(_lg_command_ext(self.sl, _CMD_NP, 4, ext, L=1))", "title": "" }, { "docid": "6a332ff5eba93e725ccfc99555e403f8", "score": "0.5538591", "text": "def after_command_exec(self, cmd: str) -> None:\n pass", "title": "" }, { "docid": "9268af00c6050b2c81c39ba1af48c5b4", "score": "0.5533746", "text": "def suspend(self):", "title": "" }, { "docid": "aa5205f3ceba396f1161f40bc0aa9a0d", "score": "0.5530409", "text": "def start_play(self):\r\n self.time_on()\r\n self.add_restart_button()\r\n self.activate_board_and_add_word()", "title": "" }, { "docid": "3f43e0a7bef498935917755b865c7231", 
"score": "0.55251205", "text": "async def on_yut_pause(self, user, youtube): # P\n if not youtube.is_response:\n self.console.write(f'{user.nick} paused {youtube.title}'\n f' at {youtube.offset}')", "title": "" }, { "docid": "75fd27a68c3d463e161c2ee5b36af485", "score": "0.55248374", "text": "def _before_send_hook(self):\r\n self._sleep_if_paused_die_if_stopped()", "title": "" }, { "docid": "b047987721ba4afdf505c16b01b3a983", "score": "0.55242664", "text": "def pause(args):\n utils.pause_unit_helper(utils.register_configs())", "title": "" }, { "docid": "2345963c1ad22295d0660fc3077e8ccc", "score": "0.5514024", "text": "def pause_switch(self):\n self.is_paused = not self.is_paused", "title": "" }, { "docid": "cae47f0e68a4846c3a9cdf6dec9b0c4b", "score": "0.55000514", "text": "def pause(self):\n if self.get_start_pause_button().text == \"Pause\":\n self.get_start_pause_button().click()", "title": "" }, { "docid": "79c8dc8473424bd7a416e9ad4d3fdc98", "score": "0.5491962", "text": "def pause():\n os.system(\"pause\")", "title": "" }, { "docid": "684ac1010e5c18b54dcd17888e484ebe", "score": "0.5490433", "text": "def on_stop(self):\n print(\"I am the on_stop\")", "title": "" }, { "docid": "3d33fa5edcfb86f2618e11dbe87b1ed6", "score": "0.54787177", "text": "def pause(self):\n readline.write_history_file(self.historyPath)\n readline.clear_history()\n readline.set_completer()\n self.active = False", "title": "" }, { "docid": "7bfbd4347a3ab57e058c0cf12a3793e1", "score": "0.5472983", "text": "def pauseProducing():", "title": "" }, { "docid": "2b04b1e6b6e1921918c80e8ed2ee4082", "score": "0.5472521", "text": "def not_pause():\r\n global pause\r\n pause = False", "title": "" }, { "docid": "5970be724a2cfb6ea1d47a218d0f96d9", "score": "0.5456", "text": "def pause():\n global state\n if state == \"playing\":\n commands.getoutput(\"osascript -e 'tell application \\\"iTunes\\\" to pause'\")\n state = state_update()", "title": "" }, { "docid": "21ab8f56ba82a4f3401596d7e546e163", "score": "0.5451173", "text": "def pause(self):\n reactive.set_flag(\"charm.paused\")\n self.run_pause_or_resume('pause')", "title": "" }, { "docid": "ab59d2d14f3ea24a07ecacf5953df807", "score": "0.5441281", "text": "def OnStart(self, event):", "title": "" }, { "docid": "133bfa85352294876381a6828498c607", "score": "0.5439464", "text": "def switchPause(self):\n self.pause = not self.pause\n if self.pause:\n self.context.console('The system is paused.')\n else:\n self.context.console('The system is unpaused.')", "title": "" }, { "docid": "dde02d3a608a4201635c9519fba6c07d", "score": "0.5428807", "text": "def pause(self):\n self.paused = not self.paused", "title": "" }, { "docid": "94fe550a3ef543f13a3f9f06865bfb1d", "score": "0.54251933", "text": "async def pause(self, ctx):\n logger.info(f\"stop command - author:'{ctx.author}'\")\n\n if ctx.voice_client is not None:\n if ctx.voice_client.is_paused():\n ctx.voice_client.resume()\n await ctx.send(f\":arrow_forward: Resuming the music\")\n elif ctx.voice_client.is_playing(): \n ctx.voice_client.pause()\n await ctx.send(f\":pause_button: Pausing the music\")", "title": "" }, { "docid": "4ac47c36e783328cf3e81697161ce1d8", "score": "0.54164284", "text": "def OnPause(self, event):\r\n self.client.disconnect()", "title": "" }, { "docid": "8d36dd6cde885596e79f629b59d30fd7", "score": "0.5415882", "text": "def pause(self):\n if self.isLoaded():\n self.callFlash('fp_pause', [])", "title": "" }, { "docid": "a06e8aa31f2a798deaedd7972da5cdc9", "score": "0.54152167", "text": "def sonos_pause(bot, msg, 
speaker):\n speaker.pause()", "title": "" }, { "docid": "92345cf71ff6336157700241bd24326c", "score": "0.5413853", "text": "def callback():\r\n game.SnakeGame()", "title": "" }, { "docid": "c57c0f875bd27f6f3160d07f5b0f83e1", "score": "0.54084873", "text": "def _trigger(self):\n pass", "title": "" }, { "docid": "dc68486e2651227d0f33bdaa4c62d7a5", "score": "0.5407856", "text": "def activate_assistant(key):\n\n if key == Key.f4:\n listen_for_cmd()\n \n # by pressing 'delete' button \n # you can terminate the loop \n if key == Key.delete: \n return False", "title": "" }, { "docid": "fc674e63aa607150980c08160d1f943e", "score": "0.54076827", "text": "def on_start(self):\n print(\"starting\")", "title": "" }, { "docid": "91ca19ebecbf302b08c8ea9a937c8c46", "score": "0.54046416", "text": "def fireCommand(command):\n global gMiniMacroRecorder\n SendMessage(gMiniMacroRecorder, WM_SETTEXT, 0, str(command) )\n SendMessage(gMiniMacroRecorder, WM_CHAR, VK_RETURN, 0)", "title": "" }, { "docid": "ed03ce2450294a1d23ec11c7f29ba278", "score": "0.5399254", "text": "def media_pause(self):\n\t\tself._tv.sendKey('Pause')", "title": "" }, { "docid": "88e0e3bcae5b158da04c6135327abbfa", "score": "0.53928703", "text": "def start_pause(self, **kwargs: Any) -> None:\n if self.supported_features & VacuumEntityFeature.PAUSE == 0:\n return\n\n self._state = not self._state\n if self._state:\n self._status = \"Resuming the current task\"\n self._cleaned_area += 1.32\n self._battery_level -= 1\n else:\n self._status = \"Pausing the current task\"\n self.schedule_update_ha_state()", "title": "" }, { "docid": "14a6e37e88d20bfb534cf7097e7b5854", "score": "0.5392088", "text": "def pause(self):\n for source in self.sources: # forward commands\n source.pause()", "title": "" }, { "docid": "94add8e6809bf5bfa333fbc0b5943a3d", "score": "0.53909934", "text": "def pause(self) -> None:\n self._act(htcondor.JobAction.Suspend)\n logger.debug(f\"paused map {self.tag}\")", "title": "" }, { "docid": "8ce5ac321c6c6e5820791f34bb415371", "score": "0.5387559", "text": "def main_loop(self):\n\n command = \"\"\n iswatching = True\n\n\n print(\"starting\")\n # proc.start()\n while iswatching:\n command = raw_input(\"Heimdall: \")\n\n if command == \"/quit\":\n iswatching = False\n state.value = -1\n\n elif command == \"/pause\":\n l.acquire()\n print(\"paused\")\n ispaused = True\n\n elif command == \"/start\":\n print(\"unpausing\")\n l.release()\n ispaused = False\n\n else:\n print(\"unknown command\")\n\n # proc.join()\n print(\"finished\")\n return", "title": "" }, { "docid": "ce487a96f9de1dec5d892241549930e0", "score": "0.53836596", "text": "def handler(self):\r\n\t\tself.pausePirate()\r\n\t\tif tkMessageBox.askokcancel(\"Quit?\", \"Are you sure you want to quit?\"):\r\n\t\t\tself.exitClient()\r\n\t\telse: # When the user presses cancel, resume playing.\r\n\t\t\tself.playPirate()", "title": "" }, { "docid": "b27fb66e079523a83d6e4aec54953bc8", "score": "0.5377227", "text": "def on_start(self):\n print(\"I am the on_start\")", "title": "" }, { "docid": "309f35ac9128f6a95d811d408b33be63", "score": "0.5374199", "text": "def pause_experiment(self, widget, data = None):\n if self.si is None: return False\n pause_state=self.toolbar_pause_button.get_active()\n if pause_state:\n if self.state!=DamarisGUI.Run_State: return False\n if self.spool_dir is None: return False\n no=self.si.data.get(\"__recentresult\",-1)+1\n result_pattern=os.path.join(self.spool_dir, \"job.%09d.result\")\n job_pattern=os.path.join(self.spool_dir, \"job.%09d\")\n while 
os.path.isfile(result_pattern%no):\n no+=1\n i=0\n self.pause_files=[]\n while i<3 and os.path.isfile(job_pattern%(no+i)):\n pause_file=(job_pattern%(no+i))+\".pause\"\n os.rename(job_pattern%(no+i), pause_file )\n self.pause_files.append(pause_file)\n i+=1\n self.state=DamarisGUI.Pause_State\n self.backend_statusbar_label.set_text(\"Backend Paused\")\n \n else:\n if self.state!=DamarisGUI.Pause_State: return False\n self.state=DamarisGUI.Run_State\n for f in self.pause_files:\n os.rename(f, f[:-6])\n self.pause_files=None\n self.backend_statusbar_label.set_text(\"Backend Running\")", "title": "" }, { "docid": "da829f8fdb5eda7d6262ca623e376eaf", "score": "0.53737056", "text": "def __pause(self):\n\n self.player.set_state(gst.STATE_PAUSED)", "title": "" }, { "docid": "836dfb516ed2e46601af40a3cf220bbc", "score": "0.53714347", "text": "def buddy_typing_stopped_cb(self, msg):\n self.proxyCallback('buddy-typing-stopped', msg)", "title": "" }, { "docid": "40e2e98f5e5eab2ea5875402b5ccc087", "score": "0.53696364", "text": "def play(self) :\n self.start()", "title": "" }, { "docid": "ebe24a52cdf51a4ac448bc6f435e2d50", "score": "0.53687835", "text": "def commandFinished(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "8f1bc01205cf7defdd772e2b1414afd7", "score": "0.53621835", "text": "def commandHandler(player, args):\n\n if args[0] == C.KEYWORD_RELOAD:\n reload(C) # reloading constant file\n return G_TWEAKER.setupDefaultTweaks()", "title": "" }, { "docid": "4d4f79558f8655054bfd72e3d57b1adb", "score": "0.5358429", "text": "def end_command(self):\n self.__quit()\n self.final_callback()", "title": "" }, { "docid": "170df5a535d7b8f16343a6c193279391", "score": "0.53547037", "text": "def play_end(self):\n self.main = True\n self.menu = True\n self.play = False", "title": "" }, { "docid": "2d767df9ef9fd8961bd7a52f887b3d1b", "score": "0.53538233", "text": "def on_stop(self):\n pass", "title": "" }, { "docid": "2d767df9ef9fd8961bd7a52f887b3d1b", "score": "0.53538233", "text": "def on_stop(self):\n pass", "title": "" }, { "docid": "f5b028d9a2e9b00cc05adc5ee0bfadb5", "score": "0.5352891", "text": "def OnKey(self, msg):\n pass", "title": "" }, { "docid": "a47d94c7c6d8406fcf02bdb2943fda49", "score": "0.5346518", "text": "def start_listening(self) -> None:\n\n self.shortcuts.listen = True\n self.highlight_specific_button(self.shortcuts.current_mode)", "title": "" }, { "docid": "8d8932bc96df15d49c785b5ed1347fee", "score": "0.53425646", "text": "def preferences_changed(self):\n\n cmd = self.pref_cmd.get()\n\n if self.ispell_command != cmd:\n if self.ispell:\n GPS.Logger('ISPELL').log('command changed, restart process')\n self.kill()\n\n self.ispell_command = ''\n if os_utils.locate_exec_on_path(cmd.split()[0]):\n GPS.Logger('ISPELL').log('initialize ispell module: %s' % cmd)\n self.ispell_command = cmd\n\n if self.ispell_command and self.pref_type.get() == 'static':\n GPS.Logger(\"ISPELL\").log('Activate static contextual menu')\n if self.dynamic:\n self.dynamic.hide()\n Static_Contextual(ispell=self)\n\n elif self.ispell_command and self.pref_type.get() == 'dynamic':\n GPS.Logger(\"ISPELL\").log(\"Activate dynamic contextual menu\")\n GPS.Contextual('spell check word').hide() # disable static menu\n if not self.dynamic:\n self.dynamic = Dynamic_Contextual(ispell=self)\n else:\n self.dynamic.show()\n\n else:\n if self.dynamic:\n self.dynamic.hide()", "title": "" }, { "docid": "c99d82d19f1ed4ceba58163f83d01612", "score": "0.5326319", "text": "def plugin_loaded():\n # Run our 
cleaner immediately\n # DEV: We must use an object, otherwise the local variable wouldn't reach our module one\n settings['silencing'] = True\n sublime.set_timeout(silence_query_completions(), initial_delay)", "title": "" }, { "docid": "e24a08747c065a4fd9277e81d7f306db", "score": "0.53214896", "text": "def pause(self):\n self._pause_mode = True", "title": "" }, { "docid": "fd80881bf724108884c41e89f1246bc5", "score": "0.53202486", "text": "def on_start(self):\n pass", "title": "" }, { "docid": "fd80881bf724108884c41e89f1246bc5", "score": "0.53202486", "text": "def on_start(self):\n pass", "title": "" }, { "docid": "fd80881bf724108884c41e89f1246bc5", "score": "0.53202486", "text": "def on_start(self):\n pass", "title": "" }, { "docid": "fd80881bf724108884c41e89f1246bc5", "score": "0.53202486", "text": "def on_start(self):\n pass", "title": "" }, { "docid": "fd80881bf724108884c41e89f1246bc5", "score": "0.53202486", "text": "def on_start(self):\n pass", "title": "" }, { "docid": "fd80881bf724108884c41e89f1246bc5", "score": "0.53202486", "text": "def on_start(self):\n pass", "title": "" }, { "docid": "fd80881bf724108884c41e89f1246bc5", "score": "0.53202486", "text": "def on_start(self):\n pass", "title": "" }, { "docid": "cf7e2dd9e2d925078456a16bebab6b6c", "score": "0.53189665", "text": "def pause(self):\n # Draw the string to the center of the screen.\n self.print_center([\"PAUSE\",\"Press \\\"p\\\" to continue\"])\n pygame.display.flip()\n while True:\n for ev in pygame.event.get():\n if ev.type == pygame.KEYDOWN and ev.key == pygame.K_p:\n return", "title": "" }, { "docid": "9c8d6df3690c4e704213249c48eb8494", "score": "0.53181666", "text": "def pausePlay(self):\n import pygame\n\n if pygame.key.get_pressed()[pygame.K_SPACE]:\n self._proxy.toggleSim(True)\n if pygame.key.get_pressed()[pygame.K_p]:\n self._proxy.toggleSim(False)", "title": "" }, { "docid": "fd47b14b57e3790cda89adfb738ae59a", "score": "0.531561", "text": "def on_leave(self):", "title": "" }, { "docid": "2d10e7b3ab908929f4ff735696f392df", "score": "0.5311308", "text": "def shell_text_changed(self, text):\n raise NotImplementedError", "title": "" }, { "docid": "78081b68c72c522a034c06dba73e1a4b", "score": "0.5298843", "text": "def pause(self):\n return False", "title": "" }, { "docid": "ac624023b811b9369904edefdc5ee3bf", "score": "0.5292565", "text": "def pause(self):\n command = self.__command(0x0E, 0x00, 0x00)\n \n sleep_ms(200)\n self.__uart.write(command)", "title": "" } ]
c626eae38259a07289a7b7708d2c1ce7
Returns or sets the text of header.
[ { "docid": "b1592dbb51367a2b79d2e7c8a17593a7", "score": "0.0", "text": "def header_right(self):\n return self.impl.header_right", "title": "" } ]
[ { "docid": "ab438d89b8054bc9433af182d69b5617", "score": "0.77726483", "text": "def header_text(self):\n return object_attr_string_repr(self.header)", "title": "" }, { "docid": "85fdadd7ccdfdf866f181fae0ac29e0c", "score": "0.7516228", "text": "def setHeader(self, text):\n self.header.config(text=text)", "title": "" }, { "docid": "f0849f4a6425188c5115aa177c1999d7", "score": "0.72582966", "text": "def get_header(self, name):\n return self.get_title()", "title": "" }, { "docid": "bbe936984e94353f8651a56a0c06e746", "score": "0.7195936", "text": "def header(self, content=None):\n if content is not None:\n self._header = content\n return self._header", "title": "" }, { "docid": "af475da825e3d078d63621362866da2c", "score": "0.7152526", "text": "def get_header(self, name):\r\n if name in self.headerdict:\r\n return self.headerdict[name]\r\n else:\r\n return \"\"", "title": "" }, { "docid": "ab520c4e734f729d8fb80a6850422675", "score": "0.7096762", "text": "def get_header(self, header):\n raise NotImplementedError('pure virtual method')", "title": "" }, { "docid": "b7efe35c8da6738369b0e4a9476bfec3", "score": "0.68633956", "text": "def header(header_text):\n l = len(header_text)\n return \"%s\\n%s\\n\" % (colored(header_text, 'green', attrs=['bold']),\n colored(l * '=', 'green', attrs=['bold']))", "title": "" }, { "docid": "2518e0c22bd474e992098209734482ec", "score": "0.68632174", "text": "def header(self):\n return self.__header", "title": "" }, { "docid": "bba634125f727ba8ecce36fcdcafc684", "score": "0.6815649", "text": "def get_heading(self):\r\n return self.__heading", "title": "" }, { "docid": "f79a09ad9e3002d7f62b16ac2761ed9e", "score": "0.6806669", "text": "def title(self):\n return self.header", "title": "" }, { "docid": "f79a09ad9e3002d7f62b16ac2761ed9e", "score": "0.6806669", "text": "def title(self):\n return self.header", "title": "" }, { "docid": "a2516a6f24a0ac37cd8e445653846522", "score": "0.6800272", "text": "def heading(self, value):\n\n pass", "title": "" }, { "docid": "e3934990edd37cee8071aedc31842bfc", "score": "0.67954236", "text": "def getheader(header_text, default=\"ascii\"):\n headers = decode_header(header_text)\n header_sections = [unicode(text, charset or default, \"ignore\") for text, charset in headers]\n return u\"\".join(header_sections)", "title": "" }, { "docid": "dcd40ff456ba7f96fce8e29f80603a86", "score": "0.67886496", "text": "def header(header_text, header_level=2):\n return('<h' + str(header_level) + '>' + header_text +\n '</h' + str(header_level) + '>')", "title": "" }, { "docid": "c154473eb95b4aa09c4dbb50b13373a0", "score": "0.67821014", "text": "def header(self):\n raise NotImplementedError(\"require header\")", "title": "" }, { "docid": "bb41e458327131fbd3c5acd6b89f9614", "score": "0.67671984", "text": "def getheader(header_text, default=\"ascii\"):\n headers = decode_header(header_text)\n header_sections = [unicode(text, charset or default)\n for text, charset in headers]\n return u\"\".join(header_sections)", "title": "" }, { "docid": "500e66ddbd9e4495ad44ad9a8456a0bb", "score": "0.67596596", "text": "def get_header(self):\n return self._header", "title": "" }, { "docid": "f59db5394e9b0e72ef0ca379bbb81619", "score": "0.67139584", "text": "def header(self, string):\n return self.HEADER + string + self.ENDC", "title": "" }, { "docid": "61462e4176d90a726c7cf72fe0739f2e", "score": "0.6673251", "text": "def getHeaderText(self,title=\"Cite The PC\"):\n #TODO take an optional arguement for the title\n return \"\"\"\n <html><head><title>%s</title>\n <link 
rel=\"stylesheet\" type=\"text/css\" href=\"/css/citeTPC.css\" media=\"screen\" />\n </head><body>\"\"\" % title", "title": "" }, { "docid": "2a7b5176cb55508be1f796cebf6fc103", "score": "0.66610456", "text": "def header(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"header\")", "title": "" }, { "docid": "e3ab4789411cab62316d9dbec4c51ab9", "score": "0.6648027", "text": "def header(self):\n return self.data", "title": "" }, { "docid": "95b6c95be48962cbec41c7c60534b431", "score": "0.6622593", "text": "def _get_header_string(self):\n # Construct header string\n header_list = []\n for key, value in self.headers.items():\n header_list.append('%s: %s' % (key, value))\n\n return '\\r\\n'.join(header_list)", "title": "" }, { "docid": "8ac88e705ba1c32b5e241f48fded1622", "score": "0.6613078", "text": "def heading(self):\n return self._heading", "title": "" }, { "docid": "097b34378ac742fd420daf13e97168b3", "score": "0.66098005", "text": "def getHeader(self, header):\n return self.headers[header]", "title": "" }, { "docid": "98bcae35729b4a2e4563de7a9bedbd39", "score": "0.657234", "text": "def header():\n return \"# Some kind of legal stuff\" + LF + LF", "title": "" }, { "docid": "5d6a24b908f0e2e72f4468e30c9b89df", "score": "0.6550921", "text": "def get_header(self):\n if self.header is None:\n raise NotImplementedError('There is no header for this import type')\n return self.header", "title": "" }, { "docid": "bd8fed927b84200cdc2625c0b75e7fe6", "score": "0.6541497", "text": "def __set_header(self):\n self.canvas.setFont(self.fontType, 8)\n self.canvas.setFillColorRGB(.68, .68, .68)\n self.canvas.rect(\n self.margin_size, (self.page_height - self.full_header),\n (self.page_width - (self.margin_size * 2)),\n self.header, fill=True, stroke=True)\n\n # header text\n self.canvas.setFillColor('black')\n title_split = simpleSplit(\n self.title, self.fontType, 8,\n (self.page_width - (self.margin_size * 2)))\n self.canvas.drawString(\n (self.margin_size * 1.25),\n self.page_height - self.margin_header - .75 * self.header,\n title_split[0])", "title": "" }, { "docid": "b9da3f839189dddf8a3da93f34eb2e00", "score": "0.65122646", "text": "def persistent_header(self, text):\n self.template_global_header.append(text)", "title": "" }, { "docid": "9b13531fa8fdec012f14c307cb80396a", "score": "0.64811486", "text": "def set_header(self, header, value):\n raise NotImplementedError('pure virtual method')", "title": "" }, { "docid": "d6e32193dd75b59abcb0d5279fc9efab", "score": "0.6465997", "text": "def getHeading(self):\n return self.__heading", "title": "" }, { "docid": "f76997becc9d7ffe43f7203c52849faa", "score": "0.64640766", "text": "def __str__(self):\n header_str = self.desc\n header_str = self._add_border(header_str)\n return header_str", "title": "" }, { "docid": "9f4ef8c411870630fabef7c39679403c", "score": "0.6463702", "text": "def render_header(self, nb):\n header = self.template.module.__dict__.get('header',None)\n assert header is not None, \"This template must have a 'header' macro.\"\n return header(nb)", "title": "" }, { "docid": "9f4ef8c411870630fabef7c39679403c", "score": "0.6463702", "text": "def render_header(self, nb):\n header = self.template.module.__dict__.get('header',None)\n assert header is not None, \"This template must have a 'header' macro.\"\n return header(nb)", "title": "" }, { "docid": "223a9c09930577258f5ed2b5ec424489", "score": "0.6462892", "text": "def header_string(self):\n if self._header_string is None:\n self._header_string = self.row_format_string.format(self.label,\n 
self.count_or_proportion)\n return self._header_string", "title": "" }, { "docid": "35cf4866158a25050374e164abd860b3", "score": "0.6440023", "text": "def get_header(self, headername):\n return self.get_comment(headername)", "title": "" }, { "docid": "a275ea102862d0a529115a2a66c39b24", "score": "0.64319557", "text": "def print_header(self, header):\n global config\n line = '='*config['LEN']\n if len(header) == 1 and header[0] == '-':\n return\n if header == []:\n print '\\n'+line\n return\n header = [x.center(80, ' ') for x in header]\n head = '\\n'.join(header)\n head = '\\n%s\\n%s\\n%s'%(line, head, line)\n print head\n return", "title": "" }, { "docid": "282e90bffce1c846b0f5081382acd16d", "score": "0.6422456", "text": "def get_header_template(self):\n return None, None", "title": "" }, { "docid": "1c7f8a4c65cb8974f25c8f2924eb0455", "score": "0.64197326", "text": "def print_header(text,\n log=None,\n verbose=True):\n if verbose:\n print('%s\\n %s\\n%s' % ('='*80, text, '='*80))", "title": "" }, { "docid": "115a9e90d22f0f8db987cbaadafe1602", "score": "0.64066577", "text": "def heading( self ):", "title": "" }, { "docid": "68e19ff6e78af3bf7760cf3d622ac271", "score": "0.639062", "text": "def PrintHeader(self):", "title": "" }, { "docid": "972853dbcc8441fa6e0701ee9711cfdc", "score": "0.63740313", "text": "def heading(self):\n return self._state[2]", "title": "" }, { "docid": "7fc222bca8b09e30fa29540366792ba1", "score": "0.6366834", "text": "def setHeader(self, header):\n\t\tself._header = header + self._extraHeader", "title": "" }, { "docid": "4a14b3243025adfb74dcce5aa1db6a93", "score": "0.6359898", "text": "def test_get__header(self):\n self.assertTrue('<h1>Contact Manager</h1>')", "title": "" }, { "docid": "cfe4acfa76707871dbd225d50f93648e", "score": "0.63342404", "text": "def heading(self) -> str:\n level = self.level\n return self.title[level:-level].strip()", "title": "" }, { "docid": "20d4b629915b58e78b7f93114327e990", "score": "0.6326988", "text": "def header(self):\n self._load_grid(self.fname)\n return self._grid.header", "title": "" }, { "docid": "df3e258ca13aaebbae8c96ee53c11544", "score": "0.6312609", "text": "def get_header(self, name: str) -> Optional[str]:\n\n if header := self.data.get(name):\n return header.value\n return None", "title": "" }, { "docid": "ecc60083507773acbc94932376034231", "score": "0.63114303", "text": "def createHeader(self, title):\n template = u\"<h1>{0}</h1>\"\n \n if self.model.showHeader == True:\n return template.format(title)\n else:\n return \"\"", "title": "" }, { "docid": "643a932dae0a29b2a01a9fb7021ed70c", "score": "0.6302051", "text": "def title(self) -> str:\n m = TITLE_PATTERN.search(self.header)\n return m[1].strip() if m else ''", "title": "" }, { "docid": "fc85d480079c13c3e7ebe67f21c61cfa", "score": "0.6274774", "text": "def changeHeader(self):\n col = self.table_widget.currentColumn()\n\n text, ok = QInputDialog.getText(self, \"Enter Header\", \"Header text:\")\n\n if ok and text != \"\":\n self.table_widget.setHorizontalHeaderItem(col, QTableWidgetItem(text))\n else:\n pass", "title": "" }, { "docid": "50d8658b9e902486e0b9f34125a70391", "score": "0.62723434", "text": "def get_title(self):\n return self.config.get(\"header\", \"name\")", "title": "" }, { "docid": "3d67aa99d16ff75bf24c73a64915d4d3", "score": "0.627214", "text": "def edit_header(self, header):\n if self.get_id() not in ['', None]:\n header['EXTNAME'] = self.get_id(), 'Content identifier.'\n super().edit_header(header)", "title": "" }, { "docid": 
"3c9d27cb83e4a3bd773230e6cb0bedd8", "score": "0.62702066", "text": "def __get_header(self):\n # try:\n self.header = self.hdulist[0].header\n # except:\n # self.hdulist = astropy.io.fits.open(self.map_name)\n # self.header = self.hdulist[0].header", "title": "" }, { "docid": "f845afe6c357f40dca93b75b17d95889", "score": "0.62664443", "text": "def other_heading(self, value):\n\n pass", "title": "" }, { "docid": "9139b0b1126c81dec36904b9534f2767", "score": "0.62508875", "text": "def header(self, txt, size='1', color='black'):\r\n txt = f'<h {size} style=\"color: {color}\">{txt}</h {size}>'\r\n append_content(txt)", "title": "" }, { "docid": "757a3c784accbcecbb8b83605b694102", "score": "0.6217515", "text": "def draw_header(self, stream, header):\n stream.writeln(header)\n stream.writeln('~' * len(header))\n stream.writeln()", "title": "" }, { "docid": "52b7a0479e3e2437cf421598b505ffe1", "score": "0.61903256", "text": "def reportHeader(self):\n report = \"\"\n\n title = self.data.name\n current_time = datetime.datetime.now().strftime(\"%I:%M%p, %B %d, %Y\")\n filename = self.data.filename\n modelname = self.kernel_module.id\n optimizer = options.FIT_CONFIG.selected_fitter.name\n if hasattr(self.data, 'xmin'):\n qrange_min = self.data.xmin\n qrange_max = self.data.xmax\n else:\n qrange_min = min(self.data.x)\n qrange_max = max(self.data.x)\n qrange = \"min = {}, max = {}\".format(qrange_min, qrange_max)\n\n title = title + \" [\" + current_time + \"]\"\n title_name = HEADER % title\n report = title_name\n report += CENTRE % \"File name: {}\\n\".format(filename)\n report += CENTRE % \"SasView version: {}\\n\".format(SASVIEW_VERSION)\n report += CENTRE % \"SasModels version: {}\\n\".format(SASMODELS_VERSION)\n report += CENTRE % \"Fit optimizer used: {}\\n\".format(optimizer)\n report += CENTRE % \"Model name: {}\\n\".format(modelname)\n report += CENTRE % \"Q Range: {}\\n\".format(qrange)\n chi2_repr = GuiUtils.formatNumber(self.parent.chi2, high=True)\n report += CENTRE % \"Chi2/Npts: {}\\n\".format(chi2_repr)\n\n return report", "title": "" }, { "docid": "94facc719b8d9e844e1ba10dba5730e6", "score": "0.61834836", "text": "def setHeader(self, header):\n return self.__header.setLines(header)", "title": "" }, { "docid": "d0d3daec0abcb88c00afdeb63d8b773a", "score": "0.61696386", "text": "def write_header(self): # -> None:\n ...", "title": "" }, { "docid": "d0d3daec0abcb88c00afdeb63d8b773a", "score": "0.61696386", "text": "def write_header(self): # -> None:\n ...", "title": "" }, { "docid": "d997cc695d43671d5b8d27f855ddc02f", "score": "0.6165144", "text": "def get_header_cell(self):\n return self.heading.center(self.width)[:self.width]", "title": "" }, { "docid": "a22528a09822c74743613e8227ea37f0", "score": "0.6164694", "text": "def PrintHeader(self, text, character='*'):\n self._output_writer.Write(u'\\n')\n\n format_string = u'{{0:{0:s}^{1:d}}}\\n'.format(character, self._LINE_LENGTH)\n header_string = format_string.format(u' {0:s} '.format(text))\n self._output_writer.Write(header_string)", "title": "" }, { "docid": "61718390878ee7cfef2a3929ef76be44", "score": "0.6142413", "text": "def getHeader(self):\n topScript = getParentScript(top=True)[0]\n header = \"---- LOG ----\\nFile : %s\\nDate : %s\\nHost : %s\\nOS : %s\\n\\n\"%\\\n (topScript,\n datetime.now(),\n platform.uname()[1],\n platform.uname()[0].lower())\n return header", "title": "" }, { "docid": "10d99ae48d56843e98006e56c2114f62", "score": "0.6141985", "text": "def header(self, header: str, height: int=90) -> str:\r\n return 
self.view(pn.pane.HTML(f\"<div class='title'>{header}</div>\",\r\n sizing_mode='stretch_width'), height=height)", "title": "" }, { "docid": "5ac252ef9511583dce93acafc95e63c9", "score": "0.6140493", "text": "def write_header(self):\n pass", "title": "" }, { "docid": "e2e52b246f5ef63b4b70f7d72b50e574", "score": "0.61241066", "text": "def replace_header(self, header_text):\n with open(self.outfile, 'rt') as fp:\n _, body = self.split_header(fp)\n with open(self.outfile, 'wt') as fp:\n fp.write(header_text)\n fp.writelines(body)", "title": "" }, { "docid": "3094990603be588e533cb8270a77da5c", "score": "0.61212957", "text": "def _print_header(self, name, value, prefix='', stream=None):\n header = self.colorize(self.COLORMAP['header'], \"%s:\" % name)\n self._verbose_output(\"%s %s\" % (header, value), prefix=prefix,\n stream=stream)", "title": "" }, { "docid": "e74ee5cfa4c55a46e3c7348f25aea98f", "score": "0.61170083", "text": "def encode_mail_header(self, text):\n return Header(safe_unicode(text), 'utf-8')", "title": "" }, { "docid": "c057cfa8620a7fd111f06890ad7505a0", "score": "0.6115719", "text": "def __create_header(self):", "title": "" }, { "docid": "0c4f6a663de3e902e5c3b341656dec66", "score": "0.61126745", "text": "def add_heading(self, text):\r\n self.html += '<div class=\"heading\">%s</div>\\n' % (text)", "title": "" }, { "docid": "5509a9785222edbd9d72a0381a385062", "score": "0.6111075", "text": "def add_header(self, parent, record):\n header = SubElement( parent, 'header' )\n TextSubElement( header, 'identifier', record.identifier )\n TextSubElement( header, 'datestamp', record.datestamp )\n for set_spec in record.set_specs:\n TextSubElement( header, 'setSpec', set_spec )\n if (record.status is not None):\n TextSubElement( header, 'status', record.status )", "title": "" }, { "docid": "65b33bfaa7800af50742c3e88816e8a6", "score": "0.6109668", "text": "def get_header(self):\n return \"Date:{}\\nTime:{}\\nSubjectID:{}\\nPSI Load:{}\\nPSI Set:{}\\nSampling Rate(Hz):{}\\nChannel1 SensorScalar:{}\\nChannel2 SensorScalar:{}\\nSamples Per Channel:{}\\nY_Unit_Label:{}\\nX_Dimension:{}\\nChannel Order:AI0,AI1\".format(\n self._date, self.time, self.subjectId, self.PsiLoad,\n self.PsiSet, self.readRate, self.channel1_radianConvFactor, self.channel2_radianConvFactor, self.readNumSamples, \"Volts\", \"Time\")", "title": "" }, { "docid": "7524265c6ca090a0c93178e31a88b30f", "score": "0.6108955", "text": "def set_header(self, name, value):\n msg = {'command': 'SET_HEADER', 'args': [name, value]}\n data = self.send_and_recv(json.dumps(msg))\n return json.loads(data)", "title": "" }, { "docid": "5f09fb1b1592fab750ff3692574913db", "score": "0.61008143", "text": "def _get_show_header(self, source, group):\n header = self.mcast_show_header.replace(\"<source>\", source)\n header = header.replace(\"<group>\", group)\n return header", "title": "" }, { "docid": "f114dc679b97391bb2be3b608a115ec9", "score": "0.6089892", "text": "def parse_write_header(self, m):\n self._ctx.write_html_header()\n\n return ''", "title": "" }, { "docid": "7ae21b404afdd28f04639c06483cd973", "score": "0.60761374", "text": "def getHeaderContent(self):\n content = self.headerFactory()\n if content == self.currentHeaderContent:\n return None\n content.setFragmentParent(self)\n self.currentHeaderContent = content\n return content", "title": "" }, { "docid": "3c58073c30fd78520aed6474c68635c6", "score": "0.6075721", "text": "def get_header(self, taskmanager_id, generation_id, key):\n\n cols = [(x.split())[0] for x in 
SQLite3DB.tables.get(SQLite3DB.header_table)]\n return self._get_table_row(SQLite3DB.header_table, taskmanager_id,\n generation_id, key, cols)", "title": "" }, { "docid": "de2f6efdc7927ad3d03b38cd46292e98", "score": "0.60740924", "text": "def response_header(self) -> Sequence[str]:\n return pulumi.get(self, \"response_header\")", "title": "" }, { "docid": "d3c191884c720f24cd9a97b7f8d120ab", "score": "0.60706246", "text": "def _write_header(self, header):\n return", "title": "" }, { "docid": "da449489ca85652d32744339fd87065a", "score": "0.6067611", "text": "def __str__(self):\n if self.header:\n return '<th>%s</th>' %(self.text)\n else:\n return '<td>%s</td>' %(self.text)", "title": "" }, { "docid": "4bbd10de1df691f07170d81da9727c93", "score": "0.6067571", "text": "def add_header(self, drawing, header_type, value):\n drawing.header[header_type] = value", "title": "" }, { "docid": "c6c7e30042102d3e13bf04fcffc99616", "score": "0.60643363", "text": "def header_string(self):\n return str(self.index) + self.prev_hash + str(self.data) + str(self.timestamp) + str(self.nonce) + str(self.txid)", "title": "" }, { "docid": "71b93ec5fcc36a7cc79e7b3fd324abe2", "score": "0.6061629", "text": "def add_header(self, header=const.HEADER, **format):\n header = f' {header} '\n self.write_string(header, offset=('center', 0), **format)", "title": "" }, { "docid": "7e29379927c76252891ce26c7bf442f9", "score": "0.6058889", "text": "def Header (self, err):\n # Stubbed\n raise RuntimeError,\"Header: Not Defined for virtual base class OData\"", "title": "" }, { "docid": "ece955fe88426d952f896fbb421d2555", "score": "0.60506827", "text": "def set_header(self, name, value):\r\n self.headerdict[name] = value", "title": "" }, { "docid": "d4e74b5d3ed078e2263d8f60b6b7667f", "score": "0.60474783", "text": "def header_string(self):\n\n retval = [\"count\", \"match\"]\n return retval", "title": "" }, { "docid": "deec264a1b66c632f42a2023f1da7e39", "score": "0.60455847", "text": "def _FormatHeader(header):\n if header == lookup.CAPSULE:\n header = 'summary description'\n return '# {}'.format(header.upper())", "title": "" }, { "docid": "fb5b1d4d9b51332c38c961bd7fd1ff79", "score": "0.60444534", "text": "def get_msg_header(self):\n return self._state.header", "title": "" }, { "docid": "d4b0f8655df3b0e9db5cb5544e87d6c1", "score": "0.6035102", "text": "def updateRemoteHeaderContent(self):\n content = self.getHeaderContent()\n if content is not None:\n return self.callRemote(\n 'setContentFromWidgetInfo', content, u'header')", "title": "" }, { "docid": "7caa1c580a7335c37579f27d24248cc0", "score": "0.6025984", "text": "def print_header(cls, content):\n\n color, default = cls.ANSI.get('magenta'), cls.ANSI.get('default')\n size = cls.HEADER_SIZE\n cls.log('', ignore_wrap=True)\n main = '{' + \"\".join([' ' for x in range(int(size/2)-int((len(content)/2)))]) + content\n main += \"\".join([' ' for x in range(size-len(main))]) + '}'\n upper_line = ' /' + \"\".join(['=' for x in range(len(main)-4)]) + '\\\\'\n lower_line = ' \\\\' + \"\".join(['=' for x in range(len(main)-4)]) + '/'\n cls.log(f'{cls.ANSI.get(\"bold\")}{color}{upper_line}\\n{main}\\n{lower_line}{default}', ignore_wrap=True)\n cls.log('', ignore_wrap=True)", "title": "" }, { "docid": "22ddc7def47a8b1a24f4e9f38d1e7c4d", "score": "0.60252815", "text": "def get_header(self):\n with self.depot.open('rb') as ofi:\n ofi.seek(0)\n self._header = ofi.readline()", "title": "" }, { "docid": "0ec19e8e6d0a152bf5c3e856014f5946", "score": "0.6021255", "text": "def _header(self) -> str:\n 
return explain()", "title": "" }, { "docid": "cc26e3771a3b0d39d60e91e898d7b638", "score": "0.60196203", "text": "def get_header(self, name):\r\n return self.headers.get(name)", "title": "" }, { "docid": "4fd6b3adf657a1526df72226a2c06172", "score": "0.60166717", "text": "def header(self) -> ContainerHeader:\n if not self._header:\n parsed = self._parse_data()\n self._data = parsed.item\n self._header = parsed.header\n return self._header", "title": "" }, { "docid": "623f6b537d5edb7202df99078d8f7839", "score": "0.6006494", "text": "def formatHeader(self, records):\n return \"\"", "title": "" }, { "docid": "74a013ab9b36c37de6116399ab490569", "score": "0.60053504", "text": "def headline(self):\n return self.data.get(\"Headline\", None)", "title": "" }, { "docid": "11e07b56efc891b7b9f1565a348a82d1", "score": "0.6000792", "text": "def heading(self):\n\n return int(self._heading)", "title": "" }, { "docid": "318a3cd2d751eaca0d63043a36fa2327", "score": "0.5993063", "text": "def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")", "title": "" }, { "docid": "124d838dcbf946f04a9d187cb0d999a5", "score": "0.597226", "text": "def header(self, head):\n # Test if head is a fits.Header object\n if not isinstance(head, fits.Header):\n raise TypeError('`head` must be an instance of astropy.io.fits.header.Header.')\n\n self.__header = head\n\n return None", "title": "" }, { "docid": "e47d071e913fa15ee0d2a496cc3cc6c3", "score": "0.5962617", "text": "def _serialize_header():\n\n content = \"% creator: {:s}\\n\".format(sg.__name__)\n content += \"% version: {:s}\\n\".format(sg.__version__)\n content += \"% date: {:s}\\n\".format(datetime.now().strftime(\"%Y-%m-%d\"))\n content += \"% author: {:s}\\n\".format(sg.__author__)\n content += \"% license: {:s}\\n\".format(sg.__copyright__)\n content += \"% \\n\"\n return content", "title": "" }, { "docid": "af3c3c006ba3660dfdc6604bd571abd7", "score": "0.59515095", "text": "def header_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"header_name\")", "title": "" }, { "docid": "a65b2341957edec056851152248ede92", "score": "0.5938276", "text": "def add_header(self, header: str) -> None:\n self.head.appendChild(RawHtml(header))", "title": "" } ]
cf24a9a72fbf65c07687a84538833017
Return a string representing the node in g2o format
[ { "docid": "223b7b608650d8a8dbb39bab7f4324fd", "score": "0.0", "text": "def to_g2o(self):\n line = \"VERTEX_SE3:QUAT {} \".format(self.id_)\n line += \" \".join([str(x) for x in self.t]) + \" \"\n line += \" \".join([str(x) for x in self.q])\n return line", "title": "" } ]
[ { "docid": "10ff3df0af534ac4a0f9ac4acdbf8192", "score": "0.7376439", "text": "def __str__(self):\n string = '@nodes\\nlabel deg\\n'\n for i in self.V:\n string += '%3d %3d\\n'%(i,self.deg(i))\n\n string += '@edges\\n label\\n'\n for e in self.E:\n string += '%3d %3d %3d\\n'%(e.i,e.j,e.id)\n\n return string", "title": "" }, { "docid": "bf2605d3a809871005ef637c6ecff7f2", "score": "0.7289011", "text": "def __str__(self):\n return \"Node %d\" % self.node_id", "title": "" }, { "docid": "fb22e0178f0dcfce22b25237af8174a3", "score": "0.72559446", "text": "def __str__(self):\n return \"Node: value: \" + str(self._value) + \" a_i: \" + str(self._a_i) + \" Layer: \" + str(self._layer)", "title": "" }, { "docid": "a82c9a3443fac81825da610e734542cf", "score": "0.72367096", "text": "def __repr__(self) -> str:\n return \"Node({})\".format(self.val)", "title": "" }, { "docid": "fa444d90e420135e56cf56d1804e27cd", "score": "0.7224146", "text": "def __repr__(self):\n raw_shape = str(self.getDenseRaw().shape) if self.raw is not None else \"None\"\n raw = str(self.getDenseRaw()) if self.raw is not None else \"\"\n raw_housing = str(self.getDenseRawHousing().shape) if self.raw_housing is not None else \"None\"\n output = f\"--- geounit Node ---\\n\" \\\n f\"geocode: {self.geocode}, geolevel {self.geolevel}\\n\" \\\n f\"parent geocode: {self.parentGeocode}\\n\" \\\n f\"raw_shape: {raw_shape}\\n\" \\\n f\"raw: {raw}\\n\" \\\n f\"raw_housing: {raw_housing} \\n\" \\\n f\"dp: {self.dp}\\n\" \\\n f\"cons: {self.cons}\\n\" \\\n f\"invar: {self.invar}\\n\" \\\n f\"syn: {self.syn}\\n\" \\\n f\"syn_unrounded: {self.syn_unrounded}\\n\" \\\n f\"dp_queries: {self.dp_queries}\\n\"\n return output", "title": "" }, { "docid": "25077d8a4d533744763eef4185e19180", "score": "0.715805", "text": "def __str__(self):\n return self.source + \" \" + self.flag + \" \" + str(self.ttl) + \" \" + str(self.hops) + \" \" + self.end_node", "title": "" }, { "docid": "57038897f7e1ef3f2c81c87455238346", "score": "0.71216816", "text": "def __str__(self):\n\n return \"Node %i is located at (%f, %f) with degree %i\" % \\\n (self.uid, self.x, self.y, self.degree)", "title": "" }, { "docid": "f985a4ba961acc8da35bf1ac1153c265", "score": "0.7119959", "text": "def __str__(self):\n return self._graph.serialize(format=self.__fmt).decode('utf8')", "title": "" }, { "docid": "2fda9ba9afd044b9b248c6a598fde751", "score": "0.71196175", "text": "def __str__(self):\n return \"(%s,%s,%g)\" % (str(self.node1), str(self.node2), self.alpha)", "title": "" }, { "docid": "8dbea2f1ad2998991da75f15f858fc4b", "score": "0.70746374", "text": "def __str__(self):\n return self.source + \" \" + self.flag + \" \" + str(self.ttl) + \" \" + self.end_node + \" \" + self.next_node + \\\n \" \" + self.source_peer_id + \" \" + self.target_peer_id", "title": "" }, { "docid": "9ea8d43a74e066174b84ba6dc0d3f888", "score": "0.7048565", "text": "def __str__(self):\n return self.source + \" \" + self.flag + \" \" + str(self.ttl) + \" \" + str(self.hops) + \" \" + self.end_node + \" \" \\\n + self.next_node", "title": "" }, { "docid": "a97802aaa42bedbfb6f99d6e8a0423f3", "score": "0.7040183", "text": "def node_to_str(self, node) -> str:\n if isinstance(node, astroid.node_classes.Name):\n return node.name\n if isinstance(node, astroid.node_classes.AssignName):\n return node.name\n if isinstance(node, astroid.node_classes.Const):\n return node.value\n if isinstance(node, astroid.node_classes.JoinedStr):\n return self.node_concat_const(node.values)\n if isinstance(node, 
astroid.node_classes.Keyword):\n return node.arg\n if isinstance(node, astroid.node_classes.Call):\n return self.node_concat_const(node.args)\n return \"\"", "title": "" }, { "docid": "9737e835273f7ed02fc467a91697e5f2", "score": "0.69802636", "text": "def __repr__(self):\n return str(self.nodes)", "title": "" }, { "docid": "0165a0d48cc804f52e14322dd4ff9560", "score": "0.69427365", "text": "def __repr__(self):\n return \"Node({!r})\".format(self.data)", "title": "" }, { "docid": "ba850c4218ce3f6280284dec182f7a46", "score": "0.6927158", "text": "def __repr__(self):\n return 'Node({!r})'.format(self.data)", "title": "" }, { "docid": "ba850c4218ce3f6280284dec182f7a46", "score": "0.6927158", "text": "def __repr__(self):\n return 'Node({!r})'.format(self.data)", "title": "" }, { "docid": "55270b8e55f73fef0813381555649ef2", "score": "0.69023454", "text": "def __str__(self):\n return \"Node: {{value: {}, next: {}}}\".format(self.value, self.next.value if self.next else None)", "title": "" }, { "docid": "f919dc67f65d8201f1192e4d1dcc6284", "score": "0.6867839", "text": "def __str__(self):\n string = \"\"\n current_node = self.head_node\n while current_node:\n if current_node.data != None:\n string += str(current_node.data) + \" -> \"\n current_node = current_node.next_node\n return string", "title": "" }, { "docid": "1589acb2b9e2809c523325575f81c0c8", "score": "0.6819804", "text": "def __str__( self ) :\n\t\t\"*** YOUR CODE HERE (Optional) ***\"\n\t\tutil.raiseNotDefined()\n\t\treturn 'Node'", "title": "" }, { "docid": "dd9102f654cf2906f433ddf0cdedf795", "score": "0.6814448", "text": "def __repr__(self):\n return \"Node: \" + str(self._node_id) + \" Edge: \" \\\n + str(self._edge_list)", "title": "" }, { "docid": "ed0ada6994870154f37e4ad0f67dc330", "score": "0.67917746", "text": "def __str__(self):\n print('This is node {} and my value is {}'.format(self.identifier, self.value))\n print('My incoming edges are:')\n for edge in self.inc_edges:\n print(edge.__str__())\n print('My outgoing edges are:')\n for edge in self.out_edges:\n print(edge.__str__())", "title": "" }, { "docid": "c3d76dbe5e75471f5a6909b873a2112b", "score": "0.6771085", "text": "def __repr__(self):\n\n return \"<Node {data}>\".format(data=self.data)", "title": "" }, { "docid": "cefbf65a78ed470526e5b400311517d2", "score": "0.6762325", "text": "def __repr__(self):\n iden = f\" {self.id}\" if self.id else \"\"\n\n try:\n node = self.node.id\n except RuntimeError:\n node = \"<missing> (bad!)\"\n\n return f\"<{type(self).__name__}{iden} (node: {node})>\"", "title": "" }, { "docid": "445eaef38f5b4604d55ceec8bfbd505c", "score": "0.6761913", "text": "def __repr__(self):\n return 'Node({})'.format(repr(self.data))", "title": "" }, { "docid": "445eaef38f5b4604d55ceec8bfbd505c", "score": "0.6761913", "text": "def __repr__(self):\n return 'Node({})'.format(repr(self.data))", "title": "" }, { "docid": "445eaef38f5b4604d55ceec8bfbd505c", "score": "0.6761913", "text": "def __repr__(self):\n return 'Node({})'.format(repr(self.data))", "title": "" }, { "docid": "ea57ea3207aee42d4dedf94ae0718417", "score": "0.6747369", "text": "def __str__(self):\n return self.source + \" \" + self.flag + \" \" + str(self.ttl) + \" \" + self.end_node + \" \" + self.next_node + \\\n \" \" + self.source_peer_id + \" \" + self.target_peer_id + \" \" + self.timeout", "title": "" }, { "docid": "8f110f16f5077f0bd7877c823da2e852", "score": "0.6736107", "text": "def __str__(self):\n str_ = ''\n height = self.height()\n for depth in range(height + 1):\n str_ += ' ' * 3 * (2 ** 
(height - depth) - 1)\n for index in range(2 ** depth):\n if self.isNodeNoneByLocation(depth,index):\n str_ += ' . '\n str_ += ' ' * (3 * (2 ** (height - depth + 1) - 1) - 2)\n elif isinstance(self.getNodeByLocation(depth,index).value,(int,float)):\n str_ += \"{: 5.01f}\".format(round(self.getNodeByLocation(depth,index).value,1))\n str_ += ' ' * (3 * (2 ** (height - depth + 1) - 1) - 2)\n else:\n str_ += \"{:^5}\".format(self.getNodeByLocation(depth,index).value)\n str_ += ' ' * (3 * (2 ** (height - depth + 1) - 1) - 2)\n str_ += '\\n'\n return str_", "title": "" }, { "docid": "424f1a435127936d4e92b993e42bae02", "score": "0.67105275", "text": "def string(self):\n node = self.head\n output = \"\"\n\n while node:\n end = \"-> \"\n if not node.next:\n end = \"-> None\"\n output = \"{}{}{}\".format(output, node.data, end)\n node = node.next\n\n return(output)", "title": "" }, { "docid": "cfa32724cd7307d9384be13a434a8eac", "score": "0.67026657", "text": "def __str__(self):\n\n return \"Edge between Node %i and Node %i has name %s with distance %f, speed limit %i, and %i lanes\" % \\\n (self.uid1, self.uid2, self.name, self.distance, self.speedLimit, self.lanes)", "title": "" }, { "docid": "ebcf02f02df49d792428e03e0602f00b", "score": "0.66971385", "text": "def get_node_name_as_str(self):\n try:\n return '-'.join([\"\" if i is None else i for i in self.attr['indexed_public']['node_name']])\n except:\n return None", "title": "" }, { "docid": "1ead1fec905c069fc8eef99d4cf2180e", "score": "0.66713834", "text": "def __repr__(self):\n return f\"Node('{self.val}', {self.attrs})\"", "title": "" }, { "docid": "0dcc5861f433232e4c6c524a14520f11", "score": "0.66684526", "text": "def __str__(self):\n returnString = \"<n\\n\"\n returnString += \"id=\\\"\" + str(self.id) + \"\\\"\\n\"\n returnString += \"p=\\\"\" + str(self.x) + \" \" + str(self.y) + \"\\\"\\n\"\n if nodeType:\n returnString += \"NodeType=\\\"1\\\"\\n\"\n returnString += \"Element=\\\"\" + pt.elements.symbol(re.sub(r\"H\\:.+?\\, \", \"\", str(pt.formula(text).hill.atoms)).split(\"{\",1)[-1].split(\":\",1)[0]).number + \"\\\"\\n\"\n returnString += \"NumHydrogens=\\\"' + \"\n returnString += \"/>\"\n return returnString", "title": "" }, { "docid": "84444cb9ae60056888f824aaec8fddc0", "score": "0.6653387", "text": "def __str__(self):\n lines = []\n lines.append('<Node>')\n lines.append('\\t<Id>{0}</Id>'.format(self.id))\n lines.append('\\t<ClassName>{0}</ClassName>'.format(self.class_name)) # TODO change this if relevant for final XML notation \n lines.append('\\t<Top>{0}</Top>'.format(self.top))\n lines.append('\\t<Left>{0}</Left>'.format(self.left))\n lines.append('\\t<Width>{0}</Width>'.format(self.__width))\n lines.append('\\t<Height>{0}</Height>'.format(self.__height))\n\n mask_string = self.encode_mask()\n lines.append('\\t<Mask>{0}</Mask>'.format(mask_string))\n\n if len(self.inlinks) > 0:\n inlinks_string = ' '.join(list(map(str, self.inlinks)))\n lines.append('\\t<Inlinks>{0}</Inlinks>'.format(inlinks_string))\n if len(self.outlinks) > 0:\n outlinks_string = ' '.join(list(map(str, self.outlinks)))\n lines.append('\\t<Outlinks>{0}</Outlinks>'.format(outlinks_string))\n\n data_string = self.encode_data()\n if data_string is not None:\n lines.append('\\t<Data>\\n{0}\\n\\t</Data>'.format(data_string))\n\n lines.append('</Node>')\n return '\\n'.join(lines)", "title": "" }, { "docid": "4370f7b663489f055def46918fc05514", "score": "0.66325605", "text": "def __str__(self):\n s = \"\"\n node = self.head\n while node is not None:\n s += 
str(node.data) + \" \"\n node = node.next\n return s", "title": "" }, { "docid": "4370f7b663489f055def46918fc05514", "score": "0.66325605", "text": "def __str__(self):\n s = \"\"\n node = self.head\n while node is not None:\n s += str(node.data) + \" \"\n node = node.next\n return s", "title": "" }, { "docid": "3a11089884e95c46eb633f175ad5b4e2", "score": "0.66208595", "text": "def __str__(self):\r\n outstr = \"-\"\r\n node = self._first\r\n while node:\r\n outstr = outstr + str(node._element) + \"-\"\r\n #can access node's private variable, because defined in this file\r\n node = node._next\r\n return outstr", "title": "" }, { "docid": "5be50daae063bed5021ea5d7e4768236", "score": "0.6605113", "text": "def __str__(self):\n # pylint: disable = no-member\n return self.tree.to_string(verbose=False)", "title": "" }, { "docid": "7163330e8b93d476b54324653f2332d3", "score": "0.65766865", "text": "def __str__(self: 'LLNode') -> str:\n return str(self.item) + (' -> ' + str(self.link) if self.link else '')", "title": "" }, { "docid": "a0bb5eef010c16176d5d7a80e0a8f30b", "score": "0.6571841", "text": "def __repr__(self):\n return f' <Node | Value: {self.root.value} | Root : {self.root}>'", "title": "" }, { "docid": "a0365cbce359ab6a0db213b04b5c14f1", "score": "0.65679795", "text": "def __repr__(self):\r\n ret = \"** Node %d: %s **\\n\" % \\\r\n (self._id, self.State.reverse_mapping[self._state])\r\n ret += repr(self._io)\r\n return ret", "title": "" }, { "docid": "9a276433e8c23f5cf6f69d90d66ee873", "score": "0.6559514", "text": "def to_g2o(self):\n line = \"EDGE_SE2 {} {} {} {} {} \".format(self.i, self.j, self.x, self.y,\n self.theta)\n line += \" \".join([str(x) for x in self.info])\n return line", "title": "" }, { "docid": "c491d47f4521fde8cfed5cc96491a260", "score": "0.6558002", "text": "def _tostring(self, tree_link=TreeLink()):\n if self == self.get_root():\n name = split(self.hdf.filename)[1]\n else:\n name = self.name\n \n s = str(tree_link) + name + self._tostring_attrs()\n \n tree_link.append(TTL_G)\n \n i = 0\n for item in self:\n i += 1\n tree_link.follow(TTL_G, len(self) == i)\n s += '\\n' + item._tostring(tree_link)\n tree_link.down()\n \n return s", "title": "" }, { "docid": "cbe6e830dec8640aa55c5d8c9c6f33e3", "score": "0.65491", "text": "def hg_node(self):\r\n res = self.hg_command(\"log\", \"-r\", self.hg_id(),\r\n \"--template\", \"{node}\")\r\n return res.strip()", "title": "" }, { "docid": "daf1da7188f15bfc4d20fe3d45044116", "score": "0.65328294", "text": "def to_string(self):\n return self.name + \" :- \" + \\\n \" -> \".join([r[0] + '-' * r[1] for r in self.relations[0]]) +\\\n \" => \" +\\\n \" -> \".join([r[0] + '-' * r[1] for r in self.relations[1]])", "title": "" }, { "docid": "3d35990c629a9418352baf91fe613c6b", "score": "0.651472", "text": "def __repr__(self: 'Node') -> str:\n if not self.next:\n return 'Node({})'.format(repr(self.item))\n else:\n return 'Node({}, {})'.format(repr(self.item), repr(self.next)) \n # This is recursive; take a few moments to think about how this works", "title": "" }, { "docid": "1cce890d41ff610e3496f002c6cb82e8", "score": "0.6493908", "text": "def to_string(self):\n return self['target']", "title": "" }, { "docid": "e6f69763a02f8c69561a46d6dd4e81d6", "score": "0.64933425", "text": "def __repr__(self):\n return f' <Node | Value: {self.value} | Data: {self.data} | Left: {self.left} | Right: {self.right} | Next: {self._next} >'", "title": "" }, { "docid": "d9c539543c1503ae02be50cd5af4c887", "score": "0.6470727", "text": "def serialize(self, node: 
TreeNode) -> str:\n def encode(node):\n if not node:\n return ''\n else:\n return str(node.val)+'-'+encode(node.left)+encode(node.right)\n return encode(node)", "title": "" }, { "docid": "28df12428c441b3078020675d894534b", "score": "0.6457192", "text": "def __str__(self):\n\n def recurse(node, level):\n part = \"\"\n if node is not None:\n part += recurse(node.right, level + 1)\n part += \"| \" * level\n part += str(node.data) + \"\\n\"\n part += recurse(node.left, level + 1)\n return part\n\n return recurse(self._root, 0)", "title": "" }, { "docid": "9e298e5f627897bb0aea62e7e0676082", "score": "0.64539313", "text": "def nodeToString(node):\n return etree.tostring(node, encoding='utf-8')", "title": "" }, { "docid": "3818e9792c6f60feed7a19021292acb8", "score": "0.6448398", "text": "def __dump_tree(self, node):\n return node.toStringTree()", "title": "" }, { "docid": "ac586947231fd5f326d06d133209d0ea", "score": "0.6438046", "text": "def __repr__(self):\n return 'Node({coords}, {self.atoms}, {self.multiplicity})'.format(coords=self.coords.tolist(), self=self)", "title": "" }, { "docid": "85e9a3699804ff442792e80541881c77", "score": "0.6435616", "text": "def __str__(self):\n label = self._find_label()\n if self.Children:\n child_string = ','.join(map(str, self.Children))\n else:\n child_string = ''\n\n if self.Parent is None: #root of tree\n if self.Children:\n return '(%s)%s' % (child_string, label)\n else:\n return '()%s' % label\n else: #internal node\n if self.Children:\n if hasattr(self, 'Length') and (self.Length!=None):\n return '(%s)%s:%s' % \\\n (child_string, label, self.Length)\n else:\n return '(%s)%s' % \\\n (child_string, label)\n else:\n if hasattr(self, 'Length') and (self.Length!=None):\n return '%s:%s' % (label, self.Length)\n else:\n return '%s' % (label)", "title": "" }, { "docid": "7f7c48742e9a82bf27d99b6be3357113", "score": "0.6425798", "text": "def __repr__(self):\n node = self.root\n nodes = []\n while node is not None:\n nodes.append('{%s}' % node.value)\n node = node.right\n nodes.append(\"Null\")\n return \" -> \".join(nodes)", "title": "" }, { "docid": "6fe952cce2d7531485ebb0b4a5783b0a", "score": "0.63891876", "text": "def __str__(self):\n output = f'Graph: - {self.graph}'\n return output", "title": "" }, { "docid": "6d89ab0f44207419c473246226e45823", "score": "0.63867235", "text": "def __repr__(self: 'LLNode') -> str:\n return ('LLNode({}, {})'.format(repr(self.item), repr(self.link)) \n if self.link else 'LLNode({})'.format(repr(self.item)))", "title": "" }, { "docid": "a0400375980148bca710056f9ab1ed0f", "score": "0.63846195", "text": "def __str__(self):\r\n return ('( Start ' + str(self._vertices[0]) + '-- End '\r\n + str(self._vertices[1]) + ' : '\r\n + str(self._element) + ')')", "title": "" }, { "docid": "0f75b1553d21e9a349b6629f7912ecfc", "score": "0.6384201", "text": "def __str__(self):\n result = \"\"\n node = self.head\n\n while node:\n prev = \"None\"\n nxt = \"None\"\n\n if node.get_previous():\n prev = str(node.get_previous().get_value())\n\n if node.get_next():\n nxt = str(node.get_next().get_value())\n\n result += str(node.get_value()) + \" (\" + prev + \", \" + nxt + \") \" + \"-> \"\n node = node.get_next()\n\n return result + \"None\"", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: 
\" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "4a7e4a656aa2b0d90af8f06a3df59e76", "score": "0.63687384", "text": "def to_string(self):\r\n return \"Object: \" + self.type + \"\\t ID: \" + str(self.ID)", "title": "" }, { "docid": "f62b84b11b21462c6a287505efe78892", "score": "0.63666725", "text": "def to_g2o(self):\n return \"VERTEX_SE2 {} {} {} {}\".format(self.id_, self.x, self.y,\n self.theta)", "title": "" }, { "docid": "96f3574ecc96e960abd5c23a1cd1ecbf", "score": "0.6359654", "text": "def __str__(self):\n res = '(V={'\n for v in self.vertices():\n res = res + str(v) + ','\n res = res + '}, E={'\n for e in self.edges():\n res = res + str(e) + ','\n res = res + '})'\n return res", "title": "" }, { "docid": "892d745e47a5941e0cc1bf725c17b9ab", "score": "0.6354981", "text": "def node_obs_to_str(obs: Observation) -> str:\n if isinstance(obs, int):\n obs_str = '{obs:d}'.format(obs=obs)\n elif isinstance(obs, str):\n obs_str = obs\n else:\n msg = f'obs ({obs} of type ({type(obs)}) must be of type: int, str)'\n raise ValueError(msg)\n\n return obs_str", "title": "" }, { "docid": "077680815ba786f8a4aca76104c8342e", "score": "0.6352432", "text": "def __str__(self) -> str:\n return (\n '<Telegram direction=\"{}\" source_address=\"{}\" '\n 'destination_address=\"{}\" payload=\"{}\" />'.format(\n self.direction.value,\n self.source_address,\n self.destination_address,\n self.payload,\n )\n )", "title": "" }, { "docid": "18bd209b79afe22c1807ec4600797a46", "score": "0.634604", "text": "def pretty_instance(node = None):\n if not node:\n node = current_node()\n return \"%s (%s @ %s)\" % (node.id, node.tags.get(\"Name\"), ip_address(node))", "title": "" }, { "docid": "aed57c1ee665b15b8f1924c981f97762", "score": "0.6338831", "text": "def to_g2o(self):\n return \"VERTEX_SE2 {} {} {} {}\\n\".format(self.id, self.pose[0], self.pose[1], self.pose[2])", "title": "" }, { "docid": "320a60702c302c348d6fb54f34a9c2b8", "score": "0.63335246", "text": "def __str__(self):\n return (\"LLNode: \" + str(self._data) +\n \" -> \" + str(self._next))", "title": "" }, { "docid": "db38dde20dc6810a9e197ff6de6c3427", "score": "0.63316023", "text": "def __repr__(self):\n\n node = self.head\n out = \"[\"\n while node is not None:\n out += str(node.weight)\n if node.link is not None:\n out += \", \"\n node = node.link\n out += \"]\"\n return out", "title": "" }, { "docid": "c06c770227f4d40a47aaac70f643a37e", "score": "0.6322638", 
"text": "def build_node_repr(name):\n return '{}:{}:{}'.format(\n get_specific_host(get_service_name(), name),\n get_specific_port(get_service_name(), name, 'peer'),\n get_specific_port(get_service_name(), name, 'leader_election'))", "title": "" }, { "docid": "54f0af0116217a9a49f637cfe9c1231c", "score": "0.63031495", "text": "def __repr__(self) -> str:\n cls = self.__class__.__name__\n node_features_str = str(list(self.node_features.shape))\n edge_index_str = str(list(self.edge_index.shape))\n if self.edge_features is not None:\n edge_features_str = str(list(self.edge_features.shape))\n else:\n edge_features_str = \"None\"\n\n out = \"%s(node_features=%s, edge_index=%s, edge_features=%s\" % (\n cls, node_features_str, edge_index_str, edge_features_str)\n # Adding shapes of kwargs\n for key, value in self.kwargs.items():\n if isinstance(value, np.ndarray):\n out += (', ' + key + '=' + str(list(value.shape)))\n elif isinstance(value, str):\n out += (', ' + key + '=' + value)\n elif isinstance(value, int) or isinstance(value, float):\n out += (', ' + key + '=' + str(value))\n out += ')'\n return out", "title": "" }, { "docid": "9938c0d7179d552251811f632427d06e", "score": "0.62982243", "text": "def __str__(self):\n if self.name is not None:\n return self.name\n if self.owner is not None:\n op = self.owner.op\n if self.index == op.default_output:\n return str(self.owner.op) + \".out\"\n else:\n return str(self.owner.op) + \".\" + str(self.index)\n else:\n return \"<%s>\" % str(self.type)", "title": "" }, { "docid": "6eeff57280858098640934c642307c83", "score": "0.62803847", "text": "def __str__(self):\n return ('<' + str(self._coords)[1:-1] + '>')", "title": "" }, { "docid": "71a73dcb4a5a992ec9f15800170e99b3", "score": "0.62698984", "text": "def __str__(self):\n if self.is_leaf:\n return str(self.label)\n return \"(\" + str(self.label) + \" \" + \\\n \" \".join(map(str, self)) + \")\"", "title": "" }, { "docid": "0b188d000f239801c94b13f604ac7394", "score": "0.62486506", "text": "def __repr__(self):\n output = f'<Graph: - {self.graph}>'\n return output", "title": "" }, { "docid": "4de19c8bdc200209792dbc509a361075", "score": "0.6236671", "text": "def __str__(self):\n return self.to_str(\"%g%s\")", "title": "" }, { "docid": "b453a315f986344a12b28d9bd5dda327", "score": "0.62321496", "text": "def __str__ (self):\n\n\n if (not self.__latex):\n res_str = \" %s => [ %s ]\"\n edge_str=\"\\n \"\n for edge in self.__edges:\n if edge is not None: edge_str+= repr(edge) + \",\\n\"\n return res_str%(self.getName(),edge_str)\n else:\n return \"\"\"\n \\\\tikzset{VertexStyle/.append style={fill}}\n \\Vertex[x=%.1f ,y=%.1f]{%s}\n \"\"\"%( self.getX(),self.getY(),self.getName() )", "title": "" }, { "docid": "83e221f55114457de81a82a0f7c36d14", "score": "0.6225474", "text": "def __repr__(self):\n return 'BTNode({}, {}, {})'.format(repr(self.data),\n repr(self.left),\n repr(self.right))", "title": "" }, { "docid": "a75dd3375d7293a6fd2b1bf9f592f646", "score": "0.62216854", "text": "def node(self):\n return self[\"node\"]", "title": "" }, { "docid": "f8ecec2bcc522eda2b85ba2b153b39fb", "score": "0.6214072", "text": "def __str__(self):\n return str([{k: v} for (k, v) in self.graph.items()])", "title": "" }, { "docid": "177b6bd6a4e4ea3c985af0dc2d1440ee", "score": "0.6201795", "text": "def __str__(self):\r\n result = self.inorder(self.root_node)\r\n return ' '.join(result)", "title": "" }, { "docid": "9a3c11ac5df25d53452eef350a41aaf8", "score": "0.6189298", "text": "def to_dot(self):\n attributes = {\n 'label': 
self.label(),\n 'name': self.name(),\n 'shape': \"rect\",\n 'style': 'rounded,filled',\n 'fillcolor': NODE_COLOR,\n }\n if self.is_trunk:\n attributes['fillcolor'] = TRUNK_COLOR\n elif self.is_leaf:\n attributes['fillcolor'] = LEAF_COLOR\n\n ret = self.uid + ' ['\n for k, v in attributes.items():\n ret += f'{k}=\"{v}\" '\n ret += ']'\n return ret", "title": "" }, { "docid": "1d51918d7045f244163e93115fe80346", "score": "0.6188718", "text": "def __str__(self):\n\n str_representation = \\\n + 80 * \"_\" + os.linesep \\\n + 80 * \"-\" + os.linesep\n str_representation += \\\n (\"Gene(object):\\t\" + self.gene_id + os.linesep)\n str_representation += \\\n + 80 * \"-\" + os.linesep\n str_representation += \\\n self.__repr__()\n str_representation += \\\n + 80 * \"-\" + os.linesep\n\n return(str_representation)", "title": "" }, { "docid": "4470bb21ed6e7e9494d0a1370f9472cf", "score": "0.6186035", "text": "def _repr(self, node, level):\n zeros = \" \" * (level * 6)\n result = \"---------------------------------------------------------------------\\n\"\n result += zeros + \" {} ( name:{}, id:{} )\\n\".format(node.type,\n node.name, node.id)\n #result += \"----------------------------------------------------------------------\\n\"\n result += zeros + \" Components: \\n\"\n for item in node.items:\n item = node.items[item]\n result += zeros + \" {} ( name:{}, id:{} )\\n\".format(item.type,\n item.name, item.id)\n #result += \"---------------------------------------------------------------------\\n\"\n result += zeros + \" Children: \\n\" \n for child in node.children:\n result += self._repr(node.children[child], level+1)\n \n return result", "title": "" }, { "docid": "22a0d95a35bc6863f651a61efeeb24e0", "score": "0.6185861", "text": "def GetNode(self) -> GvNode:\n ...", "title": "" }, { "docid": "ee61295f08079082acef20c3278e517e", "score": "0.6184456", "text": "def __str__(self):\n return \"<\" + str(self.x) + \",\" + str(self.y) + \">\"", "title": "" }, { "docid": "f5262c8a848f92bcfd23ae60120ae638", "score": "0.61812353", "text": "def __str__(self) -> str:\n\n cur_node = self.front\n result = ''\n while cur_node is not None:\n result += str(cur_node) + \"\\n\"\n cur_node = cur_node.next\n return result + '|'", "title": "" }, { "docid": "5178be3133da472606c8f116b6c54491", "score": "0.6178634", "text": "def __str__(self):\n return '(' + str(self._origin) + ',' + str(self._destination) + ',' + str(self._label) + ')'", "title": "" }, { "docid": "874b3b8d7e0deeb04ecfa281097e89dc", "score": "0.6174778", "text": "def __str__(self):\n return str(self.edges)", "title": "" }, { "docid": "874b3b8d7e0deeb04ecfa281097e89dc", "score": "0.6174778", "text": "def __str__(self):\n return str(self.edges)", "title": "" }, { "docid": "9f803529a26cda4da620b7bd69290152", "score": "0.61720467", "text": "def __str__(self):\n return self.network.__str__()", "title": "" }, { "docid": "a6ecb6c88cda5196d69fed53d54775b5", "score": "0.61675996", "text": "def __repr__(self):\n node = self.head\n nodes = []\n while node is not None:\n nodes.append(node.value)\n node = node.next\n nodes.append(\"None\")\n return \" -> \".join(nodes)", "title": "" }, { "docid": "dbd4e02410f19000045ec1bb4f528324", "score": "0.6167426", "text": "def __str__(self) -> str:\n return f\"NNC(resolution={self.resolution}):\\n{self.tree}\"", "title": "" }, { "docid": "81df1e697ffb83160470971b8d60ea56", "score": "0.6161392", "text": "def __repr__(self) -> str:\n return f\"NNC(resolution={self.resolution}):\\n{self.tree}\"", "title": "" } ]
f2d1d503acd930c5ec07c4f30e77a603
Function to calculate the overall sentiment using NLTK's vader library.
[ { "docid": "798a0136783d5ddca3272bbee7431a00", "score": "0.67116237", "text": "def overall_sentiment(text):\r\n sid = SentimentIntensityAnalyzer()\r\n ss = sid.polarity_scores(text)\r\n for _ in sorted(ss):\r\n if ss[\"compound\"] >= 0.15:\r\n return \"positive\"\r\n elif ss[\"compound\"] <= -0.01:\r\n return \"negative\"\r\n else:\r\n return \"neutral\"", "title": "" } ]
[ { "docid": "e3c5c0e86bce2b5b341ec4e089f8f077", "score": "0.72598004", "text": "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment", "title": "" }, { "docid": "65fc6b49c24f9328b1146eadf54d631e", "score": "0.7239759", "text": "def sentiment_training(self):\n\n polarity = 0\n positive = 0\n negative = 0\n neutral = 0\n n = self.num\n key = self.keyword\n\n for text in self.text:\n p = sentiment_analysis(text)\n polarity += p\n self.sentiment_.append(p)\n if p <= 0.56:\n negative += 1\n elif 0.56 < p <= 0.67:\n neutral += 1\n elif p > 0.67:\n positive += 1\n\n positive = share(positive, n)\n negative = share(negative, n)\n neutral = share(neutral, n)\n\n polarity = share(polarity, n) / 100\n\n return (\n polarity,\n positive,\n negative,\n neutral,\n key,\n n,\n )", "title": "" }, { "docid": "20b1389702092029c918010c64d6c00d", "score": "0.7234791", "text": "def get_sentiment_nltk(self):\n s_nltk = SentimentIntensityAnalyzer()\n\n polarity = 0\n positive = 0\n negative = 0\n neutral = 0\n n = self.num\n key = self.keyword\n\n for text in self.text:\n p = s_nltk.polarity_scores(text)[\"compound\"]\n polarity += p\n self.sentiment_vader.append(p)\n if p < 0:\n negative += 1\n elif p == 0:\n neutral += 1\n elif p > 0:\n positive += 1\n\n positive = share(positive, n)\n negative = share(negative, n)\n neutral = share(neutral, n)\n\n polarity = share(polarity, n) / 100\n\n return (\n polarity,\n positive,\n negative,\n neutral,\n key,\n n,\n )", "title": "" }, { "docid": "df5eaa7ab27805aaf3cc45604dfbd4e3", "score": "0.71887183", "text": "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments?\n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n else:\n sentiment = 0\n return sentiment", "title": "" }, { "docid": "57071e680d03f855abb98e9e14e76508", "score": "0.7062856", "text": "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments1 = map(lambda word: wordlist.get(word, 0), words)\n sentiments = []\n for k in sentiments1:\n\tif k != 0:\n\t\tsentiments.append(k)\n if sentiments:\n # How should you weight the individual word sentiments?\n # You could do N, sqrt(N) or 1 for example. 
Here I use sqrt(N\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n else:\n sentiment = 0\n #print sentiment\n return sentiment", "title": "" }, { "docid": "1f0a807d9b1b8330ef108dc402cf0ac2", "score": "0.69930243", "text": "def AnalysingSentiment(Tweet):\r\n Analysis=TextBlob(Tweet)\r\n Polarity=Analysis.sentiment.polarity\r\n if Analysis.sentiment.polarity==0:\r\n Sentiment='Neutral'\r\n elif Analysis.sentiment.polarity>0:\r\n Sentiment='Positive'\r\n else:\r\n Sentiment='Negative'\r\n return Polarity, Sentiment", "title": "" }, { "docid": "fed7853abde542fc86ede4455f87cccd", "score": "0.6957903", "text": "def sentiment(text):\n analyzer = SentimentIntensityAnalyzer()\n f = analyzer.polarity_scores(text)\n return f", "title": "" }, { "docid": "36b1caa4646321c97354c9fdae41b60b", "score": "0.69462544", "text": "def sentiment(sentence, pos_lex, neg_lex):\n\n s = sentence.split(' ')\n p_pos = 0.5\n p_neg = 0.5\n p_pos_sentiment = p_pos\n for word in s:\n if word in pos_lex and word not in set(stopwords.words('english')):\n p_pos_sentiment = p_pos_sentiment * pos_lex[word]\n p_neg_sentiment = p_neg\n for word in s:\n if word in neg_lex and word not in set(stopwords.words('english')):\n p_neg_sentiment = p_neg_sentiment * neg_lex[word]\n if p_neg_sentiment != 0 and p_pos_sentiment != 0:\n return round(p_pos_sentiment / (p_pos_sentiment + p_neg_sentiment), 4)\n else:\n return 0.5", "title": "" }, { "docid": "a2fb7e935a3446c78bc12d191885f406", "score": "0.6879568", "text": "def vader_sentiment(text:str) -> dict:\n return sia.polarity_scores(text)", "title": "" }, { "docid": "bf87bcedbe3bd94b0c481b0e279106ba", "score": "0.68760324", "text": "def get_vader_scores(text):\n analyser = SentimentIntensityAnalyzer()\n scores = analyser.polarity_scores(text)\n scores['compound'] += 1\n return scores", "title": "" }, { "docid": "85da30eb25290b3efde46c32b8477caf", "score": "0.67957073", "text": "def sentiment(words):\n sentiments = map(lambda word: afinn.get(word, 0), words)\n#\tprint sentiments\n if sentiments:\n # How should you weight the individual word sentiments?\n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n else:\n sentiment = 0\n return sentiment", "title": "" }, { "docid": "63e8532253e4bd2a01ca3ae8b506ce7d", "score": "0.67835623", "text": "def sentiment():\n # TextBlob can calculate the \"sentiment\" of a sentence. 
\"Sentiment\" is a measurement of the emotional content of the sentence: the number is positive (between 0 and 1) if the sentence says something \"good\" and negative (between 0 and -1) if the sentence says something \"bad.\"\n\n blob = textblob.TextBlob(open(\"cnn.txt\").read())\n\n for item in blob.sentences:\n if item.sentiment.polarity < 0:\n print(item, item.sentiment.polarity)\n # print(item.sentiment)", "title": "" }, { "docid": "7492cd14f88ebb00c3acfae32c2af8de", "score": "0.6782829", "text": "def interpret_sentiment(txt: str) -> float:\n analyser = SentimentIntensityAnalyzer()\n analyser.lexicon.update(lexicon.wsb_words)\n analyser.lexicon.update(lexicon.emojis)\n return analyser.polarity_scores(txt)['compound']", "title": "" }, { "docid": "0943d68ab0eed1dcb2f377dbb13355df", "score": "0.67797375", "text": "def getSentiment(text):\n cleantext = cleanText(text)\n return (countPos(cleantext) - countNeg(cleantext))", "title": "" }, { "docid": "6a34c087b3f0bef13c2f924d9d94b8e9", "score": "0.674879", "text": "def analyse_sentiment():\n try:\n subreddit = scraper.load_data('sub-reddits.txt')\n except IOError as e:\n print e\n return\n \n try:\n sentiments = scraper.load_sent()\n except IOError as e:\n print e\n return\n \n names, scores, titles = anl.sentiment(subreddit, sentiments)\n \n plot_bar_avg(names, scores)\n plot_bar(names, scores)", "title": "" }, { "docid": "6a6da8e8bcdc7c02f0d99c768de2be60", "score": "0.67190063", "text": "def analyze(lyrics, print):\n lyrics = preProcess(lyrics)\n\n sia = SentimentIntensityAnalyzer()\n sia_sent = sia.polarity_scores(lyrics)\n\n blob = TextBlob(lyrics)\n blob_sent = blob.sentiment.polarity\n\n avg_sentiment = (sia_sent['compound'] + blob_sent) / 2\n\n if print:\n print(\"\\nAverage Sentiment:\", avg_sentiment)\n\n if -1 <= avg_sentiment < -0.6:\n print(\"Negative\")\n elif -0.6 <= avg_sentiment < -0.3:\n print(\"Mostly Negative\")\n elif -0.3 <= avg_sentiment <= 0.3:\n print(\"Neutral\")\n elif 0.3 < avg_sentiment <= 0.6:\n print(\"Mostly Positive\")\n elif 0.6 < avg_sentiment <= 1:\n print(\"Positive\")\n\n return avg_sentiment", "title": "" }, { "docid": "c8b0e669d0013ae68a7b7fc1da9b2a52", "score": "0.6676222", "text": "def extract_sentiment(self, preprocessed_input):\n negation_words = ['not', 'never', 'nothing']\n\n words = re.sub(\"\\\".*?\\\"\", \"\", preprocessed_input) # Remove the singular movie title\n words = re.sub(\" +\", \" \", words) # Remove extraneous spaces\n words = re.sub('[,.!?\\\\-]', \"\", words) # Remove punctuation\n words = words.split() # Split the words into a list\n words = [word.lower() for word in words] # Send all words to lowercase\n\n sentiment = 0 # Initial sentiment (will toggle up and down)\n invert_flag = 1 # Whether to flip sentiment due to the presence of negation words\n\n for i, word in enumerate(words):\n # Invert the invert flag if you encounter a negation word\n if i > 0 and words[i] in negation_words or words[i].endswith('\\'t'):\n invert_flag *= -1\n continue\n\n # Check if the word itself is in self.sentiment\n if word in self.sentiment:\n if self.sentiment[word] == 'pos':\n sentiment += 1 * invert_flag\n else: # self.sentiment[word] == 'neg'\n sentiment -= 1 * invert_flag\n if invert_flag == -1:\n invert_flag = 1\n continue\n\n # Check if the word itself is in self.stemmedsentiment\n if word in self.stemmedsentiment:\n if self.stemmedsentiment[word] == 'pos':\n sentiment += 1 * invert_flag\n else: # self.sentiment[word] == 'neg'\n sentiment -= 1 * invert_flag\n if invert_flag == -1:\n 
invert_flag = 1\n continue\n\n # Check if the stemmed word is in self.sentiment\n stemmed_word = PorterStemmer().stem(word)\n if stemmed_word in self.sentiment:\n if self.sentiment[stemmed_word] == 'pos':\n sentiment += 1 * invert_flag\n else: # self.sentiment[word] == 'neg'\n sentiment -= 1 * invert_flag\n if invert_flag == -1:\n invert_flag = 1\n continue\n\n # Check if the stemmed word is in self.stemmedsentiment\n if stemmed_word in self.stemmedsentiment:\n if self.stemmedsentiment[stemmed_word] == 'pos':\n sentiment += 1 * invert_flag\n else: # self.sentiment[word] == 'neg'\n sentiment -= 1 * invert_flag\n if invert_flag == -1:\n invert_flag = 1\n continue\n\n # Return the sentiment (1 for positive, -1 for negative, 0 for neutral)\n if sentiment > 0:\n return 1\n elif sentiment < 0:\n return -1\n else: # sentiment == 0\n return 0", "title": "" }, { "docid": "8f58a33b24e5e5f6251d91078fadb390", "score": "0.6671297", "text": "def analyze(self, text):\n\n # assign each word in text a value (-1, 0, or 1)\n score = 0\n tknzr = TweetTokenizer()\n tokens = tknzr.tokenize(text)\n \n # print(tokens) ...testing purposes only...\n for token in tokens:\n if token.lower() in self.positives:\n score += 1\n continue\n elif token.lower() in self.negatives:\n score -= 1\n continue\n \n return score", "title": "" }, { "docid": "8a767f9e9ad6f43f2be28c6714a080de", "score": "0.6663789", "text": "def get_sentiment(text):\n\treturn analyzer.polarity_scores(text)", "title": "" }, { "docid": "7e3fa965a35e16d36cc24c8d005c3563", "score": "0.6637365", "text": "def get_sentiment():\n\n global sentiment_data\n if len(sentiment_data) != 0:\n current_sentiment = 5 * sentiment_data[-1] + 5\n else:\n current_sentiment = 5\n return current_sentiment", "title": "" }, { "docid": "dc0346d0da17b31aa5dc6167397e68e2", "score": "0.662942", "text": "def analyze_sentiment(review):\n\n positive_words = {'great', 'funny', 'uplifting', 'hilarious', 'cool'}\n negative_words = {'sad', 'stupid', 'disgusting', 'horrible', 'boring'}\n\n review_tokens = set(review.split(' '))\n\n num_positive = len(positive_words & review_tokens) # num of overlapping with positive\n num_negative = -len(negative_words & review_tokens) # num of overlapping with negative\n\n pos_ratio = float(num_positive)/len(positive_words)\n neg_ratio = float(num_negative)/len(negative_words)\n\n return pos_ratio + neg_ratio", "title": "" }, { "docid": "be0f5ba53f9bb88b50bd480a8e743782", "score": "0.65541583", "text": "def sentiment_score(analyser, text):\n\n scores = analyser.polarity_scores(text)\n return scores['compound']", "title": "" }, { "docid": "b4032d6ecb921af7c24e3ba1e15632f9", "score": "0.6516912", "text": "def analyze(self, text):\n\n #define tokenizer and create a list that divides the original string into substrings\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n \n #iterate over each token, determining if each word is positive,negative or neutral\n for token in tokens:\n temp = token.lower()\n #increase score by 1 if word is positive\n if temp in self.positives:\n score+=1\n #decrease score by 1 if word is negative\n elif temp in self.negatives:\n score-=1\n return score", "title": "" }, { "docid": "40f68f6acf0b9101d0bea754ce0ddf2e", "score": "0.6503834", "text": "def sentiment_aspects(docs: Iterable[tokens.Doc]) -> List[collections.Counter]:\n sent_dict_list = []\n for doc in docs:\n sent_dict = collections.Counter()\n for token in doc:\n # check if the word is an opinion word, then assign sentiment\n if 
token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n if (token.dep_ == \"advmod\"):\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n continue\n\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) add\n # more weight to sentiment\n # This could be better updated for modifiers that either\n # positively or negatively emphasize\n if _is_opinion_mod(child):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n # if verb, check if there's a direct object\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\": conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if _is_opinion_mod(child):\n sentiment *= 1.5\n if (child.dep_ == \"neg\"):\n # check for negation words and flip the sign of sentiment\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n sent_dict_list.append(collections.Counter(sent_dict))\n\n print(\"\\nFound aspects on {} reviews.\".format(len(sent_dict_list)))\n return sent_dict_list", "title": "" }, { "docid": "a0c61d68ec31fb25dd439ea6b9e98354", "score": "0.650191", "text": "def analyze(self, text):\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n word_score = 0\n \n for word in tokens:\n if word.lower() in self.positives:\n word_score += 1\n if word.lower() in self.negatives:\n word_score += -1\n else:\n word_score += 0\n \n return word_score", "title": "" }, { "docid": "dce493e0dd58ab98905d18766b691839", "score": "0.64634955", "text": "def get_sentiment(text):\n blob = TextBlob(text)\n sentiment_polarity = blob.sentiment.polarity\n if sentiment_polarity < 0:\n sentiment = 'negative'\n elif sentiment_polarity <= 0.2:\n sentiment = 'neutral'\n else:\n sentiment = 'positive'\n\n return sentiment", "title": "" }, { "docid": "ef3fa29b70a73937dd4dfe3a2d735c53", "score": "0.64476913", "text": "def analyze_sentiment(text):\n\ttblob = TextBlob(text)\n\treturn {\n\t\t'polarity': tblob.sentiment.polarity,\n\t\t'subjectivity': tblob.sentiment.subjectivity,\n\t}", "title": "" }, { "docid": "bc97a83961ba6ccdc71dec6dd911b320", "score": "0.6443908", "text": "async def sentiment(ctx, *, text):\n await ctx.trigger_typing()\n snt = sntm(text)\n await ctx.send('``Sentiment: {} ({})``'.format(snt['sentiment'], snt['vague']))", "title": "" }, { "docid": "bc97a83961ba6ccdc71dec6dd911b320", "score": "0.6443908", "text": "async def sentiment(ctx, *, text):\n await ctx.trigger_typing()\n snt = sntm(text)\n await ctx.send('``Sentiment: {} ({})``'.format(snt['sentiment'], snt['vague']))", "title": "" }, { 
"docid": "3367a2ee328f4b3fce0d173606d6c3ac", "score": "0.6442549", "text": "def extract_sentiment(self, preprocessed_input):\r\n no_titles = self.removed_titles(preprocessed_input)\r\n preprocessed_input = self.get_stemmed(no_titles).split()\r\n \r\n NEGATION = r\"\"\"\r\n (?:\r\n ^(?:never|no|nothing|nowhere|noone|none|not|\r\n havent|hasnt|hadnt|cant|couldnt|shouldnt|\r\n wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint\r\n )$\r\n )\r\n |\r\n n't\"\"\"\r\n NEGATION_RE = re.compile(NEGATION, re.VERBOSE)\r\n\r\n PUNCT = r\"\"\"[,.:;!?]\"\"\"\r\n PUNCT_RE = re.compile(PUNCT)\r\n\r\n intense_words = {'love', 'hate', 'terribl', 'great', 'excel'}\r\n INTENSIFIER = r\"\"\"^(?:re+alli|rea+lli|su+pe?r?)$\"\"\"\r\n INTENSIFIER_RE = re.compile(INTENSIFIER)\r\n\r\n input_sentiment = 0\r\n tag_neg = False\r\n\r\n for i in range(len(preprocessed_input)):\r\n word = preprocessed_input[i]\r\n \r\n if NEGATION_RE.search(word):\r\n tag_neg = True\r\n elif PUNCT_RE.search(word) or word == \"becaus\":\r\n tag_neg = False\r\n word = word.strip('.,;:!')\r\n delta = 0\r\n word_sentiment = self.sentiment.get(word, '') # default to empty string\r\n if word_sentiment == 'pos' or word_sentiment == 'po': #'po' is the stemmed version\r\n delta = 1\r\n elif word_sentiment == 'neg':\r\n delta = -1\r\n if tag_neg:\r\n delta *= -1\r\n if i > 0 and INTENSIFIER_RE.search(preprocessed_input[i-1]):\r\n delta *= 2\r\n elif word in intense_words:\r\n delta *= 2\r\n\r\n input_sentiment += delta\r\n\r\n if input_sentiment == 0:\r\n return 0\r\n elif input_sentiment < -1:\r\n return -2\r\n elif input_sentiment < 0:\r\n return -1\r\n elif input_sentiment > 1:\r\n return 2\r\n else:\r\n return 1", "title": "" }, { "docid": "b6eb334b1fc1a59fab04590265003b3e", "score": "0.6440518", "text": "def get_tweet_sentiment(self, tweet):\r\n # Create TextBlob object of passed tweet text.\r\n analysis = TextBlob(self.clean_tweet(tweet)) \r\n # Set sentiment.\r\n if analysis.sentiment.polarity > 0: \r\n #print(analysis.sentiment.polarity, 'positive')\r\n return 'positive'\r\n elif analysis.sentiment.polarity < 0:\r\n #print(analysis.sentiment.polarity, 'negative')\r\n return 'negative'\r\n else:\r\n #print('neutral')\r\n return 'neutral'", "title": "" }, { "docid": "8ff73500f1885d2d4042ec0ac1116df0", "score": "0.6397947", "text": "def sentiment(self, text, lang='en'):\n\t\treturn self._call_api('score', text=text, lang=lang)", "title": "" }, { "docid": "5c18f6d7756bcad5e452b7630668ebde", "score": "0.63544315", "text": "def GetSentimentScores_l(sent, sentiment_list, verbose=False):\n if verbose: print('### sent:', sent)\n listOfSentenceparts = ProcessforSentiment_l(sent)\n listOfSentiScores, listOfphrs = [], []\n\n for sentpart in listOfSentenceparts:\n \"\"\"\n first step: identification of suitable candidates for opinionated phrases suitable candidates: \n nouns, adjectives, adverbs and verbs\n \"\"\"\n # if verbose: print('\\tsentpart:', sentpart, end='\\r')\n candidates = MakeCandidates(sentpart, sentiment_list, get='candidates')\n negation_candidates = MakeCandidates(sentpart, sentiment_list, get='negation')\n # if verbose: print('\\tcandidates:', candidates, end='\\r')\n # if verbose: print('\\tnegation_candidates:', negation_candidates)\n \"\"\"\n second step: extraction of possible opinion-bearing phrases from a candidate starting from a candidate, \n check all left and right neighbours to extract possible phrases. 
The search is terminated on a comma (POS tag $,), \n a punctuation terminating a sentence (POS tag $.), a conjunction (POS-Tag KON) or an opinion-bearing word that is \n already tagged. (Max distance determined by sentence lenght)\n If one of the adjacent words is included in the SePL, together with the previously extracted phrase, it is added to \n the phrase.\n \"\"\"\n\n raw_sentimentscores, raw_phrs = ReadSePLSentiments(candidates, sentiment_list)\n # if verbose: print('\\traw_sentimentscores:', raw_sentimentscores, 'raw_sepl_phrase:', raw_sepl_phrase)\n\n \"\"\"\n third step: compare extracted phrases with SePL After all phrases have been extracted, they are compared with the \n entries in the SePL. (everything lemmatized!) If no match is found, the extracted Phrase is shortened by the last \n added element and compared again with the SePL. This is repeated until a match is found.\n \"\"\"\n\n # Make sure sepl_phrase, negation_candidates, sentimentscores are of same size\n assert len(raw_phrs) == len(raw_sentimentscores) == len(candidates) == len(negation_candidates)\n\n # export processed, flattened lists\n sentimentscores = ProcessSentimentScores(raw_phrs, negation_candidates, raw_sentimentscores)\n # if verbose: print('\\tsentimentscores:', sentimentscores, end='\\r')\n final_phrs = ProcessSePLphrases(raw_phrs)\n # if verbose: print('\\tsepl_phrase:', sepl_phrase)\n\n listOfSentiScores.append(sentimentscores)\n listOfphrs.append(final_phrs)\n\n # create flat, non-empty list with scores\n sentiscores = np.array([i for i in listOfSentiScores if i])\n\n # Retrieve statistics\n ss_mean, ss_median, ss_n, ss_sd = sentiscores.mean(), np.median(sentiscores), sentiscores.size, sentiscores.std()\n if verbose: print('\\tstats:', ss_mean, ss_median, ss_n, ss_sd, end='\\n\\n')\n\n return {'mean': ss_mean, 'median': ss_median, 'n': ss_n, 'sd': ss_sd, 'sentiscores': listOfSentiScores,\n 'phrs': listOfphrs}", "title": "" }, { "docid": "3bb05057e2ef7f03321d48aef547ffbc", "score": "0.6337614", "text": "def get_tweet_sentiment(sentence, nbc):\n\n words = sentence.split(' ')\n sentiments = [get_word_sentiment(word, nbc) for word in words]\n return sum(sentiments) / float(len(words))", "title": "" }, { "docid": "421a7ab10cc88201923c8935f1a0515f", "score": "0.63241524", "text": "def average_sentiment(tweets):\n\n total_score = 0\n\n for tweet in tweets:\n score = sentiment_compound_score(tweet)\n total_score += score\n\n if len(tweets) == 0:\n return 0\n else:\n return total_score/len(tweets)", "title": "" }, { "docid": "4b5e675f2fcf4cbe6e6c3dcaf8b27ce1", "score": "0.63227075", "text": "def analyze(self, text):\n moral = 0\n text_list = nltk.tokenize.TweetTokenizer().tokenize(text);\n for i in text_list:\n if (i.lower() in self.positives):\n moral+=1\n elif (i.lower() in self.negatives):\n moral-=1\n return moral", "title": "" }, { "docid": "263e0246feac0b729e6f10fe5414e5b0", "score": "0.6307537", "text": "def sentiment_scan(title, text):\n\n return (pattern.en.sentiment(title), pattern.en.sentiment(text))", "title": "" }, { "docid": "3854b202626fe0d2c0373cc9abee1004", "score": "0.63008016", "text": "def sentiment_score(word_list, lang):\n SentimentScorer_eng = SentimentIntensityAnalyzer()\n\n if lang == 'es':\n sentiment_total = [SentimentScorer_span.sentiment(word) for word in word_list]\n sentiment = convert_to_neg_pos(np.mean(sentiment_total))\n elif lang == 'en':\n sentiment = SentimentScorer_eng.polarity_scores(\" \".join(word_list))\n sentiment = sentiment['compound']\n else:\n raise 
TypeError\n\n return sentiment", "title": "" }, { "docid": "373471d22d43394f1fa53106e5050f59", "score": "0.62881255", "text": "def get_sentiments(passage):\n nltk.download(\"vader_lexicon\", quiet=True)\n sia = SentimentIntensityAnalyzer()\n try:\n return sia.polarity_scores(passage)[\"compound\"]\n except AttributeError:\n return 0", "title": "" }, { "docid": "a822fa16352a6e6421189409b8530d4f", "score": "0.6282515", "text": "def labMT_sent(text, rel = False, lentoken = 0):\n\n #clean text \n text = re.sub(r'[^a-zA-Z]', ' ', text) #replace everything that is not alfapethical with a space\n text = re.sub(r' +', ' ', text) #replace one or more whitespaces with a whitespace\n text = text.rstrip() #remove newlines and other escapecharacters\n\n #tokenize and lowercase\n tokens = tokenize(text, lentoken)#the 0 indicates that we remove empty tokens\n\n # importing LabMT for sentiment score\n labmt = pd.read_csv('C:\\\\Users\\\\reno0006\\\\Desktop\\\\tutorial_py\\\\group_sos\\\\labmt_dict.csv', \n sep = '\\t', encoding = 'utf-8', index_col = 0) #Change filepath if you are using a different computer\n\n\n avg = labmt.happiness_average.mean() #this is done to averaging around zero instead of the 1-10 scale\n sent_dict = (labmt.happiness_average - avg).to_dict() #to.dict() er en pandas function - derfor antager den at \n \n #apply the LabMT sentiment score\n result = sum(sent_dict.get(token, 0.0) for token in tokens) #append to the sent_vect the sum of the sentiment scores for the each token in text\n \n if rel:\n result = result/len(tokens)\n #return result\n return result", "title": "" }, { "docid": "82285c6a81796ffc937f8f2c35106845", "score": "0.62642604", "text": "def sentiment_score(text):\n polarity = ''\n score = sentiment_model.predict_proba([text])\n if score[0][0] > score[0][1]:\n return [text,'Negative',score[0][0]]\n if score[0][1] >= score[0][0]:\n return [text,'Positive',score[0][1]]", "title": "" }, { "docid": "d9f0b2dd7e1513c40e54e86263a8f010", "score": "0.6229096", "text": "def analyze_balance(string):\n string = string.replace(\" ll \", \" \").replace(\" s \",\" \").replace(\" ve \",\" \")\n string = tokenizer(string)\n sent = sentiment(string)\n return sent", "title": "" }, { "docid": "b84a9b85e1f54995aa5f9786aa9e836e", "score": "0.6212152", "text": "def OverallSentiment(DataFrame):\r\n Frequency=DataFrame['TweetSentiment'].value_counts()\r\n Sentiment=['Positive', 'Neutral', 'Negative']\r\n return Frequency, Sentiment", "title": "" }, { "docid": "2fe0991776fcc661348e3c3d1d350f8f", "score": "0.6204218", "text": "def analyze(text):\n language_client = language.Client()\n\n # Instantiates a plain text document.\n document = language_client.document_from_html(text)\n\n # Detects sentiment in the document.\n annotations = document.annotate_text(include_sentiment=True,\n include_syntax=False,\n include_entities=False)\n\n # Print the results\n print_result(annotations)", "title": "" }, { "docid": "8ac43b1949780a6aad3dc26f5b793d0c", "score": "0.62009996", "text": "def get_total_sentiment(self):\n return sum(self.sentiment_values) / len(self.sentiment_values)", "title": "" }, { "docid": "bcfa478255c4efa62ff06f75c5cc9ca2", "score": "0.6145972", "text": "def analyze(self, text):\n\n score = 0\n tokens = self.tokenizer.tokenize(text)\n for token in tokens:\n word = token.lower()\n # add 1 for positive words, subtract 1 for negatives\n if word in self.positives:\n score += 1\n elif word in self.negatives:\n score -= 1\n return score", "title": "" }, { "docid": 
"9c5820e231f7c3c7dfa611717696f47a", "score": "0.61401147", "text": "def sentiment(self, tweet):\n\n analysis = TextBlob(tweet)\n if analysis.sentiment.polarity > 0:\n return 1\n elif analysis.sentiment.polarity == 0:\n return 0\n else:\n return -1", "title": "" }, { "docid": "1e57d52a0a45bfaf5229a75480bff111", "score": "0.61383903", "text": "def find_text_sentiment(text) -> float:\n negative_words = [\"crash\", \"crashing\", \"problems\", \"not working\", \"fix\", \"shutting down\", \"sucks\", \"sucking\",\\\n \"closed\", \"blows\", \"shitty\", \"shit\", \"crap\", \"terrible\"]\n\n score = SIA.polarity_scores(text)[\"compound\"]\n\n for word_tup in negative_words:\n if fuzz.partial_ratio(word_tup, text) > 90:\n score = -0.4\n\n return score", "title": "" }, { "docid": "593d662dac9239f33d321a972da36f88", "score": "0.61300176", "text": "def analyze(self, text):\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0;\n for word in tokens:\n word += \"\\n\";\n if(word in self.keys):\n score += self.keys.get(word);\n return score;", "title": "" }, { "docid": "6f9d0de59b364722d1ca89d0402576db", "score": "0.6122734", "text": "def get_sentiment_info(text):\n text = text.lower()\n\n cl = get_classifier()\n blob = TextBlob(text, classifier=cl)\n sentiment = blob.sentiment\n\n polarity = sentiment.polarity\n # subjectivity = sentiment.subjectivity\n\n prob_dist = cl.prob_classify(text)\n\n if polarity >= -1.0 and polarity < -0.5:\n probability = prob_dist.prob(\"negative\") * 100\n probability = round(probability, 3)\n return \"negative\", probability\n elif polarity <= 1.0 and polarity > 0.5:\n probability = prob_dist.prob(\"positive\") * 100\n probability = round(probability, 3)\n return \"positive\", probability\n elif polarity == 0: # bad polarity\n probability = (\n max(\n prob_dist.prob(\"negative\"),\n prob_dist.prob(\"positive\"),\n prob_dist.prob(\"neutral\"),\n )\n * 100\n )\n probability = round(probability, 3)\n return prob_dist.max(), probability\n else:\n probability = prob_dist.prob(\"neutral\") * 100\n probability = round(probability, 3)\n return \"neutral\", probability", "title": "" }, { "docid": "b4c4cb29b9f844f91f08b247a7844286", "score": "0.61026096", "text": "def find_sentiments(self, keyword1, keyword2):\n print(\"performing sentiment analysis\")\n\n self.positive_tweets = []\n self.negative_tweets = []\n self.positive_noun_tweets = []\n self.negative_noun_tweets = []\n \n positive_partial_clean_tweets = []\n negative_partial_clean_tweets = []\n\n #creating variable which contains counts for each type of sentiments\n positive_tweets_counts = negative_tweets_counts = neutral_tweets_counts = 0\n \n #fetching tweets for given kewords\n mask = self.data['tweets'].str.contains(keyword1, case=False) & self.data['tweets'].str.contains(keyword2, case=False)\n \n tweets = self.data[mask]['clean_tweets']\n partial_clean_tweets = self.data[mask]['partial_clean_tweets']\n \n itr = 0\n \n #finding sentiment score for tweets fetched for particular player\n for tweet in tweets:\n sentiment_score = TextBlob(tweet).sentiment.polarity;\n\n if sentiment_score >= 0.02: #if compound score is >= 0.02 then that tweet is considered to be positive tweet\n self.positive_tweets.append(tweet)\n positive_partial_clean_tweets.append(partial_clean_tweets.iloc[itr])\n positive_tweets_counts += 1\n elif sentiment_score <= -0.02: #if compound score <= -0.02 then that tweet is considered to be negative tweet#if compound score is > -0.02 and <0.02 then that tweet is 
considered to be neutral tweet\n self.negative_tweets.append(tweet)\n negative_partial_clean_tweets.append(partial_clean_tweets.iloc[itr])\n negative_tweets_counts += 1\n else: #if compound score is > -0.02 and <0.02 then that tweet is considered to be neutral tweet\n neutral_tweets_counts += 1\n \n itr += 1\n \n if(positive_tweets_counts > 0):\n positive_noun_phrases = self.get_noun_phrase(positive_partial_clean_tweets)\n self.positive_noun_tweets.extend(positive_noun_phrases)\n if(negative_tweets_counts > 0):\n negative_noun_phrases = self.get_noun_phrase(negative_partial_clean_tweets)\n self.negative_noun_tweets.extend(negative_noun_phrases)", "title": "" }, { "docid": "753dd57447d54f7c877830ccbbf9472a", "score": "0.6099192", "text": "def get_tweet_sentiment(self, tweet):\n # create TextBlob object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n\n return 'positive' \\\n if analysis.sentiment.polarity > 0 \\\n else 'neutral' if analysis.sentiment.polarity == 0 \\\n else 'negative'", "title": "" }, { "docid": "5d8260523228eb44dd197726778fc808", "score": "0.609875", "text": "def get_text_sentiment_analysis(text):\n #no need to analyse empty strings\n assert text\n\n response = requests.post('http://text-processing.com/api/sentiment/', {'text': text}).json()\n return {'pos': round(response.get('probability').get('pos')*10, 0),\n 'neg': round(response.get('probability').get('neg')*10, 0),\n 'neutral': round(response.get('probability').get('neutral')*10, 0),\n 'total': response.get('label')\n }", "title": "" }, { "docid": "2a896b99aeaf699bf8151d4556caea56", "score": "0.6097258", "text": "def sentenceTernarySentiment(taggedSentence):\n sumFeatures = sentenceSumSentiment(taggedSentence)\n if sumFeatures < 0:\n return -1\n elif sumFeatures > 0:\n return 1\n else:\n return 0", "title": "" }, { "docid": "022db007a224c533b5df34dcaa2370cf", "score": "0.6076544", "text": "def sentiment_compound_score(tweet):\n\n # Separate the tweet into sentences and compute the score for each sentence, then the average.\n sentence_list = tokenize.sent_tokenize(tweet[\"text\"])\n tweet_sentiments = 0.0\n for sentence in sentence_list:\n sentiment = analyzer.polarity_scores(sentence)\n tweet_sentiments += sentiment[\"compound\"]\n\n if len(sentence_list) == 0:\n return 0\n else:\n return tweet_sentiments/len(sentence_list)", "title": "" }, { "docid": "05da74e8cd0d2bfad285ba919c163e45", "score": "0.6046775", "text": "def get_sentiment(tweets):\n # load the classifier we built earlier\n pickle_path = os.path.dirname(__file__) + '/tweet_emotion_classifier.pkl'\n tweets_text = [tweet.text for tweet in tweets]\n cl = pickle.load(open(pickle_path, 'rb'))\n sent = cl.predict(tweets_text)\n return Counter(sent)", "title": "" }, { "docid": "cae9bd82b23f23bbcd6925abb09da735", "score": "0.6041137", "text": "def analyze(self, text):\n score = 0\n tokens = self.tokenizer.tokenize(text)\n for token in tokens:\n if token in self.positives:\n score += 1\n if token in self.negatives:\n score -= 1\n return score", "title": "" }, { "docid": "b933e2effaae372b4ce142625406aab2", "score": "0.6031139", "text": "def get_sentiment(self,word_sentiments):\n # List for saving the sentiment value of words in the tweet.\n sentiment_values = []\n # Go over the words in the tweet\n for word in self.get_words():\n # The word has a sentiment value.\n if word in word_sentiments:\n sentiment_values.append(word_sentiments[word])\n # Some words have sentiment value. 
Return the average value.\n if len(sentiment_values) > 0:\n return sum(sentiment_values)/len(sentiment_values)\n # None of the words have a sentiment.\n else:\n return None", "title": "" }, { "docid": "05c08b1a81cdb80dad38472099cf282a", "score": "0.60289973", "text": "def get_tweet_sentiment(self, tweet):\n # create TextBlob object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n # set sentiment\n return analysis.sentiment.polarity", "title": "" }, { "docid": "d6a65b4cf94f52e00e0773f591ebfdf5", "score": "0.602449", "text": "def sentiment(values: typing.List[float], weights=None):\n return foundation.weighted_average(values, weights)", "title": "" }, { "docid": "a027ec575b468c3fd9864b1ed3039043", "score": "0.6011608", "text": "def get_sentiment(query):\n\n print('Num entries:' + str(len(query)))\n\n # Now we're going to extract the sentiment and date information and get the average sentiment on a particular date\n sentiment_array = []\n pos_sent_dict = {}\n neg_sent_dict = {}\n pos_count_dict = {}\n neg_count_dict = {}\n hash_dict = {}\n hash_list = []\n fav_max = 0\n\n for entry in query:\n sentiment_array.append(entry.sentiment)\n\n hashtags = ast.literal_eval(entry.hashtags)\n if len(hashtags)!=0:\n for tags in hashtags:\n tags = dict(tags)\n if tags['text'] in hash_dict:\n hash_dict[tags['text']] = hash_dict[tags['text']] + 100\n else:\n hash_dict[tags['text']] = 100\n\n # We need to remove the timezone, day and hour data\n temp_date = entry.created_at.split()\n\n temp_date.pop(0)\n temp_date.pop(2)\n temp_date.pop(2)\n\n # This makes it a datetime object for easier working\n # formatted_date = datetime.datetime.strptime(' '.join(temp_date), '%b %d %Y')\n formatted_date = ' '.join(temp_date)\n # We now take the average of the sentiment by keeping a running average\n\n # Positive sentiment\n if entry.sentiment > 0:\n if formatted_date in pos_sent_dict:\n pos_count_dict[formatted_date] = pos_count_dict[formatted_date] + 1\n pos_sent_dict[formatted_date] = (pos_sent_dict[formatted_date] +\n (entry.sentiment - pos_sent_dict[formatted_date]) /\n pos_count_dict[formatted_date])\n else:\n pos_count_dict[formatted_date] = 1\n pos_sent_dict[formatted_date] = 1\n else:\n if formatted_date in neg_sent_dict:\n neg_count_dict[formatted_date] = neg_count_dict[formatted_date] + 1\n neg_sent_dict[formatted_date] = (neg_sent_dict[formatted_date] +\n (entry.sentiment - neg_sent_dict[formatted_date]) /\n neg_count_dict[formatted_date])\n else:\n neg_count_dict[formatted_date] = 1\n neg_sent_dict[formatted_date] = 0\n\n date_list = []\n pos_sentiment_list = []\n neg_sentiment_list = []\n total_search = []\n\n for key in sorted(pos_sent_dict):\n date_list.append(key), \\\n pos_sentiment_list.append(pos_sent_dict[key]), \\\n\n for key in sorted(neg_sent_dict):\n if key not in date_list: date_list.append(key)\n neg_sentiment_list.append(neg_sent_dict[key]), \\\n\n for key in sorted(date_list):\n pos_cont = 0\n neg_cont = 0\n\n if key in pos_count_dict:\n pos_cont = pos_count_dict[key]\n if key in neg_count_dict:\n neg_cont = neg_count_dict[key]\n\n total_search.append(pos_cont+neg_cont)\n\n for items in hash_dict:\n hash_list.append([items,hash_dict[items]])\n\n print(total_search)\n print(neg_count_dict)\n print(pos_count_dict)\n print(sorted(date_list))\n\n return sorted(date_list), pos_sentiment_list, neg_sentiment_list, total_search, hash_list", "title": "" }, { "docid": "3298c9524d83761cc6f7e6b1f3f89132", "score": "0.60091376", "text": "def get_sentiment(text):\n document = 
types.Document(content=text, type=enums.Document.Type.PLAIN_TEXT)\n sentiment = client.analyze_sentiment(document=document).document_sentiment\n return sentiment.score", "title": "" }, { "docid": "51e0de185414c511c1095adde5f10e24", "score": "0.5997068", "text": "def analyze(self, text):\n\n summe=0\n #for i in range(len(text)):\n \n if str.lower(text) in self.flattened_positives: \n summe=summe+1 \n elif str.lower(text) in self.flattened_negatives:\n summe=summe-1\n \n return summe\n \n '''\n textlist=[]\n summe=0\n \n textlist.append(text.split())\n for i in range(len(textlist[0])):\n if textlist[0][i] in self.positives:\n summe=summe+1\n elif textlist[0][i] in self.negatives:\n summe=summe-1\n # print(textlist[0][i])\n return summe\n '''", "title": "" }, { "docid": "508788625eba0a1da15fb565708cee6d", "score": "0.59966177", "text": "def compare_sent_lex():\n try:\n subreddit = scraper.load_data('sub-reddits.txt')\n except IOError as e:\n print e\n return\n \n try:\n sentiments = scraper.load_sent()\n except IOError as e:\n print e\n return\n \n names, scores1, titles = anl.sentiment(subreddit, sentiments)\n _, scores2, _ = anl.lexical_diversity(subreddit)\n \n plot_bar_compare_avg(names, scores1, scores2)", "title": "" }, { "docid": "e00eff857b357f8af4a740783ccad6c6", "score": "0.59942925", "text": "def detect_sentiment(Text=None, LanguageCode=None):\n pass", "title": "" }, { "docid": "4dfc2fa12083518ee2f031a5141e05de", "score": "0.5973765", "text": "def sentiment_text(text):\n client = language.LanguageServiceClient()\n # Instantiates a plain text document.\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detects sentiment in the document. You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n sentiment = client.analyze_sentiment(document).document_sentiment\n return sentiment.score", "title": "" }, { "docid": "abe1431661879119eb0f7c65f349971b", "score": "0.59718543", "text": "def find_sentiment():\n data = get_content()\n algo = client.algo(\"nlp/SentimentAnalysis/1.0.2\")\n try:\n # Find the sentiment score for each article\n algo_input = [{\"document\": item[\"content\"]} for item in data]\n algo_response = algo.pipe(algo_input).result\n \n algo_final = [{\"url\": doc[\"url\"]} for sent in algo_response for doc in data]\n\t#for a in algo_final:\n\t\t#print (\"{url}\".format(url=a))\n #print(algo_final)\n print(json.dumps(algo_final, indent = 4))\n return algo_final\n except Exception as e:\n print(e)", "title": "" }, { "docid": "d31f0d151d5ea5f87751e19f59161772", "score": "0.59672076", "text": "def sentiment_sentence(df):\n for i in range(len(df)):\n sum = 0\n sentence = nltk.tokenize.sent_tokenize(df.loc[i, 'text'])\n for s in sentence:\n sum += sentiment_sentence_to_word(s)\n df.loc[i, 'cos_score_sentence'] = sum", "title": "" }, { "docid": "43753e9ac8608b8552f3440997987488", "score": "0.59595823", "text": "def sentiment_analysis_v2(song,emotion_dic):\n \n # to reduce time, use addition by array\n result=[0,0,0,0,0,0,0,0,0,0,0] \n \n for key,val in song['lyric'].items():\n if key in emotion_dic:\n result = [x+y for x,y in zip(result,emotion_dic[key]*val)]\n\n print(result)\n \n emo_dic = {'Positive':result[0],\n 'Negative':result[1],\n 'Anger':result[2],\n 'Anticipation':result[3],\n 'Disgust':result[4],\n 'Fear':result[5],\n 'Joy':result[6],\n 'Sadness':result[7],\n 'Surprise':result[8],\n 'Trust':result[9],\n 'Love': result[10]}\n \n print(emo_dic)\n \n # set dominant emotion\n max =0;\n dominant=None\n for i in 
['Anger','Anticipation','Disgust','Fear','Joy','Sadness','Surprise','Trust','Love']:\n if emo_dic[i]>max:\n max = emo_dic[i]\n dominant=i \n \n if dominant =='Joy'or dominant =='Trust':\n if emo_dic['Joy']>emo_dic['Trust'] and emo_dic['Trust']>= 0.5*emo_dic['Joy']:\n dominant = 'Love'\n emo_dic['Love']+=emo_dic['Trust']\n elif emo_dic['Joy']<emo_dic['Trust'] and emo_dic['Joy']>=0.9*emo_dic['Trust']:\n dominant = 'Love'\n emo_dic['Love']+=emo_dic['Joy'] \n \n emo_dic['dominant_emo']=dominant\n \n \n \n return song['title'],emo_dic", "title": "" }, { "docid": "b5c3d98342ddde3db85d15ab7a544868", "score": "0.5939989", "text": "def luis_Sentiment(self):\n\n print(\"LUIS INTENT SENTIMENT\")\n #below insert the LUISENDPOINT from Web.config - remember to delete it after so it doesn't get committed to GitHub\n endpoint = \"******&verbose=true&timezoneOffset=60&q=good&thankyou\"\n QnA_response = requests.get(endpoint)\n if QnA_response.status_code == 200:\n json = QnA_response.json()\n else:\n print(\"Failed to connect to Luis\")\n return\n\n if json[\"sentimentAnalysis\"][\"label\"].lower() == \"positive\":\n\n print(\"Success\")\n self.passed += 1\n\n else:\n\n print(\"Failure\")\n print(\"Expected Sentiment: Positive\")\n print(\"Sentiment Recognised: Negative\")\n\n print(\"------------------\")\n self.total += 1", "title": "" }, { "docid": "2dee89b8346fb4e8a256bd9ed6157241", "score": "0.59273446", "text": "def getWrathScore(flattened_word_list, wrath_positive, wrath_negative,decrease,increase,inverse,wrath_sentiment_score=0):\n for i in range(len(flattened_word_list)):\n if flattened_word_list[i][1] in wrath_positive:\n if i!=0 and flattened_word_list[i-1][1] in increase:\n wrath_sentiment_score+=2\n elif i!=0 and flattened_word_list[i-1][1] in decrease:\n wrath_sentiment_score+=0.5\n elif i != 0 and flattened_word_list[i - 1][1] in inverse:\n wrath_sentiment_score-=1\n else:\n wrath_sentiment_score+=1\n\n elif flattened_word_list[i][1] in wrath_negative:\n if i != 0 and flattened_word_list[i - 1][1] in increase:\n wrath_sentiment_score -= 2\n elif i != 0 and flattened_word_list[i - 1][1] in decrease:\n wrath_sentiment_score -= 0.5\n elif i != 0 and flattened_word_list[i - 1][1] in inverse:\n wrath_sentiment_score+=1\n else:\n wrath_sentiment_score-=1\n else:\n pass\n return wrath_sentiment_score", "title": "" }, { "docid": "75da89e62769410edd1212bbf69c9b12", "score": "0.5926602", "text": "def score_summary_2(summary_text):\n # Want high similarity between paragraphs\n inter_paragraph_similarities = []\n avg_similarity = None\n\n sentences = [i.text for i in NLP(summary_text).sents]\n\n # readability measures close to ebert baseline\n readability = abs(text_stats.TextStats(NLP(summary_text)).automated_readability_index - EBERT_READABILITY)/EBERT_READABILITY\n\n\n # Coh Metrix Indices\n anaphor_score = anaphor_overlap(summary_text)\n person_score = person_overlap(summary_text)\n\n\n # more subjective is better\n total_subjectivity = 0\n for i in sentences:\n total_subjectivity += TextBlob(i).sentiment[1]\n subjectivity = total_subjectivity/len(sentences)\n\n # thesis sentence doesn't have \"this\", \"here\", \"it\"\n if sentences[0] not in [' ', '', '\\n']:\n thesis_penalty = sum(i in sentences[0] for i in [\" this \", \" This \", \" here \", \" Here\"])\n elif sentences[1] not in [' ', '', '\\n']:\n thesis_penalty = sum(i in sentences[1] for i in [\" this \", \" This \", \" here \", \" Here\"])\n else:\n thesis_penalty = 0\n\n # Prefer expressions from the author\n author_count = 0\n for s in 
sentences:\n if any(i in s for i in [\"I \", \"I'd\", \"My\"]):\n author_count += 1\n\n # iterate through the paragraphs\n # sentiment within a paragraph is similar\n paragraphs = summary_text.split('\\n')\n for i in range(1, len(paragraphs)):\n if paragraphs[i - 1] not in [' ', '', '\\n'] and paragraphs[i] not in [' ', '', '\\n']:\n inter_paragraph_similarities.append(similarity.word_movers(NLP(paragraphs[i - 1]), NLP(paragraphs[i])))\n\n max_diff = 0\n for p in paragraphs:\n p_sent_min = None\n p_sent_max = None\n for s in p.split('.'):\n sent = TextBlob(s).sentiment[0]\n if p_sent_min is None:\n p_sent_min = sent\n if p_sent_max is None:\n p_sent_max = sent\n\n if sent < p_sent_min:\n p_sent_min = sent\n if sent > p_sent_max:\n p_sent_max = sent\n if max_diff < abs(p_sent_max - p_sent_min):\n max_diff = abs(p_sent_max - p_sent_min)\n max_diff = 1 - max_diff\n avg_similarity = sum(inter_paragraph_similarities)/len(inter_paragraph_similarities)\n\n\n\n # Make score\n score = (0.25 * avg_similarity) + \\\n (0.20 * person_score) + \\\n (0.15 * anaphor_score) + \\\n (0.1 * max_diff) + \\\n (0.05 * readability) + \\\n (0.25 * subjectivity)\n # boost by person count\n score = score * (1 + (0.1 * author_count))\n score = score - (0.2 * thesis_penalty)\n\n\n return score", "title": "" }, { "docid": "f8b7ebcca5a393922ff015c192e78d9e", "score": "0.5923025", "text": "def DoSentAnalysis(doc,dictionary=diction):\n sents1=nltk.sent_tokenize(stringify(doc['Text']))\n wsents=[nltk.word_tokenize(sent) for sent in sents1] \n #pause here so we dont' bother pos tagging non-english reviews\n #send to check language\n smp=[]\n for sent in wsents:\n smp.extend([w for w in sent])\n if testLanguage(smp)=='ENG':\n sents=[nltk.pos_tag(sent) for sent in wsents]\n #~8 seconds to run, on average\n sentThreep=[([(tup[0],tup[1],TagForSentiment(tup,dictionary=dictionary)) for tup in sent])\n for sent in sents] \n sentFinal=[(sent,ScoreSentence(sent)) for sent in sentThreep]\n return([(sentFinal[ct][1],sents1[ct],sentFinal[ct][0]) for ct in \n range(0,len(sentFinal))])\n #this is big, but the tokenization algorithm is the most expensive part\n #not needing to run it again if I want to change anything or do\n #something else with it is worth a few mb of HD space\n return (None)", "title": "" }, { "docid": "79a1783554fd8b2c6f5fc953cc8e071c", "score": "0.5918092", "text": "def get_tweet_sentiment(self, tweet):\n analysis = TextBlob(self.clean_tweet(tweet))\n return analysis.sentiment.polarity", "title": "" }, { "docid": "a3a3f507858a4dfc6f04c61143fba177", "score": "0.5916122", "text": "def sentiment_over_time(sentence):\n positive = get_words()\n negative = get_words(pos=False)\n docs = filter_documents(sentence, sort=[\n {\n \"date\": {\n \"order\": \"asc\"\n }\n }\n ])\n print(\"Got \", len(docs), \" docs\")\n dates = []\n sentiment = []\n for doc in docs:\n dates.append(doc['_source']['date'])\n tokens = Counter(doc['_source']['content'].replace(\".\", ' ').split())\n doc_sentiment = 0\n for tok in tokens:\n if tok in positive:\n doc_sentiment = doc_sentiment + (tokens[tok] * 1)\n elif tok in negative:\n doc_sentiment = doc_sentiment + (tokens[tok] * -1)\n sentiment.append(doc_sentiment)\n # print(dates, sentiment)\n x = list(range(len(dates)))\n mplt.plot(x, sentiment)\n # mplt.xticks(x, dates)\n mplt.show()", "title": "" }, { "docid": "07718d3c6a6bc727c0328bef0472f1df", "score": "0.590148", "text": "def sentiment(line):\n sia = SentimentIntensityAnalyzer()\n polaridad = sia.polarity_scores(line)\n pol = 
polaridad[\"compound\"]\n return pol", "title": "" }, { "docid": "2c42e8973d4a190bb5e2d2a6ae3f8509", "score": "0.58997333", "text": "def get_avg_sentiment(sentiment):\n average = {}\n\n for coin in sentiment:\n # sum up all compound readings from each title & body associated with the\n # coin we detected in keywords\n average[coin] = sum([item['compound'] for item in sentiment[coin]])\n\n # get the mean compound sentiment if it's not 0\n if average[coin] != 0:\n average[coin] = average[coin] / len(sentiment[coin])\n\n return average", "title": "" }, { "docid": "10b3c25a561b613745a895f8e12b4c63", "score": "0.58960295", "text": "def da_vader_getter(doc, lemmatization=True):\n analyser = SentimentIntensityAnalyzer()\n if lemmatization:\n polarity = analyser.polarity_scores(doc.text, tokenlist=[t.lemma_ for t in doc])\n else:\n polarity = analyser.polarity_scores(doc.text, tokenlist=[t.text for t in doc])\n return polarity", "title": "" }, { "docid": "e3f76e893bfa0065517b4ffea6fd8c6b", "score": "0.5893669", "text": "def __predict_sentiment(self):\n self.sentiments = SENTI_CLASSIFIER_LIB.predict(self.sentences)\n return", "title": "" }, { "docid": "6dedf46a8e1fa84bae188840a76de2c9", "score": "0.58881503", "text": "def sentiment_pipeline(text, topics):\r\n sentence, labels = load_model_output(text)\r\n ent = []\r\n\r\n for i, v in enumerate(labels):\r\n if v in topics:\r\n ent.append(sentence[i])\r\n\r\n if ent == []:\r\n response = {}\r\n response[\"sentiment\"] = None\r\n response[\"overall_sentiment\"] = overall_sentiment(text)\r\n return response\r\n\r\n else:\r\n full_sentence = []\r\n full_text = text.split(\".\")\r\n\r\n # code to match entities based on splliting point `.`. Scope for further improvement\r\n for i in range(len(ent)):\r\n temp = \"\"\r\n for t in full_text:\r\n if len(full_text) >= 1:\r\n if ent[i] in t.lstrip():\r\n temp += t\r\n full_sentence.append(temp)\r\n\r\n sid = SentimentIntensityAnalyzer()\r\n full_text = full_sentence\r\n sentiment_output = {}\r\n\r\n for i, t in enumerate(full_text):\r\n ss = sid.polarity_scores(t)\r\n item = labels[i] # returns topic value instead of entity value\r\n # sentiment_output.setdefault(item, [])\r\n if ss[\"compound\"] >= 0.15:\r\n sentiment_output[item] = \"positive\"\r\n # sentiment_output[item].append(\"positive\")\r\n elif ss[\"compound\"] <= -0.01:\r\n sentiment_output[item] = \"negative\"\r\n # sentiment_output[item].append(\"negative\")\r\n else:\r\n sentiment_output[item] = \"neutral\"\r\n # sentiment_output[item].append(\"neutral\")\r\n\r\n response = {}\r\n response[\"sentiment\"] = sentiment_output\r\n response[\"overall_sentiment\"] = overall_sentiment(text)\r\n\r\n return response", "title": "" }, { "docid": "d4e5323b5fff4614447c7ed063529a41", "score": "0.5886532", "text": "def message_sentiment(self, message):\n content = self.clean_message_content(message)\n\n skip_responses = [\n 'ski', 'skip',\n 'n', 'no', 'nay',\n 'pass',\n 'rsvp no',\n ]\n\n yes_responses = [\n 'y', 'ye', 'ya', 'yep', 'yes', 'yea', 'yeah',\n 'sur', 'sure',\n 'rsvp yes',\n ]\n\n is_skip_response = content in skip_responses\n is_yes_response = content in yes_responses\n\n if is_yes_response and not is_skip_response:\n return 1\n elif is_skip_response:\n return -1\n else:\n print(\" !!! 
Unknown message sentiment\")\n self.print_message(message)\n return 0", "title": "" }, { "docid": "67a3d05ac8e576c4b217d7272e35e9ce", "score": "0.5881713", "text": "def add_sentiment(data, nlp_column):\r\n start_time = time.time()\r\n print('Using Vader to calculate objectivity and pos-neg-neutral scores')\r\n analyzer = SentimentIntensityAnalyzer()\r\n data[nlp_column+'_vader_neg'] = 0\r\n data[nlp_column+'_vader_pos'] = 0\r\n data[nlp_column+'_vader_neu'] = 0\r\n data[nlp_column+'_vader_compound'] = 0\r\n data[nlp_column+'_vader_neg'] = data[nlp_column].map(\r\n lambda txt: analyzer.polarity_scores(txt)['neg']).fillna(0)\r\n data[nlp_column+'_vader_pos'] = data[nlp_column].map(\r\n lambda txt: analyzer.polarity_scores(txt)['pos']).fillna(0)\r\n data[nlp_column+'_vader_neutral'] = data[nlp_column].map(\r\n lambda txt: analyzer.polarity_scores(txt)['neu']).fillna(0)\r\n data[nlp_column+'_vader_compound'] = data[nlp_column].map(\r\n lambda txt: analyzer.polarity_scores(txt)['compound']).fillna(0)\r\n cols = [nlp_column+'_vader_neg',nlp_column+'_vader_pos',nlp_column+'_vader_neu',nlp_column+'_vader_compound']\r\n print(' Created %d new columns using SentinmentIntensityAnalyzer. Time taken = %d seconds' %(len(cols),time.time()-start_time))\r\n return data, cols", "title": "" }, { "docid": "c21a3fbd1ffe796a8d69c950f7423b27", "score": "0.5877822", "text": "def summarize_sentiment_dir(directory, include_tweets=False):\n movie = Movie()\n NUM_MOVIES = movie.get_num_movies()\n \n def process(file_, polarity, texts):\n infile = open(file_, 'r')\n data = json.loads(infile.read())\n data = data['data']\n\n polarity_level = {0:\"negative\", 2:\"neutral\", 4:\"positive\"}\n\n for item in data: \n polarity[item['no']][item['polarity']//2] += 1\n texts[item['no']][polarity_level[item['polarity']]] += [{\"text\":item['text']}]\n \n stm_polarity = {}\n stm_texts = {}\n for i in range(0, NUM_MOVIES):\n stm_polarity[i] = [0,0,0] # {datetime:[neg, neu, pos]}\n stm_texts[i] = {\"negative\":[], \"neutral\":[], \"positive\":[]}\n \n os.chdir(directory)\n for file_ in os.listdir(\".\"):\n if file_.endswith(\".json\"): \n process(file_, stm_polarity, stm_texts)\n\n os.mkdir('results')\n os.chdir('./results/')\n fw = open('sentiment_polarity_summary', 'w')\n fw.write(\"No,Title,#Neg,#Neu,#Pos,Total#\\n\")\n for key, value in stm_polarity.items():\n fw.write(str(key)+','+movie.get_title([key])+','+str(value[0])+','+\n str(value[1])+','+str(value[2])+','+str(sum(value))+'\\n') \n fw.close()\n\n if (not include_tweets): return\n for i in range(0, NUM_MOVIES): \n fw = open('tweets_' + str(i) + '.json', 'w')\n fw.write(json.dumps(stm_texts[i], indent=4))\n fw.close()", "title": "" }, { "docid": "f638d9652c6b6caf4c28cebe7528b984", "score": "0.5877814", "text": "def update_sentiment(new_sentiment):\n global sentiment_data\n sentiment_data += new_sentiment", "title": "" }, { "docid": "23675ce5f3f43e6607ec50f47da673bb", "score": "0.586774", "text": "def detect_sentiment(text):\n text = re.sub(r'/\\u\\d+', '', text)\n text = ''.join([char for char in text if char not in string.punctuation])\n return TextBlob(text.encode('ascii', 'ignore') ).sentiment.polarity", "title": "" }, { "docid": "e22ac5aa10ea47a5bf059dfb0fda1a53", "score": "0.5867666", "text": "def sentence_to_sentiment_score(self, sentence, lemmatization=False):\n\t\tif lemmatization:\n\t\t\ttokenized_sentence = self.document_to_lemmatized_clean_vector(sentence, lemmatization=True)\n\t\telse:\n\t\t\ttokenized_sentence = [str(x) for x in 
self.tokenizer.tokenize(sentence)]\n\n\t\ttxt = {} # aggregate of all tags\n\t\tpuncts = self.sal.getPunctIndex(tokenized_sentence)\n\t\tfor p in puncts:\n\t\t\ttxt[p] = {'type': 'punct'}\n\t\tif len(self.shifter_list) > 0:\n\t\t\ttxt = self.sal.tag_doc(self.tag, tokenized_sentence, self.shifters_dict, self.shifter_list, 'sword', txt,\n\t\t\t\t\t\t\t\t improved=self.faster_ver, multiple_dict=self.shifters_multiple)\n\t\tif len(self.intens_list) > 0:\n\t\t\ttxt = self.sal.tag_doc(self.tag, tokenized_sentence, self.intens_dict, self.intens_list, 'iword', txt,\n\t\t\t\t\t\t\t\t improved=self.faster_ver, multiple_dict=self.intens_multiple)\n\t\t# will over-write previous tags\n\t\tif len(self.pol_lex_list) > 0:\n\t\t\ttxt = self.sal.tag_doc(self.tag, tokenized_sentence, self.pol_lex_dict, self.pol_lex_list, 'pword', txt,\n\t\t\t\t\t\t\t\t improved=self.faster_ver, multiple_dict=self.pol_lex_multiple)\n\t\tscore = self.sal.scoreText(txt)\n\t\treturn score, self.sal.num2nom(score)", "title": "" }, { "docid": "89bf19eedecdff05120de24b3cc1a7b0", "score": "0.58629566", "text": "def get_sentiment(event, context):\n results = search(event['queryStringParameters'])\n summary = summarize(results)\n return {\n 'statusCode': 200,\n 'body': json.dumps({\n 'results': results,\n 'summary': summary,\n }),\n 'headers': {\n \"Access-Control-Allow-Origin\" : \"*\",\n \"Access-Control-Allow-Credentials\" : True\n },\n }", "title": "" }, { "docid": "7694cf0f860b20360299e38aa9c14167", "score": "0.58482575", "text": "def detect(text, args):\n text = text or 'Rosette API is the best! #iloverosette'\n api_instance = api.API(args.r, ROSETTE_SERVICE_URL)\n params = api.DocumentParameters()\n params['content'] = text\n params['language'] = 'eng'\n return api_instance.sentiment(params)", "title": "" }, { "docid": "aae3d8599a7c22cb8515e0707bf992ba", "score": "0.5846503", "text": "def GetSentimentScores(listOfSentenceparts, sentiment_list):\n\n listOfSentiScores, listOfseplphrs = [], []\n\n for sentpart in listOfSentenceparts:\n \"\"\"\n first step: identification of suitable candidates for opinionated phrases suitable candidates: \n nouns, adjectives, adverbs and verbs\n \"\"\"\n candidates = MakeCandidates(sentpart, sentiment_list, get='candidates')\n negation_candidates = MakeCandidates(sentpart, sentiment_list, get='negation')\n\n \"\"\"\n second step: extraction of possible opinion-bearing phrases from a candidate starting from a candidate, \n check all left and right neighbours to extract possible phrases. The search is terminated on a comma (POS tag $,), \n a punctuation terminating a sentence (POS tag $.), a conjunction (POS-Tag KON) or an opinion-bearing word that is \n already tagged. (Max distance determined by sentence lenght)\n If one of the adjacent words is included in the SePL, together with the previously extracted phrase, it is added to \n the phrase.\n \"\"\"\n\n raw_sentimentscores, raw_sepl_phrase = ReadSePLSentiments(candidates, sentiment_list)\n\n \"\"\"\n third step: compare extracted phrases with SePL After all phrases have been extracted, they are compared with the \n entries in the SePL. (everything lemmatized!) If no match is found, the extracted Phrase is shortened by the last \n added element and compared again with the SePL. 
This is repeated until a match is found.\n \"\"\"\n\n # Make sure sepl_phrase, negation_candidates, sentimentscores are of same size\n assert len(raw_sepl_phrase) == len(raw_sentimentscores) == len(candidates) == len(negation_candidates)\n\n # export processed, flattened lists\n sentimentscores = ProcessSentimentScores(raw_sepl_phrase, negation_candidates, raw_sentimentscores)\n sepl_phrase = ProcessSePLphrases(raw_sepl_phrase)\n\n listOfSentiScores.append(sentimentscores)\n listOfseplphrs.append(sepl_phrase)\n\n # create flat, non-empty list with scores\n sentiscores = np.array([i for i in listOfSentiScores if i])\n\n # Retrieve statistics\n ss_mean, ss_median, ss_n, ss_sd = sentiscores.mean(), np.median(sentiscores), sentiscores.size, sentiscores.std()\n\n return {'mean': ss_mean, 'median': ss_median, 'n': ss_n, 'sd': ss_sd, 'sentiscores': listOfSentiScores,\n 'phrs': listOfseplphrs}", "title": "" }, { "docid": "6dc1cbc7d89e65c7fae512982b11631b", "score": "0.5838795", "text": "def sift_sentiment_scores(sentiments: Iterable[float]) -> Tuple[float, float, int]:\n pos_sum = 0.0\n neg_sum = 0.0\n neu_count = 0\n for sentiment_score in sentiments:\n if sentiment_score > 0:\n pos_sum += (\n float(sentiment_score) + 1\n ) # compensates for neutral words that are counted as 1\n if sentiment_score < 0:\n neg_sum += (\n float(sentiment_score) - 1\n ) # when used with math.fabs(), compensates for neutrals\n if sentiment_score == 0:\n neu_count += 1\n return pos_sum, neg_sum, neu_count", "title": "" }, { "docid": "f30b166f2e351b2105fc2f1552cd10fd", "score": "0.58316356", "text": "def twitter_sentiment_analysis(self, text):\n return self._classification_request(text, \"TwitterSentimentAnalysis\")", "title": "" }, { "docid": "5d3a030d703d399e9f00c5af24f10abe", "score": "0.5830023", "text": "def summarize_daily_sentiment(date, save_summary=True):\n\n historical_df = pd.read_csv('data/processed_wsb_submissions.csv')\n historical_df[\"created_utc\"] = pd.to_datetime(historical_df[\"created_utc\"])\n daily_df = historical_df[historical_df[\"created_utc\"].isin([date])]\n text_column = daily_df['text']\n\n positive_counts = 0\n average_sentiment = 0\n valid_posts = 0\n\n for i, text in text_column.iteritems():\n if i % 20 != 0:\n continue\n\n try:\n sentiment_score = predict_sentiment(text)\n if sentiment_score > 0:\n positive_counts += 1\n\n except Exception:\n print(Exception)\n pass\n\n else:\n average_sentiment += sentiment_score\n valid_posts += 1\n\n if valid_posts == 0:\n return None, None\n\n percentage_positive = positive_counts / valid_posts\n average_sentiment = average_sentiment / valid_posts\n\n if save_summary:\n f = open(\"data/sentiment_percentage.txt\", \"w\")\n f.write(str(percentage_positive))\n f = open(\"data/average_sentiment.txt\", \"w\")\n f.write(str(average_sentiment))\n\n return percentage_positive, average_sentiment", "title": "" }, { "docid": "cc4a4f5582c295757af087d83a94fa4c", "score": "0.58281475", "text": "def run_analysis(self):\n\n if len(self.commlist) == 0:\n print('No posts/comments downloaded.')\n return\n self.rawtextstr = c.list_to_str(self.commlist)\n self.analysis_text = c.ana_text(self.rawtextstr)\n worddict = self.__getdict(c.clean_text(self.rawtextstr))\n self.wordskv = self.__getsortedkv(worddict)\n\n self.stats['flesch_ease'].append(ts.textstat.flesch_reading_ease(self.analysis_text))\n self.stats['flesch_grade'].append(ts.textstat.flesch_kincaid_grade(self.analysis_text))\n 
self.stats['dalechall'].append(ts.textstat.dale_chall_readability_score(self.analysis_text))\n self.stats['ari'].append(ts.textstat.automated_readability_index(self.analysis_text))\n self.stats['colemanliau'].append(ts.textstat.coleman_liau_index(self.analysis_text))\n self.stats['lisear'].append(ts.textstat.linsear_write_formula(self.analysis_text))\n\n self.stats['smog'].append(ts.textstat.smog_index(self.analysis_text))\n self.stats['difcwords'].append(ts.textstat.difficult_words(self.analysis_text))\n self.stats['sentences'].append(ts.textstat.sentence_count(self.rawtextstr))\n self.stats['lexiconcnt'].append(ts.textstat.lexicon_count(self.rawtextstr))\n self.stats['avgsyllables'].append(ts.textstat.avg_syllables_per_word(self.analysis_text))\n\n self.stats['stdreadability'].append(ts.textstat.text_standard(self.analysis_text))", "title": "" }, { "docid": "20e643cf04f62ac857e382689cde0298", "score": "0.5823728", "text": "def analyze(self, text):\n \n word_list = self.Tokenizer.tokenize(text)\n \n # analyze and return \n counter = 0\n for word in word_list :\n if word.lower() in self.positive_set :\n counter += 1\n \n if word.lower() in self.negative_set :\n counter -= 1\n \n return counter", "title": "" }, { "docid": "0c8e49ce3b4c2a0e9d23bba43b7c4d1c", "score": "0.5811908", "text": "def dictionary_sentiment_check(string: str) -> float:\n bullishness = 0\n bearishness = 0\n string = string.lower()\n for word in bull_words:\n if word in string:\n bullishness += 1\n for word in bear_words:\n if word in string:\n bearishness += 1\n total_words = bullishness + bearishness\n if total_words > 0:\n return bullishness / total_words\n else:\n return 0.5", "title": "" }, { "docid": "48ed7bba74f4bbcdc5a9e287581e8217", "score": "0.5799291", "text": "def main():\n\n query_string = '''\n filingsource:\"US SEC Non-XBRL\" AND filerid:1065280 AND\n enddate:[2018-01-01 TO 2018-12-31] AND reporttype:\"8-K\"\n '''\n\n # send off the query\n resp_data = facts_stringquery(query_string, False)\n # keep unique list of company names from results\n\n scores = []\n # set up a vader sentiment analyzer\n analyzer = SentimentIntensityAnalyzer()\n\n # for each hit\n for ele in resp_data['hits']:\n # clean up the html and put it in simple text\n soup = bs4.BeautifulSoup(ele['source']['fieldvalue'], 'html.parser')\n for script in soup([\"script\", \"style\"]):\n script.extract()\n as_text = soup.get_text().strip()\n\n # score the text\n vader_score = analyzer.polarity_scores(as_text)\n # and record the whole entry alongside the score\n scores.append(vader_score['compound'])\n\n average_score = statistics.mean(scores)\n print(average_score)", "title": "" }, { "docid": "6983fcd978920385e647f69be4877767", "score": "0.5775975", "text": "def calc_sentiment_textblob(text: str):\r\n \r\n text = str(text)\r\n blob = TextBlob(text)\r\n \r\n return blob.sentiment.polarity", "title": "" }, { "docid": "c3f5904986f34c60b24505f26a808c8a", "score": "0.5773931", "text": "def text_sentiment(self, text, **kwargs):\n if self.sent_used == 'HPE':\n return self.HPE_text_sentiment(text, **kwargs)\n elif self.sent_used == 'IBM':\n return self.IBM_text_sentiment(text, **kwargs)", "title": "" }, { "docid": "7119b520b4598fa0efa5284d3d0a6562", "score": "0.5770259", "text": "def calc_sentiment(\r\n student_comments: pd.Series,\r\n package: str\r\n ) -> pd.Series:\r\n \r\n if package.lower() == 'vader':\r\n sid = SentimentIntensityAnalyzer()\r\n results = student_comments.apply(\r\n lambda x: sid.polarity_scores(x)['compound']\r\n )\r\n 
results.name = 'sentiment_vader'\r\n elif package.lower() == 'textblob':\r\n results = student_comments.apply(calc_sentiment_textblob)\r\n results.name = 'sentiment_textblob'\r\n elif package.lower() == 'luis':\r\n pass\r\n \r\n return results", "title": "" }, { "docid": "a4bf71fe3d02f67432e9ccf95c8e5a2a", "score": "0.5766547", "text": "def overall_sentiment(data):\n outliers_removed = data[is_outlier(data, thresh=2)]\n if len(outliers_removed) == 0:\n return np.mean(data)\n else:\n return np.mean(outliers_removed)", "title": "" } ]
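The negative passages above repeatedly apply the same pattern: compute a polarity score for a piece of text and bucket it into positive / neutral / negative. A minimal self-contained sketch of that pattern, assuming the textblob package is available (the 0.02 threshold simply mirrors the values used in the snippets above, not a standard cut-off):

    from textblob import TextBlob

    def classify_sentiment(text, threshold=0.02):
        # TextBlob polarity lies in [-1.0, 1.0]; bucket it into three classes.
        polarity = TextBlob(text).sentiment.polarity
        if polarity >= threshold:
            return "positive"
        if polarity <= -threshold:
            return "negative"
        return "neutral"

For example, classify_sentiment("this library is great") returns "positive", while a flat, factual sentence falls back to "neutral".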
d2ad09c8cbe6b76dcdcf2bf822e90025
!Calls self.communicate() and returns the stdout from the pipeline (self.outbytes). The return value will be None if the pipeline was redirected to a file or if the constructor's capture option was not True.
[ { "docid": "d8e40611db405a37b900f39eea4e4026", "score": "0.5637281", "text": "def to_string(self):\n self.communicate()\n o=self.out\n if not isinstance(o,str):\n o=str(o)\n return o", "title": "" } ]
[ { "docid": "79d9e8c61854efd57d11968a3ffd058b", "score": "0.6113572", "text": "def capture(*cmd):\n a = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return a.communicate()", "title": "" }, { "docid": "aa2bd5a65384749897ba3080cd28c526", "score": "0.6037035", "text": "def start(self):\n import os\n import threading\n import time\n\n self.capturedtext = \"\"\n # Save a copy of the stream:\n self.streamfd = os.dup(self.origstreamfd)\n # Replace the original stream with our write pipe:\n os.dup2(self.pipe_in, self.origstreamfd)\n if self.threaded:\n # Start thread that will read the stream:\n self.workerThread = threading.Thread(target=self.readOutput)\n self.workerThread.start()\n # Make sure that the thread is running and os.read() has executed:\n time.sleep(0.01)", "title": "" }, { "docid": "e183fbe70f5af2bafb471c5605399f15", "score": "0.59744865", "text": "def readpipe(argv, preexec_fn=None):\r\n p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn)\r\n out, err = p.communicate()\r\n if p.returncode != 0:\r\n raise Exception('subprocess %r failed with status %d'\r\n % (' '.join(argv), p.returncode))\r\n return out", "title": "" }, { "docid": "e19ddfb969f80bdfe0959c9463ce3bf9", "score": "0.58750147", "text": "def readOutput(self):\n import os\n while True:\n char = os.read(self.pipe_out, 1)\n if not char or self.escape_char.encode('utf-8') in char:\n break\n self.capturedtext += char.decode('utf-8', 'ignore')", "title": "" }, { "docid": "f9073557be8242d6b45481d7c672370d", "score": "0.586559", "text": "def __call__(self, data=None, timeout=None, report=None, cwd=None):\n\n if report:\n report('shell: %s' % ' '.join(self.cmdline))\n\n def _thread():\n stdout = subprocess.PIPE\n if self.passthrough:\n stdout = sys.stdout\n\n stderr = subprocess.PIPE\n if self.merge_output:\n stderr = subprocess.STDOUT\n elif self.passthrough:\n stderr = sys.stderr\n\n self.process = subprocess.Popen(self.cmdline, bufsize=0, env=self.environ,\n shell=self.shell, cwd=cwd, stdin=subprocess.PIPE, stdout=stdout,\n stderr=stderr, universal_newlines=True)\n\n self.stdout, self.stderr = self.process.communicate(data)\n\n thread = Thread(target=_thread)\n thread.start()\n\n thread.join(timeout)\n if thread.isAlive():\n self.process.terminate()\n thread.join()\n\n self.returncode = self.process.returncode\n return self.returncode", "title": "" }, { "docid": "6ec213578cf5327726a581ce74e73316", "score": "0.5859571", "text": "def getOutputData(self):\r\n try:\r\n process = Popen(self.command, stdout=PIPE)\r\n # output = str(process.stdout.read()) # Use for big data of stdout\r\n (output, error) = process.communicate() # Store data in clipboard\r\n self.returnCode = process.returncode\r\n if self.returnCode == 0:\r\n res = None\r\n if output:\r\n res = str(output)\r\n if self.writeToFile and self.fileName:\r\n lines = res.split('\\\\n')\r\n Helper.createNewFile(self.fileName, os.linesep.join(lines), 'w')\r\n return res\r\n else:\r\n debug.log().error(\"Command '{}' failed, exit-code = {}, error = {}\".format(self.command,\r\n self.returnCode,\r\n str(error)))\r\n except (ValueError, OSError, TypeError) as err:\r\n debug.log().critical(err)\r\n Helper.systemExit()", "title": "" }, { "docid": "e92b1ef415720258538a29313637bf18", "score": "0.58462775", "text": "def stdout(self):\r\n self.wait()\r\n return self._stdout", "title": "" }, { "docid": "9d8e47397a883f6b913d60c935c41313", "score": "0.5846078", "text": "def _remote_read(self, cmd, *args, **kwargs):\n # First remove stdout=, 
if it's there.\n kwargs[\"stdout\"] = subprocess.PIPE\n return self._remote_stream(cmd, *args, **kwargs)", "title": "" }, { "docid": "ef6fa4e1da3731a495e056b78df119f8", "score": "0.5807413", "text": "def read_stdout(cmd, capture_stderr=False):\n if capture_stderr:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)\n std_out, std_err = p.communicate()\n if python3:\n return std_out.decode(), std_err.decode()\n else:\n return std_out, std_err\n else:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)\n std_out, std_err = p.communicate() # ignore stderr\n if python3:\n return std_out.decode()\n else:\n return std_out", "title": "" }, { "docid": "63bfc9ff728712e08d308f3f086aeebf", "score": "0.57136154", "text": "def readOutput(self):\n while True:\n char = os.read(self.pipe_out, 1)\n if not char or self.escape_char in char:\n break\n self.capturedtext += char", "title": "" }, { "docid": "eb1b77ca5e0af7d4b630d3ab84d586da", "score": "0.5699526", "text": "def popen_communicate(args, stdin=None, shell=False):\n proc = subprocess.Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=stdin,\n shell=shell\n )\n out, err = proc.communicate()\n sys.stdout.write(out.decode('utf-8'))\n\n # bytes -> utf-8\n return out.decode('utf-8'), err.decode('utf-8')", "title": "" }, { "docid": "f8ae6e1698bf8bf0a4241a8235afda74", "score": "0.56991994", "text": "def call_output(self, *args, **kargs):\n git_env = None\n if not kargs.get(\"no_env\", False):\n git_env = os.environ.copy()\n git_env['GIT_WORK_TREE'] = self.worktree\n git_env['GIT_DIR'] = self.gitdir\n margs = [GIT]\n margs.extend([ x for x in args])\n cwd = kargs.get(\"cwd\", self.worktree)\n wanterr = kargs.get(\"stderr\", True)\n rawout = kargs.get(\"rawout\", False)\n stderrout = None if wanterr or not rawout else subprocess.PIPE\n stderrout = subprocess.PIPE\n process = subprocess.Popen(margs, cwd=cwd, env=git_env,\n stdout=subprocess.PIPE, stderr=stderrout)\n out = process.communicate()\n process.poll()\n if rawout:\n return (process.returncode, out)\n return out[0].splitlines()", "title": "" }, { "docid": "72fd397b48294a60a5d049e417189033", "score": "0.5668758", "text": "def test_stdout(self, os_environ_copy: unittest.mock.Mock,\n os_getcwd_mock: unittest.mock.Mock,\n subprocess_popen: unittest.mock.Mock):\n\n # fake the return value of stdout\n subprocess_popen.return_value.stdout = ''\n\n # create a run where we do not pipe stdout\n run1 = run.ProcessRun(\"echo\", pipe_stdout=False)\n\n # in the ready state we should raise an error\n with self.assertRaises(run.ProcessRunStateError):\n run1.stdout\n\n # once we run, we should return the normal value\n run1.run()\n self.assertEqual(run1.stdout, '')\n\n # create a run where we do pipe stdout\n run2 = run.ProcessRun(\"echo\", pipe_stdout=True)\n\n # in the ready state we should raise an error\n with self.assertRaises(run.ProcessRunStateError):\n run2.stdout\n\n # once we run, we should return None (because piping)\n run2.run()\n self.assertEqual(run2.stdout, None)", "title": "" }, { "docid": "3acf1e1caece04e54125854b9bd1a485", "score": "0.56492597", "text": "def Run(self):\n if not isinstance(self.cmd, list):\n cmd = self.to_unicode(str(self.cmd))\n cmd = cmd.split()\n if self.include_stdout:\n std_out = PIPE\n else:\n std_out = open(os.devnull, 'w')\n if self.include_stderr:\n err = STDOUT\n fds = True\n else:\n err = None\n fds = False\n if self.return_obj:\n std_in = PIPE\n else:\n std_in = None\n\n # We need to make sure that the 
results of the command we run\n # are in English, so we set up a temporary environment.\n tmpenv = os.environ.copy()\n tmpenv[\"LC_ALL\"] = \"C\"\n tmpenv[\"LANG\"] = \"C\"\n\n try:\n f = Popen(cmd, shell=False, stdout=std_out, stdin=std_in, stderr=err,\n close_fds=fds, cwd='/', env=tmpenv)\n except OSError, e:\n print \"Running command %s failed: %s\" % (str(cmd), str(e))\n return \"\"\n\n if self.return_obj:\n return f\n if self.return_pipe:\n return f.stdout\n else:\n return f.communicate()[0]", "title": "" }, { "docid": "7b1f867ce4126c835a23ef3fa396c5b3", "score": "0.5644535", "text": "def communicate(self, input = None):\r\n stdout = []\r\n stderr = []\r\n sources = [(\"1\", stdout, self.stdout)]\r\n if not self.isatty:\r\n # in tty mode, stdout and stderr are unified\r\n sources.append((\"2\", stderr, self.stderr))\r\n i = 0\r\n while sources:\r\n if input:\r\n chunk = input[:1000]\r\n self.stdin.write(chunk)\r\n self.stdin.flush()\r\n input = input[1000:]\r\n i = (i + 1) % len(sources)\r\n name, coll, pipe = sources[i]\r\n line = pipe.readline()\r\n shell_logger.debug(\"%s> %r\", name, line)\r\n if not line:\r\n del sources[i]\r\n else:\r\n coll.append(line)\r\n if self.isatty:\r\n stdout.pop(0) # discard first line of prompt\r\n try:\r\n self.returncode = int(stdout.pop(-1))\r\n except (IndexError, ValueError):\r\n self.returncode = \"Unknown\"\r\n self._done = True\r\n stdout = six.b(\"\").join(stdout)\r\n stderr = six.b(\"\").join(stderr)\r\n return stdout, stderr", "title": "" }, { "docid": "2286365356e4254526b3e8fb2e3d2ecd", "score": "0.56326616", "text": "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "title": "" }, { "docid": "c98a3d2a2de937cf964ea2fdef119fe3", "score": "0.563128", "text": "def pipe(self, *args: Any) -> Any:\n return pipe(self, *args)", "title": "" }, { "docid": "e0bb2bb212ffaad6c15a8e99070f4c51", "score": "0.56161827", "text": "def pipeline(*args, **kwargs):\n stdout = kwargs.pop('stdout', None)\n assert not kwargs\n children = []\n for n, command in enumerate(args):\n p = subprocess.Popen(\n command,\n stdin=children[-1].stdout if children else None,\n stdout=stdout if n == len(args) - 1 else subprocess.PIPE)\n children.append(p)\n for child in children:\n child.wait()", "title": "" }, { "docid": "ba425252177c19dc6eba9f3af1c5cc78", "score": "0.5606309", "text": "def stdout(self):\n return self.output", "title": "" }, { "docid": "d1d2a5472841ba0ecd523ab6a4023be5", "score": "0.55938226", "text": "def _redirect_output(self):\n if self._binary_output:\n while True:\n data = self._process.stdout.read(1024)\n\n if not data:\n return\n else:\n self._on_output_callback(data)\n else:\n while True:\n line = self._process.stdout.readline().decode('utf-8',\n errors='replace')\n\n if not line:\n return\n else:\n # Output the line without trailing \\n and whitespace.\n self._on_output_callback(line.rstrip())", "title": "" }, { "docid": "f87b8c5627b00744204c97de0047b0e2", "score": "0.5582964", "text": "def _stdout(self):\n return RedirectStream(self.loop, sys.stdout)", "title": "" }, { "docid": "16aa32aee4d9208c535026cdf39ce352", "score": "0.55755216", "text": "def __readStdout(self):\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(), self.vcs.getEncoding(),\n 'replace').strip()\n self.__processOutputLine(s)", "title": "" }, { "docid": 
"c8b67b97183ed8e3622978958ee31802", "score": "0.5562943", "text": "def get_stdout(cmdline):\n return subprocess.check_output(cmdline, shell=True)", "title": "" }, { "docid": "9f432ca11c61454f997ca619dd2d50d7", "score": "0.5551666", "text": "def call_output(args):\n return Popen(args, stdout=PIPE).communicate()[0].strip()", "title": "" }, { "docid": "adad9f02e94af131b3b03dcabcafeed6", "score": "0.5527739", "text": "def _run(self):\n if self.stdin is None:\n # Okay, we need to make a pipe for this. We want to set\n # self.stdin to an fdopen of the write side, and set the\n # read side to be closed when we're done.\n read_side, write_side = os.pipe()\n for f in [read_side, write_side]:\n fl = fcntl.fcntl(f, fcntl.F_GETFL)\n fcntl.fcntl(f, fcntl.F_SETFL, fl | fcntl.FD_CLOEXEC)\n self.stdin = os.fdopen(write_side, \"wb\")\n read_from = os.fdopen(read_side, \"rb\")\n self._to_close.append(read_from)\n else:\n read_from = self.stdin\n if self.stdout is None:\n # We make a pipe for this one. We want to set\n # self.stdout to an fdopen of the read side, and set the\n # write side to be closed when we're done.\n read_side, write_side = os.pipe()\n for f in [read_side, write_side]:\n fl = fcntl.fcntl(f, fcntl.F_GETFL)\n fcntl.fcntl(f, fcntl.F_SETFL, fl | fcntl.FD_CLOEXEC)\n self.stdout = os.fdopen(read_side, \"rb\")\n write_to = os.fdopen(write_side, \"wb\")\n self._to_close.append(write_to)\n else:\n write_to = self.stdout\n if self.stderr is None:\n self.stderr = open(\"/dev/null\", \"wb\")\n self._to_close.append(self.stderr)\n \n # Inform the main thread we've started.\n self._started.set()\n mByte = 1024 * 1024\n # We want to set read_from and write_to to be non-blocking\n for f in [read_from, write_to]:\n SetNonBlock(f)\n try:\n def doWrite(buffer):\n \"\"\"\n Write out to self.stdout, making sure to write out the whole\n buffer, and stop when required. In its own nested function\n simply to make the main loop easier to read.\n \"\"\"\n nwritten = 0\n while nwritten < len(buffer):\n _, w, _ = select([], [write_to], [], 0.1)\n if self._stop:\n return\n if w:\n # We use os.write() because it will tell us how many\n # bytes were written, which is important if there's\n # a full pipe.\n try:\n nwritten += os.write(write_to.fileno(), buffer[nwritten:])\n except OSError:\n print(\"Got OSError\", file=sys.stderr)\n if self._stop:\n return\n return\n\n while True:\n r, _, _ = select([read_from], [], [], 0.1)\n if self._stop:\n break\n if r:\n # Great, we have input ready. 
Or eof.\n b = read_from.read(mByte)\n if b:\n temp_buf = (self._target if callable(self._target) else self._process)(b)\n # Great, we have data we want to write out\n doWrite(temp_buf)\n else:\n # EOF, so let's close the output\n break\n if self._stop:\n break\n self._exception = None\n except BaseException as e:\n # Deliberately catching all exceptions\n self._exception = None if self._stop else e\n self.handler.HelperFinished(self, exc=self._exception)\n # Now close the files in _to_close:\n for f in self._to_close:\n try:\n if type(f) == int:\n os.close(f)\n else:\n f.close()\n except OSError:\n pass\n self.stdin = None\n self.stdout = None\n self.stderr = None\n self._to_close = []\n self._exited.set()", "title": "" }, { "docid": "67f1b1976401a21087b7e8c3a8fe45ba", "score": "0.55173665", "text": "def capture_command(args, shell=False, cwd=None, env=None, stdin=None, cmd_encoding='utf-8'):\n p = subprocess.Popen(\n __convert_args(args, shell, cmd_encoding), shell=shell, cwd=cwd, env=__convert_env(env, cmd_encoding),\n stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout_data, stderr_data = p.communicate()\n return p.returncode, stdout_data, stderr_data", "title": "" }, { "docid": "63791160f0d93115e700c8f8441349a8", "score": "0.5507802", "text": "def _command_run(self):\n # `casperjs` used for base data that write temporary file from web page\n # which don't need to echo content, because of too much data\n\n # child = subprocess.Popen(self._command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # could use `communicate` function fetch `stdout` and `stderr` data to avoid pipe block\n # could use file descriptor instead of `subprocess.PIPE`\n fd_out, fd_err = open(self._stdout_path, 'w'), open(self._stderr_path, 'w')\n child = subprocess.Popen(self._command, shell=True, stdout=fd_out, stderr=fd_err)\n child.wait() # program will continue until wait for sub process finished\n fd_out.close()\n\n with open(self._stdout_path) as fd_out_read:\n for each_echo in fd_out_read:\n yield each_echo\n fd_err.close()", "title": "" }, { "docid": "49dddc990b92b473c1f32be830b7f877", "score": "0.549012", "text": "def start(self):\n self.capturedtext = \"\"\n # Save a copy of the stream:\n self.streamfd = os.dup(self.origstreamfd)\n # Replace the original stream with our write pipe:\n os.dup2(self.pipe_in, self.origstreamfd)", "title": "" }, { "docid": "7d2701ef4a6d878df125aa96d0c59ab2", "score": "0.54886866", "text": "def capture_process(self, command: str | list[str], shell=False, **kwargs):\n return self.modules.subprocess.Popen(\n self.format_for_subprocess(command, shell=shell),\n shell=shell,\n stdout=self.modules.subprocess.PIPE,\n stderr=self.modules.subprocess.STDOUT,\n **kwargs,\n )", "title": "" }, { "docid": "7fcd41519263b7b24c05b6a39cfdbc48", "score": "0.5484562", "text": "async def read_stdout(self):\n return await self._read(self.stdout)", "title": "" }, { "docid": "49ee0d156af741770c00506b85cfaf23", "score": "0.5462999", "text": "def _call_get_stdout(command, env=None, stderr=None):\n assert isinstance(command, (list, tuple)), (\n \"list or tuple argument expected, got: {}\".format(command))\n\n with TemporaryFile() as fout:\n subprocess.check_call(command, env=env, stdout=fout, stderr=stderr)\n fout.seek(0)\n output = fout.read()\n\n return output", "title": "" }, { "docid": "f54884a098e3a57c218dcc951eeb83fa", "score": "0.5459622", "text": "def run(self):\n #print \"SimThread.run corriendo \", self.getName()\n pipePoll = select.poll()\n 
pipePoll.register(self.pipe.stdout, select.POLLIN)\n self.pipe.poll()\n while self.pipe.returncode == None:\n if pipePoll.poll(0):\n #if True:\n #print \"SimThread.run leyendo\"\n self.bufferAccess.acquire()\n #print \"SimThread.run permiso concedido\"\n #self.namdOutput += self.pipe.stdout.readlines(1)\n line = self.pipe.stdout.readline()\n if len(line) > 0:\n self.namdOutput.append(line)\n self.ready = True\n #print \"SimThread.run lines received: \", len(self.namdOutput)\n self.bufferAccess.release()\n #print \"SimThread.run self.pipe.returncode = \", self.pipe.returncode\n self.pipe.poll()\n self.resetModifiationTime()\n print(\"SimThread.run termino\")", "title": "" }, { "docid": "07ed361cca6819299fb26d63dd44173d", "score": "0.5428686", "text": "def _parse_stdout(self):\n\n fname = self.node.get_attribute('output_filename')\n\n if fname not in self.retrieved.list_object_names():\n return self.exit_codes.ERROR_OUTPUT_STDOUT_MISSING\n\n try:\n output_string = self.retrieved.get_object_content(fname)\n except IOError:\n return self.exit_codes.ERROR_OUTPUT_STDOUT_READ\n\n result_dict = parse_cp2k_output_bsse(output_string)\n\n # nwarnings is the last thing to be printed in the CP2K output file:\n # if it is not there, CP2K didn't finish properly\n if 'nwarnings' not in result_dict:\n raise OutputParsingError('CP2K did not finish properly.')\n\n self.out('output_parameters', Dict(dict=result_dict))\n return None", "title": "" }, { "docid": "efc35e83d751e34dc485434ba494e1f9", "score": "0.54269785", "text": "def _stdout():\n # type: () -> IO\n if six.PY3:\n return sys.stdout.buffer\n return sys.stdout", "title": "" }, { "docid": "4974039cf93a24709b629fac5d8008b1", "score": "0.5426238", "text": "def capture(command, *args, **kwargs):\n out, sys.stdout = sys.stdout, StringIO()\n command(*args, **kwargs)\n sys.stdout.seek(0)\n yield sys.stdout.read()\n sys.stdout = out", "title": "" }, { "docid": "bbd67722b33573ddee6cd4d7854e94f4", "score": "0.5417501", "text": "def stdout(self):\n return self._stdout", "title": "" }, { "docid": "7e2b5eb9d6ce152020c3165963d40207", "score": "0.54090756", "text": "async def capture_subprocess_output(stream_captured, output_stream):\n while not stream_captured.at_eof():\n line = await stream_captured.readline()\n if not line:\n continue\n\n output_stream.write(line.decode())", "title": "" }, { "docid": "49e065de6b478546e72e28f9cf58e40a", "score": "0.54018885", "text": "def stdout(self):\n return self.capsys.readouterr().out", "title": "" }, { "docid": "fa8ce1d340a659df62fddc4e593fd51c", "score": "0.5392236", "text": "def Stdout(self):\n return self._Request('stdout')", "title": "" }, { "docid": "dd649c9ca6e9e89cf70c93d38a8f1985", "score": "0.5386912", "text": "def _get_new_process_output(self, a_process, a_input=None,\n a_get_stdout=True, a_get_stderr=True):\n (l_stdout, l_stderr) = a_process.communicate(a_input)\n l_data = \"\"\n if a_get_stdout and l_stdout:\n l_data += l_stdout\n if a_get_stderr and l_stderr:\n l_data += l_stderr\n return l_data", "title": "" }, { "docid": "9be97bdd09aad7e7041b196524515d1b", "score": "0.537272", "text": "def popen_communicate(cmd, stdout_fn=None, mode=\"w\", env=None):\n cmd = pre_cmd_set(cmd)\n if stdout_fn is None:\n p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n else:\n with open(stdout_fn, mode) as outfile:\n # output written into file, error will be returned\n p = Popen(cmd, stdout=outfile, stderr=PIPE, env=env, universal_newlines=False)\n output, error = p.communicate()\n p.wait()\n return p, output, error.rstrip()\n\n 
output, error = p.communicate()\n if output:\n output = output.strip().decode(\"utf-8\")\n\n if error:\n error = error.decode(\"utf-8\").rstrip()\n\n return p, output, error", "title": "" }, { "docid": "fede8256b28dd7b7fd8048eafa817124", "score": "0.5365206", "text": "def get_stdout(self):\n return self.__stdout", "title": "" }, { "docid": "700e9564e2492f16778d0008567426fc", "score": "0.53646696", "text": "def output(self) :\n if self.encoding :\n return self.stdout.decode(self.encoding)\n return self.stdout.decode()", "title": "" }, { "docid": "f7383c84100fe1f2452411cceee363cb", "score": "0.5361379", "text": "def __call__(self, *arguments):\n full_arguments = self._prep_command(*arguments)\n return subprocess.check_output(full_arguments).decode()", "title": "" }, { "docid": "bee8a3f6621941613173808361d4fb1c", "score": "0.5346048", "text": "def _call_cmd_line(self):\n try:\n logging.info(\"Calling Popen with: {}\".format(self.args))\n p = Popen(self.args, stdout=PIPE, stderr=PIPE)\n except OSError:\n raise(RuntimeError(\"No such command found in PATH\"))\n\n self.stdout, self.stderr = p.communicate()\n self.stdout = self.stdout.decode(\"utf-8\")\n self.stderr = self.stderr.decode(\"utf-8\")\n self.returncode = p.returncode", "title": "" }, { "docid": "cb811c775061cba213815d5d8a9898dd", "score": "0.53412265", "text": "def pipe(cmd: List[str], input: bytes) -> bytes:\n result = subprocess.run(args=cmd, input=input, check=True, stdout=subprocess.PIPE)\n output = result.stdout\n return output", "title": "" }, { "docid": "cdf423191bed1d3f79a39bd8689bd6d2", "score": "0.53317684", "text": "def capture_stdout(and_stderr=False):\n ll_stream = io.BytesIO()\n stream = _NonClosingTextIOWrapper(ll_stream, sys.stdout.encoding,\n sys.stdout.errors)\n old_stdout = sys.stdout\n sys.stdout = stream\n\n if and_stderr:\n old_stderr = sys.stderr\n sys.stderr = stream\n\n try:\n yield _CapturedStream(stream)\n finally:\n stream.flush()\n sys.stdout = old_stdout\n if and_stderr:\n sys.stderr = old_stderr", "title": "" }, { "docid": "7f82177b666f443c52b068d55b155f41", "score": "0.532519", "text": "def get_output2():\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n p.wait()\n stdout, stderr = p.communicate()\n output = {'returncode': p.returncode,\n 'stdout': to_string(stdout),\n 'stderr': to_string(stderr)}\n return output", "title": "" }, { "docid": "3f9165b32969f2057835a22f902aecaa", "score": "0.53134346", "text": "def _check_output(self, args, shell=True, input=None, *pargs, **kwargs):\n\t\tself.log(\"_check_output:\", args)\n\t\tself.log(\"cwd:\", os.getcwd())\n\t\tself.log(\"-----\")\n\t\tx = subprocess.Popen(\" \".join(args) if shell else args, *pargs, shell=shell,stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kwargs).communicate(input)\n\t\ty = x[0].splitlines()\n\t\tself.log(\"-----\")\n\t\treturn y", "title": "" }, { "docid": "c8f4846cd7d662ca447c9d45d6cdc2a6", "score": "0.53133494", "text": "def testOtherPipedOK(self):\n cmd = 'cat mymail.txt | ./mymail.py'\n result = subprocess.Popen(cmd, stdout=subprocess.PIPE, \n stderr=subprocess.PIPE, shell=True)\n stdout, stderr = result.communicate()\n self.assertEqual(stderr, '')", "title": "" }, { "docid": "28c80f48d478c73151ac96e381670529", "score": "0.5308705", "text": "def wrapper(*args):\n opts = [i for i in args]\n cmd = [] + opts\n process = Popen(cmd, stdout=PIPE)\n process.communicate()[0]\n\n return process", "title": "" }, { "docid": "75db898c5289f14d46aaf2481738ab58", "score": "0.5305821", "text": "def read_command_pipe(self, args, *, 
and_stderr=False, fatal=True):\n\n cmd = ' '.join(args)\n\n if self.verbose:\n print(cmd, flush=True)\n\n stderr = subprocess.STDOUT if and_stderr else subprocess.PIPE\n\n with subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=stderr) as pipe:\n for line in pipe.stdout:\n yield str(line, encoding=sys.stdout.encoding)\n\n if pipe.returncode != 0 and fatal:\n raise UserException(\n \"'{0}' failed returning {1}\".format(cmd, pipe.returncode))", "title": "" }, { "docid": "55aad0da3772b345169bd3d4233202c0", "score": "0.5302874", "text": "def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd, output=output)\n return output", "title": "" }, { "docid": "f8cfa0a42243a303fc5e8fb225e8eaf2", "score": "0.52901036", "text": "def run(self, timeout=-1):\n self.start()\n\n out, err = self.proc.communicate(timeout)\n\n return out", "title": "" }, { "docid": "59aca84dade6c694bfd3f9e21f5c74e3", "score": "0.5287275", "text": "def mocked_output(*args, **kwargs):\n if any(\"pytest\" in a for a in args):\n return bytes(\"pytest-parallel\", \"utf-8\")\n else:\n proc = sp.run(*args, **kwargs, stdout=sp.PIPE)\n if proc.returncode:\n raise ValueError(\n \"This command should not have failed. This is testing something else.\"\n )\n return proc.stdout", "title": "" }, { "docid": "ba9d91d56bc72f3e480863b1626d5aea", "score": "0.5285921", "text": "def __repr__(self):\n return \"<Pipeline id=0x%x in=%s out=%s err=%s>\"%(\n id(self),\n repr(self.__stdin),repr(self.__stdout),repr(self.__stderr))", "title": "" }, { "docid": "030ed49f6d23675178f9d691a6ce5896", "score": "0.5281969", "text": "def proc(self) -> subprocess.Popen:\n return self._proc", "title": "" }, { "docid": "bc167483809afe8bd25df2c3c561814f", "score": "0.5267826", "text": "def stdout(self):\n \n return self._std_file", "title": "" }, { "docid": "83afeed684cb1f1e8d78a33835d1a18d", "score": "0.5258308", "text": "def run(self) -> None:\n self._data[\"start_time\"] = time.time()\n proc = subprocess.Popen(\n self._data[\"cmd\"],\n cwd=\"/\",\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n close_fds=True,\n )\n (stdout, _stderr) = proc.communicate()\n self._data[\"end_time\"] = time.time()\n self._data[\"exit_status\"] = proc.returncode\n self._data[\"pid\"] = proc.pid\n self._output = stdout\n return None", "title": "" }, { "docid": "d08007ebc62399dfa984d94d53bb1894", "score": "0.5246284", "text": "def stdout(self) :\n # get STDOUT data from the queue and add it to cache\n more_data = b''.join(list(iter(lambda : dequeue_output(self.__queueStdOut), None)))\n self.__stdout = b''.join((self.__stdout, more_data))\n\n # return cached data as is if there is no encoding, else return it decoded.\n return self.__stdout", "title": "" }, { "docid": "a4044c55b4de770717b0403b94a046bf", "score": "0.52446413", "text": "def call_subprocess(cmd, stdin_data=None):\n try:\n sprocess = tornado.process.Subprocess(\n cmd,\n stdin=subprocess.PIPE,\n stdout=STREAM,\n stderr=STREAM\n )\n except OSError as e:\n raise Return((None, e))\n\n if stdin_data:\n sprocess.stdin.write(stdin_data)\n sprocess.stdin.flush()\n sprocess.stdin.close()\n\n result, error = yield [\n 
Task(sprocess.stdout.read_until_close),\n Task(sprocess.stderr.read_until_close)\n ]\n\n raise Return((result, error))", "title": "" }, { "docid": "fab8792c0e493a2df66cd966b99f7666", "score": "0.5232588", "text": "def get_command_result(self, timeout=10):\n\n self.logger.debug(\"Getting output from proc {}\".format(self._proc.pid))\n\n try:\n outs, errs = self._proc.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n self.logger.debug(\"Timeout while waiting. Killing process {}\".format(self._proc.pid))\n self._proc.kill()\n outs, errs = self._proc.communicate()\n\n self._proc = None\n\n return outs", "title": "" }, { "docid": "465e7a6e074282e920568edc0fb5ab05", "score": "0.52308255", "text": "def read_pipe(self, curr_data, pipe, task_name):\n # TODO: Look for another way to check if theres data to receive?\n # Attempt to get more data\n try:\n data = pipe.channel.recv(1)\n except:\n return curr_data\n \n # If new line, store line as a print statement\n if data == b'\\n':\n # Cast to string, format into hostname-taskname: string form\n str_out = curr_data.decode(\"utf-8\")\n # If the program prints FINISHED then the task was successful\n if str_out == \"FINISHED\":\n self.task_successful = True\n if self.print_finish:\n self.print_list.append(\"%s%s: %s\" % (self.ssh_name, task_name, str_out))\n return b''\n\n self.print_list.append(\"%s%s: %s\" % (self.ssh_name, task_name, str_out))\n\n # Returns empty bytes to start a new running output\n return b''\n else:\n # If the last data isn't new line then return what we current have plus the extra data\n return curr_data + data", "title": "" }, { "docid": "0578397831bf3c617f031af330255211", "score": "0.5229602", "text": "def __call__( self, cmd):\n\n self.shell.stdin.write( cmd + self.terminator )\n return CommandOutput( *self.read_output(), cmd=cmd )", "title": "" }, { "docid": "35e2bcf2042bbae62e69c77679ef253e", "score": "0.5209298", "text": "def dispatch(self, cmd):\n result, err = subprocess.Popen(cmd.split(' '),\n stdout=subprocess.PIPE).communicate()\n return result.decode()", "title": "" }, { "docid": "573026cda269114e980ef4d3549598f6", "score": "0.5186658", "text": "def get_stdout(self) -> str:\n return self._stdout.read_text()", "title": "" }, { "docid": "f41b9e6cd925fac4bdea0dc458e002db", "score": "0.51811653", "text": "def process_thread():\n self.process = subprocess.Popen(self.cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n self.stdout, self.stderr = self.process.communicate()", "title": "" }, { "docid": "6168fad9f95608ea8d40c4034f02d90c", "score": "0.5165408", "text": "def _remote_write(self, cmd, *args, **kwargs):\n # First remove stdin=, if it's there.\n kwargs[\"stdin\"] = subprocess.PIPE\n return self._remote_stream(cmd, *args, **kwargs)", "title": "" }, { "docid": "78ec623b29bb5e342982187e8d7a4cf8", "score": "0.51620907", "text": "def __call__(self, exit_on_failure=True):\n self._call_cmd_line()\n if self.success():\n return self.stdout\n else:\n logger.warning(\"Command failed: {}\".format(self.args))\n logger.warning(self.stderr)\n sys.stderr.write(self.stderr)\n\n if exit_on_failure:\n sys.exit(self.returncode)", "title": "" }, { "docid": "58821aebb1819f112b1dc94a97bbead3", "score": "0.51586455", "text": "def read_out(self):\n size = self.console.inWaiting()\n if size:\n data = self.console.read(size)\n return data\n return ''", "title": "" }, { "docid": "52f767296aa50ffe67583e957f9295e8", "score": "0.51584935", "text": "def check_output(*popenargs, **kwargs):\n if 
'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd, output=output)\n return output", "title": "" }, { "docid": "e2b7b095059857f9e345e4be7d57e2b7", "score": "0.5156374", "text": "def output(self, *options, **kwargs):\n\n process = self.git_process(*options)\n stream = unicode_filereader(process.stdout)\n\n for line in iter_rstrip(stream):\n logging.debug(\"GIT: Read %r\", line)\n yield line\n\n symbol = process.stderr.read(1)\n if symbol:\n logging.error(\"GIT: %s\", symbol + process.stderr.read())\n raise StdErrorProcessError\n\n stdout, stderr = process.communicate()\n if hasattr(stdout, \"decode\"):\n stdout = stdout.decode(\"utf-8\")\n if hasattr(stderr, \"decode\"):\n stderr = stderr.decode(\"utf-8\")\n\n if process.returncode != os.EX_OK:\n if stderr:\n logging.error(\"GIT: %s\", stderr.strip())\n\n raise ReturnCodeProcessError(process.returncode)\n\n for line in iter_rstrip(stdout.split(\"\\n\")):\n logging.debug(\"GIT: Read %r\", line)\n yield line", "title": "" }, { "docid": "9f2f5fe173ae8521cc8cf3fde53bbb51", "score": "0.51503193", "text": "def pipe_stdout(filepath=None):\n sys.stdout = get_filelike(filepath)\n yield\n sys.stdout = sys.__stdout__", "title": "" }, { "docid": "45ea1913ea9d6395ce59a6785b0c7141", "score": "0.51462847", "text": "def close(self):\n return self.ffmpeg_proc.communicate()", "title": "" }, { "docid": "79c8b7a3a24c92058daef7efa55ccd4d", "score": "0.51388234", "text": "def pipe(*args):\r\n if len(args) < 2:\r\n raise ValueError(\"pipe needs at least 2 processes\")\r\n\r\n # Set stdout=PIPE in every subprocess except the last\r\n for i in args[:-1]:\r\n i[\"stdout\"] = subprocess.PIPE\r\n\r\n # Runs all subprocesses connecting stdins and stdouts to create the\r\n # pipeline. 
Closes stdouts to avoid deadlocks.\r\n popens = [popen_sp(**args[0])]\r\n for i in range(1, len(args)):\r\n args[i][\"stdin\"] = popens[i - 1].stdout\r\n popens.append(popen_sp(**args[i]))\r\n popens[i - 1].stdout.close()\r\n\r\n # Returns the array of subprocesses just created\r\n return popens", "title": "" }, { "docid": "d6c9f0bd6d8794d7b3a3b6bc1c651d8e", "score": "0.51339155", "text": "def cmd(self, *args):\n\n p = Popen(args, stdout=PIPE)\n return p.communicate()[0]", "title": "" }, { "docid": "a2691f086d13d9e0085a15b64ae4d0f7", "score": "0.5132257", "text": "def _check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n return process.communicate()[0]", "title": "" }, { "docid": "3a224c65c285526947badda97003829e", "score": "0.5125459", "text": "def recv(self):\n if not self.contents:\n raise EOFError(\"Pipe is empty\")\n return_value = self.contents.pop(0)\n if isinstance(return_value, Exception):\n raise return_value\n return return_value", "title": "" }, { "docid": "1683a04e40e88cdb041920ac5133917c", "score": "0.5123639", "text": "def check_output(self, **kwargs):\n env, kwargs = self._prepare_env(kwargs)\n return subprocess.check_output(self.cmd, env=env, **kwargs)", "title": "" }, { "docid": "a8bc0502d8da11352068a1cec48ef942", "score": "0.5121278", "text": "def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = Popen(stdout=PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise CalledProcessError(retcode, cmd, output=output)\n return output", "title": "" }, { "docid": "8507355a6d0141d0d5b566ac1a51a91f", "score": "0.5115879", "text": "def wait(self):\n # communicate returns a tuple with the file object for the child's\n # output.\n self.output = self.p.communicate()[0]\n _return_code = self.p.returncode\n\n if _return_code < 0:\n self.terminated_by_signal = True\n self.exited = False\n self.signal = -_return_code\n else:\n self.terminated_by_signal = False\n self.exited = True\n self.discard_stdout.close()", "title": "" }, { "docid": "dcfa4484b9744d8ae8aac90a30c676d5", "score": "0.5108902", "text": "def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be '\n 'overridden.')\n process = Popen(stdout=PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise CalledProcessError(retcode, cmd, output=output)\n return output", "title": "" }, { "docid": "c4c267f7594ba514e10a74fd61b51f21", "score": "0.5102163", "text": "def _get_output(self) -> AbstractOutput:\n pass", "title": "" }, { "docid": "0a4b1546a51d90ad19c7b3f59cd58653", "score": "0.50984037", "text": "def run(self):\n self.process = subprocess.Popen(\n self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n close_fds=True)\n\n # Each subprocess needs a thread to be watching it and absorbing its\n # output; otherwise it will block when its stdout pipe buffer fills.\n self.start_watching_output(self.process.stdout)\n self.start_watching_output(self.process.stderr)\n self.process.wait()", "title": "" }, { "docid": "6455cfbb5b7b84bfdf87a6256e1c079c", 
"score": "0.5097896", "text": "def test_output(mock_sub, popen_process, capsys):\n mock_sub.stream_output(\"testing\", popen_process)\n\n assert capsys.readouterr().out == (\"output line 1\\n\" \"\\n\" \"output line 3\\n\")\n mock_sub.cleanup.assert_called_once_with(\"testing\", popen_process)", "title": "" }, { "docid": "070217e0989e460bc9f667660bec6cd5", "score": "0.5088194", "text": "def run_and_capture(self):\n\n subproc = Popen([self._executable, self._command] + self._args,\n stderr=PIPE)\n err = ''\n while subproc.poll() is None:\n line = subproc.stderr.readline().decode('utf-8')\n err += line\n sys.stderr.write(line)\n sys.stderr.flush()\n\n exitcode = subproc.poll()\n # We only want to catch exceptions, not other stderr messages\n # (such as \"task does not exist\", so we look for the 'Traceback'\n # string. This only works for python, so we'll need to revisit\n # this in the future when we support subcommands written in other\n # languages.\n err = ('Traceback' in err and err) or None\n\n return exitcode, err", "title": "" }, { "docid": "dc35279d80a59ec80520ce16f554b78f", "score": "0.508792", "text": "def get_simple_cmd_output(self, cmd, stderr=STDOUT):\n args = shlex.split(cmd)\n return Popen(args, stdout=PIPE, stderr=stderr).communicate()[0]", "title": "" }, { "docid": "a545ca84cca5b637a7a5dd501df69cfe", "score": "0.5087496", "text": "def captured_output(self):\n new_out, new_err = StringIO(), StringIO()\n old_out, old_err = sys.stdout, sys.stderr\n try:\n sys.stdout, sys.stderr = new_out, new_err\n yield sys.stdout, sys.stderr\n finally:\n sys.stdout, sys.stderr = old_out, old_err", "title": "" }, { "docid": "410931144d9c9fbe8e01e603f6b86b80", "score": "0.50699955", "text": "def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n logging.debug(' '.join(popenargs[0]))\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd)\n return output", "title": "" }, { "docid": "43a1ba18beaa8d5b689020571c6e7c05", "score": "0.50610965", "text": "def _execute(self, cmd, args=[]):\n proc = subprocess.Popen([self.path, cmd] + args,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, _ = proc.communicate()\n if isinstance(out, bytes):\n out = out.decode().strip()\n return out", "title": "" }, { "docid": "b817852478ee99c610a5fd88c37985a3", "score": "0.50576717", "text": "def get_packet_capture_output(network_watcher_name: Optional[pulumi.Input[str]] = None,\n packet_capture_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPacketCaptureResult]:\n ...", "title": "" }, { "docid": "bf5d7c078ec2592f2367c39a3c0917e4", "score": "0.50491697", "text": "def __readStdout(self):\n self.__process.setReadChannel(QProcess.StandardOutput)\n \n while self.__process.canReadLine():\n s = str(self.__process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n self.__logOutput(s)", "title": "" }, { "docid": "7702d17daef9a06fb970f9d1cd16695d", "score": "0.50470203", "text": "def get_stdout(self):\n if not self.finished():\n self._sync_stdx()\n return strip_endlines(self.stdout)", "title": "" }, { "docid": "1799ac5b7d60bf869257db4066be9f00", "score": "0.504645", "text": "def 
_retrieveProcess(self) -> subprocess.Popen:\n # if there is a tshark process running...\n if self.__tshark is not None and self.__tshark.poll() is None \\\n and (self.__tempfile is None or self.__tempreader is None\n or self.__tempfile.closed or self.__tempreader.closed):\n # ... there must also be a open self.__tempfile and self.__tempreader\n self.terminate(2)\n # print(\"Terminated tshark\", self.__tshark.poll())\n\n if self.__tshark is None or self.__tshark.poll() is not None:\n self.__version = TsharkConnector.checkTsharkCompatibility()[0]\n\n header = struct.pack(\"IHHIIII\", 0xa1b2c3d4, 2, 4, 0, 0, 0x7fff, self.__linktype)\n\n # create tempfile\n # print(\"create tempfile\")\n self.__tempfile = NamedTemporaryFile()\n self.__tempreader = open(self.__tempfile.name, \"rb\")\n self.__tshark = subprocess.Popen(TsharkConnector.__tsharkline,\n stdout=self.__tempfile, stdin=subprocess.PIPE)\n self.__tshark.stdin.write(header)\n time.sleep(.3)\n\n assert self.__tshark is not None and self.__tshark.poll() is None \\\n and self.__tempfile is not None and self.__tempreader is not None \\\n and not self.__tempfile.closed and not self.__tempreader.closed\n\n return self.__tshark", "title": "" }, { "docid": "02950dd2f73aa7ff512fc434627b25d9", "score": "0.5044877", "text": "def test_output_deep_debug(mock_sub, popen_process, capsys):\n mock_sub.command.logger = Log(verbosity=3)\n\n mock_sub.stream_output(\"testing\", popen_process)\n\n # fmt: off\n expected_output = (\n \"output line 1\\n\"\n \"\\n\"\n \"output line 3\\n\"\n \">>> Return code: -3\\n\"\n )\n # fmt: on\n assert capsys.readouterr().out == expected_output\n\n mock_sub.cleanup.assert_called_once_with(\"testing\", popen_process)", "title": "" }, { "docid": "e5660f46437a7a283733ffee9ffc4ed0", "score": "0.5041759", "text": "def get_pipeline_output(pipeline_arn: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPipelineResult]:\n ...", "title": "" }, { "docid": "679063eab5003bdae35583e4929f0639", "score": "0.5041671", "text": "def get_download_pipeline(in_fd, out_fd, gpg=False):\r\n commands = []\r\n if gpg:\r\n commands.append(GPGDecryptionFilter())\r\n commands.append(LZODecompressionFilter())\r\n\r\n return Pipeline(commands, in_fd, out_fd)", "title": "" }, { "docid": "2dfe62d09e9dd9f994edd83a9132a2ab", "score": "0.503829", "text": "def run_capture(cmd_string, timeout=None):\n try:\n result = subprocess.run(\n shlex.split(cmd_string),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n check=True,\n universal_newlines=True,\n timeout=timeout\n )\n except subprocess.CalledProcessError as cpe:\n # Here we have some kind of SystemExit that was triggered.\n result = cpe\n except subprocess.TimeoutExpired as exc:\n result = exc\n\n # Convert bytes to utf-8\n if isinstance(result.stderr, bytes):\n result.stderr = result.stderr.decode()\n if isinstance(result.stdout, bytes):\n result.stdout = result.stdout.decode()\n return result # see https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess", "title": "" }, { "docid": "f1d8abbc182e5358250415ce6985c95c", "score": "0.50367516", "text": "def execute_get_output(*command):\n devnull = open(os.devnull, 'w')\n command = map(str, command)\n proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull)\n devnull.close()\n stdout = proc.communicate()[0]\n return stdout.strip()", "title": "" }, { "docid": "10d461d50ddebf344e4af7ade0ae1cd0", "score": "0.5035831", "text": "async def read_binary(self) -> 
bytes:\n stdout, stderr = self._cli([\"-cat\", self.raw])\n return stdout", "title": "" } ]
02034d9965d781f30c6f7918368b3e67
Creates tokens from raw text
[ { "docid": "47d8828dde4173cdc244ec460e9816af", "score": "0.59687525", "text": "def tokenize_text(self,text):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n tokens = tokenizer.tokenize(text)\n extra_stops = [str(i) for i in range(2100)]\n extra_stops +=['zero','one','two','three','four','five',\n 'six','seven','eight','nine','ten']\n stops = set(stopwords.words(\"english\")+extra_stops)\n clean_tokens = [token.lower() for token in tokens if token.lower() not in stops]\n \n stemmer = PorterStemmer()\n stemmed_tokens = [stemmer.stem(word) for word in clean_tokens]\n \n lemmatizer = WordNetLemmatizer()\n lemmatized_tokens = [lemmatizer.lemmatize(word) for word in stemmed_tokens]\n return lemmatized_tokens", "title": "" } ]
[ { "docid": "542013bc74bde82830f0473726cfb956", "score": "0.7338718", "text": "def tokenize(raw_text):\n\n return raw_text.split()", "title": "" }, { "docid": "2f9ff835a968bb12af51b8b89ca14dab", "score": "0.71471465", "text": "def tokenize(self, text):\n ...", "title": "" }, { "docid": "cd85f7c04b2193e4ba8257ec5f685cd1", "score": "0.70003814", "text": "def createTokensFromText(infile):\n print \"Reading file...\"\n raw_text = infile.read()\n print \"Cleaning & tokenizing text...\"\n tokens = [word.lower() for word in raw_text.split() if\n re.match(ur\"^[^\\W\\d_]+$\", word, re.UNICODE)]\n print \"Text tokenized...\"\n return tokens", "title": "" }, { "docid": "2127b89fea1cec6aee240618e8b3e3e5", "score": "0.69858634", "text": "def createTokens(self, s):\n s = s.strip()\n a = s.split()\n self.tokenlist = a", "title": "" }, { "docid": "ef11596410d65808f6acfe18cdd45af4", "score": "0.6862427", "text": "def tokenize(text):\n \n return wakachi.parse(text)", "title": "" }, { "docid": "2bf380dea9bd51e851c65c211e3d11d5", "score": "0.6801968", "text": "def tokenize(self, text):\n raise NotImplementedError", "title": "" }, { "docid": "f575eb987616bd28eeb17fa4ce2ce41b", "score": "0.677063", "text": "def feed(self, txt):\n self.tokens = self.tokenize(txt)", "title": "" }, { "docid": "4596e13dd247e805c6e54feeba9cb307", "score": "0.67608064", "text": "def tokenize(self, text):\n text = self.preprocessor(text) if self.preprocessor else text\n output_tokens = self.tokenizer.EncodeAsPieces(text)\n return output_tokens", "title": "" }, { "docid": "4abdf32ef2f4f82c393794129e0ca922", "score": "0.6736158", "text": "def tokenize(self, text):\n return self._read(text, self.tokenizer)", "title": "" }, { "docid": "571ec120d5b9e7847ab84dc1ba43b160", "score": "0.6724426", "text": "def tokenize(self, text):\n raise NotImplementedError()", "title": "" }, { "docid": "0792028de68966735bc189836973e03a", "score": "0.6661811", "text": "def get_tokens(text):\r\n tokens = tokenizer.tokenize(text)\r\n tokens = [token.strip() for token in tokens]\r\n return tokens", "title": "" }, { "docid": "7dd5ee3f083aa46ee699ded1daa6949d", "score": "0.6616062", "text": "def whitespace_tokenize(text):\n text = text.strip()\n raw_tokens = text.split()\n # tokens = [token.strip() for token in raw_tokens]\n return raw_tokens", "title": "" }, { "docid": "345e8e1332c44f8e0fce5a237c600419", "score": "0.66009605", "text": "def scan(text) :\n separators = ('\"\"\"{', '}\"\"\"', '\"\"\"', \\\n \"==\", \"!=\", \"<=\", \">=\", \\\n \"=\", \"<\", \">\", \"#\", \"{\", \"}\", \"(\", \")\", \\\n \",\", \".\", \":\", '\"', \"'\", \"+\", \"-\", \"*\", \"/\", \"%\", \"[\", \"]\")\n whitespace =(\" \", \"\\t\", \"\\n\", \"\\r\")\n tokens = ()\n rest = text\n word = \"\"\n while rest != \"\" :\n symbol = \"\"\n for item in whitespace + separators : \n if rest.find(item) == 0 :\n symbol = item\n break\n if symbol != \"\" : # found a separator?\n if word != \"\" :\n tokens = tokens + (word,)\n word = \"\"\n if symbol == \"#\" : # comment starting ?\n break # then, no more tokens to build; quit loop\n if not(symbol in whitespace) :\n tokens = tokens + (symbol,)\n rest = rest[len(symbol):]\n else :\n word = word + rest[0]\n rest = rest[1:]\n\n if word != \"\" :\n tokens = tokens + (word,)\n\n return tokens", "title": "" }, { "docid": "533aeed904dbfed2c531d867dc9187eb", "score": "0.6585229", "text": "def _tokenize_text(self, text):\n tokens = []\n whitespace_normalized_text = \"\"\n for whitespace_token in self._whitespace_tokenizer.tokenize(text):\n for 
punctuation_token in self._punctuation_tokenizer.tokenize(\n whitespace_token):\n start = len(whitespace_normalized_text)\n end = start + len(punctuation_token)\n for wordpiece in self._full_tokenizer.tokenize(punctuation_token):\n token_id = self._full_tokenizer.vocab[wordpiece]\n tokens.append(Wordpiece(start=start, end=end, token_id=token_id))\n whitespace_normalized_text += punctuation_token\n whitespace_normalized_text += \" \"\n return tokens, whitespace_normalized_text", "title": "" }, { "docid": "501ab0187c779bf6d566a9f47a05358d", "score": "0.6575263", "text": "def tokenize(raw):\n tokenized = []\n temp_string = \"\"\n raw = normalise(raw)\n for cc in raw:\n c = cc\n if c == \" \":\n if temp_string != \"\":\n tokenized.append(temp_string)\n temp_string = \"\"\n elif c in CHARACTERS_TO_SPLIT:\n if temp_string != \"\":\n tokenized.append(temp_string)\n tokenized.append(c)\n temp_string = \"\"\n else:\n temp_string += c\n if temp_string != \"\":\n tokenized.append(temp_string)\n return tokenized", "title": "" }, { "docid": "a65d02bc53fba3fa13bbcf5db3017124", "score": "0.6562381", "text": "def tokenize(text):\n global TOK\n tokens = TOK.tokenize(text)\n output = {\n 'words': replace_digits(tokens.words()),\n 'offsets': tokens.offsets(),\n 'pos': tokens.pos(),\n 'lemma': tokens.lemmas(),\n 'ner': tokens.entities(),\n }\n return output", "title": "" }, { "docid": "f2190fb14e6c47207ec09ee932b24824", "score": "0.6540094", "text": "def create_tokens(self):\n return nltk.pos_tag(nltk.word_tokenize(self.clean_text()))", "title": "" }, { "docid": "29091c1b29f756d903855b32158eb0f3", "score": "0.6496633", "text": "def tokenize(self, s):\n ...", "title": "" }, { "docid": "f62a5638780475d75e3333dddee02104", "score": "0.64704436", "text": "def tokenize(text):\n return [Token(word, index) for index, word in enumerate(text.split())]", "title": "" }, { "docid": "f658c2e3f01a47b083f5d78b139f29aa", "score": "0.6452084", "text": "def _tokenize(self, text, pattern):\n\t\tpass", "title": "" }, { "docid": "6bbbe4f4f3a9da5a3b2dc29221f869f9", "score": "0.63681716", "text": "def tokenize(text):\n return text.split(\" \")", "title": "" }, { "docid": "2b9cd358f0c3baaabd1f4ba5e326083d", "score": "0.63515705", "text": "def load_tokens(self):", "title": "" }, { "docid": "b1d70d7b4db3127e75589352197ec0c2", "score": "0.63133764", "text": "def tokenize(self, text) -> List[str]:\n pass", "title": "" }, { "docid": "d80ec0b8fa187baa82a91711821f8380", "score": "0.62991285", "text": "def tokenize(text):\n return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))", "title": "" }, { "docid": "5f0a0bb6ac4a178f6cc22b34ec145cf8", "score": "0.6276372", "text": "def textToTokens(self, tweet_text):\n tokens = [token.strip() for token in tweet_text.split()]\n return tokens", "title": "" }, { "docid": "e91b514649f0d6b11f01ce62c8905b5b", "score": "0.62631226", "text": "def tokenize(self, text: str):\n return self.moses_tokenizer.tokenize(text, escape=False, return_str=True)", "title": "" }, { "docid": "e26ef36f935bc51788239deded0d53b6", "score": "0.62529165", "text": "def get_tokenized_data(self, song_text: str) -> List:\n tokens = [match.group(0) for match in\n re.finditer(r\"\\w+|([^\\w])\\1*\", song_text)] # keeps punctuation and \\n chars\n\n # tokens = word_tokenize(song_text)\n\n # tokens = song_text.split(' ')\n # tokens = get_tokens(text=song_text, chars=[' ', ',', \"'\"])\n return tokens", "title": "" }, { "docid": "129a890d53925a0c936570abe07e4117", "score": "0.6252028", "text": "def naive(self, 
text):\n\n\t\ttokenizedText = None\n\n\t\t#Fill in code here\n\t\ttokenizedText = []\n\n\t\tfor string in text:\n\t\t\tsplit_string = string.split()\n\t\t\ttokenizedText.append(split_string)\n\n\t\treturn tokenizedText", "title": "" }, { "docid": "02678aaa2d1b73679a0e7a6868e39ef3", "score": "0.6248274", "text": "def tokenizer_wrapper(txt):\n max_len = config[\"max_len\"]\n txt_re = re.sub(r\"[^ㄱ-힣0-9]\",\" \",txt)\n tokens=tokenizer(txt_re)\n tokens = [token for token,pos in tokens \\\n if pos not in config[\"stop_pos\"] and token not in config[\"stop_words\"]]\n tokens = list(map(lambda x:vocab.get(x,vocab[\"<unk>\"]),tokens)) \n return tokens[:max_len]+[vocab[\"<pad>\"]]*(max_len-len(tokens))", "title": "" }, { "docid": "eb09b70a52d44ef70861d222fff70712", "score": "0.62473667", "text": "def tokenize(self, text: str, **kwargs) -> List[int]:\n text = ' '.join(text.split(' '))\n text = text.replace(' ', self.word_delimiter_token)\n tokens = [self.bos_token_id]\n \n for char in text:\n tokens.append(self._convert_token_to_id(char))\n\n tokens.append(self.eos_token_id)\n return tokens", "title": "" }, { "docid": "4b048e55225245bdf2c6a82ff90b0c25", "score": "0.62452865", "text": "def get_token_list(text):\n return text.split()", "title": "" }, { "docid": "6a6b6356fa97e9e5935a0c447ad9ecc9", "score": "0.6220432", "text": "def tokenize(text):\n return gensim.utils.simple_preprocess(p.clean(text))", "title": "" }, { "docid": "50276601c17bf5b8e96684c0b9d98681", "score": "0.6199782", "text": "def tokenize(text):\n return [word for word in token.findall(text)]", "title": "" }, { "docid": "095fd800c1665d2820d4ba27be50c116", "score": "0.61935866", "text": "def tokenizeText(self, text: str) -> slist:\n sentences = self.__tokenizeToSentences(text)\n tokens = slist()\n for sent in sentences:\n sent = creReplaceNLs.sub(r' ', sent)\n tokens.extend(self._tb_tokenizer(\n sent)) # Tokenize sentences using TreeBank tokenizer initialized upper in the __init__ function\n return tokens", "title": "" }, { "docid": "00b582a507c929632cffa284a1c735b3", "score": "0.61870426", "text": "def tokenize(self, token):\n ...", "title": "" }, { "docid": "f0c05bdfe0b02a69483e47855f04853e", "score": "0.61861056", "text": "def tokenize_string(text):\n return tokenize_stream(StringIO(text))", "title": "" }, { "docid": "2dc5d547203995dc7375cfcccee6c058", "score": "0.61788166", "text": "def _tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n return split_tokens", "title": "" }, { "docid": "5f7bbc7ec0efe110684dc4e542698cf7", "score": "0.61623865", "text": "def tokenize_code(text):\n return tokenize.RegexpTokenizer(r'\\w+').tokenize(text)", "title": "" }, { "docid": "0079be8539f4334c6168c49574712dde", "score": "0.6150385", "text": "def untokenize(self, text):\n raise NotImplementedError", "title": "" }, { "docid": "60843c4f914b4c34a5f58147eafbe470", "score": "0.6149551", "text": "def tokenize(self, text):\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + six.ensure_str(substr)\n if substr in self.vocab:\n cur_substr = 
substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "title": "" }, { "docid": "57b01f0570a4930b771d6172a622e2dd", "score": "0.6148498", "text": "def get_token_list(text):\n return list(text) # text.split()", "title": "" }, { "docid": "3816fcf3968e60820b610b5d826a0255", "score": "0.6142135", "text": "def tokenize_text(text):\n\n return tkn.word_tokenize(text)", "title": "" }, { "docid": "d2339fdc380562c8b0dd7dd47dea646c", "score": "0.6138596", "text": "def tokenize(self, text):\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "title": "" }, { "docid": "0aab3259f14ec7f23cb62757a1d64e4d", "score": "0.6130496", "text": "def tokenize(self):\n pass", "title": "" }, { "docid": "d82a3d9e4a48ccfc0a380b1f198de5db", "score": "0.61289406", "text": "def tokenize_text(self, data):\n data = data.strip()\n tokens = []\n token = u''\n for i in range(len(data)):\n char = data[i]\n # Add to current token if not a non-word character\n if char.isalnum() or char in self.quotes:\n # Special conditions to handle apostrophe in string to make sure it's a contraction or possessive\n # E.g. bird's [eye chilies], brewer's [yeast], [Moscato] d'Asti (including right single quote char)\n if char in self.quotes:\n # Checks to see if it's the beginning of a word\n # E.g. ('What) in ('What a knob-end')\n if len(token) == 0:\n continue\n # Checks for trailing apostrophes and adds token if the constructed token is non-zero length\n # E.g. 
(end') in ('What a knob-end')\n elif i + 1 >= len(data) or not data[i + 1].isalnum():\n if len(token) > 0:\n tokens.append(token)\n token = u''\n continue\n # If it's managed to get this far, it's passed all the tests and we'll want to store just the\n # apostrophe/single quote, and not the right single quote character for the sake of stemming consistency\n token += u'\\''\n # Converts special unicode chars to ASCII chars for lookup consistency\n elif char in self.char_dict:\n token += self.char_dict[char]\n # Normal case (any time character is not an apostrophe, right single quote, or a non-ASCII character)\n else:\n token += char\n # If not, check to see that we have a token so we don't push empty tokens to the tokens list\n elif len(token) > 0:\n # Edge case for stuff like '2%' in '2% skim milk'\n if char == u'%':\n token += u'%'\n tokens.append(token)\n token = u''\n # Edge case - adds the last word in the doc\n if len(token) > 0:\n tokens.append(token)\n return tokens", "title": "" }, { "docid": "03351893933ad8d1f78b6f6ffb5469bd", "score": "0.6114814", "text": "def tokenize(self, text):\n return self.nl.tokenize(text).split()", "title": "" }, { "docid": "73df6a361fc42ca8f13c0eeefea299ad", "score": "0.61056155", "text": "def create_token(self):", "title": "" }, { "docid": "a2a389bf476cc852a263616218419f89", "score": "0.60990185", "text": "def _tokenize(text):\n\n tokens = []\n\n tag_soup = re.compile(r'([^<]*)(<!--.*?--\\s*>|<[^>]*>)', re.S)\n\n token_match = tag_soup.search(text)\n\n previous_end = 0\n while token_match:\n if token_match.group(1):\n tokens.append(['text', token_match.group(1)])\n\n # if -- in text part of comment, then it's not a comment, therefore it\n # should be converted.\n #\n # In HTML4 [1]:\n # [...] Authors should avoid putting two or more adjacent hyphens\n # inside comments.\n #\n # In HTML5 [2]:\n # [...] the comment may have text, with the additional restriction\n # that the text must not [...], nor contain two consecutive U+002D\n # HYPHEN-MINUS characters (--)\n #\n # [1]: http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.4\n # [2]: http://www.w3.org/TR/html5/syntax.html#comments\n tag = token_match.group(2)\n type_ = 'tag'\n if tag.startswith('<!--'):\n # remove --[white space]> from the end of tag\n if '--' in tag[4:].rstrip('>').rstrip().rstrip('-'):\n type_ = 'text'\n tokens.append([type_, tag])\n\n previous_end = token_match.end()\n token_match = tag_soup.search(text, token_match.end())\n\n if previous_end < len(text):\n tokens.append(['text', text[previous_end:]])\n\n return tokens", "title": "" }, { "docid": "5ff39e8f577b735bef74858a183d65a3", "score": "0.6083375", "text": "def tokenize(text):\n # De facto działanie jak poniżej:\n tokens = nltk.word_tokenize(text)\n\n # tokenizer = nltk.data.load('nltk:tokenizers/punkt/polish.pickle')\n # sentences = tokenizer.tokenize(text)\n # tokens = []\n # for sentence in sentences:\n # tokens.extend(TreebankWordTokenizer().tokenize(sentence))\n return tokens", "title": "" }, { "docid": "0295432126eed01902b94063e05d78c9", "score": "0.60826904", "text": "def tokenize(self, text):\n text = self._clean_text(text)\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. 
This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "title": "" }, { "docid": "05b332f89a53615bfe1fa7ebc1e4845e", "score": "0.6082514", "text": "def tokenizer(text):\r\n\ttext = re.sub('<[^>]*>', '', text)\r\n\temoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)',\r\n\t text.lower())\r\n\ttext = re.sub('[\\W]+', ' ', text.lower()) \\\r\n\t + ' '.join(emoticons).replace('-', '')\r\n\ttokenized = [w for w in text.split() if w not in stop]\r\n\treturn tokenized", "title": "" }, { "docid": "e12822912d3ed896c1a867f26da52af7", "score": "0.6067263", "text": "def tokenize(self, text):\n return self.nl.tokenize_list(text)", "title": "" }, { "docid": "4258e699d6e391ff81300850e527acbc", "score": "0.6054093", "text": "def tokenize(self, text):\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "title": "" }, { "docid": "be417b1dfec467d47b7eb5cdc313df4c", "score": "0.60520554", "text": "def tokenize(self, text):\n tokens = []\n token = \"\"\n for c in text:\n if (re.match(\"[a-zA-Z0-9]\", str(c)) is not None or\n c == \"\\\"\" or c == \"_\" or c == \"-\"):\n token += c\n else:\n if token != \"\":\n tokens.append(token)\n token = \"\"\n if c.strip() != \"\":\n tokens.append(str(c.strip()))\n\n if token != \"\":\n tokens.append(token)\n\n return tokens", "title": "" }, { "docid": "3c34d6fcaf19e9eb27fbbff6b139e3c1", "score": "0.6045957", "text": "def tokenize_text(text):\n # Uses the nltk TweetTokenizer\n tokenizer = TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n return tokens", "title": "" }, { "docid": "90da799fcd99a5c0aa9bd325e9686b84", "score": "0.60433084", "text": "def _tokenize(text: bytes) -> np.ndarray:\n\n words = tokenizer(text.decode('utf-8'))\n if lowercase:\n words = [w.lower() for w in words]\n return np.asarray([\n self._word_to_idx.get(w, self._unknown_token)\n for w in words\n ])", "title": "" }, { "docid": "4474a5e82805c62e32922113dc94f65b", "score": "0.6040958", "text": "def tokenize(self, text: str) -> List[Token]:\n return self._tokenize(text)", "title": "" }, { "docid": "16f7d72f812db67f3903573e8b909c10", "score": "0.6034761", "text": "def 
tokenize_text(text):\n\t\n\ttokens = []\t\n\n\t# split by paragraphs (>2 newline separators)\n\tparas = text.split('\\n¶\\n')\n\n\t# scan each paragraph\n\tfor p in paras:\n\t\t\n\t\tptokens = tokenize_paragraph(p)\n\t\tif ptokens:\t# if paragraph not empty\n\t\t\tif tokens:\t# if previous content exists\n\t\t\t\ttokens.append('¶')\n\t\t\t\n\t\t\ttokens += ptokens\t# add content of this paragraph \n\n\treturn tokens", "title": "" }, { "docid": "1503f9ace4d6389ea1832a4cdb7fd724", "score": "0.60325545", "text": "def tokenize(text):\n\n words = [word.surface for word in tagger(text)]\n return words", "title": "" }, { "docid": "e09efc3cb9ee58811ccb0b73af13ae3f", "score": "0.60107136", "text": "def tokenize(text):\n current_token = next_token(text)\n while current_token is not None:\n yield current_token\n current_token = next_token(text)", "title": "" }, { "docid": "f129ffac782e30983f48540d0da4e5dc", "score": "0.59925914", "text": "def preprocess_sent(text, lang):\n normalizer = normalizer_factory.get_normalizer(lang)\n\n return indic_tokenize.trivial_tokenize(normalizer.normalize(\n text.replace('\\n', ' ')), lang)", "title": "" }, { "docid": "67b88974a835214c2dd17f5834c20ab2", "score": "0.5990227", "text": "def text_to_tokens(self, text, reverse=False, padding=False):\n\n # Convert to tokens. Note that we assume there is only\n # a single text-string so we wrap it in a list.\n tokens = self.texts_to_sequences([text])\n tokens = np.array(tokens)\n\n if reverse:\n # Reverse the tokens.\n tokens = np.flip(tokens, axis=1)\n\n # Sequences that are too long should now be truncated\n # at the beginning, which corresponds to the end of\n # the original sequences.\n truncating = 'pre'\n else:\n # Sequences that are too long should be truncated\n # at the end.\n truncating = 'post'\n\n if padding:\n # Pad and truncate sequences to the given length.\n tokens = pad_sequences(tokens,\n maxlen=self.max_tokens,\n padding='pre',\n truncating=truncating)\n\n return tokens", "title": "" }, { "docid": "89f2b490c8d7656c5350560539a2e168", "score": "0.59842914", "text": "def to_tokens(text, tokenizer):\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n orig_to_tok_index = []\n all_doc_tokens = []\n for token in doc_tokens:\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n all_doc_tokens.append(sub_token)\n char_to_final_offset = []\n for c in char_to_word_offset:\n char_to_final_offset.append(orig_to_tok_index[c])\n return all_doc_tokens, char_to_final_offset", "title": "" }, { "docid": "6588c60a7f5fcdd711938bb914d9c9e3", "score": "0.5981468", "text": "def tokenize(self, text):\n text = unicodedata.normalize(\"NFC\", text) # normalize\n tokens = re.split(r\"([^-\\u0400-\\u04FF\\u0300-\\u036F]+)\", text)\n tokens = self._split_hyphenated(tokens)\n return triples(tokens, normalize=self._normalize)", "title": "" }, { "docid": "b02aec3a86e1ef4fa9b8b35bbaea410e", "score": "0.5979199", "text": "def tokenize(text):\n if isinstance(text, str):\n print(\"[-] error in method 'tokenize(text): text must be type 'str'\")\n exit()\n \n tokens = word_tokenize(text)\n 
tokens = [token[0] + token[1:].lower() for token in tokens]\n return tokens", "title": "" }, { "docid": "d57585ba358019eee67aa2be6019bedb", "score": "0.595268", "text": "def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):\n # Parse arguments\n inputs = self._args_parser(*args, **kwargs)\n inputs = self.tokenizer(\n inputs,\n add_special_tokens=add_special_tokens,\n return_tensors=self.framework,\n padding=padding,\n )\n\n return inputs", "title": "" }, { "docid": "19357581bc250e79ac3dd6e82a1d99b8", "score": "0.5950988", "text": "def tokenize(text): \n \n #Remove non alpha numeric characters\n text = re.sub('[^a-zA-Z0-9]', ' ', text.lower())\n \n # Compress multiple spaces into one\n text = re.sub( '[ ]+', ' ', text )\n \n #Use word tokenizer from NLTK\n tokens = word_tokenize(text)\n \n #Remove stop words\n english_stop_words = stopwords.words(\"english\")\n tokens = filter(lambda x: x not in english_stop_words, tokens)\n \n #Lemmatize using the Wordnet Lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n tokens = [x.strip() for x in tokens]\n tokens = [lemmatizer.lemmatize(x) for x in tokens]\n tokens = [lemmatizer.lemmatize(x, pos= 'v') for x in tokens]\n \n return tokens", "title": "" }, { "docid": "8a10987cf517019ad8bb687eb99cbea5", "score": "0.59435797", "text": "def tokenize_text(self, text: str) -> List[str]:\n return [tok.text for tok in self.lang_model.tokenizer(text)]", "title": "" }, { "docid": "bd2e61a7a4c9877780b98fcc56d05a66", "score": "0.5940797", "text": "def tokenize(text): \n \n #Creating a tokenizer\n tokenizer = RegexpTokenizer(r'\\w+')\n #Removing unwanted characters\n tokens = tokenizer.tokenize(text.replace(\"'s\", ' is').replace(\"'re\", ' are'))\n lemmatizer = WordNetLemmatizer()\n\n #Creating tokens\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "title": "" }, { "docid": "bbd861175db30a49a8d4ae2df7f3ad84", "score": "0.5938612", "text": "def tokenize(self, text):\n text = convert_to_unicode(text)\n if self.do_lower_case:\n text = text.lower()\n output_tokens = self.tokenizer.EncodeAsPieces(text)\n return ' '.join(output_tokens)", "title": "" }, { "docid": "f33c425e1e8313da584abbb013458ddd", "score": "0.5930256", "text": "def tokenizeText(self, text: str) -> List[SpacySentence]:\n text = re.sub(r'[`\\x92\\x91]', r\"'\", text)\n text = re.sub(r'[\\x93\\x94\\x95\\x96\\x85\\xE9]', r'\"', text)\n text = re.sub(r'[\\x80-\\xFF]', r' ', text)\n text = re.sub(r\"([:\\s])\\'(.+?)\\'([\\s\\.])\", r'\\1\"\\2\"\\3', text)\n text = re.sub(r\"\\s+\", r' ', text)\n text = text.strip()\n \n doc: Doc = self._model(text)\n tokenized = slist()\n sent: Span\n for sent in doc.sents:\n ss = SpacySentence(list(sent), sent.text)\n tokenized.append(ss)\n return tokenized", "title": "" }, { "docid": "38241dc6f2e64b449918d3f5d3d7bb0f", "score": "0.5916297", "text": "def tokenize(self, text: str) -> List[str]: #tokenize input sentences to compare word by word\n tokens = []\n token = \"\"\n for c in text:\n if re.match(\"[a-zA-Z0-9]\", str(c)) != None or c == \"\\'\" or c == \"_\" or c == '-':\n token += c\n else:\n if token != \"\":\n tokens.append(token)\n token = \"\"\n if c.strip() != \"\":\n tokens.append(str(c.strip()))\n\n if token != \"\": tokens.append(token)\n return tokens", "title": "" }, { "docid": "2b4bec4fa889bd89b0310df77319f17d", "score": "0.5909391", "text": "def preprocess(self, text):\n return 
self.tokenize(self.remove_special_character(text.lower()))", "title": "" }, { "docid": "df2be0719263320ffc001e878b971bb7", "score": "0.5901879", "text": "def gen_tokens( tokenobj, editor, lexer ):\n tokens = tokenobj.copyTokens()\n workfile = copy.copy(editor.workfile)\n row = 0\n while row < workfile.numLines():\n if (workfile.isLineChanged(editor,row)):\n line = workfile.getLine(row)+'\\n'\n if line:\n line_tokens = []\n for (index,tokentype,value) in lexer.get_tokens_unprocessed(line):\n line_tokens.append((tokentype, value, (row,index), (row,index+len(value)), line))\n tokens[row] = line_tokens\n elif line in tokens:\n del tokens[line]\n row = row + 1\n for l in list(tokens.keys()):\n if l >= workfile.numLines():\n del tokens[l]\n tokenobj.setTokens(tokens)\n tokenobj.setModref(workfile.getModref())\n tokenobj.setThread(None)\n workfile.close()\n del workfile\n workfile = None", "title": "" }, { "docid": "4378289f440708715c2ffc20bf794bd7", "score": "0.5892827", "text": "def tokenize_text(self):\n tknzr = TweetTokenizer()\n self.text = tknzr.tokenize(self.text)\n return self", "title": "" }, { "docid": "bed4759ff4b6e244456ad20116c17e10", "score": "0.58883977", "text": "def parse(cls: type[_R], text: str) -> _R:\n headers: list[Header] = []\n in_codeblock = False\n for line in text.splitlines():\n if line.startswith(\"```\"):\n in_codeblock = not in_codeblock\n if in_codeblock:\n continue\n if not line.startswith(\"#\"):\n continue\n\n level, title = line.split(\" \", 1)\n headers.append(Header(title.strip(), len(level)))\n\n return cls(headers)", "title": "" }, { "docid": "db23c0375ad21b224a62697f17afea43", "score": "0.5877941", "text": "def tokenize(self, text):\n text = self._clean_text(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens", "title": "" }, { "docid": "2a7c65c747dbe815579fe6c9a43f4e49", "score": "0.5877834", "text": "def tokenize(text: str) -> Generator[str, None, None]:\n for token in TOKENIZER.tokenize(text):\n yield str(token).strip()", "title": "" }, { "docid": "e68cb5690be149611c733460baf868e5", "score": "0.5877774", "text": "def parse(self, tokens):\n ...", "title": "" }, { "docid": "a87be146f4078d436761a32629b7734e", "score": "0.5874028", "text": "def tokenize(tokenizer, text, apply_basic_tokenization=False):\n tokenize_fn = tokenizer.tokenize\n if apply_basic_tokenization:\n tokenize_fn = tokenizer.basic_tokenizer.tokenize\n tokens = []\n for token in text.split(\" \"):\n if _SPECIAL_TOKENS_RE.match(token):\n if token in tokenizer.vocab:\n tokens.append(token)\n else:\n tokens.append(tokenizer.wordpiece_tokenizer.unk_token)\n else:\n tokens.extend(tokenize_fn(token))\n return tokens", "title": "" }, { "docid": "ea23ecea7aa4220eb2f1c76a74067ac0", "score": "0.587125", "text": "def tokenize(self):\r\n self.filter_lines()\r\n\r\n for line in self._data:\r\n # Seperate line into relevant tokens\r\n segments = self.split_line_by_symbols(line)\r\n for seg in segments:\r\n cur_type = self.token_type(seg)\r\n # Valid token type\r\n if cur_type is not None:\r\n self._types.append(cur_type)\r\n self._tokens.append(seg)\r\n if cur_type not in {'stringConstant', 'integerConstant'}:\r\n cur_type = cur_type.lower()\r\n else:\r\n if cur_type == 'stringConstant':\r\n cur_type = 'stringConstant'\r\n 
self._tokens[-1] = self._tokens[-1].strip('\\\"')\r\n seg = seg.strip('\\\"')\r\n else:\r\n cur_type = 'integerConstant'\r\n if seg in {'<', '>', '\\\"', '&'}:\r\n self._tokens[-1] = self.convert_lt_gt_quot_amp(seg)\r\n seg = self.convert_lt_gt_quot_amp(seg)\r\n self._xml.append('<'+cur_type+'> '+seg+' </'+cur_type+'>')\r\n # Throw exception if cur_type is None seg is not whitespace\r\n elif len(seg.strip()):\r\n print(seg)\r\n raise InvalidTokenException\r\n self._xml.append('</tokens>')", "title": "" }, { "docid": "0e81503767fcc0218e75fced7ca51a60", "score": "0.58634293", "text": "def get_tokens(self, text, unfiltered=False):\r\n if not isinstance(text, str):\r\n if self.encoding == 'guess':\r\n try:\r\n text = text.decode('utf-8')\r\n if text.startswith(u'\\ufeff'):\r\n text = text[len(u'\\ufeff'):]\r\n except UnicodeDecodeError:\r\n text = text.decode('latin1')\r\n else:\r\n text = text.decode(self.encoding)\r\n if self.stripall:\r\n text = text.strip()\r\n elif self.stripnl:\r\n text = text.strip('\\n')\r\n if self.tabsize > 0:\r\n text = text.expandtabs(self.tabsize)\r\n# if not text.endswith('\\n'):\r\n# text += '\\n'\r\n\r\n def streamer():\r\n for i, t, v in self.get_tokens_unprocessed(text):\r\n yield t, v\r\n stream = streamer()\r\n if not unfiltered:\r\n stream = apply_filters(stream, self.filters, self)\r\n return stream", "title": "" }, { "docid": "54cf2a85f5a015337f60b03dafb49919", "score": "0.5858334", "text": "def from_text(text):\n\n return Rcode.from_text(text)", "title": "" }, { "docid": "aac5f146b12c77f10057745092a122ed", "score": "0.5853632", "text": "def tokenize(self, text):\n return self.tokenizer.sub(' ', text.lower()).split()", "title": "" }, { "docid": "37940fe42d7827568337a68ab2acb841", "score": "0.5851185", "text": "def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):\n inputs = self._args_parser(*args, **kwargs)\n inputs = self.tokenizer(\n inputs,\n add_special_tokens=add_special_tokens,\n return_tensors=self.framework,\n padding=padding,\n truncation=\"only_first\",\n )\n\n return inputs", "title": "" }, { "docid": "d471b7a79533c7c6e5deee43c95f7ce8", "score": "0.58350813", "text": "def tokenize(text):\n tokens = simple_preprocess(text)\n tokens = [token for token in tokens if token not in STOPWORDS]\n return tokens", "title": "" }, { "docid": "7a0a71336d0761539bc3d0e1adf63462", "score": "0.58350676", "text": "def tokenize(text):\n return text.replace('(', ' ( ').replace(')', ' ) ').split()", "title": "" }, { "docid": "2af37d0c906a251fc270020e4113ce09", "score": "0.58231694", "text": "def parse_raw(text: str) -> lark.Tree:\n return parser.parse(text)", "title": "" }, { "docid": "f05386520b1e2b460ee524d5ccea9906", "score": "0.58230823", "text": "def load_tokenizer():", "title": "" }, { "docid": "4481545248b40f84fae8a66e22cf9668", "score": "0.5821221", "text": "def tokenize(text):\n tokens = nltk.word_tokenize(txt)\n tokens = [w for w in tokens if bool(re.search(r\"[^a-zA-Z0-9]\", w)) != True]\n tokens = [WordNetLemmatizer().lemmatize(w, pos='v') for w in tokens if stopwords.words(\"english\")]\n tokens = [PorterStemmer().stem(w) for w in tokens]\n \n return tokens", "title": "" }, { "docid": "f97cb9e75da248c1ab7c80b7402519ab", "score": "0.58195144", "text": "def process_text_constructor(cleaner: Callable,\n tokenizer: Callable,\n append_indicators: bool,\n start_tok: str,\n end_tok: str):\n def process_text(text):\n if append_indicators:\n return [[start_tok] + tokenizer(cleaner(doc)) + [end_tok] for doc in text]\n return 
[tokenizer(cleaner(doc)) for doc in text]\n\n return process_text", "title": "" }, { "docid": "da2666efac378ee61bd12d8137403cff", "score": "0.5805711", "text": "def tokenize(self, text, start):\n\n def get_tokens(char_start, length):\n span = Span(char_start, char_start + length)\n return [token for token in self.annot_tokens if token.overlap(span)]\n\n return get_tokens(start, len(text))", "title": "" }, { "docid": "3ed4ce61145763dfc4377f30d08c6a0a", "score": "0.5799853", "text": "def naive(self, text):\n\n tokenizedText = []\n\n for i in text:\n sentence_token = []\n #text_raw_token = re.split(' -|:|;|[\\t\\n]',i)\n text_raw_token = re.split(r'\\s',i)\n #print('split text : ',text_raw_token)\n for j in text_raw_token :\n if (j == \"\") or (j in string.punctuation) :\n continue\n sentence_token.append(j)\n #print('Punctuation removed :',sentence_token)\n tokenizedText.append(sentence_token)\n return tokenizedText", "title": "" }, { "docid": "8bb66253e28ecbc50ce378e11ab02ba7", "score": "0.5799521", "text": "def __call__(self, text: str):\n return nltk.word_tokenize(text, language=self.language)", "title": "" }, { "docid": "37588ad80cb54806671dd9484f95f3da", "score": "0.57970995", "text": "def preprocess_text(self, text):\n # Remove new lines and concatenate each line into a string \n text = ''.join(text.splitlines())\n # Transform a document into a series of token words\n text = text.split(' ')\n # Remove noncharacters\n text = [i for i in text if i.isalpha()]\n \n return text", "title": "" }, { "docid": "a1fc916e67a603128e60c13d3037f66f", "score": "0.57940584", "text": "def tokenize(self,stream):\r\n if not isinstance(stream,str):\r\n raise ValueError('Input has an unappropriate type, it should be str')\r\n tokensback = []\r\n for i,c in enumerate(stream): # i is a number c is a letter\r\n # here it's just shifts to the position where token starts\r\n if c.isalpha() and (not stream[i-1].isalpha() or i==0): \r\n position=i\r\n # here it takes section from the beginning to the end of the token\r\n # and adds token to the list\r\n if not c.isalpha()and i>0 and stream[i-1].isalpha(): \r\n s=stream[position:i]\r\n # constructor for token is working here\r\n t=Token(position,s) \r\n tokensback.append(t)\r\n # last if for the very last substring in stream \r\n if c.isalpha(): \r\n s=stream[position:i+1]\r\n t=Token(position,s)\r\n tokensback.append(t)\r\n return tokensback", "title": "" }, { "docid": "88c3d3b847228c64117dc5a0a36eb927", "score": "0.57914764", "text": "def parse_from_text(self, text: str) -> Understanding:\n ...", "title": "" }, { "docid": "32a732a056fe470df4ddcacba61bc5c9", "score": "0.5785597", "text": "def init_tokens(self):\n raise NotImplementedError('Abstract method.')", "title": "" }, { "docid": "7a552066e992207b4ce766aaf8284086", "score": "0.57735026", "text": "def get_tokens(text):\n\tfrom nltk import word_tokenize\n\tfrom string import punctuation\n\t\n\tlowers = text.lower()\n\tno_punc = lowers.translate(None, punctuation)\n\ttokens = word_tokenize(no_punc)\n\treturn tokens", "title": "" } ]
329232d02e29ea3ed29d2ff519324fd5
Gets the locale used if `TurnContext.activity.locale` is not specified.
[ { "docid": "2521393f2c80cbbdbe0fddd527a97efd", "score": "0.0", "text": "def default_locale(self, value: str) -> None:\n self._default_locale = value", "title": "" } ]
[ { "docid": "0183bb62745d02306c278fdc5d75d2a0", "score": "0.7553464", "text": "def getCurrentLocale(self):\n\n permutation = self.getCurrentPermutation()\n if permutation:\n locale = permutation.get(\"locale\")\n if locale:\n return locale\n\n return None", "title": "" }, { "docid": "b485af54c6d641a1e0fe5aadb9b14e59", "score": "0.7400666", "text": "def get_current_locale():\n return getattr(_thread_locals, 'locale', None)", "title": "" }, { "docid": "f3038118d516d0d4b2109601dc3b813b", "score": "0.7248603", "text": "def locale(self):\n\n return self.sys.get('locale', None)", "title": "" }, { "docid": "22f337da398cb32b09dd32609a5e6f5c", "score": "0.72201496", "text": "def locale(self) -> Optional[pulumi.Input[Union[str, 'CultureCode']]]:\n return pulumi.get(self, \"locale\")", "title": "" }, { "docid": "62732cf42c158fe850083e32cac37e3c", "score": "0.7135882", "text": "def locale(self) -> str | None:\n return self.interaction.locale", "title": "" }, { "docid": "735521007b903fc3067d4277c2c119b3", "score": "0.699697", "text": "def get_locale():\n if request.args.get('locale'):\n locale = request.args.get('locale')\n if locale in app.config['LANGUAGES']:\n return locale\n else:\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "title": "" }, { "docid": "05e5e1361bef07b517890a8eff0d380e", "score": "0.6963141", "text": "def get_locale():\n return g.get('lang_code', current_app.config['BABEL_DEFAULT_LOCALE'])", "title": "" }, { "docid": "58ef9194e6846d80e6f37a024d7ea01b", "score": "0.67659044", "text": "def locale(self):\n return self._locale", "title": "" }, { "docid": "d0b966069ab58a19d6c556e37e6450b2", "score": "0.6732188", "text": "def locale(self):\n return self.__locale", "title": "" }, { "docid": "3341a4b14f8f87ee520d798b42afd6b4", "score": "0.66924", "text": "def current_locale():\n try:\n return Locale.parse(get_language(), sep='-')\n except (UnknownLocaleError, ValueError):\n # Default to en-US\n return Locale('en', 'US')", "title": "" }, { "docid": "d39a3e7f5748cc4b4761354bac3c86a0", "score": "0.6652101", "text": "def get_locale():\n return request.accept_languages.best_match(app.config['SUPPORTED_LANGUAGES'])", "title": "" }, { "docid": "847f43cf8505b70295704183e27053f4", "score": "0.6621683", "text": "def locale(self):\n return self._get_property(API_NODE_PROPERTY.LOCALE)", "title": "" }, { "docid": "e83f3ee73960bc68aea2d763366184e9", "score": "0.65953124", "text": "def get_locale():\n language = 'en'\n if config.SERVER_MODE is False:\n # Get the user language preference from the miscellaneous module\n if current_user.is_authenticated:\n user_id = current_user.id\n else:\n user = user_datastore.get_user(config.DESKTOP_USER)\n if user is not None:\n user_id = user.id\n user_language = Preferences.raw_value(\n 'misc', 'user_language', 'user_language', user_id\n )\n if user_language is not None:\n language = user_language\n else:\n # If language is available in get request then return the same\n # otherwise check the session or cookie\n data = request.form\n if 'language' in data:\n language = data['language'] or language\n setattr(session, 'PGADMIN_LANGUAGE', language)\n elif hasattr(session, 'PGADMIN_LANGUAGE'):\n language = getattr(session, 'PGADMIN_LANGUAGE', language)\n elif hasattr(request.cookies, 'PGADMIN_LANGUAGE'):\n language = getattr(\n request.cookies, 'PGADMIN_LANGUAGE', language\n )\n\n return language", "title": "" }, { "docid": "9bedb9c597cc85d9350599c0e08914e7", "score": "0.6427812", "text": "def getTranslationLanguage():\n userlocale = 
config.safeGet(\n 'bitmessagesettings', 'userlocale', 'system')\n return userlocale if userlocale and userlocale != 'system' else language", "title": "" }, { "docid": "5e672499132ff5e6f13f2a386f39a35f", "score": "0.64095587", "text": "def getLocale(self):\n try:\n return untangle.parse(self.freesat.getDeviceURL() + \"/rc/locale\")\n except (requests.exceptions.ConnectionError, URLError):\n self.freesat._resetURL()\n return untangle.parse(self.freesat.getDeviceURL() + \"/rc/locale\")", "title": "" }, { "docid": "853bcc6ce4aa9af14a3dcb3640efbf35", "score": "0.6257218", "text": "def get_default_locale(self):\n return self._payload.get_default_locale()", "title": "" }, { "docid": "31f99f320b683bb96a2f24f72886f3b7", "score": "0.62007314", "text": "def get_current_language(context=None):\n request = getRequest()\n return request.get('LANGUAGE', None) or \\\n (context and aq_inner(context).Language()) \\\n or get_default_language()", "title": "" }, { "docid": "8e3875a851b99d0638c045d4be68cd5d", "score": "0.614858", "text": "def _babel_locale():\n try:\n return Locale.parse(get_language(), sep='-')\n except UnknownLocaleError:\n # Default to en-US\n return Locale('en', 'US')", "title": "" }, { "docid": "cf58b5a480a6e63b0230f4949d6e872c", "score": "0.61025536", "text": "def default_locale(self) -> str:\n return self._default_locale", "title": "" }, { "docid": "0447bd9e3f5ac77fd9d762305eb87423", "score": "0.60786796", "text": "def get_lang(self):\n return self.current_lang", "title": "" }, { "docid": "18e4ab11ff968b82f99ac15b54b077d5", "score": "0.6012598", "text": "def default(self):\n\t\t\n\t\tif self.no_dbus: return self.default_offline\n\t\t\n\t\tfor item in self.LocaleProperties.Get('(ss)', BUS_NAME, 'Locale'):\n\t\t\tif item.startswith(\"LANG=\"):\n\t\t\t\treturn item.split(\"=\")[-1]\n\t\t\n\t\treturn None", "title": "" }, { "docid": "c1e07fa0dccac5b19d0e19557fcd258f", "score": "0.5989064", "text": "def launch_presentation_locale(self):\n return self.request.POST.get(\n \"launch_presentation_locale\",\n settings.DEFAULT_LTI_LAUNCH_PRESENTATION_LOCALE,\n )", "title": "" }, { "docid": "0144f9c8046321f85edd5dffbb0b3960", "score": "0.59821975", "text": "def get_locale_dir(self):\n try:\n return self.locale_dir\n except AttributeError:\n pass", "title": "" }, { "docid": "b5614499da5c9689d5b37933698813e0", "score": "0.59065145", "text": "def locale_language(): # real signature unknown; restored from __doc__\n return \"\"", "title": "" }, { "docid": "b131de903d6b3e3d5a8375281a1fa59e", "score": "0.58440614", "text": "async def get_locale(bot, ctx: commands.Context):\n cog = bot.get_cog(\"LocaleStore\")\n\n if cog:\n try:\n ret = cog.get(ctx)\n if asyncio.iscoroutine(ret):\n ret = await ret\n return ret\n except Exception as e:\n traceback.print_exc()", "title": "" }, { "docid": "093f12b8398bb5d25240ea0db4f99e46", "score": "0.58402246", "text": "def guild_locale(self) -> str | None:\n return self.interaction.guild_locale", "title": "" }, { "docid": "51f0da2db017eb5a0daabca56e512117", "score": "0.5839838", "text": "def getLanguage():\n global _currentLanguage\n return _currentLanguage", "title": "" }, { "docid": "2531e2ac2204cfa320bd428073b08484", "score": "0.5769412", "text": "def get_language_name(self, locale=None):\r\n if locale is None:\r\n locale = self\r\n locale = Locale.parse(locale)\r\n return locale.languages.get(self.language)", "title": "" }, { "docid": "cbfad67b09f56d9d023a3eb39fb96198", "score": "0.5761662", "text": "def getDefaultLanguage(self):\n for environment in self.environments:\n 
if not os.path.exists(environment):\n continue\n for line in open(environment).readlines():\n line = line.strip()\n if line.startswith(\"LANGUAGE=\"):\n (key,value) = line.split(\"=\")\n value = value.strip('\"')\n return value.split(\":\")[0]\n for line in open(environment).readlines():\n match = re.match(r'LANG=\"([a-zA-Z_]*).*\"$',line)\n if match:\n return match.group(1)\n return None", "title": "" }, { "docid": "b6496295465f70b663c2e719dad895c6", "score": "0.57447225", "text": "def get_next_locale(self):\n return # osid.locale.Locale", "title": "" }, { "docid": "91c48602639e379c5f879cb1756a6279", "score": "0.57372344", "text": "def determine_system_language():\n if os.name == \"nt\":\n windll = ctypes.windll.kernel32.GetUserDefaultUILanguage()\n return locale.windows_locale[windll].split(\"_\")[0]\n elif os.name == \"posix\":\n return locale.getdefaultlocale()[0].split(\"_\")[0]\n else:\n return \"en\"", "title": "" }, { "docid": "a96d6da015fae6993eebe60231005f69", "score": "0.5722665", "text": "def babel_selector():\n\n if 'locale' in request.args and Locale(\n request.args['locale']) in possible_locales():\n session['locale'] = request.args['locale']\n elif not session.get('locale'):\n langs = []\n for lang in possible_locales():\n langs.append(lang.language)\n session['locale'] = request.accept_languages.best_match(langs)\n\n return session.get('locale')", "title": "" }, { "docid": "e7a4731bacbdc0c4e5c68a3668d4128a", "score": "0.568869", "text": "def get_language(self):\n return self._lang", "title": "" }, { "docid": "df88cafc9511fa29906f60c19a1c43ca", "score": "0.56617606", "text": "def get_default_locale(self):\n parent = self.get_parent()\n if parent is not None:\n return (\n parent.specific_class.objects.defer()\n .select_related(\"locale\")\n .get(id=parent.id)\n .locale\n )\n\n return super().get_default_locale()", "title": "" }, { "docid": "70c34ee2a760585d94307ee9bb493485", "score": "0.556108", "text": "def get_active():\n # Check locked languages\n # This might be faster than call is_locked() method\n language_code = getattr(_lock, \"value\", None)\n if language_code is not None:\n return language_code\n\n # Get language from django\n language_code = get_language()\n if language_code not in get_all():\n # Try to use only first component\n parts = language_code.split('-', 1)\n if len(parts) == 2 and parts[0] in get_all():\n language_code = parts[0]\n else:\n # Get default language from settings\n language_code = get_settings_default()\n return language_code", "title": "" }, { "docid": "6c3f889d11ac12c68190f3c8b7fe363a", "score": "0.5519336", "text": "def GetCurrentLanguage(self):\n pass", "title": "" }, { "docid": "1253eedeb3d734c5aca9888c43204dc5", "score": "0.54969823", "text": "def getlocale(category=LC_CTYPE):\r\n localename = _setlocale(category)\r\n if category == LC_ALL and ';' in localename:\r\n raise TypeError, 'category LC_ALL is not supported'\r\n return _parse_localename(localename)", "title": "" }, { "docid": "f8a9d1153b9d1d09c331da8bf1f45873", "score": "0.5489622", "text": "def get_language(self):\n return self._language", "title": "" }, { "docid": "f8a9d1153b9d1d09c331da8bf1f45873", "score": "0.5489622", "text": "def get_language(self):\n return self._language", "title": "" }, { "docid": "39076118f799675f003835a70c26b96e", "score": "0.5464765", "text": "def default_language(self):\n return self._lang", "title": "" }, { "docid": "a4de8a20378cc483d6d1bd43f9b03ab2", "score": "0.54425913", "text": "async def get_locale(\n websession: aiohttp.ClientSession, 
ctx_data: Dict[str, Any]\n) -> str:\n credential_store: CredentialStore = ctx_data[\"credential_store\"]\n locale = credential_store.get_value(CONF_LOCALE)\n if locale:\n return locale\n\n default_locale = getdefaultlocale()[0]\n while True:\n locale = click.prompt(\"Please select a locale\", default=default_locale)\n if locale: # pragma: no branch\n try:\n await get_api_keys(locale, websession=websession)\n except RenaultException as exc: # pragma: no cover\n click.echo(f\"Locale `{locale}` is unknown: {exc}\", err=True)\n else:\n if click.confirm(\n \"Do you want to save the locale to the credential store?\",\n default=False,\n ):\n credential_store[CONF_LOCALE] = Credential(locale)\n # Add blank new line\n click.echo(\"\")\n return locale", "title": "" }, { "docid": "0c8b0bfd5ae52b01dc5e2ae373a49659", "score": "0.5437559", "text": "def get_lang():\r\n return getattr(pylons.translator, 'pylons_lang', None)", "title": "" }, { "docid": "dad929dec64d82532861c7cf26318597", "score": "0.54235107", "text": "def default(self):\n\t\t\n\t\ttarget = None\n\t\t\n\t\twith open(os.path.join(self.target, \"etc/default/locale\")) as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tif \"LANG=\" in line:\n\t\t\t\t\ttarget = line.strip('LANG=\"').strip('\"\\n')\n\t\t\t\t\tbreak\n\t\t\n\t\treturn target", "title": "" }, { "docid": "e47b85fd3452705a26d69ba4ad4f71a3", "score": "0.5401633", "text": "async def locale(self, ctx):\n pass", "title": "" }, { "docid": "d1e9d9c6d81017f27de60cb466d8a999", "score": "0.5398931", "text": "def language(self):\n lang = None\n if self.__dict__['TAG:language']:\n lang = self.__dict__['TAG:language']\n return lang", "title": "" }, { "docid": "0639c7d762f6524946a48188016cfcd6", "score": "0.53235364", "text": "def default_locale(self, **kw):\n if \"__LOCALE__\" not in kw or kw[\"__LOCALE__\"] not in self.registry[\"localize\"][\"locales\"][\"available\"]:\n kw[\"__LOCALE__\"] = self.locale_name\n\n return kw", "title": "" }, { "docid": "625800376d87927979e14f3a319ddd84", "score": "0.5319034", "text": "def getLanguage():", "title": "" }, { "docid": "d6bfdd6364470c9bfbaeba30fe6e4187", "score": "0.53123033", "text": "def get_ms_windows_language():\n windll = ctypes.windll.kernel32\n\n # Get the language setting of the Windows GUI\n try:\n os_lang = windll.GetUserDefaultUILanguage()\n except Exception as e:\n print(e)\n return\n\n # Convert language code to string\n lang = locale.windows_locale.get(os_lang)\n\n # Only return supported languages\n if not lang.startswith('de'):\n lang = 'en'\n\n # Return de or en\n return lang[:2]", "title": "" }, { "docid": "d3c3ab9b8e914da9140aa949113f748f", "score": "0.5268498", "text": "def default_language(self) -> Optional[LANGUAGE_TAG]:\n return \"en\"", "title": "" }, { "docid": "480c3b12e25124e1887fec629b1e4a21", "score": "0.5262231", "text": "def get_default_locale(self, lang_code: str) -> str:\n locales = self.locales_per_language[lang_code]\n if not locales:\n return 'en_US'\n\n for locale_code in locales:\n if (country_from_locale(locale_code).lower() ==\n self._language_from_locale(locale_code)):\n return locale_code\n\n return locales[0]", "title": "" }, { "docid": "b949e7246afe64f2cd3b260207bcb8c7", "score": "0.5244062", "text": "def get_localized_context():\n return build_localized_context(current_app.config,\n language.get_languages())", "title": "" }, { "docid": "3250b795723d483231050f75ffc0f981", "score": "0.52257293", "text": "def language(self):\n return self.__language", "title": "" }, { "docid": 
"d023cc511a742d39c5e8b7394f3130c1", "score": "0.52133536", "text": "def get_language(self) -> str:\n return self.data[\"game\"][\"language\"]", "title": "" }, { "docid": "9689552f995fef4bc6f35a789bc70f06", "score": "0.5211445", "text": "def _get_language_code(self):\n lang_code = translation.get_language()\n if lang_code:\n lang_code = translation.to_locale(lang_code).replace('_', '-')\n return lang_code", "title": "" }, { "docid": "e6414a265f61a14ae012e294b6c0549f", "score": "0.5210064", "text": "def get_best_locale(self, locale):\n\t\t\n\t\tif len(locale) == 2:\n\t\t\t# If we have only the two-letters code, we should make something like ll_LL.\n\t\t\t# Ex: it -> it_IT.\n\t\t\tlocale = locale.lower() + \"_\" + locale.upper()\n\t\t\n\t\tbest = False\n\t\tfor line in self.supported:\n\t\t\tif locale in line:\n\t\t\t\tbest = line\n\t\t\t\tbreak # Break here.\n\t\t\n\t\tif not best: return None\n\t\t\n\t\treturn best", "title": "" }, { "docid": "e6414a265f61a14ae012e294b6c0549f", "score": "0.5210064", "text": "def get_best_locale(self, locale):\n\t\t\n\t\tif len(locale) == 2:\n\t\t\t# If we have only the two-letters code, we should make something like ll_LL.\n\t\t\t# Ex: it -> it_IT.\n\t\t\tlocale = locale.lower() + \"_\" + locale.upper()\n\t\t\n\t\tbest = False\n\t\tfor line in self.supported:\n\t\t\tif locale in line:\n\t\t\t\tbest = line\n\t\t\t\tbreak # Break here.\n\t\t\n\t\tif not best: return None\n\t\t\n\t\treturn best", "title": "" }, { "docid": "6f0ce8dc20ec83a1b2bd555674d2bcba", "score": "0.51910937", "text": "def getWindowsLocale(posixLocale):\n if posixLocale in windowsLanguageMap:\n return windowsLanguageMap[posixLocale]\n if \".\" in posixLocale:\n loc = posixLocale.split(\".\", 1)\n if loc[0] in windowsLanguageMap:\n return windowsLanguageMap[loc[0]]\n if \"_\" in posixLocale:\n loc = posixLocale.split(\"_\", 1)\n if loc[0] in windowsLanguageMap:\n return windowsLanguageMap[loc[0]]\n if posixLocale != DEFAULT_LANGUAGE:\n return getWindowsLocale(DEFAULT_LANGUAGE)\n return None", "title": "" }, { "docid": "655dcbf5ae248094c1de9a2949c0c08d", "score": "0.51897025", "text": "def get_user_language(self):\n return self.langs_handled[self.user['lang_selected']]", "title": "" }, { "docid": "16739f73df6fe185e4c38a4010d4eb63", "score": "0.517735", "text": "def localized(self):\n\n return self.get(translation.get_language())", "title": "" }, { "docid": "c83856b033f38001233ddd1ccea8962e", "score": "0.5175637", "text": "def language(self):\n return self._language", "title": "" }, { "docid": "c83856b033f38001233ddd1ccea8962e", "score": "0.5175637", "text": "def language(self):\n return self._language", "title": "" }, { "docid": "c83856b033f38001233ddd1ccea8962e", "score": "0.5175637", "text": "def language(self):\n return self._language", "title": "" }, { "docid": "779ef69be81541aa402dff71de356acd", "score": "0.5174664", "text": "def default_offline(self):\n\t\t\n\t\ttarget = None\n\t\t\n\t\twith open(os.path.join(self.target, \"etc/default/locale\")) as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tif \"LANG=\" in line:\n\t\t\t\t\ttarget = line.strip('LANG=\"').strip('\"\\n')\n\t\t\t\t\tbreak\n\t\t\n\t\treturn target", "title": "" }, { "docid": "4a01fab36d7e9e71257d11a6b0386fae", "score": "0.5154599", "text": "def language_code(self) -> Optional[str]:\n return pulumi.get(self, \"language_code\")", "title": "" }, { "docid": "c6dcb5fa44e64342cfe4c5dbd6cd5bad", "score": "0.51419157", "text": "def currentLanguage():\r\n return GLng", "title": "" }, { "docid": "dd9cceb1b6374e24fbb7bc79d712dac4", 
"score": "0.5138827", "text": "def get_language(request):\n lang = request.GET.get(\"lang\",\n request.session.get('django_language', 'en'))\n return get_language_info(lang)", "title": "" }, { "docid": "0632046c3a4bb200590d1d38982451b5", "score": "0.5134244", "text": "def defaultLanguage(self):\n return config.LANGUAGE_DEFAULT", "title": "" }, { "docid": "dbb671f9bc936a11e9bc50dd9d254a38", "score": "0.51292294", "text": "def logon_language(self) -> Optional[str]:\n return pulumi.get(self, \"logon_language\")", "title": "" }, { "docid": "aab2f6297806908e2c08f90acaf290b6", "score": "0.5119657", "text": "def get_current_activity(self):\n return self.activity", "title": "" }, { "docid": "e263802579f38458e3a19b5dc61ebc52", "score": "0.5108897", "text": "def default_locale(category=None, aliases=LOCALE_ALIASES):\r\n varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')\r\n for name in filter(None, varnames):\r\n locale = os.getenv(name)\r\n if locale:\r\n if name == 'LANGUAGE' and ':' in locale:\r\n # the LANGUAGE variable may contain a colon-separated list of\r\n # language codes; we just pick the language on the list\r\n locale = locale.split(':')[0]\r\n if locale.split('.')[0] in ('C', 'POSIX'):\r\n locale = 'en_US_POSIX'\r\n elif aliases and locale in aliases:\r\n locale = aliases[locale]\r\n try:\r\n return get_locale_identifier(parse_locale(locale))\r\n except ValueError:\r\n pass", "title": "" }, { "docid": "00bc6c5ca28cc2963ad9d67a32f29848", "score": "0.5091934", "text": "def translate_locale(self, locale):\n (lang, country) = string.split(locale, \"_\")\n current_language = None\n if \"LANGUAGE\" in os.environ:\n current_language = os.environ[\"LANGUAGE\"]\n os.environ[\"LANGUAGE\"]=locale\n lang_name = self.translate_language(lang)\n country_name = gettext.dgettext('iso_3166', self._country[country])\n if current_language:\n os.environ[\"LANGUAGE\"] = current_language\n return (lang_name, country_name)", "title": "" }, { "docid": "861109fb9b660a8f8cb491cf7badc4f0", "score": "0.50884664", "text": "def get_queryset_language(self, request):\n if not is_multilingual_project():\n # Make sure the current translations remain visible, not the dynamically set get_language() value.\n return appsettings.PARLER_LANGUAGES.get_default_language()\n else:\n # Allow to adjust to current language\n # This is overwritten for the inlines, which follow the primary object.\n return get_language()", "title": "" }, { "docid": "34f64497fc227a70b1644b475f42f850", "score": "0.50768846", "text": "def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):\r\n\r\n try:\r\n # check if it's supported by the _locale module\r\n import _locale\r\n code, encoding = _locale._getdefaultlocale()\r\n except (ImportError, AttributeError):\r\n pass\r\n else:\r\n # make sure the code/encoding values are valid\r\n if sys.platform == \"win32\" and code and code[:2] == \"0x\":\r\n # map windows language identifier to language name\r\n code = windows_locale.get(int(code, 0))\r\n # ...add other platform-specific processing here, if\r\n # necessary...\r\n return code, encoding\r\n\r\n # fall back on POSIX behaviour\r\n import os\r\n lookup = os.environ.get\r\n for variable in envvars:\r\n localename = lookup(variable,None)\r\n if localename:\r\n if variable == 'LANGUAGE':\r\n localename = localename.split(':')[0]\r\n break\r\n else:\r\n localename = 'C'\r\n return _parse_localename(localename)", "title": "" }, { "docid": "a97a713e2ad84156367b8d3ed425fd1c", "score": "0.50704014", "text": "def locale(self, 
locale):\n allowed_values = [\"us\", \"at\", \"be\", \"ca\", \"gb\", \"fr\", \"de\", \"ie\", \"it\", \"lu\", \"mx\", \"nl\", \"es\", \"ch\", \"pr\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and locale not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `locale` ({0}), must be one of {1}\" # noqa: E501\n .format(locale, allowed_values)\n )\n\n self._locale = locale", "title": "" }, { "docid": "626ab19b767a472868a920e7e0149f83", "score": "0.50656545", "text": "def lang(self) -> str:\n return self._lang", "title": "" }, { "docid": "9f7b7053e5d2d7262c9c86ef69178c0a", "score": "0.5048874", "text": "def language(self):\n try:\n return self.results[0].select('.result.details .itemLanguage'\n )[0].text\n except IndexError:\n return None", "title": "" }, { "docid": "c9387c14eeb930580b8359f1074b70f3", "score": "0.50459325", "text": "def get_default_language():\n try:\n from Products.CMFPlone.interfaces import ILanguageSchema\n except ImportError:\n portal = get()\n return portal.portal_properties.site_properties.getProperty(\n 'default_language', None)\n else:\n registry = getUtility(IRegistry)\n settings = registry.forInterface(ILanguageSchema, prefix='plone')\n return settings.default_language", "title": "" }, { "docid": "08a12edc3fe4043d0b12c3c4bf32c37b", "score": "0.50369173", "text": "def get_fallback(cls):\n return cls._meta.get_field('default_language')", "title": "" }, { "docid": "08a12edc3fe4043d0b12c3c4bf32c37b", "score": "0.50369173", "text": "def get_fallback(cls):\n return cls._meta.get_field('default_language')", "title": "" }, { "docid": "976146accc3c6233792b1ff06e08c938", "score": "0.50326306", "text": "def language(self) -> str:\r\n return self._language", "title": "" }, { "docid": "3040aa72280e3d665878851ef4b9bda7", "score": "0.5009113", "text": "def lang(self) -> float:\n return self._lang", "title": "" }, { "docid": "2e4c420b96229027c0d8ba0a31c9e928", "score": "0.5002463", "text": "def get_form_language(self, request, obj=None):\n if obj is not None:\n return obj.get_current_language()\n else:\n return self._language(request)", "title": "" }, { "docid": "da5a89c86c4b5be55d8fb3effd4e9c64", "score": "0.49639308", "text": "def chooseContext(self):\n langs = self.getAvailableLanguages()\n language = negotiator.getLanguage(langs, self.request)\n try:\n return self._data[language]\n except KeyError:\n return self._data[self.defaultLanguage]", "title": "" }, { "docid": "f07661a2b2bed5a3a7f5cbbe855850a2", "score": "0.4958831", "text": "def language(self) -> str:\n return self._language", "title": "" }, { "docid": "d76f3a84a1d001a80e605e8d82390429", "score": "0.4958378", "text": "def get(self, language: str = None, default: str = None) -> str:\n\n language = language or settings.LANGUAGE_CODE\n value = super().get(language, default)\n return value if value is not None else default", "title": "" }, { "docid": "b68289d2c88fc583bcfbb119b632114e", "score": "0.49517637", "text": "def get_current_intent() -> Intent:\n return PythonActivity.mActivity.getIntent()", "title": "" }, { "docid": "c72f3680f1e540b9bded1aa58c82754c", "score": "0.4918786", "text": "def _language(self, request, obj=None):\n return get_language_parameter(request, self.query_language_key)", "title": "" }, { "docid": "6fe32e108807367a4984b7f8dd5fcf69", "score": "0.4911298", "text": "def getCurrentLocaleProject(self, update=False):\n\n locale = self.getCurrentLocale()\n if not locale:\n return None\n\n path = os.path.abspath(os.path.join(\".jasy\", \"locale\", locale))\n 
if not os.path.exists(path) or update:\n Locale.LocaleParser(locale).export(path)\n\n return Project.getProjectFromPath(path, self.__session)", "title": "" }, { "docid": "642b5a402cae6be14bb67a735c0eccf0", "score": "0.48724294", "text": "def transcription_language(self) -> Optional[str]:\n return pulumi.get(self, \"transcription_language\")", "title": "" }, { "docid": "6368a0922055bcbaf23752a9cc4145aa", "score": "0.48680127", "text": "def language(self):\n return get_client_language(self.index)", "title": "" }, { "docid": "72a49f06a6b8efc018c12abb02aa1696", "score": "0.48660663", "text": "def determine_language(otter_config, **kwargs):\n return kwargs.get(\"lang\", otter_config.get(\"lang\", DEFAULT_OPTIONS[\"lang\"]))", "title": "" }, { "docid": "c66f8edb6e3b1c64e82535b9e92ef750", "score": "0.48636216", "text": "def locale_id(request):\n if request.locale_name not in request._database_locales:\n _create_locale(request.locale_name, request)\n\n return request._database_locales[request.locale_name].id", "title": "" }, { "docid": "488813deff73c34d621d76f6d4c11aac", "score": "0.48553705", "text": "def language(self):\n return self._get_volume_info('language')", "title": "" }, { "docid": "a6fa4961dd04122b8b4ec3e5f56d8cde", "score": "0.4836683", "text": "def get_user_lang(user=None):\n\tif not user:\n\t\tuser = frappe.session.user\n\n\t# via cache\n\tlang = frappe.cache().get_value(\"lang:\" + user)\n\n\tif not lang:\n\n\t\t# if defined in user profile\n\t\tuser_lang = frappe.db.get_value(\"User\", user, \"language\")\n\t\tif user_lang and user_lang!=\"Loading...\":\n\t\t\tlang = get_lang_dict().get(user_lang)\n\t\telse:\n\t\t\tdefault_lang = frappe.db.get_default(\"lang\")\n\t\t\tlang = default_lang or frappe.local.lang\n\n\t\tfrappe.cache().set_value(\"lang:\" + user, lang or \"en\")\n\n\treturn lang", "title": "" }, { "docid": "317e12050e494284f060a9e9e20b5248", "score": "0.48305547", "text": "def language_code(self):\n if not self.language:\n return None\n language = self.language\n if language in LanguageCodes.three_to_two:\n language = LanguageCodes.three_to_two[language]\n return language", "title": "" }, { "docid": "1e6e4c1037088a4c7f774730359c1113", "score": "0.48237053", "text": "def get_main_activity(self):\n activities = self.get_main_activities()\n if len(activities) > 0:\n return self._format_value(activities.pop())\n return None", "title": "" }, { "docid": "25fd6b3cfad8ff9dd81b1ccc53ad155a", "score": "0.48093718", "text": "def GetLanguageString(self):\n RaiseNotImpl(\"GetLanguageString\")", "title": "" }, { "docid": "a88367ac2685c84a94bc4bd0bdd49769", "score": "0.48036385", "text": "def locale_dir(self):", "title": "" }, { "docid": "949d27de8bd331483cb4bcc33113dcfc", "score": "0.47982132", "text": "def getLanguage(product, file):\n lang = None\n if file.endswith('.po'):\n if file.startswith(product):\n lang = '-'.join(file.split('-')[1:])[:-3]\n return lang", "title": "" }, { "docid": "02ca18b7d18a6eed5cd39cc2b57c8bbe", "score": "0.47972268", "text": "def get_settings_default():\n #TODO: move it so it is checked only once\n if settings.LANGUAGE_CODE not in get_all():\n raise ImproperlyConfigured(\n \"LANGUAGE_CODE '%s' is not one of LANGUAGES.\" \\\n \"Set one of LANGUAGES as LANGUAGE_CODE or add '%s' to LANGUAGES.\"\n % (settings.LANGUAGE_CODE, settings.LANGUAGE_CODE)\n )\n return settings.LANGUAGE_CODE", "title": "" }, { "docid": "fac4455a93ae7933ae04f92a1188893e", "score": "0.47780767", "text": "def get_display_name(self, locale=None):\r\n if locale is None:\r\n locale = 
self\r\n locale = Locale.parse(locale)\r\n retval = locale.languages.get(self.language)\r\n if self.territory or self.script or self.variant:\r\n details = []\r\n if self.script:\r\n details.append(locale.scripts.get(self.script))\r\n if self.territory:\r\n details.append(locale.territories.get(self.territory))\r\n if self.variant:\r\n details.append(locale.variants.get(self.variant))\r\n details = filter(None, details)\r\n if details:\r\n retval += ' (%s)' % u', '.join(details)\r\n return retval", "title": "" } ]
3acc3d2008079b219ac7f83ecb7e3210
Create the command line to be passed to the RTC server. This function does not add the min_port and max_port params to the RTC command line
[ { "docid": "ac80715cd3570bd89c02d5a142a1b8f6", "score": "0.6836483", "text": "def get_rtc_cmdline_params(self, as_config_file, vs_config_file):\n as_lb, as_port, vs_lb, vs_port = self._get_audio_video_lb_address(\n as_config_file, vs_config_file)\n # Create the command line for RTC server\n s = '{0} -as={1}:{2} -vs={3}:{4}'.format(\n self.config.get_cmdline_env_flag(), as_lb, as_port, vs_lb, vs_port)\n return s", "title": "" } ]
[ { "docid": "4849001e343d48655bbecfa022eafbab", "score": "0.7281892", "text": "def get_rtc_cmdline_params_port_range(self, as_config_file, vs_config_file):\n as_lb, as_port, vs_lb, vs_port = self._get_audio_video_lb_address(\n as_config_file, vs_config_file)\n # Create the command line for RTC server\n s = '{0} -as={1}:{2} -vs={3}:{4} -min_port=30000 -max_port=65535'.format(\n self.config.get_cmdline_env_flag(), as_lb, as_port, vs_lb, vs_port)\n return s", "title": "" }, { "docid": "2fce619e7c06d07c60679c4eacbbd89f", "score": "0.6548384", "text": "def get_mixdown_cmdline_params(self, as_config_file, vs_config_file):\n as_lb, as_port, vs_lb, vs_port = self._get_audio_video_lb_address(\n as_config_file, vs_config_file)\n # Create the command line for RTC server\n s = '{0} -as={1}:{2} -vs={3}:{4}'.format(\n self.config.get_cmdline_env_flag(), as_lb, as_port, vs_lb, vs_port)\n return s", "title": "" }, { "docid": "1c57de4ba4df7d0b3002266432e389c6", "score": "0.62795913", "text": "def cmdlinestart(self):\n try:\n port = int(self.getarg(\"-p\") or self.getarg(\"--port\",50140))\n host = self.getarg(\"-h\") or self.getarg(\"--hostname\",\"localhost\")\n # Check if we should connect to existing ring\n remoteserver = self.getarg(\"-s\") or self.getarg(\"-server\")\n replicas = self.getarg(\"-r\") or self.getarg(\"--replicas\", 3)\n replicas = int(replicas)\n # Start the server\n self.start(host=host,port=port,replicas=replicas,remote_server=remoteserver)\n except ValueError:\n self.help()", "title": "" }, { "docid": "77c901bca53a8cb8d0c872cf88d64e9d", "score": "0.599955", "text": "def _init_client(self):\n self.setup_commands = [\n \"java\",\n \"-jar\",\n os.path.join(os.path.expanduser('~/microrts_env/rts_wrapper'),\n 'microrts-master/out/artifacts/microrts_master_jar/microrts-master.jar'),\n \"--map\", os.path.join(os.path.expanduser(self.config.microrts_path), self.config.map_path),\n \"--ai1_type\", self.config.ai1_type,\n \"--ai2_type\", self.config.ai2_type,\n \"--maxCycles\", str(self.config.max_cycles),\n \"--maxEpisodes\", str(self.config.max_episodes),\n \"--period\", str(self.config.period),\n \"--render\", str(self.config.render),\n # \"--port\", str(self.port),\n # \"more\",\n # \"options\"\n ]", "title": "" }, { "docid": "db815d91b96136f4232760ed59551f08", "score": "0.58759457", "text": "def setup_cli():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--availability\",\n action=\"store_true\",\n help=\"Run the availability calculation.\"\n )\n parser.add_argument(\n \"--cutoff\",\n type=int,\n default=-1,\n help=\"Maximum allowed length of a time-windowed event (e.g. 
availability window, trip), in days.\"\n )\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"Print debug messages.\"\n )\n parser.add_argument(\n \"--duration\",\n type=int,\n help=\"Number of seconds; with --start_time or --end_time, defines a time query range.\"\n )\n parser.add_argument(\n \"--end\",\n type=str,\n help=\"The end of the time query range for this request.\\\n Should be either int Unix seconds or ISO-8601 datetime format.\\\n At least one of end or start is required.\"\n )\n parser.add_argument(\n \"--local\",\n action=\"store_true\",\n help=\"Input and query times are local.\"\n )\n parser.add_argument(\n \"--query\",\n action=\"append\",\n type=lambda kv: kv.split(\"=\", 1),\n dest=\"queries\",\n help=\"A series of PROVIDER=VEHICLE pairs; each pair will be analyzed separately.\"\n )\n parser.add_argument(\n \"--start\",\n type=str,\n help=\"The beginning of the time query range for this request.\\\n Should be either int Unix seconds or ISO-8601 datetime format\\\n At least one of end or start is required.\"\n )\n\n return parser, parser.parse_args()", "title": "" }, { "docid": "9ff6b031e472739eb98150652526db09", "score": "0.5844345", "text": "def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):\n opts = self._options\n\n cmd = [executable] + self._arguments\n\n if opts.timeout is not None:\n cmd.append('-max-time={0}'.format(opts.timeout))\n\n if opts.exit_on_error:\n cmd.append('-exit-on-error-type=Assert')\n\n if not opts.nowitness:\n cmd.append('-write-witness')\n\n if opts.executable_witness:\n cmd.append('-write-harness')\n\n return cmd + options + tasks + opts.argv", "title": "" }, { "docid": "510af9f02e60763105b5d7813738d808", "score": "0.5779346", "text": "def defineConsole():\n parser = argparse.ArgumentParser(description='SBML to BNGL translator')\n parser.add_argument('-i', '--input', type=str, help='settings file', required=True)\n return parser", "title": "" }, { "docid": "6bf9bccc3fdf2a391aad9647e532c748", "score": "0.5735488", "text": "def _init_client(self):\n self.setup_commands = [\n \"java\",\n \"-jar\", settings.jar_dir,\n \"--map\", os.path.join(self.config.microrts_path, self.config.map_path),\n \"--ai1_type\", self.config.ai1_type,\n \"--ai2_type\", self.config.ai2_type,\n \"--maxCycles\", str(self.config.max_cycles),\n \"--maxEpisodes\", str(self.config.max_episodes),\n \"--period\", str(self.config.period),\n \"--render\", str(self.config.render),\n # \"--port\", str(self.port),\n # \"more\",\n # \"options\"\n ]", "title": "" }, { "docid": "2d36260089857f78061867218bc535e5", "score": "0.57255864", "text": "def command_line_args():\n args = dict()\n\n args['ip'] = \"8.8.8.8\"\n args['asn'] = \"15169\"\n args['host'] = \"example.com\"\n\n return args", "title": "" }, { "docid": "5a03e295ab37782b6129a18380ce647c", "score": "0.5678419", "text": "def make_start_cmdline(options):\n arg = []\n if options.cdrom:\n arg.append(\"-cdrom\")\n arg.append(os.path.abspath(options.cdrom[0]))\n if options.snapshot:\n arg.append(\"-snapshot\")\n if options.args:\n arg.append(\"\".join(options.args))\n if options.password:\n os.environ[\"SPICE_PASSWORD\"] = options.password[0]\n if arg:\n return shlex.join(arg)\n return \"\"", "title": "" }, { "docid": "41141096479b5a165c025e5445385265", "score": "0.5675012", "text": "def _initCmdLineArguments(self):\n self.parser.add_argument(\"registrations\", nargs='*')\n\n self.parser.add_argument(\"-t\", \"--time\",\n help=\"Single date or date range. 
Format is \"\n \"YYYY-MM-DD for a single date and \" \n \"<begin>:<end> for a range.\")\n\n self.parser.add_argument(\"-T\", \"--time-offset\",\n help=\"Time offset added to total flight time\",\n default=\"0:00\")\n\n self.parser.add_argument(\"-L\", \"--landing-offset\",\n help=\"Offset added to total number of \"\n \"landings\",\n type= int,\n default=self.config.landing_offset)\n\n self.parser.add_argument(\"-S\", \"--non-strict\",\n help=\"Allow summation of flights with \"\n \"different PICs\",\n default=self.config.non_strict,\n action=\"store_true\")", "title": "" }, { "docid": "588ea9ca010c502395b11676beabe52e", "score": "0.56361634", "text": "def parse_command_line(description):\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('-p', metavar='port', type=int, default=1070,\n help='TCP port (default 1070)')\n args = parser.parse_args()\n address = ('127.0.0.1', args.p)\n return address", "title": "" }, { "docid": "2d73ed2c1ada9d08872542ec920ab53b", "score": "0.5631673", "text": "def build_init_cmd(time_mode: str) -> str:\n initiation_command = f\"{time_mode} username {USERNAME} password {APIKEY} useragent firestarter\"\n if COMPRESSION != \"\":\n initiation_command += f\" compression {COMPRESSION}\"\n if KEEPALIVE != \"\":\n initiation_command += f\" keepalive {KEEPALIVE}\"\n if INIT_CMD_ARGS != \"\":\n initiation_command += f\" {INIT_CMD_ARGS}\"\n initiation_command += \"\\n\"\n\n return initiation_command", "title": "" }, { "docid": "f60e2fd8672cb4422893d1a940928238", "score": "0.562205", "text": "def commandline(self, *args):\n return _Client.Application_commandline(self, *args)", "title": "" }, { "docid": "8fea9b6c6c8b1d17b966e0aca8459885", "score": "0.55896324", "text": "def cli(debug):", "title": "" }, { "docid": "17c2086ea589eed04eb0578656a825e7", "score": "0.5579557", "text": "def __init__(self):\n # torcs_command = [\"torcs\", \"-t\", \"100000\", \"-nofuel\", \"-nolaptime\", \"-nodamage\", \"-r\", \"/home/student/Documents/torcs-server/torcs-client/evolver/race_config/quickrace.xml\"]\n # self.torcs_process = subprocess.Popen(torcs_command)", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": 
"c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.5574597", "text": "def cli():", "title": "" }, { "docid": "98f67a84d684eea41e33e80de2365454", "score": "0.5546555", "text": "def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):\n\n if not propertyfile is None:\n options = options + ['--prp={0}'.format(propertyfile)]\n\n return [executable] + options + tasks", "title": "" }, { "docid": "6b18c49863750c1b3ce8b21e0f2c8d25", "score": "0.55331415", "text": "def build_cmdline(self, *args, minion_tgt=None, **kwargs): # pylint: disable=arguments-differ\n log.debug(\n \"Building cmdline. Minion target: %s; Input args: %s; Input kwargs: %s;\",\n minion_tgt,\n args,\n kwargs,\n )\n minion_tgt = self._minion_tgt = self.get_minion_tgt(minion_tgt=minion_tgt)\n cmdline = []\n\n args = list(args)\n\n # Handle the config directory flag\n for arg in args:\n if arg.startswith(\"--config-dir=\"):\n break\n if arg in (\"-c\", \"--config-dir\"):\n break\n else:\n cmdline.append(\"--config-dir={}\".format(self.config_dir))\n\n # Handle the timeout CLI flag, if supported\n if self.__cli_timeout_supported__:\n salt_cli_timeout_next = False\n for arg in args:\n if arg.startswith(\"--timeout=\"):\n # Let's actually change the _terminal_timeout value which is used to\n # calculate when the run() method should actually timeout\n if self._terminal_timeout_set_explicitly is False:\n salt_cli_timeout = arg.split(\"--timeout=\")[-1]\n try:\n self._terminal_timeout = int(salt_cli_timeout) + 5\n except ValueError:\n # Not a number? Let salt do it's error handling\n pass\n break\n if salt_cli_timeout_next:\n if self._terminal_timeout_set_explicitly is False:\n try:\n self._terminal_timeout = int(arg) + 5\n except ValueError:\n # Not a number? 
Let salt do it's error handling\n pass\n break\n if arg == \"-t\" or arg.startswith(\"--timeout\"):\n salt_cli_timeout_next = True\n continue\n else:\n salt_cli_timeout = self._terminal_timeout\n if salt_cli_timeout and self._terminal_timeout_set_explicitly is False:\n # Shave off a few seconds so that the salt command times out before the terminal does\n salt_cli_timeout -= 5\n if salt_cli_timeout:\n # If it's still a positive number, add it to the salt command CLI flags\n cmdline.append(\"--timeout={}\".format(salt_cli_timeout))\n\n # Handle the output flag\n if self.__cli_output_supported__:\n for arg in args:\n if arg in (\"--out\", \"--output\"):\n break\n if arg.startswith((\"--out=\", \"--output=\")):\n break\n else:\n # No output was passed, the default output is JSON\n cmdline.append(\"--out=json\")\n\n if self.__cli_log_level_supported__:\n # Handle the logging flag\n for arg in args:\n if arg in (\"-l\", \"--log-level\"):\n break\n if arg.startswith(\"--log-level=\"):\n break\n else:\n # Default to being quiet on console output\n cmdline.append(\"--log-level=quiet\")\n\n if minion_tgt:\n cmdline.append(minion_tgt)\n\n # Add the remaining args\n cmdline.extend(args)\n\n # Keyword arguments get passed as KEY=VALUE pairs to the CLI\n for key in kwargs:\n value = kwargs[key]\n if not isinstance(value, str):\n value = json.dumps(value)\n cmdline.append(\"{}={}\".format(key, value))\n cmdline = super().build_cmdline(*cmdline)\n if self.python_executable:\n if cmdline[0] != self.python_executable:\n cmdline.insert(0, self.python_executable)\n log.debug(\"Built cmdline: %s\", cmdline)\n return cmdline", "title": "" }, { "docid": "0a810b47df99cb6fdcd4ebd77f0f023c", "score": "0.552562", "text": "def set_cmd_line_args():\n data_path = os.path.dirname(os.path.realpath(__file__))\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-a\", \"--api_key\", help=\"API Key\", dest=\"api_key\", required=True)\n parser.add_argument(\"-b\", \"--base_url\", dest=\"base_url\", help=\"Library API base URL\", default=\"https://library.cdisc.org/api\")\n parser.add_argument(\"-r\", \"--resource\", dest=\"start_resource\", help=\"Library API resource\", default=\"/mdr/ct/packages\")\n parser.add_argument(\"-l\", \"--log_file\", help=\"log file name\", default=\"link_log.txt\", dest=\"log_file\")\n parser.add_argument(\"-d\", \"--log_dir\", help=\"path to log and config file directory\", default=data_path, dest=\"log_path\")\n parser.add_argument(\"-m\", \"--media_type\", help=\"media_type\", default=\"application/json\", dest=\"media_type\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", help=\"verbose\", default=False, required=False)\n parser.add_argument(\"-f\", \"--filter\", help=\"filter file name\", default=\"prime_cache_filters.txt\", dest=\"filter\")\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "670675aee1cb24ff243e57852f33430c", "score": "0.55160445", "text": "def _spawn_server(self, fchdir, ExpandedFilename, args, rid):\n\n if g_fScreen:\n name = SCREEN\n elif sys.platform == DARWIN:\n name = DARWIN\n else:\n try:\n import terminalcommand\n name = MAC\n except:\n name = os.name\n\n if name == 'nt' and g_fDebug:\n name = NT_DEBUG\n \n e = ['', ' --encrypt'][not self.m_fAllowUnencrypted]\n r = ['', ' --remote'][self.m_fAllowRemote]\n c = ['', ' --chdir'][fchdir]\n p = ['', ' --pwd=\"%s\"' % self.m_rpdb2_pwd][os.name == 'nt']\n \n b = ''\n\n encoding = detect_locale()\n fse = sys.getfilesystemencoding()\n\n ExpandedFilename = 
g_found_unicode_files.get(ExpandedFilename, ExpandedFilename)\n ExpandedFilename = as_unicode(ExpandedFilename, fse)\n\n if as_bytes('?') in as_bytes(ExpandedFilename, encoding, fstrict = False):\n _u = as_bytes(ExpandedFilename)\n _b = base64.encodestring(_u)\n _b = _b.strip(as_bytes('\\n')).translate(g_safe_base64_to)\n _b = as_string(_b, fstrict = True)\n b = ' --base64=%s' % _b\n\n debugger = os.path.abspath(__file__)\n if debugger[-1:] == 'c':\n debugger = debugger[:-1]\n\n debugger = as_unicode(debugger, fse)\n\n debug_prints = ['', ' --debug'][g_fDebug] \n \n options = '\"%s\"%s --debugee%s%s%s%s%s --rid=%s \"%s\" %s' % (debugger, debug_prints, p, e, r, c, b, rid, ExpandedFilename, args)\n\n python_exec = sys.executable\n if python_exec.endswith('w.exe'):\n python_exec = python_exec[:-5] + '.exe'\n\n python_exec = as_unicode(python_exec, fse)\n\n if as_bytes('?') in as_bytes(python_exec + debugger, encoding, fstrict = False):\n raise BadMBCSPath\n\n if name == POSIX:\n shell = CalcUserShell()\n terminal_command = CalcTerminalCommand()\n\n if terminal_command in osSpawn:\n command = osSpawn[terminal_command] % {'shell': shell, 'exec': python_exec, 'options': options}\n else: \n command = osSpawn[name] % {'term': terminal_command, 'shell': shell, 'exec': python_exec, 'options': options}\n else: \n command = osSpawn[name] % {'exec': python_exec, 'options': options}\n\n if name == DARWIN:\n s = 'cd \"%s\" ; %s' % (getcwdu(), command)\n command = CalcMacTerminalCommand(s)\n\n print_debug('Terminal open string: %s' % repr(command))\n\n command = as_string(command, encoding)\n \n if name == MAC:\n terminalcommand.run(command)\n else:\n subprocess.Popen(command, shell=True)", "title": "" }, { "docid": "cd254dad85f06e8d9d3fde6aac1049aa", "score": "0.5511705", "text": "def make_args_for_generate_input(self):\n argstr = '-p %u -n %u %u -r %s' % (self.prec, self.num_edges, self.num_verts, str(self.seed))\n if self.dims == 0 and (self.min!=0 or self.max!=100000):\n argstr += ' -e %.1f,%.1f' % (self.min, self.max)\n elif self.dims > 0:\n argstr += ' -v %u,%.1f,%.1f' % (self.dims, self.min, self.max)\n return argstr", "title": "" }, { "docid": "fba3f3ba2662fd2884bb34a8294cf659", "score": "0.549415", "text": "def parse_command_line():\n parser = argparse.ArgumentParser(description=\"2D OpenGL accelerated GUI for Quartjesavond.\")\n parser.add_argument(\"--hostname\", help=\"Hostname to connect too. Default runs local server.\")\n parser.add_argument(\"--port\", type=int, default=1234, help=\"Port to connect to.\")\n parser.add_argument(\"--no-fullscreen\", action=\"store_false\", dest=\"fullscreen\", \n help=\"Do not run in fullscreen mode.\")\n parser.add_argument(\"--width\", type=int, default=1024, help=\"Width of the display window.\")\n parser.add_argument(\"--height\", type=int, default=768, help=\"Height of the display window.\")\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "5d1ec031aefab1b762a8e8b600159b2c", "score": "0.5473084", "text": "def __init__(self, executable_path, port=..., verbose: bool = ..., log_path: Optional[Any] = ...):\n self.service_args = ...", "title": "" }, { "docid": "d85b92a52a222091f6cc35a63f190785", "score": "0.54571795", "text": "def generateParameters(self):\n parms = {}\n args = []\n \n # 1. the program name\n args.append(self.cxfreezeExecCombo.currentText())\n \n # 2. 
the commandline options\n # 2.1 general options\n if (\n self.parameters['targetDirectory'] !=\n self.defaults['targetDirectory']\n ):\n parms['targetDirectory'] = self.parameters['targetDirectory']\n args.append('--target-dir={0}'.format(\n self.parameters['targetDirectory']))\n if self.parameters['targetName'] != self.defaults['targetName']:\n parms['targetName'] = self.parameters['targetName'][:]\n args.append('--target-name={0}'.format(\n self.parameters['targetName']))\n parms['baseName'] = self.parameters['baseName'][:]\n if self.parameters['baseName'] != '':\n args.append('--base-name={0}'.format(self.parameters['baseName']))\n parms['initScript'] = self.parameters['initScript'][:]\n if self.parameters['initScript'] != '':\n args.append('--init-script={0}'.format(\n self.parameters['initScript']))\n parms['applicationIcon'] = self.parameters['applicationIcon'][:]\n if (\n self.parameters['applicationIcon'] !=\n self.defaults['applicationIcon']\n ):\n args.append('--icon={0}'.format(\n self.parameters['applicationIcon']))\n parms['script'] = self.parameters['script'][:]\n if self.parameters['keepPath'] != self.defaults['keepPath']:\n parms['keepPath'] = self.parameters['keepPath']\n args.append('--no-copy-deps')\n if self.parameters['compress'] != self.defaults['compress']:\n parms['compress'] = self.parameters['compress']\n args.append('--compress')\n if self.parameters['optimize'] != self.defaults['optimize']:\n parms['optimize'] = self.parameters['optimize']\n if self.parameters['optimize'] == 1:\n args.append('-O')\n elif self.parameters['optimize'] == 2:\n args.append('-OO')\n \n # 2.2 advanced options\n if self.parameters['defaultPath'] != self.defaults['defaultPath']:\n parms['defaultPath'] = self.parameters['defaultPath'][:]\n args.append('--default-path={0}'.format(\n os.pathsep.join(self.parameters['defaultPath'])))\n if self.parameters['includePath'] != self.defaults['includePath']:\n parms['includePath'] = self.parameters['includePath'][:]\n args.append('--include-path={0}'.format(\n os.pathsep.join(self.parameters['includePath'])))\n if self.parameters['replacePaths'] != self.defaults['replacePaths']:\n parms['replacePaths'] = self.parameters['replacePaths'][:]\n args.append('--replace-paths={0}'.format(\n os.pathsep.join(self.parameters['replacePaths'])))\n if (\n self.parameters['includeModules'] !=\n self.defaults['includeModules']\n ):\n parms['includeModules'] = self.parameters['includeModules'][:]\n args.append('--include-modules={0}'.format(\n ','.join(self.parameters['includeModules'])))\n if (\n self.parameters['excludeModules'] !=\n self.defaults['excludeModules']\n ):\n parms['excludeModules'] = self.parameters['excludeModules'][:]\n args.append('--exclude-modules={0}'.format(\n ','.join(self.parameters['excludeModules'])))\n if self.parameters['extListFile'] != self.defaults['extListFile']:\n parms['extListFile'] = self.parameters['extListFile']\n args.append('--ext-list-file={0}'.format(\n self.parameters['extListFile']))\n \n # 2.3 additional files tab\n if self.parameters['additionalFiles'] != []:\n parms['additionalFiles'] = self.parameters['additionalFiles'][:]\n \n return (args, parms)", "title": "" }, { "docid": "abfed901d232e14938d09e6730c4369b", "score": "0.54485977", "text": "def _port_string(env):\n if env.port is None:\n return ''\n else:\n return \"-e 'ssh -p {port}'\".format(port=env.port)", "title": "" }, { "docid": "790a686d577494a42557cd9c45237159", "score": "0.54340976", "text": "def generateLaunchDescription(gzclient, multiInstance, 
port):\n try:\n envs = {}\n for key in os.environ.__dict__[\"_data\"]:\n key = key.decode(\"utf-8\")\n if key.isupper():\n envs[key] = os.environ[key]\n except BaseException as exception:\n print(\"Error with Envs: \" + str(exception))\n return None\n\n # Gazebo visual interfaze. GUI/no GUI options.\n if gzclient:\n gazeboCmd = \"gazebo\"\n else:\n gazeboCmd = \"gzserver\"\n\n # Creation of ROS2 LaunchDescription obj.\n\n worldPath = os.path.join(os.path.dirname(gazeborlenv.__file__), 'worlds',\n 'test8.world')\n '''\n worldPath = os.path.join(os.path.dirname(gazeborlenv.__file__), 'worlds',\n 'empty.world')\n '''\n\n launchDesc = LaunchDescription([\n ExecuteProcess(\n cmd=[gazeboCmd, '--verbose', '-s', 'libgazebo_ros_factory.so', '-s',\n 'libgazebo_ros_init.so', worldPath], output='screen', env=envs),\n Node(package='travel', node_executable='spawn_agent',\n output='screen'),\n Node(package='travel', node_executable='agent',\n output='screen')\n ])\n return launchDesc", "title": "" }, { "docid": "d7443b97e9b158028657d97e72428514", "score": "0.5423018", "text": "def set_command_line_arguments(self, time_info):\n return None", "title": "" }, { "docid": "28fb8780d7e38c4a6d2361c3fcbf145e", "score": "0.54178864", "text": "def grab_server_args():\n \n workbench_conf = ConfigParser.ConfigParser()\n config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')\n workbench_conf.read(config_path)\n server = workbench_conf.get('workbench', 'server_uri')\n port = workbench_conf.get('workbench', 'server_port')\n\n # Collect args from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--server', type=str, default=server, help='location of workbench server')\n parser.add_argument('-p', '--port', type=int, default=port, help='port used by workbench server')\n args, commands = parser.parse_known_args()\n server = str(args.server)\n port = str(args.port)\n\n return {'server':server, 'port':port, 'commands': commands}", "title": "" }, { "docid": "2f140fa3974ee9111cd0d0b28dc654c4", "score": "0.54108846", "text": "def console_main(override_args: Optional[List[str]], stop_on_join_tower: bool) -> None:\n __version__ = get_version_number()\n\n # PARSE THE ARGUMENTS\n\n parser = argparse.ArgumentParser(description=\"A bot to fill in bells during ringingroom.com practices\")\n\n # Tower arguments\n tower_group = parser.add_argument_group(\"Tower arguments\")\n tower_group.add_argument(\n \"room_id\",\n type=int,\n help=\"The numerical ID of the tower to join, represented as a row on 9 bells, \\\n e.g. 763451928.\",\n )\n tower_group.add_argument(\n \"--url\",\n default=\"https://ringingroom.com\",\n type=str,\n help=\"The URL of the server to join (defaults to 'https://ringingroom.com')\",\n )\n tower_group.add_argument(\n \"--name\",\n default=None,\n type=str,\n help=\"If set, then Wheatley will ring bells assigned to the given name. \\\n When not set, Wheatley rings unassigned bells.\",\n )\n\n # Row generation arguments\n row_gen_group = parser.add_argument_group(\"Row generation arguments\")\n\n # An mutual exclusion group to disallow specifying more than one of a method,\n # a CompLib comp or a place notation\n comp_method_place_group = row_gen_group.add_mutually_exclusive_group(required=True)\n comp_method_place_group.add_argument(\n \"-c\",\n \"--comp\",\n type=str,\n help=\"The ID or URL of the complib composition you want to ring. 
\\\n This can include a substituted method or access key query string\",\n )\n comp_method_place_group.add_argument(\n \"-m\", \"--method\", type=str, help=\"The title of the method you want to ring\"\n )\n comp_method_place_group.add_argument(\n \"-p\",\n \"--place-notation\",\n type=str,\n help=\"The place notation description of the method you want to ring\",\n )\n\n row_gen_group.add_argument(\n \"-b\",\n \"--bob\",\n default=\"14\",\n help='An override for what place notation(s) should be made when a `Bob` is called in \\\n Ringing Room. These will by default happen at the lead end. Examples: \"16\" or \\\n \"0:16\" => 6ths place lead end bob. \"-1:3\" or \"-1:3.1\" => a Grandsire Bob. \"20: 70\" \\\n => a 70 bob taking effect 20 changes into a lead (the Half Lead for Surprise Royal). \\\n \"20:7/0:4\" => a 70 bob 20 changes into a lead and a 14 bob at the lead end. \\\n \"3: 5/9: 5\" => bobs in Stedman Triples. Defaults to \"14\".',\n )\n row_gen_group.add_argument(\n \"-n\",\n \"--single\",\n default=\"1234\",\n help='An override for what place notation(s) should be made when a `Single` is called in \\\n Ringing Room. These will by default happen at the lead end. Examples: \"1678\" or \\\n \"0:1678\" => 6ths place lead end single. \"-1:3.123\" => a Grandsire Single. \\\n \"20: 7890\" => a 7890 single taking effect 20 changes into a lead (the Half Lead for \\\n Surprise Royal). \"3: 567/9: 567\" => singles in Stedman Triples. Defaults to \"1234\".',\n )\n row_gen_group.add_argument(\n \"--start-index\",\n type=int,\n default=0,\n help=\"Determines which row of the lead Wheatley will start ringing. This can be negative (so -1 \\\n would refer to the lead **end**). Defaults to 0 (i.e. a standard start).\",\n )\n row_gen_group.add_argument(\"--start-row\", type=str, help=\"Determines the initial row.\")\n row_gen_group.add_argument(\n \"-u\",\n \"--use-up-down-in\",\n action=\"store_true\",\n help=\"If set, then the Wheatley will automatically go into changes after two rounds have been \\\n rung.\",\n )\n row_gen_group.add_argument(\n \"-s\",\n \"--stop-at-rounds\",\n action=\"store_true\",\n help=\"If set, then Wheatley will stand his bells the first time rounds is reached.\",\n )\n row_gen_group.add_argument(\n \"-H\",\n \"--handbell-style\",\n action=\"store_true\",\n help=\"If set, then Wheatley will ring 'handbell style', i.e. ringing two strokes of \\\n rounds then straight into changes, and stopping at the first set of rounds. By \\\n default, he will ring 'towerbell style', i.e. only taking instructions from the \\\n ringing-room calls. This is equivalent to using the '-us' flags.\",\n )\n row_gen_group.add_argument(\n \"--no-calls\",\n action=\"store_true\",\n help=\"If set, Wheatley will not call anything when ringing compositions.\",\n )\n\n # Rhythm arguments\n rhythm_group = parser.add_argument_group(\"Rhythm arguments\")\n rhythm_group.add_argument(\n \"-k\",\n \"--keep-going\",\n action=\"store_true\",\n help=\"If set, Wheatley will not wait for users to ring - instead, he will push on with the \\\n rhythm.\",\n )\n rhythm_group.add_argument(\n \"-w\",\n \"--wait\",\n action=\"store_true\",\n help=\"Legacy parameter, which is now set by default. The previous default behaviour of not waiting \\\n can be set with '-k'/'--keep-going'.\",\n )\n rhythm_group.add_argument(\n \"-I\",\n \"--inertia\",\n type=float,\n default=0.5,\n help=\"Overrides Wheatley's 'inertia' - how much Wheatley will take other ringers' positions \\\n into account when deciding when to ring. 
0.0 means he will cling as closely as \\\n possible to the current rhythm, 1.0 means that he will completely ignore the other \\\n ringers.\",\n )\n rhythm_group.add_argument(\n \"-S\",\n \"--peal-speed\",\n default=\"2h58\",\n help=\"Sets the default speed that Wheatley will ring (assuming a peal of 5040 changes), \\\n though this will usually be adjusted by Wheatley whilst ringing to keep with other \\\n ringers. Example formatting: '3h4' = '3h4m' = '3h04m' = '3h04' = '184m' = '184'. \\\n Defaults to '2h58'.\",\n )\n rhythm_group.add_argument(\n \"-G\",\n \"--handstroke-gap\",\n type=float,\n default=1.0,\n help=\"Sets the handstroke gap as a factor of the space between two bells. Defaults to \\\n '1.0'.\",\n )\n rhythm_group.add_argument(\n \"-X\",\n \"--max-bells-in-dataset\",\n type=int,\n default=15,\n help=\"Sets the maximum number of bells that Wheatley will store to determine the current \\\n ringing speed. If you make this larger, then he will be more consistent but less \\\n quick to respond to changes in rhythm. Setting both this and \\\n --inertia to a very small values could result in Wheatley ringing ridiculously \\\n quickly. Defaults to '15'.\",\n )\n\n # Misc arguments\n parser.add_argument(\"--version\", action=\"version\", version=f\"Wheatley {__version__}\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Makes Wheatley print more (DEBUG) output.\",\n )\n parser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"count\",\n default=0,\n help=\"Makes Wheatley print less output. `-q` only prints WARNINGs and ERRORs; `-qq` only prints \\\n ERRORs; `-qqq` prints nothing.\",\n )\n\n # Parse arguments\n # `[1:]` is apparently needed, because sys.argv[0] is the working file of the Python interpreter\n # which `parser.parse_args` does not want to see as an argument\n args = parser.parse_args(sys.argv[1:] if override_args is None else override_args)\n\n # Deprecation warnings\n if args.wait:\n print(\"Deprecation warning: `--wait` has been replaced with `--keep-going`!\")\n\n # Run the program\n configure_logging(args.verbose, args.quiet)\n\n try:\n tower_url = get_load_balancing_url(args.room_id, args.url)\n except TowerNotFoundError as e:\n sys.exit(f\"Bad value for 'room_id': {e}\")\n except InvalidURLError as e:\n sys.exit(f\"Bad value for '--url': {e}\")\n\n try:\n parse_start_row(args.start_row)\n except StartRowParseError as e:\n sys.exit(f\"{e}\")\n\n tower = RingingRoomTower(args.room_id, tower_url)\n row_generator = create_row_generator(args)\n\n try:\n peal_speed = parse_peal_speed(args.peal_speed)\n except PealSpeedParseError as e:\n sys.exit(f\"{e}\")\n\n rhythm = create_rhythm(\n peal_speed,\n args.inertia,\n args.max_bells_in_dataset,\n args.handstroke_gap,\n not args.keep_going,\n )\n bot = Bot(\n tower,\n row_generator,\n args.use_up_down_in or args.handbell_style,\n args.stop_at_rounds or args.handbell_style,\n not args.no_calls,\n rhythm,\n user_name=args.name,\n )\n\n # Catch keyboard interrupts and just print 'Bye!' instead a load of guff\n try:\n with tower:\n tower.wait_loaded()\n if not stop_on_join_tower:\n bot.main_loop()\n except KeyboardInterrupt:\n print(\"Bye!\")", "title": "" }, { "docid": "f67e3a1667481e15d85d7f24cfb9ab1c", "score": "0.54005355", "text": "def get_args():\n \n global command\n \n ## Assign description to help doc\n parser = argparse.ArgumentParser(description='Script manages RCON connection to remote linux game server. 
One command accepted at a time when specified on command line.', allow_abbrev=True)\n \n ## Add arguments. When argument present on command line, then it is stored as True, else returns False\n parser.add_argument(\n '-server', help='Hostname or IP to connect to', nargs=1, required=True)\n parser.add_argument(\n '-port', help='Port to use', type=int, nargs=1, required=True)\n parser.add_argument(\n '-command', help='Optional command to send, if not specified script enters interactive mode', nargs='+', required=False)\n\n \n ## Array for arguments passed to script\n args = parser.parse_args()\n server = str(*args.server)\n port = str(*args.port)\n if args.command:\n command = ' '.join(args.command)\n else:\n command = ''\n \n ## Return all variable values\n return server, port, command", "title": "" }, { "docid": "37f04389c9632feb8c5f163999401f52", "score": "0.5391013", "text": "def __set_cmdargs_up():\n\n psr_desc='Document maker command line control interface'\n psr_epi='The config profile is used to specify defaults'\n\n psr = argparse.ArgumentParser(description=psr_desc, epilog=psr_epi)\n\n psr.add_argument(\n '-d', '--debug', action='store_true',\n dest='dm_debug', help='print debug information'\n )\n psr.add_argument(\n '-r', '--resdir', action='store',\n dest='resdir', help='points out the one resources directory')\n psr.add_argument(\n '-c', '--config', action='store',\n dest='config', help='load an specific config profile'\n )\n psr.add_argument(\n '-b', '--builder',\n dest='dm_builder', help='specify the builder to use'\n )\n psr.add_argument(\n '-i', '--input',\n dest='dm_input', help='specify the input variables with \\'var=val;var2=val2;var2=valN\\'..'\n )\n psr.add_argument(\n '-o', '--output',\n dest='dm_output', help='specify the output file'\n )\n\n return psr.parse_args()", "title": "" }, { "docid": "0216c6060ab9e72f534f8e3d5153c969", "score": "0.5360339", "text": "def parse_cmd_line():\n\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n '-f', '--frames', dest='frames_path', default=DEFAULT_FRAMES_PATH,\n help=\"Watson frames file path (default: {})\".format(\n DEFAULT_FRAMES_PATH\n )\n )\n parser.add_argument(\n '-s', '--server-domain', dest='server_domain',\n default=DEFAULT_API_SERVER_DOMAIN,\n help=\"Crick server domain (default: {})\".format(\n DEFAULT_API_SERVER_DOMAIN\n )\n )\n parser.add_argument(\n '-p', '--port', dest='server_port', default=DEFAULT_API_SERVER_PORT,\n help=\"Crick server port (default: {})\".format(DEFAULT_API_SERVER_PORT)\n )\n parser.add_argument(\n '-t', '--token', dest='token', help=\"Crick API token\"\n )\n parser.add_argument(\n '-n', '--n-frames', dest='n_frames', type=int,\n default=DEFAULT_N_FRAMES,\n help=(\n \"The number of latest frames to push (default: {})\".format(\n DEFAULT_N_FRAMES\n )\n )\n )\n parser.add_argument(\n '-v', '--verbose', dest='logging_level', action='store_const',\n const=logging.INFO, default=logging.WARNING,\n help=\"Verbose mode\"\n )\n parser.add_argument(\n '-d', '--debug', dest='logging_level', action='store_const',\n const=logging.DEBUG,\n help=\"Debug mode\"\n )\n return parser.parse_args()", "title": "" }, { "docid": "847881c7dfcf8ea95750e99a01565d12", "score": "0.5348467", "text": "def create_port(body=None):\n return NEUTRON_CLI.create_port(body)", "title": "" }, { "docid": "138414ca86b7acccfb6cc2514db60756", "score": "0.5347037", "text": "def get_config():\n parser = argparse.ArgumentParser(description='Serve files.')\n parser.add_argument(\n '--port', type=int, 
default=7900, \n help='What port should we run on?')\n parser.add_argument(\n '--hostname', type=str, default='0.0.0.0', \n help='The IP or host address to use?')\n return parser.parse_args()", "title": "" }, { "docid": "d3bc8d0ffe7ca1f67f6ed25dd23c3256", "score": "0.533686", "text": "def main():\n return cli(obj={})", "title": "" }, { "docid": "7d3d014732df8116d5c69390f842e8c9", "score": "0.53276074", "text": "def cli():\r\n pass", "title": "" }, { "docid": "7d3d014732df8116d5c69390f842e8c9", "score": "0.53276074", "text": "def cli():\r\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.5318665", "text": "def cli():\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.5318665", "text": "def cli():\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.5318665", "text": "def cli():\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.5318665", "text": "def cli():\n pass", "title": "" }, { "docid": "416ebef25c2ec8d1e62f66e4f8143a45", "score": "0.5318459", "text": "def cli():\n\tpass", "title": "" }, { "docid": "7c617b764b97a9aa4644995a91a2404d", "score": "0.5316515", "text": "def create_argument_parser():\n\n parser = argparse.ArgumentParser(\n description='starts server to serve an agent')\n parser.add_argument(\n '-d', '--core',\n required=True,\n type=str,\n help=\"core model to run with the server\")\n parser.add_argument(\n '-u', '--nlu',\n type=str,\n help=\"nlu model to run with the server\")\n parser.add_argument(\n '-p', '--port',\n type=int,\n default=5005,\n help=\"port to run the server at\")\n parser.add_argument(\n '--cors',\n nargs='*',\n type=str,\n help=\"enable CORS for the passed origin. \"\n \"Use * to whitelist all origins\")\n parser.add_argument(\n '--auth_token',\n type=str,\n help=\"Enable token based authentication. Requests need to provide \"\n \"the token to be accepted.\")\n parser.add_argument(\n '-o', '--log_file',\n type=str,\n default=\"rasa_core.log\",\n help=\"store log file in specified file\")\n parser.add_argument(\n '--endpoints',\n default=None,\n help=\"Configuration file for the connectors as a yml file\")\n\n utils.add_logging_option_arguments(parser)\n return parser", "title": "" }, { "docid": "01591f487f630d01e31169b5c6d0ca1d", "score": "0.5307443", "text": "def command_line(self):\n return self.launch_mode.generate_command(self.executable, self.main_file, self.script_arguments)", "title": "" }, { "docid": "78e66af9d35a7b9b98ae14e234485f59", "score": "0.5296109", "text": "def __init__(__self__, *,\n command_line: Optional[pulumi.Input[str]] = None):\n if command_line is not None:\n pulumi.set(__self__, \"command_line\", command_line)", "title": "" }, { "docid": "46b12fbdd4abfa2739d74a63b7430ccf", "score": "0.52908", "text": "def initiate():\n\n ap = argparse.ArgumentParser()\n\n w_mode_gr = ap.add_argument_group('user-interfaces')\n w_mode_gr.add_argument(\"--interface\", help=\"Set the user interfaces. To use: either `official_stand`, `augmented`, `remote_ui` or None.\"\n \"`official_stand`: for using the interface of official T_System stand.\"\n \"`augmented`: Augmented control with the Augmented Virtual Assistant A.V.A.. \\'https://github.com/MCYBA/A.V.A.\\' is the home page of the A.V.A. 
and usage explained into the \\'AUGMENTED.md\\'.\"\n \"`remote_ui`: remote control with created graphic interface that is power by flask available on desktop or mobile.\"\n \"None: Use to just by `running modes` parameters.\"\n \"The default value is None.\", action=\"store\", type=str, choices=[\"official_stand\", \"augmented\", \"remote_ui\", None], default=None)\n\n official_stand_gr = ap.add_argument_group('official_stand')\n official_stand_gr.add_argument(\"--stand-gpios\", help=\"GPIO pin numbers of official stand's LEDs and fans. 5(as red led), 6(as green led) and 14(as fan) GPIO pins are default.\", nargs=3, default=[5, 6, 14], type=int, metavar=('RED-LED', 'GREEN-LED', 'FAN'))\n\n remote_ui_gr = ap.add_argument_group('remote_ui')\n remote_ui_gr.add_argument(\"--host\", help=\"Specify host address.\", action=\"store\", type=str, default=\"0.0.0.0\")\n remote_ui_gr.add_argument(\"--port\", help=\"Specify the port.\", action=\"store\", type=str, default=\"5000\")\n remote_ui_gr.add_argument(\"--debug\", help=\"Activate debug mode.\", action=\"store_true\")\n\n r_mode_gr = ap.add_argument_group('Running Modes')\n r_mode_gr.add_argument(\"-l\", \"--learn\", help=\"Teach Mode. Teach the object tracking parameters with the trial and error method.\", action=\"store_true\")\n r_mode_gr.add_argument(\"-s\", \"--security\", help=\"Security Mode. Scan the around and optionally take photos of visitors.\", action=\"store_true\")\n\n tool_gr = ap.add_argument_group('Running Tools')\n tool_gr.add_argument(\"--detection-model\", help=\"Object detection model to use: either `haarcascade`, `hog` or `cnn`. `hog` and `cnn` can only use for detecting human faces. `haarcascade` is default.\", action=\"store\", type=str, default=\"haarcascade\")\n tool_gr.add_argument(\"--cascades\", help=\"Specify the trained detection algorithm file for the object detection ability. Sample: 'frontalface_default' for frontalface_default.xml file under the 'haarcascade' folder.\", action=\"store\", type=list, default=[\"frontalface_default\", \"profileface\"])\n tool_gr.add_argument(\"-j\", \"--no-recognize\", help=\"Do not recognize the things.(faces, objects etc.)\", action=\"store_true\")\n tool_gr.add_argument(\"--encoding-file\", help=\"Specify the trained recognition encoding pickle file for recognize object. Sample: 'jane_encoding' for jane_encoding.pickle file under the '.t_system/recognition/encodings' folder in your Home directory. \"\n \"If `main_encoding` chosen, `main_encoding.pickle` file that creates from merging all encoding files under `.../encodings` folder will used. Default is `main_encoding`\", action=\"store\", type=str, default=\"main_encoding\")\n tool_gr.add_argument(\"--use-tracking-api\", help=\"Use the openCV's tracking API for realize the next object is same as previous one.\", action=\"store_true\")\n tool_gr.add_argument(\"--tracker-type\", help=\"OpenCV's tracking type to use: either `BOOSTING`, `MIL`, `KCF`, `TLD`, `MEDIANFLOW`, `GOTURN`, `MOSSE` or `CSRT`. `CSRT` is default.\", action=\"store\", type=str, choices=[\"BOOSTING\", \"MIL\", \"KCF\", \"TLD\", \"MEDIANFLOW\", \"GOTURN\", \"MOSSE\", \"CSRT\"], default=\"CSRT\")\n\n camera_gr = ap.add_argument_group('Camera Options')\n camera_gr.add_argument(\"--camera-rotation\", help=\"Specify the camera's ratational position. 180 degree is default.\", action=\"store\", default=180, type=int)\n camera_gr.add_argument(\"--resolution\", help=\"Specify the camera's resolution of vision ability. 
320x240 is default\", nargs=2, default=[80, 60], type=int, metavar=('WIDTH', 'HEIGHT'))\n\n shoot_gr = ap.add_argument_group('Shoot Options')\n shoot_gr.add_argument(\"--framerate\", help=\"Specify the camera's framerate. of vision ability. 32 fps is default.\", action=\"store\", default=32, type=int)\n shoot_gr.add_argument(\"--chunk\", help=\"Smallest unit of audio. 1024*8=8192 bytes are default.\", action=\"store\", default=8192, type=int)\n shoot_gr.add_argument(\"--rate\", help=\"Bit Rate of audio stream / Frame Rate. 44100 Hz sample rate is default.\", action=\"store\", default=44100, type=int)\n shoot_gr.add_argument(\"--channels\", help=\"Number of microphone's channels. Default value is 1.\", action=\"store\", default=1, type=int)\n shoot_gr.add_argument(\"--audio_device_index\", help=\"Index of the using audio device. 2 is default.\", action=\"store\", default=2, type=int)\n shoot_gr.add_argument(\"--shoot-formats\", help=\"Formats for recording the work. `h264` and `wav` for separate video and audio recording and `mp4` for merged file are default.\", nargs=3, default=[\"h264\", \"wav\", \"mp4\"], type=str, metavar=('VIDEO', 'AUDIO', 'MERGED'))\n\n shot_gr = ap.add_argument_group('Shot Options')\n shot_gr.add_argument(\"--shot-format\", help=\"Format for take shots. `jpg` is default\", default=\"jpg\", type=str, metavar=('SHOT',))\n\n motion_gr = ap.add_argument_group('Motion Mechanism')\n motion_gr.add_argument(\"-x\", \"--ext-servo-driver\", help=\"Use external servo motor driver board.\", action=\"store_true\")\n motion_gr.add_argument(\"--sd-channels\", help=\"Number of external servo driver's channels. Default value is 16.\", action=\"store\", default=16, type=int)\n\n robotic_arm_gr = ap.add_argument_group('Robotic Arm')\n robotic_arm_gr.add_argument(\"--arm-name\", help=\"One of the robotic arm names those are defined in config.json file. The arm is for relocating the 2 axis target locking system hybrid-synchronously.\", default=\"Senior\", type=str, metavar=('ARM',))\n\n lock_sys_gr = motion_gr.add_argument_group('Target Locking System')\n lock_sys_gr.add_argument(\"--ls-gpios\", help=\"GPIO pin numbers of the 2 axis target locking system's servo motors. 23(as pan) and 24(as tilt) GPIO pins are default.\", nargs=2, default=[23, 24], type=int, metavar=('PAN', 'TILT'))\n lock_sys_gr.add_argument(\"--ls-channels\", help=\"Servo driver channels of the 2 axis target locking system's servo motors. 4(as pan) and 3(as tilt) channels are default.\", nargs=2, default=[4, 3], type=int, metavar=('PAN', 'TILT'))\n lock_sys_usage_gr = lock_sys_gr.add_mutually_exclusive_group()\n lock_sys_usage_gr.add_argument(\"--AI\", help=\"Specify the learning method of how to move to the target position from the current. When the nothing chosen, learn mode and decision mechanisms will be deprecated. to use: either `official_ai`\", action=\"store\", type=str, default=None)\n lock_sys_usage_gr.add_argument(\"--non-moving-target\", help=\"Track the non-moving objects. Don't use AI or OpenCv's object detection methods. Just try to stay focused on the current focus point with changing axis angles by own position.\", action=\"store_true\")\n lock_sys_usage_gr.add_argument(\"--arm-expansion\", help=\"Use the Target Locking System as the extension of the Robotic Arm. Don't use AI or OpenCv's object detection methods. 
Add 2 more joints to the Robotic Arm\", action=\"store_true\")\n\n access_p_gr = ap.add_argument_group('Access Point Options')\n access_p_gr.add_argument(\"-p\", \"--access-point\", help=\"Become access point for serving remote UI inside the internal network.\", action=\"store_true\")\n access_p_gr.add_argument(\"--ap-wlan\", help=\"Network interface that will be used to create HotSpot. 'wlan0' is default.\", action=\"store\", default=\"wlan0\", type=str)\n access_p_gr.add_argument(\"--ap-inet\", help=\"Forwarding interface. Default is None.\", action=\"store\", default=None, type=str)\n access_p_gr.add_argument(\"--ap-ip\", help=\"Ip address of this machine in new network. 192.168.45.1 is default.\", action=\"store\", default=\"192.168.45.1\", type=str)\n access_p_gr.add_argument(\"--ap-netmask\", help=\"Access Point netmask address. 255.255.255.0 is default.\", action=\"store\", default=\"255.255.255.0\", type=str)\n access_p_gr.add_argument(\"--ssid\", help=\"Preferred access point name. 'T_System' is default.\", action=\"store\", default=\"T_System\", type=str)\n access_p_gr.add_argument(\"--password\", help=\"Password of the access point. 't_system' is default.\", action=\"store\", default=\"t_system\", type=str)\n\n ext_network_gr = ap.add_argument_group('External Network Options')\n ext_network_gr.add_argument(\"--wlan\", help=\"network interface that will be used to connect to external network. 'wlan0' is default.\", action=\"store\", default=\"wlan0\", type=str)\n ext_network_gr.add_argument(\"--inet\", help=\"Forwarding interface. Default is None.\", action=\"store\", default=None, type=str)\n ext_network_gr.add_argument(\"--static-ip\", help=\"The static IP address for the connected external network, if wanted. \", action=\"store\", type=str)\n ext_network_gr.add_argument(\"--netmask\", help=\"Netmask address. 255.255.255.0 is default.\", action=\"store\", default=\"255.255.255.0\", type=str)\n ext_network_gr.add_argument(\"--country-code\", help=\"Wifi country code for the wpa_supplicant.conf. To use look at: https://github.com/recalbox/recalbox-os/wiki/Wifi-country-code-(EN). Default is `TR`\", action=\"store\", default=\"TR\", type=str)\n\n other_gr = ap.add_argument_group('Others')\n other_gr.add_argument(\"--environment\", help=\"The running environment. It specify the configuration files and logs. To use: either `production`, `development` or `testing`. Default is production\", action=\"store\", type=str, choices=[\"production\", \"development\", \"testing\"], default=\"development\")\n other_gr.add_argument(\"--no-emotion\", help=\"Do not mak feelings with using motion mechanisms.(Arm and Locking System.)\", action=\"store_true\")\n other_gr.add_argument(\"-S\", \"--show-stream\", help=\"Display the camera stream. Enable the stream window.(Require gui environment.)\", action=\"store_true\")\n other_gr.add_argument(\"-m\", \"--found-object-mark\", help=\"Specify the mark type of the found object. To use: either `single_rect`, `rotating_arcs`, `partial_rect` or None. Default is `single_rect`\", action=\"store\", choices=[\"single_rect\", \"rotating_arcs\", \"partial_rect\", \"animation_1\", None], default=\"single_rect\", type=str)\n other_gr.add_argument(\"-r\", \"--record\", help=\"Record the video stream. 
Files are named by the date.\", action=\"store_true\")\n other_gr.add_argument(\"-v\", \"--verbose\", help=\"Print various debugging logs to console for debug problems\", action=\"store_true\")\n other_gr.add_argument(\"--version\", help=\"Display the version number of T_System.\", action=\"store_true\")\n\n sub_p = ap.add_subparsers(dest=\"sub_jobs\", help='officiate the sub-jobs') # if sub-commands not used their arguments create raise.\n\n ap_id = sub_p.add_parser('id', help='Make identification jobs of T_System.')\n id_sub_p = ap_id.add_subparsers(dest=\"id_sub_jobs\", help='officiate the identification sub-jobs') # if sub-commands not used their arguments create raise.\n\n ap_id_set = id_sub_p.add_parser('set', help='Setting the identity of T_System for detecting specific working device of it.')\n ap_id_set.add_argument('--public_id', help='Specific and unique ID of T_System.', type=str)\n ap_id_set.add_argument('--private_id', help='Specific and unique ID of T_System.', type=str)\n ap_id_set.add_argument('--name', help='Specific name for T_System.', type=str)\n\n ap_id_show = id_sub_p.add_parser('show', help='Getting the identity info of T_System.')\n\n ap_r_ui_auth = sub_p.add_parser('remote-ui-authentication', help='Remote UI administrator authority settings of the secret entry point that is the new network connection panel.')\n ap_r_ui_auth.add_argument('--ssid', help='Secret administrator ssid flag', type=str)\n ap_r_ui_auth.add_argument('--password', help='Secret administrator password flag', type=str)\n\n ap_face_encode = sub_p.add_parser('encode-face', help='Generate encoded data from the dataset folder to recognize the man T_System is monitoring during operation.')\n ap_face_encode.add_argument(\"-i\", \"--dataset\", help=\"Path to input directory of faces + images.\", required=True)\n ap_face_encode.add_argument(\"-n\", \"--owner-name\", help=\"Name of the images owner. If there is single man who has the images, give the name of that man with dataset\", type=str, default=None)\n ap_face_encode.add_argument(\"-d\", \"--detection-method\", help=\"Face detection model to use: either `hog` or `cnn` default is `hog`\", type=str, default=\"hog\")\n\n ap_self_update = sub_p.add_parser('self-update', help='Update source code of t_system itself via `git pull` command from the remote git repo.')\n ap_self_update.add_argument(\"-e\", \"--editable\", help=\"Update the T_System in editable mode (i.e. 
setuptools'develop mode')\", action=\"store_true\")\n\n ap_arm = sub_p.add_parser('arm', help='Management jobs of Denavit-Hartenberg transform matrix models of robotic arms of T_System.')\n arm_sub_p = ap_arm.add_subparsers(dest=\"arm_sub_jobs\", help='officiate the identification sub-jobs') # if sub-commands not used their arguments create raise.\n\n ap_arm_create = arm_sub_p.add_parser('create', help='Create the D-H transform matrix model of given robotic arm name via configuration file.')\n ap_arm_create.add_argument('--name', help='The name of robotic arm in arm module\\'s config.json file.', type=str, required=True)\n\n ap_arm_list = arm_sub_p.add_parser('list', help='List the robotic arms with their model and features')\n ap_arm_list.add_argument('--name', help='The name of robotic arm in arm module\\'s config.json file.', type=str, default=None)\n\n ap_live_st = sub_p.add_parser('live-stream', help='Make Online Stream jobs of T_System.')\n live_st_sub_p = ap_live_st.add_subparsers(dest=\"live_st_sub_jobs\", help='officiate the Online Stream sub-jobs') # if sub-commands not used their arguments create raise.\n\n ap_live_st_website = live_st_sub_p.add_parser('website', help='Make jobs about Live Streaming available websites.')\n l_s_website_sub_p = ap_live_st_website.add_subparsers(dest=\"live_st_website_sub_jobs\", help='officiate the Online Stream\\'s sub-jobs about its websites') # if sub-commands not used their arguments create raise.\n\n ap_l_s_website_upsert = l_s_website_sub_p.add_parser('upsert', help='Insert new website for the live streaming point. If name of given website is exist, update its other parameters.')\n ap_l_s_website_upsert.add_argument('--name', help='Name of the website.', type=str, required=True)\n ap_l_s_website_upsert.add_argument('--url', help='Active Internet Link of the website.', type=str, required=True)\n ap_l_s_website_upsert.add_argument('--server', help='Server rtmp Link of the website.', type=str, required=True)\n\n ap_l_s_website_remove = l_s_website_sub_p.add_parser('remove', help='Remove existing online websites by their name.')\n ap_l_s_website_remove.add_argument('--website-ids', help='ID list of websites that will remove.', type=list, required=True)\n\n ap_l_s_website_list = l_s_website_sub_p.add_parser('list', help='List the existing websites.')\n\n ap_live_st_streaming = live_st_sub_p.add_parser('streaming', help='Make jobs about online streaming runtime specifications and parameters')\n l_s_streaming_sub_p = ap_live_st_streaming.add_subparsers(dest=\"live_st_streaming_sub_jobs\", help='officiate the Online Stream\\' sub-jobs about its streaming specifications') # if sub-commands not used their arguments create raise.\n\n ap_l_s_streaming_upsert = l_s_streaming_sub_p.add_parser('upsert', help='Insert new stream ID for the live streaming point. 
If name of given ID is exist, update its other parameters.')\n ap_l_s_streaming_upsert.add_argument('--website-id', help='ID of the website that has stream IDs.', type=str, required=True)\n ap_l_s_streaming_upsert.add_argument('--account-name', help='Name of the website personalized account.', type=str, required=True)\n ap_l_s_streaming_upsert.add_argument('--key', help='Stream key of the account', type=str, required=True)\n\n ap_l_s_streaming_remove = l_s_streaming_sub_p.add_parser('remove', help='Remove a stream ID for the live streaming point.')\n ap_l_s_streaming_remove.add_argument('--website-id', help='ID of the website that has stream IDs.', type=str, required=True)\n ap_l_s_streaming_remove.add_argument('--account-name', help='Name of the personalized account of a website that will be removed.', type=str, required=True)\n\n ap_l_s_streaming_list = l_s_streaming_sub_p.add_parser('list', help='List the existing websites.')\n\n ap_r_sync = sub_p.add_parser('r-sync', help='Make remote synchronization jobs of T_System.')\n ap_r_sync.add_argument('--list-services', help='List the remote storage services information.', action=\"store_true\")\n\n r_sync_sub_p = ap_r_sync.add_subparsers(dest=\"r_sync_sub_jobs\", help='officiate the Remote Synchronization sub-jobs')\n\n ap_r_sync_sync = r_sync_sub_p.add_parser('sync', help='Make jobs about synchronization recorded videos folder with remote storage service.')\n ap_r_sync_sync.add_argument('--service-name', help='Name of the remote storage service that has accounts. to use: either, `Dropbox`', type=str, choices=[\"Dropbox\"], required=True)\n ap_r_sync_sync.add_argument('--name', help='Name of the personalized account of remote storage service.', type=str, required=True)\n\n ap_r_snc_acc = r_sync_sub_p.add_parser('account', help='Make jobs about remote storage service accounts.')\n r_sync_acc_sub_p = ap_r_snc_acc.add_subparsers(dest=\"r_sync_account_sub_jobs\", help='officiate the remote storage synchronization\\'s sub-jobs about its account specifications')\n\n ap_r_sync_acc_upsert = r_sync_acc_sub_p.add_parser('upsert', help='Insert new account for specified remote storage service. If name of given account is exist, update its other parameters.')\n ap_r_sync_acc_upsert.add_argument('--service-name', help='Name of the remote storage service that has accounts. to use: either, `Dropbox`', type=str, choices=[\"Dropbox\"], required=True)\n ap_r_sync_acc_upsert.add_argument('--name', help='Name of the personalized account of remote storage service.', type=str, required=True)\n ap_r_sync_acc_upsert.add_argument('--key', help='Stream key of the account', type=str, required=True)\n\n ap_r_sync_acc_remove = r_sync_acc_sub_p.add_parser('remove', help='Remove existing account about storage services by their name.')\n ap_r_sync_acc_remove.add_argument('--service-name', help='Name of the remote storage service that has accounts. 
to use: either, `Dropbox`', type=str, choices=[\"Dropbox\"], required=True)\n ap_r_sync_acc_remove.add_argument('--name', help='Name of the personalized account of remote storage service.', type=str, required=True)\n\n ap_r_sync_acc_list = r_sync_acc_sub_p.add_parser('list', help='List the existing remote storage services.')\n\n ap_log = sub_p.add_parser('log', help='Make logging jobs of T_System.')\n app_log_gr = ap_log.add_mutually_exclusive_group()\n app_log_gr.add_argument('--show', help='Show the contents of the `logfile.log` file of T_System', action=\"store_true\")\n app_log_gr.add_argument('--clear', help='Clear the contents of the `logfile.log` file of T_System', action=\"store_true\")\n\n args = vars(ap.parse_args())\n\n if args[\"version\"]:\n from t_system.presentation import versions_banner\n versions_banner()\n sys.exit(1)\n\n prepare(args)\n start(args)", "title": "" }, { "docid": "dad127a8d45c95b392b5628a5ca50002", "score": "0.52849996", "text": "def parseArgs():\r\n aparser = argparse.ArgumentParser(description='Script demonstrates breaking of simple ciphers: Caesar, Substitution cipher, and OTP.', formatter_class = argparse.RawTextHelpFormatter) \r\n aparser.add_argument('--port', required=True, metavar='PORT', help='Port of challenge/response server.')\r\n aparser.add_argument('--ip', required=True, metavar='PORT', help='Port of challenge/response server.')\r\n aparser.add_argument(\"--mode\", required=True, choices = ['p', 'c', 's', 'o'], help=\"p => demonstrates hexadecimal encoding challenge.\\\r\n \\nc => demonstrates breaking of the Caesar cipher.\\\r\n \\ns => demonstrates breaking of the Substitution cipher.\\\r\n \\no => demonstrates breaking of the OTP cipher.\")\r\n args = aparser.parse_args()\r\n \r\n return args", "title": "" }, { "docid": "bd5f139d36be5cbd4f5cd355bf967ada", "score": "0.5277702", "text": "def cli():\n\n pass", "title": "" }, { "docid": "48ac2f0676a59a9e31a1cdc0718d40fa", "score": "0.52628934", "text": "def add_arguments_to_parser(parser, stdscr):\n\n parser.add_argument(\n \"-hn\",\n \"--hostname\",\n type=str,\n # required=True,\n default=\"127.0.0.1\",\n help=\"The host name for the minoTour server.\",\n dest=\"host_name\",\n )\n\n parser.add_argument(\n \"-p\",\n \"--port\",\n type=int,\n # required=True,\n default=80,\n help=\"The port number for the minoTour server.\",\n dest=\"port_number\",\n )\n\n parser.add_argument(\n \"-k\",\n \"--key\",\n type=str,\n required=True,\n default=None,\n help=\"The api key for uploading data.\",\n dest=\"api_key\",\n )\n\n parser.add_argument(\n \"-w\",\n \"--watch-dir\",\n type=str,\n # required=True,\n default=None,\n help=\"The path to the folder containing the downloads directory with fast5 reads to analyse - e.g. 
C:\\\\data\\\\minion\\\\downloads (for windows).\",\n dest=\"watch_dir\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--ignore_existing\",\n action=\"store_true\",\n required=False,\n default=False,\n help=\"The client will ignore previously existing fastq files and will only monitor newly created files..\",\n dest=\"ignore_existing\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--skip_sequence\",\n action=\"store_true\",\n required=False,\n help=\"If selected only read metrics, not sequence, will be uploaded to the databse.\",\n dest=\"skip_sequence\",\n )\n\n parser.add_argument(\n \"-nf\",\n \"--no_fastq\",\n action=\"store_true\",\n help=\"Run minFQ without monitoring fastq files.\",\n default=False,\n dest=\"no_fastq\",\n )\n\n parser.add_argument(\n \"-nm\",\n \"--no_minKNOW\",\n action=\"store_true\",\n help=\"Run minFQ without monitoring minKNOW for live activity.\",\n default=False,\n dest=\"no_minknow\",\n )\n\n parser.add_argument(\n \"-rc\",\n \"--remote_control\",\n action=\"store_true\",\n default=False,\n help=\"This option allows your runs to be remotely started and stopped and for runs to be remotely renamed. As standard this is not enbabled.\",\n dest=\"enable_remote\",\n )\n\n parser.add_argument(\n \"-ip\",\n \"--ip-address\",\n type=str,\n dest=\"ip\",\n required=False,\n default=\"127.0.0.1\",\n help=\"The IP address of the minKNOW machine - Typically 127.0.0.1.\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--name\",\n type=str,\n default=None,\n help=\"This provides a backup name for a flowcell. MinoTour will use the run names and flowcell ids it finds in reads or from minKNOW if available.\",\n dest=\"run_name_manual\",\n )\n\n parser.add_argument(\n \"--unique\",\n action=\"store_true\",\n default=True,\n help=\"If you are flushing a flowcell, this option will force the flowcell to be named as a combination of flowcell ID and sample name. Thus data will be grouped appropriately. Default true.\",\n dest=\"force_unique\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--is_flowcell\",\n action=\"store_true\",\n help=\"If you add this flag, all runs added here will be considered as a single flow cell with the name set by the name flag.\",\n dest=\"is_flowcell\",\n )\n\n parser.add_argument(\n \"-tc\",\n \"--treatment-control\",\n type=int,\n required=False,\n default=None,\n help=\"Optionally split reads based in treatment and control groups based on the channel number. The integer value informed is used to mover ish read to the control group.\",\n dest=\"treatment_control\",\n )\n\n parser.add_argument(\n \"-j\",\n \"--job\",\n type=int,\n # required=True,\n default=None,\n help=\"An optional minotour job to run on your server. Please enter the ID shown on the side when running --list.\",\n dest=\"job\",\n )\n\n parser.add_argument(\n \"-r\",\n \"--reference\",\n type=int,\n # required=True,\n default=None,\n help=\"An optional minotour reference to map against. please enter the numerical id shown when running --list\",\n dest=\"reference\",\n )\n\n parser.add_argument(\n \"--list\",\n action=\"store_true\",\n required=False,\n help=\"List available tasks, target sets and references at this server.\",\n dest=\"list\",\n )\n\n parser.add_argument(\n \"-ts\",\n \"--targets\",\n type=int,\n default=None,\n help=\"Set the target set for the metagenomics, if desired. Please enter the numerical id shown when running --list\",\n dest=\"targets\",\n )\n\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Don't clear the screen. 
Helps when debugging.\",\n default=False,\n dest=\"verbose\",\n )\n\n parser.add_argument(\n \"-ll\",\n \"--loglevel\",\n type=str,\n default=\"INFO\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Set the logging level, default INFO.\",\n dest=\"loglevel\",\n )\n\n parser.add_argument(\n \"-V\", \"--version\", action=\"version\", version=test(stdscr),\n )\n\n parser.add_argument(\n \"-T\",\n \"--toml\",\n default=None,\n required=False,\n help=\"Path to the channels configuration file for a read until experiment.\",\n dest=\"toml\",\n )\n parser.add_argument(\n \"-U\",\n \"--unblocks\",\n default=None,\n required=False,\n help=\"Absolute path to an unblocked read_ids text file. Not necessary, should be picked up automatically.\",\n dest=\"unblocks\",\n )\n parser.add_argument(\n \"-ps\",\n \"--primer-scheme\",\n type=int,\n default=None,\n required=False,\n help=\"Set the primer scheme to use for artic tasks. Valid options can be seen using --list.\",\n dest=\"primer_scheme\"\n )\n return parser", "title": "" }, { "docid": "2b0c0161e5bb83b7b3bee5bd0d3b9d7d", "score": "0.52548534", "text": "def parse_command_line(self, args=..., final=...):\n ...", "title": "" }, { "docid": "63338567934f9d6a77ffd0bd11eae4c0", "score": "0.5238345", "text": "def main():\n\n # determine parameter values and command path\n opts = parse_args()\n\n # install a signal handler to catch ^c\n def cancel(*args):\n p.stdin.write(\"CANCEL\\n\".encode('utf-8'))\n p.stdin.flush()\n signal.signal(signal.SIGINT, cancel)\n\n # launch the back-end of the console-runner and pipe in our params\n prefix = getattr(sys, 'real_prefix', getattr(sys, 'base_prefix', None))\n suffix = '/bin/python' + '3' if sys.version_info >= (3,) else ''\n python_cmd = prefix+suffix if prefix else sys.executable\n console_py = join(opts.site, 'plotdevice/run/console.py')\n\n p = Popen([python_cmd, console_py], stdin=PIPE)\n opts = (json.dumps(vars(opts))+\"\\n\").encode('utf-8')\n p.stdin.write(opts)\n p.stdin.flush()\n p.wait()", "title": "" }, { "docid": "f6ea7d30f01b7c92c7a47520c064b95b", "score": "0.5233841", "text": "def commandline():\n\n description = 'Cool Command-line Cheatsheets'\n help_general = 'The cheatsheet you want to see'\n help_list = 'List all available cheatsheets'\n help_colors = 'Print output without colors'\n help_inline = 'One cheat per line, this is the default'\n help_breakline = 'Break lines'\n\n argumentparser = ArgumentParser(description=description)\n printertype = argumentparser.add_mutually_exclusive_group()\n\n argumentparser.add_argument('--list', dest='listcheats', action=\"store_true\", required=False, help=help_list)\n argumentparser.add_argument('--nc', dest='nocolor', action=\"store_false\", required=False, help=help_colors)\n argumentparser.add_argument('cheatsheet', nargs='?', help=help_general)\n\n printertype.set_defaults(printer='InlinePrinter')\n printertype.add_argument('-l', help=help_inline, action='store_const', dest='printer', const='InlinePrinter')\n printertype.add_argument('-b', help=help_breakline, action='store_const', dest='printer', const='BreaklinePrinter')\n\n return argumentparser", "title": "" }, { "docid": "bc741876317a1aeb116b917d6ab51b2a", "score": "0.52337253", "text": "def test_cli_extra_param_tcp():\n cmd = [\n \"python\",\n \"dnsck/dnsck.py\",\n \"-s\",\n \"2001:4860:4802:34::a\",\n \"google.com\",\n \"--tcp\",\n \"-y\",\n ]\n process = subprocess.run(cmd, shell=False, check=False)\n assert process.returncode == 2", "title": "" }, { "docid": 
"b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" }, { "docid": "b2424d05ae7492d3f02aeca04792f4fb", "score": "0.52309966", "text": "def cli():\n pass", "title": "" } ]
92d841dcafd34df3242b964e23640f53
Log and email warning about any acquisition stars with observed positions outside the expected search box (plus a pad). For acquisition stars with tracked positions in the wrong search box, note the box (classic acquisition anomaly).
[ { "docid": "300ea322480477cd031f996f103b2e96", "score": "0.6799472", "text": "def warn_on_acq_anom(acqs, emails):\n # Find tracked objects in the acq stats table outside the intended search box plus padding\n\n # Note that dy/dz are observed yag/zag (t_guide) - predicted yag/zag (t_guide) using AOATTQT\n # (estimated attitude). Observed yag/zag are from AOAC{Y,Z}AN, and t_guide is the time of the\n # first sample with AOACASEQ = 'GUID'. t_guide is the same as manvrs.guide_start in kadi.\n # The one-shot attitude update occurs in telemetry on the sample after the GUID transition,\n # so the estimated attitude for dy/dz gives a reasonable approximation of the OBC estimated\n # attitude at the time of commanding the search boxes. (It would be more accurate to use the\n # time of transition to acquisition, but this approximation is at least closer than using\n # catalog yag/zag.)\n box_pad = 16 # arcsecs\n anom_match = ((acqs['img_func'] != 'NONE') & (acqs['img_func'] != 'SRCH') &\n ((np.abs(acqs['dy']) >= (acqs['halfw'] + box_pad))\n | (np.abs(acqs['dz']) >= (acqs['halfw'] + box_pad))))\n for anom in acqs[anom_match]:\n # Check to see if the star is actually found in another box.\n other_box_match = ((np.abs(anom['yang_obs'] - acqs['yang']) <= (acqs['halfw'] + box_pad))\n & (np.abs(anom['zang_obs'] - acqs['zang']) <= (acqs['halfw'] + box_pad)))\n if np.any(other_box_match):\n text = \"Acquisition Anomaly. Star for slot {} actually in box {} \\n\".format(\n anom['slot'], acqs[other_box_match][0]['slot'])\n else:\n text = \"Does not appear to be classic star-in-wrong-box anomaly\\n\"\n # Make a dictionary of the anom record for use in string formatting\n output_dict = {col: anom[col] for col in anom.colnames}\n output_dict['dy'] = anom['dy']\n output_dict['dz'] = anom['dz']\n text += \"\"\"Large Deviation from Expected ACQ Star Position in {obsid}\n Slot {slot} Expected (Y-Pos, Z-Pos) = ({yang:.1f}, {zang:.1f})\n Slot {slot} Observed (Y-Pos, Z-Pos) = ({yang_obs:.1f}, {zang_obs:.1f})\n Halfwidth {halfw:03d} (dy, dz) = ({dy:.1f}, {dz:.1f})\n\n Expected here is catalog Y-Pos/Z-pos. dy, dz calculation corrects these for estimated attitude.\n\"\"\".format(\n **output_dict)\n # Log and Send message for slot. Obsid can have more than one email\n logger.warning(text)\n msg = MIMEText(text)\n msg['From'] = 'aca@head.cfa.harvard.edu'\n msg['Subject'] = \"Acq Anomaly: Obsid {} (mica processing)\".format(anom['obsid'])\n msg['To'] = \",\".join(emails)\n s = smtplib.SMTP('head.cfa.harvard.edu')\n s.sendmail('aca@head.cfa.harvard.edu', emails, msg.as_string())\n s.quit()", "title": "" } ]
[ { "docid": "ce3f1e64473ba2120407263f5189104a", "score": "0.5493461", "text": "def test_pos_err_on_guide():\n stars = StarsTable.empty()\n stars.add_fake_star(id=100, yang=100, zang=-200, POS_ERR=2010, mag=8.0)\n stars.add_fake_star(id=101, yang=0, zang=500, mag=8.0, POS_ERR=1260) # Just over warning\n stars.add_fake_star(id=102, yang=-200, zang=500, mag=8.0, POS_ERR=1240) # Just under warning\n stars.add_fake_star(id=103, yang=500, zang=500, mag=8.0, POS_ERR=1260) # Not selected\n\n aca = get_aca_catalog(**mod_std_info(n_fid=0), stars=stars, dark=DARK40, raise_exc=True,\n include_ids_guide=[100, 101]) # Must force 100, 101, pos_err too big\n\n aca = ACAReviewTable(aca)\n\n # 103 not selected because pos_err > 1.25 arcsec\n assert aca.guides['id'].tolist() == [100, 101, 102]\n\n # Run pos err checks\n for guide in aca.guides:\n aca.check_pos_err_guide(guide)\n\n assert len(aca.messages) == 2\n msg = aca.messages[0]\n assert msg['category'] == 'critical'\n assert 'Guide star 100 POS_ERR 2.01' in msg['text']\n\n msg = aca.messages[1]\n assert msg['category'] == 'warning'\n assert 'Guide star 101 POS_ERR 1.26' in msg['text']", "title": "" }, { "docid": "a46aa709be82c25ba3c149f06f760a15", "score": "0.5237497", "text": "def test_n_guide_check_not_enough_stars():\n\n stars = StarsTable.empty()\n stars.add_fake_constellation(n_stars=4, mag=8.5)\n aca = get_aca_catalog(**mod_std_info(n_fid=3, n_guide=5, obsid=5000),\n stars=stars, dark=DARK40,\n raise_exc=True)\n acar = ACAReviewTable(aca)\n acar.check_guide_count()\n assert acar.messages == [\n {'text': 'OR with 4 guides but 5 were requested',\n 'category': 'caution'}]", "title": "" }, { "docid": "412bf02bd75d08ca5e1aea03d9c9ad12", "score": "0.522365", "text": "def test_imposters_on_guide():\n stars = StarsTable.empty()\n # Add two stars because separate P2 tests seem to break with just one star\n stars.add_fake_constellation(n_stars=5, mag=9.5)\n mag = 8.0\n cnt = mag_to_count_rate(mag)\n stars.add_fake_star(id=110, row=100, col=-200, mag=mag)\n dark_with_badpix = DARK40.copy()\n dark_with_badpix[100 + 512, -200 + 512] = cnt * 0.1\n dark_with_badpix[100 + 512, -201 + 512] = cnt * 0.1\n aca = get_aca_catalog(**mod_std_info(n_fid=0, n_guide=8), stars=stars, dark=dark_with_badpix,\n raise_exc=True)\n aca = ACAReviewTable(aca)\n aca.check_imposters_guide(aca.guides.get_id(110))\n assert len(aca.messages) == 1\n msg = aca.messages[0]\n assert msg['category'] == 'warning'\n assert 'Guide star imposter offset' in msg['text']", "title": "" }, { "docid": "e0803e6ca1ceb86058e59f5ff503a4e9", "score": "0.510648", "text": "def message_f(self):\r\n\t\tprint(\"Entered position is too big.\")", "title": "" }, { "docid": "2ee6eba26fc3883b13a92aa3303a5b17", "score": "0.50645804", "text": "def test_missing(self):\n nmea._validateChecksum(GPGGA[:-2])", "title": "" }, { "docid": "6ecf4609e62918d0f365033ce9cd0abb", "score": "0.5056662", "text": "def test_too_bright_guide_magerr():\n stars = StarsTable.empty()\n # Add two stars because separate P2 tests seem to break with just one star\n stars.add_fake_star(id=100, yang=100, zang=-200, mag=6.0, mag_err=0.11, MAG_ACA_ERR=10)\n stars.add_fake_star(id=101, yang=0, zang=500, mag=8.0)\n aca = get_aca_catalog(**mod_std_info(n_fid=0), stars=stars, dark=DARK40, raise_exc=True)\n aca = ACAReviewTable(aca)\n aca.check_too_bright_guide(aca.guides.get_id(100))\n msg = aca.messages[0]\n assert msg['category'] == 'critical'\n assert '2*mag_err of 5.8' in msg['text']", "title": "" }, { "docid": "dea38f1ee66db0d67da85ea4f8061a02", 
"score": "0.504578", "text": "def weak_editing_site_detection(rna_orf_alignment, q, sample_id):\n alignment = rna_orf_alignment\n wes = []\n counter = 0\n total = len(ubs)\n\n for i in ubs:\n coverage = alignment.count_coverage(contig=i[0],start=i[1],stop=i[1]+1,\n quality_threshold=30)\n temp = np.array([0,0,0,0,0])\n\n for j in range(4):\n temp[j] = coverage[j][0]\n temp[4] = temp[0] + temp[1] + temp[2] + temp[3]\n if temp[4] > 1:\n if i[2] == 'A': num_mismatch = temp[4] - temp[0]\n if i[2] == 'C': num_mismatch = temp[4] - temp[1]\n if i[2] == 'G': num_mismatch = temp[4] - temp[2]\n if i[2] == 'T': num_mismatch = temp[4] - temp[3]\n\n if num_mismatch > 0:\n p_binom = stats.binom_test(num_mismatch, temp[4], p=0.001,\n alternative='greater')\n if p_binom < 0.05:\n wes.append([i[0], i[1], i[2],\n temp[0], temp[1],\n temp[2], temp[3],\n temp[4], p_binom])\n\n counter += 1\n if counter % round(total/100) == 0:\n print(\"wes sample %d progress: %d%s\" % (sample_id, round((counter/total)*100), '%'))\n\n wes_pval = [i[-1] for i in wes]\n multitest_results = multipletests(wes_pval,alpha=0.1,method='fdr_bh')\n\n for i, j in zip(multitest_results[1], range(len(wes))):\n wes[j][-1] = i\n\n for i in wes:\n if i[-1] < 0.05: q.put(i)\n\n q.put(None) # used for indicating items in q are all removed\n q.close()", "title": "" }, { "docid": "61d5f6052d3bf6b13bf31a52bb17c968", "score": "0.50357956", "text": "def rail_feedback(self):\n count = 0\n direction = 1\n self.cam.start()\n while(True):\n x = 0\n QRList = []\n while (x < 3):\n x = x + 1\n ret = None\n partial_list = self.cam.partial_qr_scan()\n ret = self.cam.partial_qr_select(partial_list)\n if ret != None:\n x_disp = ret.tvec[0]\n if abs(x_disp) < .125:\n print \"QRCode found at x_disp: \", x_disp\n self.cam.stop()\n return ret\n else:\n QRList.append(ret)\n print \"Checking Alignment with x_disp = \", x_disp\n print \"countx = \", x\n \n targetQR = self.cam.selectQR(QRList)\n if targetQR != None:\n rail_ret = self.rail.DisplacementConverter(targetQR.tvec[0])\n if rail_ret == 0:\n #out of range, reset to middle and try again\n self.rail.MoveToPosition(3500)\n else: # if no qrcodes are found\n limit = self.rail.DisplacementConverter(1.5*direction)\n if limit == 0: #out of range\n direction = -1*direction #reverse direction\n ret = self.rail.DisplacementConverter(.75*direction)\n if ret == 0:\n print \"Error: out of range on both ends, shouldn't be possible.\"", "title": "" }, { "docid": "a8e809a51eb826a5bf775543997761c5", "score": "0.5013203", "text": "def error_missed(self, position):\n return 'Ran into %r at %d but should have hit %r first.' % (\n self.track_objects[position],\n position,\n self.track_objects[self.position])", "title": "" }, { "docid": "5e2065500c3c1973721fe2f150f2c5a8", "score": "0.50021744", "text": "def handle_sonar_detection_message(self, message):\r\n self.check_turn_number(message)\r\n self.spotted.append(message)\r\n if verbose:\r\n print(\"Sub(s) spotted: \\n{0}\".format(self.spotted))", "title": "" }, { "docid": "700c47520c5f1772f66d1564ccdd372e", "score": "0.49843445", "text": "def warn_for_key_near_expiry(ctx: gpg.Context):\n fpr_to_expiry_days = get_days_until_expiry(ctx)\n\n for fpr, days_to_expiry in fpr_to_expiry_days.items():\n if days_to_expiry < 0:\n logger.warning(\n \"Found key with fingerprint {} that expired {} days ago. 
\"\n \"Fix now!\".format(fpr, abs(days_to_expiry))\n )\n elif 0 <= days_to_expiry <= DAYS_WARNING_FOR_KEY_EXPIRATION:\n logger.warning(\n \"Found key with fingerprint {} that expires in {} days. \"\n \"Fix ASAP!\".format(fpr, days_to_expiry)\n )", "title": "" }, { "docid": "fa765ccdfff6887dd1aedb09c0a5311e", "score": "0.49798274", "text": "def message_b(self):\r\n\t\tprint(\"Entered position is too small.\")", "title": "" }, { "docid": "8317c2b99388c4b6e69c2388cb92b208", "score": "0.49741966", "text": "def ssw_alignment(x, y, ends_discrepancy_threshold = 250 ):\n\n score_matrix = ssw.DNA_ScoreMatrix(match=1, mismatch=-20)\n aligner = ssw.Aligner(gap_open=50, gap_extend=0, matrix=score_matrix)\n\n # for the ends that SSW leaves behind\n bio_matrix = matlist.blosum62\n g_open = -1\n g_extend = -0.5\n ######################################\n\n # result = aligner.align(\"GA\", \"G\", revcomp=False)\n # y_alignment, match_line, x_alignment = result.alignment\n # c = Counter(match_line)\n # matches, mismatches, indels = c[\"|\"], c[\"*\"], c[\" \"]\n # alignment_length = len(match_line)\n # print(\"matches:{0}, mismatches:{1}, indels:{2} \".format(matches, mismatches, indels))\n # print(match_line)\n\n result = aligner.align(x, y, revcomp=False)\n y_alignment, match_line, x_alignment = result.alignment\n # print()\n # print(y_alignment)\n # print(match_line)\n # print(x_alignment)\n matches, mismatches, indels = match_line.count(\"|\"), match_line.count(\"*\"), match_line.count(\" \")\n\n # alignment_length = len(match_line)\n \n start_discrepancy = max(result.query_begin, result.reference_begin) # 0-indexed # max(result.query_begin, result.reference_begin) - min(result.query_begin, result.reference_begin)\n query_end_discrepancy = len(x) - result.query_end - 1\n ref_end_discrepancy = len(y) - result.reference_end - 1\n end_discrepancy = max(query_end_discrepancy, ref_end_discrepancy) # max(result.query_end, result.reference_end) - min(result.query_end, result.reference_end)\n # print(\"disc:\", start_discrepancy, end_discrepancy)\n tot_discrepancy = start_discrepancy + end_discrepancy\n\n if 0 < start_discrepancy <= ends_discrepancy_threshold:\n print(\"HERE\",start_discrepancy)\n matches_snippet = 0\n mismatches_snippet = 0\n if result.query_begin and result.reference_begin:\n query_start_snippet = x[:result.query_begin]\n ref_start_snippet = y[:result.reference_begin]\n alns = pairwise2.align.globalds(query_start_snippet, ref_start_snippet, bio_matrix, g_open, g_extend)\n top_aln = alns[0]\n # print(alns)\n mismatches_snippet = len(list(filter(lambda x: x[0] != x[1] and x[0] != '-' and x[1] != \"-\", zip(top_aln[0],top_aln[1]))))\n indels_snippet = top_aln[0].count(\"-\") + top_aln[1].count(\"-\")\n matches_snippet = len(top_aln[0]) - mismatches_snippet - indels_snippet\n # print(matches_snippet, mismatches_snippet, indels_snippet)\n query_start_alignment_snippet = top_aln[0]\n ref_start_alignment_snippet = top_aln[1]\n elif result.query_begin:\n query_start_alignment_snippet = x[:result.query_begin]\n ref_start_alignment_snippet = \"-\"*len(query_start_alignment_snippet)\n indels_snippet = len(ref_start_alignment_snippet)\n elif result.reference_begin:\n ref_start_alignment_snippet = y[:result.reference_begin]\n query_start_alignment_snippet = \"-\"*len(ref_start_alignment_snippet)\n indels_snippet = len(query_start_alignment_snippet)\n else:\n print(\"BUG\")\n sys.exit()\n matches, mismatches, indels = matches + matches_snippet, mismatches + mismatches_snippet, indels + indels_snippet\n\n # 
print(ref_start_alignment_snippet)\n # print(query_start_alignment_snippet)\n y_alignment = ref_start_alignment_snippet + y_alignment\n x_alignment = query_start_alignment_snippet + x_alignment\n\n if 0 < end_discrepancy <= ends_discrepancy_threshold:\n print(\"HERE2\", end_discrepancy)\n matches_snippet = 0\n mismatches_snippet = 0\n if query_end_discrepancy and ref_end_discrepancy:\n query_end_snippet = x[result.query_end+1:]\n ref_end_snippet = y[result.reference_end+1:]\n alns = pairwise2.align.globalds(query_end_snippet, ref_end_snippet, bio_matrix, g_open, g_extend)\n top_aln = alns[0]\n mismatches_snippet = len(list(filter(lambda x: x[0] != x[1] and x[0] != '-' and x[1] != \"-\", zip(top_aln[0],top_aln[1]))))\n indels_snippet = top_aln[0].count(\"-\") + top_aln[1].count(\"-\")\n matches_snippet = len(top_aln[0]) - mismatches_snippet - indels_snippet\n query_end_alignment_snippet = top_aln[0]\n ref_end_alignment_snippet = top_aln[1]\n elif query_end_discrepancy:\n query_end_alignment_snippet = x[result.query_end+1:]\n ref_end_alignment_snippet = \"-\"*len(query_end_alignment_snippet)\n indels_snippet = len(ref_end_alignment_snippet)\n\n elif ref_end_discrepancy:\n ref_end_alignment_snippet = y[result.reference_end+1:]\n query_end_alignment_snippet = \"-\"*len(ref_end_alignment_snippet)\n indels_snippet = len(query_end_alignment_snippet)\n\n else:\n print(\"BUG\")\n sys.exit()\n matches, mismatches, indels = matches + matches_snippet, mismatches + mismatches_snippet, indels + indels_snippet\n\n y_alignment = y_alignment + ref_end_alignment_snippet\n x_alignment = x_alignment + query_end_alignment_snippet\n\n return x_alignment, y_alignment, matches, mismatches, indels, match_line", "title": "" }, { "docid": "647efb9d6910b575b50b7b6399760d2e", "score": "0.49552074", "text": "def warning(self, msg, pos=None):\n self.log(msg, \"warning: \" + self.location(pos))", "title": "" }, { "docid": "c6feae24ad666c93b14ef15fd112ae62", "score": "0.49234995", "text": "def _warning(self, text) :\n print(\"WARNING [2D Binning]: %s\"%(text))", "title": "" }, { "docid": "b62bc5291e6aa94aa87bc3cb6d89f330", "score": "0.49161667", "text": "def test_aacgm_boundary_location_no_overwrite(self):\n log_capture = StringIO()\n ocbpy.logger.addHandler(logging.StreamHandler(log_capture))\n ocbpy.logger.setLevel(logging.WARNING)\n\n # Initialize the attributes with values for the good location\n rind = 27\n self.test_aacgm_boundary_location_good()\n\n # This should not raise a warning\n self.ocb.get_aacgm_boundary_lat(150.0, rec_ind=rind - 1)\n\n # This should raise a warning\n self.ocb.get_aacgm_boundary_lat(150.0, rec_ind=rind)\n\n self.out = log_capture.getvalue()\n # Test logging error message for only one warning about boundary update\n self.assertRegex(self.out, \"unable to update AACGM boundary\")\n\n del log_capture\n return", "title": "" }, { "docid": "4a2e7160ed5869cc8cdac03c6b7ec723", "score": "0.4908536", "text": "def diagnostic_processing(self):\n\n if self.ultrasound_data.get_side_right() < distance_limit or\\\n self.ultrasound_data.get_side_left() < distance_limit:\n self.safety_status_array.append(False)\n else:\n self.safety_status_array.append(True)\n del self.safety_status_array[0] # gets rid of old messages, keeps buffer size the same\n\n if False in self.safety_status_array and self.movement_paused is False:\n # robot has ability to move but\n self.stop_service_publisher()\n\n if False not in self.safety_status_array and self.movement_paused is True:\n self.start_service_publisher()", "title": "" 
}, { "docid": "96107e5d38efa41222d371b30012ccb6", "score": "0.4900087", "text": "def thresholding(times, disps, gaps, Ws, Vs, J, name, station, direction, \\\n events, locations, draw=True, draw_gaps=False, draw_events=True):\n # Computation of sigE\n filename = '../data/GeoNet/FITS-' + station + '-' + direction + '.csv'\n data = pd.read_csv(filename)\n N = data.shape[0]\n sigE2 = np.mean(np.square(data[' error (mm)']))\n # Thresholding of MODWT wavelet coefficients\n dispt = []\n ymin = []\n ymax = []\n for i in range(0, len(times)):\n time = times[i]\n disp = disps[i]\n N = len(time)\n W = Ws[i]\n V = Vs[i]\n Wt = []\n for j in range(1, J + 1):\n Wj = W[j - 1]\n deltaj = sqrt(2.0 * sigE2 * log(N) / (2.0 ** j))\n Wjt = np.where(np.abs(Wj) >= deltaj, Wj, 0.0)\n if (j == J):\n Vt = np.where(np.abs(V) >= deltaj, V, 0.0)\n Wt.append(Wjt)\n Xt = MODWT.inv_pyramid(Wt, Vt, name, J)\n maxy = max(np.max(disp), np.max(Xt))\n miny = min(np.min(disp), np.min(Xt))\n dispt.append(Xt)\n ymax.append(maxy)\n ymin.append(miny)\n\n # Initialize figure\n if (draw == True):\n params = {'xtick.labelsize':24,\n 'ytick.labelsize':24}\n pylab.rcParams.update(params) \n fig = plt.figure(1, figsize=(15, 10))\n\n # Initial data\n plt.subplot2grid((2, 1), (0, 0))\n if (draw_gaps == True):\n for i in range(0, len(gaps)):\n time = times[i]\n gap = gaps[i]\n for j in range(0, len(gap)):\n plt.axvline(time[gap[j]], linewidth=1, color='red')\n if (draw_events == True):\n for event, location in zip(events, locations):\n for site in location:\n if (site == station):\n plt.axvline(datetime.date(year=event[0], \\\n month=event[1], day=event[2]), linewidth=2, \\\n color='grey')\n xmin = []\n xmax = []\n for i in range(0, len(times)):\n time = times[i]\n disp = disps[i]\n if (i == 0):\n plt.plot(time, disp, 'k', label='Data')\n else:\n plt.plot(time, disp, 'k')\n xmin.append(np.min(time))\n xmax.append(np.max(time))\n plt.xlim(min(xmin), max(xmax))\n plt.ylim(min(ymin), max(ymax))\n plt.legend(loc=1, fontsize=20)\n\n # Denoised data\n plt.subplot2grid((2, 1), (1, 0))\n if (draw_gaps == True):\n for i in range(0, len(gaps)):\n time = times[i]\n gap = gaps[i]\n for j in range(0, len(gap)):\n plt.axvline(time[gap[j]], linewidth=1, color='red')\n if (draw_events == True):\n for event, location in zip(events, locations):\n for site in location:\n if (site == station):\n plt.axvline(datetime.date(year=event[0], \\\n month=event[1], day=event[2]), linewidth=2, \\\n color='grey')\n xmin = []\n xmax = []\n for i in range(0, len(times)):\n time = times[i]\n disp = dispt[i]\n if (i == 0):\n plt.plot(time, disp, 'k', label='Denoised')\n else:\n plt.plot(time, disp, 'k')\n xmin.append(np.min(time))\n xmax.append(np.max(time))\n plt.xlim(min(xmin), max(xmax))\n plt.ylim(min(ymin), max(ymax))\n plt.xlabel('Time (years)', fontsize=20)\n plt.legend(loc=1, fontsize=20)\n\n # Save figure\n if (draw == True):\n namedir = station\n if not os.path.exists(namedir):\n os.makedirs(namedir)\n title = station + ' - ' + direction\n plt.suptitle(title, fontsize=24)\n plt.savefig(namedir + '/' + station + '_' + direction + '_' + \\\n name + '_threshold.eps', format='eps')\n plt.close(1)\n\n # Return denoised data\n return dispt", "title": "" }, { "docid": "c7e321b224a808737eb4a0c010e3eadd", "score": "0.48615378", "text": "def quirky_message(self, msg):\n log.warn(\"Quirky Message: \\\"%s\\\"\", msg)", "title": "" }, { "docid": "74d3a0052c6b13dcea9c9d424d138747", "score": "0.48354146", "text": "def slot_debug(self, sender, (msg)):\n if 
\"https://data.mtgox.com/api/2/money/order/lag\" in msg:\n return\n else:\n logging.debug(\"%s:%s\", sender.__class__.__name__, msg) #change this to .info to see the messages on screen.", "title": "" }, { "docid": "74d3a0052c6b13dcea9c9d424d138747", "score": "0.48354146", "text": "def slot_debug(self, sender, (msg)):\n if \"https://data.mtgox.com/api/2/money/order/lag\" in msg:\n return\n else:\n logging.debug(\"%s:%s\", sender.__class__.__name__, msg) #change this to .info to see the messages on screen.", "title": "" }, { "docid": "143228207354ba87def61322415d5618", "score": "0.482875", "text": "def attenuateError(seg_path, rect):\n seg = cv2.imread(seg_path, 0)\n #creating the frame for the mask corresponding to the rectangle to assess\n sub_mask = seg[rect.top_left.y : rect.bottom_right.y,rect.top_left.x:rect.bottom_right.x]\n total_pixel = rect.area\n white_pixel = np.count_nonzero(sub_mask)\n p = white_pixel*100/total_pixel\n if p> 50.0:\n #print(\"There is too much important pixel in the inside rectangle missing. No attenuation!\")\n return rect.area\n else :\n return total_pixel - ((total_pixel-white_pixel)/2)", "title": "" }, { "docid": "bd1bf7ce1a99502dac7845760750d38e", "score": "0.48241326", "text": "def obstacle_detection(self):\n #saving the acelerations\n x_accel = self._imudata.linear_acceleration.x\n y_accel = self._imudata.linear_acceleration.y\n z_accel = self._imudata.linear_acceleration.z\n \n axis_list = [x_accel, y_accel, z_accel]\n \n #looking for the major measure\n max_axis_index = axis_list.index(max(axis_list))\n #if that measure is positive or not\n positive = axis_list[max_axis_index] >= 0\n \n #if value is > than 7 then True\n significative_value = axis_list[max_axis_index] > self._threshold\n \n message = \"\"\n \n if significative_value:\n if max_axis_index == 0:\n # Winner is in the x axis, therefore its a side crash left/right\n rospy.logwarn(\"[X=\"+str(x_accel))\n rospy.loginfo(\"Y=\"+str(y_accel)+\", Z=\"+str(z_accel)+\"]\")\n if positive:\n message = \"right\"\n else:\n message = \"left\"\n \n elif max_axis_index == 1:\n # Winner is the Y axis, therefore its a forn/back crash\n rospy.logwarn(\"[Y=\"+str(y_accel))\n rospy.loginfo(\"X=\"+str(x_accel)+\", Z=\"+str(z_accel)+\"]\")\n if positive:\n message = \"front\"\n else:\n message = \"back\"\n elif max_axis_index == 2:\n # Z Axis is the winner, therefore its a crash that made it jump\n rospy.logwarn(\"[Z=\"+str(z_accel))\n rospy.loginfo(\"X=\"+str(x_accel)+\", Y=\"+str(y_accel)+\"]\")\n \n if positive:\n message = \"up\"\n else:\n message = \"down\"\n else:\n message = \"unknown_direction\"\n else:\n rospy.loginfo(\"X=\"+str(x_accel)+\"Y=\"+str(y_accel)+\", Z=\"+str(z_accel)+\"]\")\n message = \"nothing\"\n \n return self.convert_to_dict(message)", "title": "" }, { "docid": "10f50d2f55ecee04b8750f60e2f2d9a8", "score": "0.47711715", "text": "def check_mpii_gaze_not_on_screen(input_path: str, output_path: str) -> None:\n\n data = {'file_name': [], 'on_screen_gaze_position': [], 'monitor_pixels': []}\n\n for person_file_path in sorted(glob.glob(f'{input_path}/Data/Original/p*'), reverse=True):\n person = person_file_path.split('/')[-1]\n\n screen_size = scipy.io.loadmat(f'{input_path}/Data/Original/{person}/Calibration/screenSize.mat')\n screen_width_pixel = screen_size[\"width_pixel\"].item()\n screen_height_pixel = screen_size[\"height_pixel\"].item()\n\n for day_file_path in sorted(glob.glob(f'{person_file_path}/d*')):\n day = day_file_path.split('/')[-1]\n\n df = 
pd.read_csv(f'{day_file_path}/annotation.txt', sep=' ', header=None)\n for row_idx in range(len(df)):\n row = df.iloc[row_idx]\n on_screen_gaze_target = row[24:26].to_numpy().reshape(-1).astype(int)\n\n if not (0 <= on_screen_gaze_target[0] <= screen_width_pixel and 0 <= on_screen_gaze_target[1] <= screen_height_pixel):\n file_name = f'{person}/{day}/{row_idx + 1:04d}.jpg'\n\n data['file_name'].append(file_name)\n data['on_screen_gaze_position'].append(list(on_screen_gaze_target))\n data['monitor_pixels'].append([screen_width_pixel, screen_height_pixel])\n\n pd.DataFrame(data).to_csv(f'{output_path}/not_on_screen.csv', index=False)", "title": "" }, { "docid": "67cafd26bfb2873e5e7df3fbfe769632", "score": "0.47654232", "text": "def __volume_warning(self) -> None:\n\n mean_volume = np.nanmean(np.array([\n i['volume'] for i in self.__tracker\n ]))\n\n for item in self.__tracker:\n if item['volume'] <= 0.8 * mean_volume:\n log.warning(\n 'Droplet %d too small. Possible dosing mistake',\n item['droplet']\n )", "title": "" }, { "docid": "cbc423769d76b392d4fee421b0a92785", "score": "0.47493938", "text": "def _check_missing_speakers(self, num_missing: int = 0):\n for k in range(len(self._furthest_sample)):\n if self._furthest_sample[k] == 0:\n num_missing += 1\n if num_missing != 0:\n warnings.warn(\n f\"{self._params.data_simulator.session_config.num_speakers - num_missing}\"\n f\"speakers were included in the clip instead of the requested amount of \"\n f\"{self._params.data_simulator.session_config.num_speakers}\"\n )", "title": "" }, { "docid": "6a2250686d394da7a46f8e49c4bce32e", "score": "0.4746645", "text": "def invalid_log_warning(self):\n print(f\"The submitted log is NOT from {dh.LOG_TYPE}.\")\n\n # Save warning message to analysis_summary_top.txt\n with open(self.fzip['top'], 'w', encoding='utf-8') as file:\n file.write(f\"You sbumitted logs which are NOT from {dh.LOG_TYPE}.\")\n\n # Save top n descriptions title to analysis_summary.csv\n with open(self.fzip['sum'], 'w', newline='', encoding='utf-8') as file:\n writer = csv.writer(file)\n writer.writerow(['No.', 'Prob', 'Target', 'Reference', 'Description'])", "title": "" }, { "docid": "0466ba5ab40653fc805f074d5ddaffdf", "score": "0.4737495", "text": "def check_detection(frame, yx_exp, fwhm, snr_thresh, deltapix=3):\n\n def verify_expcoord(vectory, vectorx, exp_yx):\n return any(\n np.allclose(coor[0], expec[0], atol=deltapix)\n and np.allclose(coor[1], expec[1], atol=deltapix)\n for coor in zip(vectory, vectorx)\n for expec in exp_yx\n )\n\n table = vip.metrics.detection(\n frame,\n fwhm=fwhm,\n mode=\"lpeaks\",\n bkg_sigma=5,\n matched_filter=False,\n mask=True,\n snr_thresh=snr_thresh,\n plot=False,\n debug=True,\n full_output=True,\n verbose=True,\n )\n msg = \"Injected companion not recovered\"\n assert verify_expcoord(table.y, table.x, yx_exp), msg", "title": "" }, { "docid": "f970f328ddeccf1e9671181dbdfce413", "score": "0.47330427", "text": "def _signalize_invalid_card(self):\n self.logger.debug('Invalid card read.')\n self._buzzer.beep(False)\n self._display.show('INVALID CARD!', 'please try again.', False)", "title": "" }, { "docid": "85d9caff2d0ae358d2b95b4f4ae1eef6", "score": "0.47254866", "text": "def issue_2023_07_25():\n from psana.detector.NDArrUtils import info_ndarr\n from psana import DataSource\n from psana.detector.UtilsGraphics import gr, fleximage#, arr_median_limits\n flimg = None\n\n #ds = DataSource(exp='tstx00417', run=286, dir='/sdf/data/lcls/drpsrcf/ffb/tst/tstx00417/xtc') # on s3df 3-panel run\n ds = 
DataSource(exp='tstx00417', run=286, dir='/cds/data/drpsrcf/tst/tstx00417/xtc') # on pcds 3-panel run\n #ds = DataSource(exp='tstx00417', run=287, dir='/cds/data/drpsrcf/tst/tstx00417/xtc') # on pcds 20-panel run detectors=['epixhr_emu'])\n for run in ds.runs():\n det = run.Detector('epixhr_emu')\n for nev,evt in enumerate(run.events()):\n if nev>10000: break\n arr = det.fex.calib(evt)\n if arr is None: continue\n print(info_ndarr(arr, '==== ev:%05d evt.timestamp: %d arr:' % (nev, evt.timestamp)))\n\n img = det.fex.image(evt, value_for_missing_segments=800)\n #img = det.fex.image(evt)\n print(info_ndarr(img, 43*' ' + 'image:'))\n\n if flimg is None:\n #flimg = fleximage(img, arr=None, h_in=8, w_in=11, amin=9000, amax=11000) #, nneg=1, npos=3)\n flimg = fleximage(img, arr=None, h_in=8, w_in=11, amin=700, amax=800) #, nneg=1, npos=3)\n gr.set_win_title(flimg.fig, titwin='Event %d' % nev)\n flimg.update(img, arr=None)\n gr.show(mode='DO NOT HOLD')\n gr.show()", "title": "" }, { "docid": "fce51f67ea02176e3fed72f10a35d8e0", "score": "0.47171494", "text": "def FailWithMessage(self):\n fail_items = []\n\n for x, row in enumerate(self.touch_tested):\n fail_items.extend('touch-x-%d-y-%d' % (x, y)\n for y, tested in enumerate(row) if not tested)\n\n fail_items.extend('scroll-y-%d' % y\n for y, tested in enumerate(self.scroll_tested)\n if not tested)\n\n fail_items.extend('quadrant-%d' % i\n for i, c in enumerate(self.quadrant_count[1:], 1)\n if c < self.args.number_to_quadrant)\n\n if self.single_click_count < self.args.number_to_click:\n fail_items.append('left click count: %d' % self.single_click_count)\n\n if self.double_click_count < self.args.number_to_click:\n fail_items.append('right click count: %d' % self.double_click_count)\n\n self.FailTask(\n 'Touchpad test failed. 
Malfunction sectors: %s' % ', '.join(fail_items))", "title": "" }, { "docid": "2be86d2986a23dbc7e8336a173177844", "score": "0.47158158", "text": "def scan_from_position(map_data, position, width, height, log_level):\n \n count = 0\n visibility_map = map_data.copy()\n\n for coordinate in map_data:\n if coordinate[0] == position[0] and coordinate[1] == position[1]:\n if log_level >= 3:\n print(f\"Skipping asteroid at {coordinate} due to being the same as scan position.\")\n\n continue\n\n step = get_smallest_step(coordinate, position)\n\n if log_level >= 3:\n print(f\"Smallest step on line from {position} to {coordinate} is {step}\")\n\n x = position[0]\n y = position[1]\n\n #Find each asteroid in steps from position towards coordinate (and beyond if applicable)\n isObstructed = False\n\n while True:\n x += step[0]\n y += step[1]\n\n if x < 0 or x >= width or y < 0 or y >= height:\n if log_level >= 4:\n print(f\"Out of bounds: {x},{y} from {position} towards {coordinate}\")\n\n break\n\n if log_level >= 4:\n print(f\"Step to {x},{y} from {position} towards {coordinate}\")\n\n if (x, y) in visibility_map:\n if not isObstructed:\n isObstructed = True #Everything along the line past first contact is obstructed\n\n if visibility_map[(x, y)] == 0:\n visibility_map[(x, y)] = 1\n count += 1\n\n if log_level >= 2:\n print(f\"Asteroid visible at {x},{y} from {position}, count at {count}\")\n else:\n if visibility_map[(x, y)] == 1:\n if log_level >= 3:\n print(f\"Asteroid already visible at {x},{y} from {position}, not counted in\")\n else:\n if log_level >= 3:\n print(f\"Asteroid obstructed at {x},{y} from {position}, not counted in\")\n else:\n if visibility_map[(x, y)] >= 0:\n if visibility_map[(x, y)] == 1:\n count -= 1 #Reduce visible count\n if log_level >= 2:\n print(f\"Asteroid obstructed at {x},{y} from {position} and no longer visible, count at {count}\")\n else:\n if log_level >= 2:\n print(f\"Asteroid obstructed at {x},{y} from {position} and no longer viable for visibility\")\n else:\n if log_level >= 3:\n print(f\"Asteroid obstructed at {x},{y} from {position}, not counted in\")\n \n visibility_map[(x, y)] = -1\n\n return count", "title": "" }, { "docid": "3e2b587924fe5ed286a86e9984e38ea2", "score": "0.47136724", "text": "def _update_noise(self) -> None:\n if \"SPAM\" in self.config.noise and self.config.eta > 0:\n dist = (\n np.random.uniform(size=len(self._qid_index))\n < self.config.spam_dict[\"eta\"]\n )\n self._bad_atoms = dict(zip(self._qid_index, dist))\n if \"doppler\" in self.config.noise:\n detune = np.random.normal(\n 0, self.config.doppler_sigma, size=len(self._qid_index)\n )\n self._doppler_detune = dict(zip(self._qid_index, detune))", "title": "" }, { "docid": "4c5706df125793f62bf4aceeb81b4202", "score": "0.4697981", "text": "def log_trial(self):\n slot = 1\n num_points = 100\n av_time = 0.02\n\n # self.lwmain.write(\"*RST\")\n # time.sleep(2)\n self.lwmain.write(\"*CLS\")\n\n # self.lwmain.write(\"TRIG%d:OUTP DIS\" % slot)\n self.lwmain.write(\"TRIG%d:INP CME\" % slot)\n\n self.lwmain.write(\"SENS%d:CHAN1:FUNC:PAR:LOGG %d,%.7E\" %\n (slot, num_points, av_time))\n\n self.lwmain.write(\"SENS%d:CHAN1:FUNC:STAT LOGG,START\" % slot)\n\n self.lwmain.write(\":TRIG 2\")\n\n time.sleep(num_points*av_time)\n\n # Check for acquisition finished\n acq_finished = self.lwmain.query(\"SENS%d:CHAN1:FUNC:STATE?\" % slot)\n while not ('COMPLETE' in acq_finished):\n print(acq_finished)\n time.sleep(0.5)\n acq_finished = self.lwmain.query(\"SENS%d:CHAN1:FUNC:STATE?\" % slot)\n 
sys.stdout.flush()\n print(acq_finished)\n\n # Acquisition finished, query the values\n self.lwmain.write(\"SENS%d:CHAN1:FUNC:RES?\" % slot)\n\n # response = self.lwmain.read_raw()\n data = self.lwmain.read_binary_values()\n\n return data\n\n # The instrument returns the logging result in the following format:\n # #xyyyffff...; the first digit after the hash denotes the number of ascii\n # digits following (y) ; y specifies the number of binary data following;\n # \"ffff\" represent the 32Bit floats as log result.\n # response_ascii = response[0:2].decode('ascii')\n # print(response_ascii)\n # num_digits = response_ascii[1]\n # print(num_digits)\n #\n # num_points = response[2:2+int(num_digits)].decode('ascii')\n # print(num_points)\n # # Tentative things\n #\n # response = response[2+int(num_digits):]\n # print(float(response[0:4]))\n # #data = response.decode('ascii')\n # #print(data)\n # data = struct.unpack('<float', response[0:4])\n # print(data)", "title": "" }, { "docid": "6ee10cc82273aa3d3a843491f1201aad", "score": "0.46897948", "text": "def part_2(map_data, position, width, height, log_level):\n\n asteroid_data = create_asteroid_data(map_data, -1, log_level)\n asteroid_count = asteroid_data[0]\n asteroid_map = asteroid_data[1]\n\n #Remove station position from data\n del asteroid_map[position]\n asteroid_count -= 1\n\n print(f\"Sweeping with laser from {position} to destroy {asteroid_count} asteroids in total.\")\n \n asteroids_remaining = asteroid_count\n list_asteroids_destroyed = []\n asteroids_destroyed = 0\n pass_number = 1\n\n while asteroids_remaining > 0:\n for i in range(8):\n new_hits = sweep_segment(asteroid_map, width, height, position, i, log_level)\n asteroids_destroyed += len(new_hits)\n list_asteroids_destroyed.extend(new_hits)\n\n print(f\"{asteroids_destroyed} asteroids destroyed in pass {pass_number}\")\n\n if log_level >= 1:\n index = 0\n coordinate = 0\n for n in range(asteroid_count - asteroids_remaining, len(list_asteroids_destroyed)):\n index = n + 1\n coordinate = list_asteroids_destroyed[n]\n print(f\"{index:003d}: Hit {coordinate} in segment {asteroid_map[coordinate]}\")\n \n asteroids_remaining -= asteroids_destroyed\n\n print(f\"{asteroids_remaining} asteroids remain after pass {pass_number}\")\n asteroids_destroyed = 0\n pass_number += 1\n\n #Give user input choice over asteroid info for answer\n\n txt = input(f\"Get coordinate of destroyed asteroid at place (1-{len(list_asteroids_destroyed)}): \")\n\n if len(txt) < 1:\n print(f\"Empty number input\")\n return\n\n asteroid_number = 0\n try:\n asteroid_number = int(txt)\n except ValueError:\n print(f\"Invalid number input {txt}\")\n return\n\n if asteroid_number > 0 and asteroid_number <= len(list_asteroids_destroyed):\n asteroid_position = list_asteroids_destroyed[asteroid_number - 1]\n answer = asteroid_position[0] * 100 + asteroid_position[1]\n print(f\"Asteroid number {asteroid_number} was hit at {asteroid_position}\")\n print(f\"Answer to 100 * {asteroid_position[0]} + {asteroid_position[1]} is {answer}\")\n else:\n print(f\"Number input {asteroid_number} out of bounds!\")", "title": "" }, { "docid": "6a8fbad25ea10ade125b866dd20ee884", "score": "0.4689483", "text": "def report(self,msg):\n spacer=\"----------------------------------------\"\n cli.log.info(spacer)\n cli.log.info(msg) \n cli.log.info(spacer)", "title": "" }, { "docid": "e1c879b82e5a6ab0506f1af2b02ae5aa", "score": "0.46861243", "text": "def onPinholeSmear(self, event):", "title": "" }, { "docid": "cb8487ebc7d82178643b477504ff8610", 
"score": "0.4670551", "text": "def test_logging_for_vibrent_server_anomalies(self):\n self.ghost_dao_mock.get_participants_needing_checked.return_value = []\n self.client_mock.get_participant_lookup.return_value = {\n 'participants': [\n {'drcId': 'P1234'},\n {'drcId': None, 'vibrentId': 45}\n ]\n }\n\n self.service.run_ghost_check(start_date=datetime.now())\n self.logger_mock.error.assert_has_calls([\n mock.call(\"Vibrent has missing drc id: {'drcId': None, 'vibrentId': 45}\"),\n mock.call('Vibrent had unknown id: 1234')\n ], any_order=True)", "title": "" }, { "docid": "9aca5c4a4a4a7cdb731d8c1906bbeaee", "score": "0.46686956", "text": "def EmailDebug(text):\n\tPlainEmail(\"matches@ucc.asn.au\", text)", "title": "" }, { "docid": "ae0af2d4b82cbd1e9cf8948672b167bb", "score": "0.46683922", "text": "def test_positionErrorUpdateAcrossStates(self):\n sentences = [GPGSA] + GPGSV_SEQ\n callbacksFired = ['positionErrorReceived', 'beaconInformationReceived']\n\n def _getIdentifiers(beacons):\n return sorted(map(attrgetter(\"identifier\"), beacons))\n\n def checkBeaconInformation():\n beaconInformation = self.adapter._state['beaconInformation']\n\n seenIdentifiers = _getIdentifiers(beaconInformation.seenBeacons)\n expected = [3, 4, 6, 13, 14, 16, 18, 19, 22, 24, 27]\n self.assertEqual(seenIdentifiers, expected)\n\n usedIdentifiers = _getIdentifiers(beaconInformation.usedBeacons)\n # These are not actually all the PRNs in the sample GPGSA:\n # only the ones also reported by the GPGSV sequence. This\n # is just because the sample data doesn't come from the\n # same reporting cycle of a GPS device.\n self.assertEqual(usedIdentifiers, [14, 18, 19, 22, 27])\n\n self._receiverTest(sentences, callbacksFired, checkBeaconInformation)", "title": "" }, { "docid": "474ed164728bf809d4d3434755077675", "score": "0.46572086", "text": "def check_human_error(self):\n\n def test_monitor_sample(dbpos):\n if dbpos.sample:\n if dbpos.sample.name == monitor_name:\n if dbpos.sample.material:\n return dbpos.sample.material.name == monitor_material\n\n def monitor_exists_test(l):\n for dbpos in l.positions:\n if test_monitor_sample(dbpos):\n return True\n\n projectname = \"{}{}\".format(self.irradiation_project_prefix, self.irradiation)\n\n def correct_monitor_sample(l):\n incorrect_monitors = []\n for dbpos in l.positions:\n if test_monitor_sample(dbpos):\n if (\n not dbpos.sample.project\n or dbpos.sample.project.name != projectname\n ):\n incorrect_monitors.append(str(dbpos.position))\n\n return \",\".join(incorrect_monitors)\n\n error = \"\"\n no_monitors = []\n incorrect_monitor_sample = []\n\n monitor_name = self.monitor_name.strip()\n monitor_material = self.monitor_material.strip()\n\n dbirrad = self.dvc.get_irradiation(self.irradiation)\n for dblevel in dbirrad.levels:\n if not monitor_exists_test(dblevel):\n no_monitors.append(dblevel.name)\n\n poss = correct_monitor_sample(dblevel)\n if poss:\n incorrect_monitor_sample.append(\n \"Level={}, Positions={}\".format(dblevel.name, poss)\n )\n\n if no_monitors:\n error = \"No Monitors: {}\\n\".format(\",\".join(no_monitors))\n if incorrect_monitor_sample:\n error = \"{}Incorrect Monitor Sample: {}\".format(\n error, \"\\n\".join(incorrect_monitor_sample)\n )\n\n if error:\n if not self.confirmation_dialog(\n \"There are issues with this irradiation.\\n\\n\"\n \"{}\\n\\n\"\n \"Are you sure you want to continue?\".format(error)\n ):\n return True", "title": "" }, { "docid": "47318ae46a16ff80f5b7492bd4e1be41", "score": "0.4656393", "text": "def paint_box():\n global k, 
counter, bbox\n # Tracking success - draw rectangle\n p1 = (int(bbox[0]), int(bbox[1]))\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\n cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)\n # display offsets\n offsets_text = str(frame_num) + ' ' + str(int(bbox[0])) + ' ' + str(int(bbox[1])) + ' ' + str(int(bbox[2])) +\\\n ' ' + str(int(bbox[3])) + ' ' + str(track_type)\n cv2.putText(frame, offsets_text, (200, 260), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 50, 50), 2)", "title": "" }, { "docid": "d2d09baa8a1e643af097858aad57c0a7", "score": "0.46541443", "text": "def main():\n emergency_pub = rospy.Publisher(\"isEmergency\", Bool, queue_size=30)\n rospy.init_node('emergency_brake')\n rate = rospy.Rate(30)\n scan_data = scanReader(20)\n velocity_reader = ReadSpeed(3).start()\n emergency_msg = Bool()\n\n while not rospy.is_shutdown():\n if len(scan_data.ranges) > 0:\n il, iu = scan_data._get_angles_indexes()\n \n ranges = scan_data.ranges\n current_velocity = velocity_reader.get_velocity()\n emergency_distance = 0.4 + 0.4 * current_velocity\n if min(ranges[il:iu]) < emergency_distance:\n emergency_msg.data = True\n else:\n emergency_msg.data = False\n \n #print(emergency_msg)\n emergency_pub.publish(emergency_msg)\n rate.sleep()", "title": "" }, { "docid": "a5309543a8bd4052fff48d0800d0e8b0", "score": "0.4642563", "text": "def test_noised_gps(self):\r\n with Morse() as morse:\r\n d = morse.robot.gps.get()\r\n dn = morse.robot.gps_noised.get()\r\n for i in ['x', 'y', 'z']:\r\n self.assertNotAlmostEqual(d[i], dn[i], delta=.001)", "title": "" }, { "docid": "f8184c00df611db8665f4294184fde06", "score": "0.46344808", "text": "def freeform(frame): # pylint: disable=too-many-branches\n\n ctx, msg = frame.ctx, frame.msg\n if ctx.document: # pragma: no cover\n text = 'document:%s' % ctx.document\n elif ctx.photo: # pragma: no cover\n text = 'photo:%s' % ctx.photo\n elif ctx.sticker: # pragma: no cover\n text = 'sticker:%s' % ctx.sticker\n else:\n text = frame.text\n\n if text:\n if text.lower() in ('-', 'none', 'off'):\n text = None\n set_log(frame, text)\n else:\n msg.action = 'Type a new value for ' + frame.field\n msg.add(frame.desc)\n if frame.get():\n msg.add('<code>%s</code> is currently <code>%s</code>.', frame.field, frame.value)\n msg.add('Type your new value, or type \"off\" to disable/reset to default.')", "title": "" }, { "docid": "697300080548b63f5c9877828531a213", "score": "0.4632691", "text": "def warn(self, msg):\n print(msg)", "title": "" }, { "docid": "5f11fca014a0208543d970cf569132ff", "score": "0.46325725", "text": "def on_x_box(self):\n self.log()", "title": "" }, { "docid": "3856836b0f940115c56057e9ccdedb44", "score": "0.46313027", "text": "def in_box(self, event):\n if event.x >= 80 and event.y <= 200:\n print \"x=%d, y=%d\"%(event.x, event.y)\n else:\n print \"x=%d, y=%d w=%d\"%(event.x, event.y, 0)", "title": "" }, { "docid": "284516c532f8e930f92b6173dfb4a394", "score": "0.4627008", "text": "def warn_trial_incorrect(self, flash=None):\n\t\twarble(500, 100, wait=0)\n\t\tif flash:\n\t\t\tself.fb.clear((255, 0, 0), flip=1)\n\t\t\tself.idlefn(flash)\n\t\t\tself.fb.clear((1, 1, 1), flip=1)", "title": "" }, { "docid": "2778267ebbb4f6ba3393486e5ca98d44", "score": "0.46224073", "text": "def test_astar_threshold_neighbors(self):\n graph = workspace_graph.Astar_Graph(self.world_descriptor, (2, 2),\n connect_8=True)\n neibs = graph.get_limited_offset_neighbors((4, 2), 0)\n self.assertTrue(len(neibs) == 3)\n for t in [(0, (3, 2)), (0, (3, 3)), (0, (3, 1))]:\n self.assertTrue(t in neibs)\n 
neibs = graph.get_limited_offset_neighbors((4, 2), 1)\n self.assertTrue(len(neibs) == 6)\n for t in [(0, (3, 2)), (0, (3, 3)), (0, (3, 1)), (1, (4, 3)),\n (1, (4, 2)), (1, (4, 1))]:\n self.assertTrue(t in neibs)\n neibs = graph.get_limited_offset_neighbors((4, 2), 2)\n self.assertTrue(len(neibs) == 9)\n # check minimum offset functionality\n neibs = graph.get_limited_offset_neighbors((4, 2), 1, min_offset=1)\n self.assertTrue(len(neibs) == 3)\n for t in [(1, (4, 3)), (1, (4, 2)), (1, (4, 1))]:\n self.assertTrue(t in neibs)\n neibs = graph.get_limited_offset_neighbors((4, 2), 2, min_offset=1)\n self.assertTrue(len(neibs) == 6)\n for t in [(1, (4, 3)), (1, (4, 2)), (1, (4, 1)), (2, (5, 3)),\n (2, (5, 2)), (2, (5, 1))]:\n self.assertTrue(t in neibs)", "title": "" }, { "docid": "a422f2dd66e151473d9b635b3d9d1bd7", "score": "0.46186808", "text": "def test_wifi_track_bssid_sanity(self):\n track_setting = {\"bssidInfos\": [self.bssid_2g], \"apLostThreshold\": 3}\n self.track_bssid_with_vaild_scan_for_lost(track_setting)", "title": "" }, { "docid": "c55542c4228ac53b65ce294fb26df10f", "score": "0.46162882", "text": "def verify_sixth_idrac_dvce_hardwre_logs():\n msg, status = \"\", True\n try:\n if g.platform == 'android':\n\n 'click on hardware logs element'\n flag1 = ui_controls.Click(get_obj_identifier('idrac_hardwre_logs_elmnt'))\n sleep(3)\n 'click on system event spinner view'\n flag2 = ui_controls.Click(get_obj_identifier('SystmEvntLog_spiner_view'))\n sleep(3)\n 'click on system event log'\n flag3 = ui_controls.Click(get_obj_identifier('system_evnt_log'))\n sleep(3)\n 'click on system event spinner view'\n flag4 = ui_controls.Click(get_obj_identifier('SystmEvntLog_spiner_view'))\n sleep(3)\n 'click on life cycle log'\n flag5 = ui_controls.Click(get_obj_identifier('lificycle_log'))\n sleep(3)\n 'validate wether spinner view all alerts text displaying ro not'\n if ui_controls.isDisplayed(get_obj_identifier('SystmEvntLog_spiner_view')):\n print 'System event log spinner view is displaying'\n else:\n print 'System event log spinner view is not displaying'\n ''\n if ui_controls.isDisplayed(get_obj_identifier('Hrdware_log_alrts_element')):\n print 'hardware log alerts is displaying properly'\n else:\n print 'hardware log alerts is not displaying properly'\n 'go back'\n flag6 = ui_controls.back_button()\n sleep(3)\n\n status = False if not (flag1 and flag2 and flag3 and flag4 and flag5 and flag6) else True\n\n except Exception as excp:\n traceback.print_exc()\n msg += str(excp)\n status = False\n return status, msg", "title": "" }, { "docid": "b35b2f822c5b8e32acce3972fb4c2dc2", "score": "0.46154594", "text": "def verify_trim_dups_warn_log(\n rados_obj: RadosOrchestrator, acting_sets, start_time, end_time\n) -> bool:\n log.debug(\"Checking if the warning message is generated in OSD logs post upgrade\")\n for pgid in acting_sets.keys():\n log.debug(f\"Checking OSD logs of PG: {pgid}. 
OSDs : {acting_sets[pgid]}\")\n for osd in acting_sets[pgid]:\n log_lines = rados_obj.get_journalctl_log(\n start_time=start_time,\n end_time=end_time,\n daemon_type=\"osd\",\n daemon_id=osd,\n )\n line = \"read_log_and_missing WARN num of dups exceeded 6000\"\n if line not in log_lines:\n log.error(\n f\" did not find relevant logging on PG : {pgid} - OSD : {osd}\"\n )\n return False\n log.debug(f\"Found relevant logging on PG : {pgid} - OSD : {osd}\")\n log.debug(f\"Completed verification on PG : {pgid}\")\n log.info(\"Completed log verification on all the OSDs sent\")\n return True", "title": "" }, { "docid": "a990d90a56155786ec198c45de4b4e31", "score": "0.46140376", "text": "def check(self, significance):\n msg = ''\n ok = True\n for v in self.last_vars:\n if self.weights[v][0] > 1.0:\n # Computes means and distribution variances.\n means = self.sum_x[v] / self.weights[v]\n variances = self.sum_xx[v] / self.weights[v] - means * means\n # These are the variances of the means. The distribution variance is N / (N - 1) the\n # sample variance; the variance of the mean is the distribution variance divided by\n # the number of points (the weight of) the mean, that is, N.\n mean_vars = variances / (self.weights[v] - 1.0)\n # Computes significance threshold for each pair of estimates.\n for k in range(NUM_BINS - 1):\n # We perform the check only once we have enough data, and if sufficient time has passed\n # since the latest alert.\n if self.count[v] > self.intervals[k] and self.count_since_alarm[v][k] > self.intervals[k]:\n mean_diff = abs(means[k] - means[k + 1])\n stdev_diff = math.sqrt(abs(mean_vars[k] + mean_vars[k + 1]))\n \n if (stdev_diff == 0 and mean_diff != 0) or mean_diff / (stdev_diff or 1) > significance:\n\n ok = False\n msg += \"\\nQuantity %r differs from past behavior for timescale 10^%d with significance %r\" % (\n v, k + MIN_TIME_SCALE, \"infinity\" if stdev_diff == 0 else mean_diff / stdev_diff)\n msg += \"\\nBehavior in last 10^%d iterations: mean = %f variance = %f variance of mean = %f\" % (\n k + MIN_TIME_SCALE, means[k], variances[k], mean_vars[k])\n msg += \"\\nBehavior in last 10^%d iterations: mean = %f variance = %f variance of mean = %f\" % (\n k + 1 + MIN_TIME_SCALE, means[k + 1], variances[k + 1], mean_vars[k + 1])\n self.count_since_alarm[v][k] = 0\n\n print \"v:\", v\n print \"means:\", means\n print \"count:\", self.count[v]\n print \"k\", k\n print \"mean_diff\", mean_diff\n print \"stdev_diff\", stdev_diff\n print \"significance\", mean_diff / stdev_diff\n if stdev_diff > 0:\n print mean_diff / stdev_diff\n\n\n\n\n return ok, msg", "title": "" }, { "docid": "b65ce7e60104e3405a0222a54435f5a8", "score": "0.46067783", "text": "def couldnt():\r\n print(\"Use bent bars or Increase Dimensions\")", "title": "" }, { "docid": "882847b4e48b85de18f46316fe9e6e1b", "score": "0.45967934", "text": "def not_found_warning():\n warnings.warn(\n \"For metadata to be logged in the data array, \"\n \"it is necessary to install the orsopy package.\",\n UserWarning,\n )", "title": "" }, { "docid": "22e1c38df936eb506ed5ea5c5ef63dae", "score": "0.45964295", "text": "def notice(self, msg):\n self.log(25, msg)", "title": "" }, { "docid": "22e1c38df936eb506ed5ea5c5ef63dae", "score": "0.45964295", "text": "def notice(self, msg):\n self.log(25, msg)", "title": "" }, { "docid": "b21b0db502b942b526dbf80857a92981", "score": "0.45804104", "text": "def test_astar_threshold_neighbors_obstacles(self):\n self.world_descriptor[2][2] = 1\n graph = 
workspace_graph.Astar_Graph(self.world_descriptor, (1, 2),\n connect_8=True)\n neibs = graph.get_limited_offset_neighbors((3, 2), 0)\n self.assertTrue(len(neibs) == 2)\n for t in [(0, (2, 3)), (0, (2, 1))]:\n self.assertTrue(t in neibs)\n neibs = graph.get_limited_offset_neighbors((3, 2), 1)\n self.assertTrue(len(neibs) == 5)\n for t in [(0, (2, 3)), (0, (2, 1)), (1, (3, 3)), (1, (3, 2)),\n (1, (3, 1))]:\n self.assertTrue(t in neibs)", "title": "" }, { "docid": "ba6ecf9b85c6e7dee8dbf82691f0fdf1", "score": "0.45746806", "text": "def log_post_checks(self):\n if self.metrics.update_count == 0:\n # this is almost certainly a problem; there is no use for a program that never produces\n # an update\n LOG.warning('The program %r for party %r never produced an update!', self._program.name, self.client.party_name)\n LOG.warning(' update count: %r', self.metrics.update_count)\n LOG.warning(' trigger count: %r', self.metrics.trigger_count)\n with StringIO() as buf:\n self.write_state(buf=buf)\n LOG.warning(buf.getvalue())\n if self._current_command is not None and not self._current_command.done():\n LOG.warning('The program %r for party %r is stuck waiting for an update!', self._program.name, self.client.party_name)\n LOG.warning(' stuck command: %r', self._current_command)\n LOG.warning(' update count: %r', self.metrics.update_count)\n LOG.warning(' trigger count: %r', self.metrics.trigger_count)\n with StringIO() as buf:\n self.write_state(buf=buf)\n LOG.warning(buf.getvalue())", "title": "" }, { "docid": "0d91dfe62f16fb4c7871de751003726c", "score": "0.45697755", "text": "def on_smear_helper(self, update=False):", "title": "" }, { "docid": "0b652ed5bb762930e22d2338743cf86c", "score": "0.45692065", "text": "def qtips(gid):\n amus_mols = ( 'H2O', 'CO2', 'O3', 'N2O', 'CO', 'CH4', 'O2', 'NO',\n 'SO2', 'NO2', 'NH3', 'HNO3', 'OH', 'HF', 'HCL', 'HBR',\n 'HI', 'CLO', 'OCS', 'H2CO', 'HOCL', 'N2', 'HCN', 'CH3CL',\n 'H2O2', 'C2H2', 'C2H6', 'PH3', 'COF2', 'SF6', 'H2S', 'HCOOH' )\n try:\n gasid = amus_mols.index(gid)+1\n except:\n gasid = gid\n if (gasid == 1):\n g = num.array(( 1, 1, 6, 6 )) # H2O\n elif (gasid == 2):\n g = num.array(( 1, 2, 1, 6, 2, 12, 1, 6 )) # CO2\n elif (gasid == 3):\n g = num.array(( 1, 1, 1 )) # O3\n elif (gasid == 4):\n g = num.array(( 9, 6, 6, 9,54 )) # N2O\n elif (gasid == 5):\n g = num.array(( 1, 2, 1, 6,2 )) # CO\n elif (gasid == 6):\n g = num.array(( 1, 2, 3 )) # CH4\n elif (gasid == 7):\n g = num.array(( 1, 1, 6 )) # O2 \n elif (gasid == 8):\n g = num.array(( 12, 8, 12 )) # NO\n elif (gasid == 9):\n g = num.array(( 1, 1 )) # SO2\n elif (gasid == 10):\n g = num.array(( 6 )) # NO2\n elif (gasid == 11):\n g = num.array(( 3, 2 )) # NH3\n elif (gasid == 12):\n g = num.array(( 6 )) # HNO3\n elif (gasid == 13):\n g = num.array(( 8, 8, 12 )) # OH\n elif (gasid == 14):\n g = num.array(( 4 )) # HF\n elif (gasid == 15):\n g = num.array(( 8, 8 )) # HCL\n elif (gasid == 16):\n g = num.array(( 8, 8 )) # HBR\n elif (gasid == 17):\n g = num.array(( 12 )) # HI\n elif (gasid == 18):\n g = num.array(( 4, 4 )) # CLO\n elif (gasid == 19):\n g = num.array(( 1, 1, 2, 1 )) # OCS\n elif (gasid == 20):\n g = num.array(( 1, 2, 1 )) # H2CO\n elif (gasid == 21):\n g = num.array(( 8, 8 )) # HOCL\n elif (gasid == 22):\n g = num.array(( 0.5 )) # N2 \n elif (gasid == 23):\n g = num.array(( 6, 12, 4 )) # HCN\n elif (gasid == 24):\n g = num.array(( 4, 4 )) # CH3CL\n elif (gasid == 25):\n g = num.array(( 4 )) # H2O2\n elif (gasid == 26):\n g = num.array(( 1, 8 )) # C2H2\n elif (gasid == 27):\n g = num.array(( 64 )) 
# C2H6\n elif (gasid == 28):\n g = num.array(( 2 )) # PH3\n elif (gasid == 29):\n g = num.array(( 1 )) # COF2\n elif (gasid == 30):\n g = num.array(( 1 )) # SF6\n elif (gasid == 31):\n g = num.array(( 1 )) # H2S\n elif (gasid == 32):\n g = num.array(( 1 )) # HCOOH\n else:\n raise ValueError('gasid not in range or unkown')\n\n g = num.reshape(g,(-1,1))\n\n #...TOTAL INTERNAL PARTITION SUMS FOR 70 - 405 K RANGE\n\n if (gasid == 1): # isotopes 161 181 171 162\n abcd = num.array((\n -.37688E+01, .26168E+00, .13497E-02, -.66013E-06,\n -.38381E+01, .26466E+00, .13555E-02, -.65372E-06,\n -.22842E+02, .15840E+01, .81575E-02, -.39650E-05,\n -.20481E+02, .13017E+01, .66225E-02, -.30447E-05))\n elif (gasid == 2):\n abcd = num.array((\n -.21995E+01, .96751E+00, -.80827E-03, .28040E-05,\n -.38840E+01, .19263E+01, -.16058E-02, .58202E-05,\n -.47289E+01, .20527E+01, -.17421E-02, .60748E-05,\n -.27475E+02, .11973E+02, -.10110E-01, .35187E-04,\n -.84191E+01, .41186E+01, -.34961E-02, .12750E-04,\n -.48468E+02, .23838E+02, -.20089E-01, .73067E-04,\n -.22278E+01, .10840E+01, -.89718E-03, .32143E-05,\n -.29547E+02, .12714E+02, -.10913E-01, .38169E-04))\n elif (gasid == 3):\n abcd = num.array((\n -.13459E+03, .62255E+01, .14811E-01, .18608E-04,\n -.12361E+03, .61656E+01, .19168E-01, .13223E-04,\n -.12359E+03, .60957E+01, .18239E-01, .13939E-04))\n elif (gasid == 4):\n abcd = num.array((\n -.95291E+01, .15719E+02, -.12063E-01, .53781E-04,\n .48994E+01, .10211E+02, -.62964E-02, .33355E-04,\n -.28797E+01, .10763E+02, -.78058E-02, .36321E-04,\n .25668E+02, .15803E+02, -.67882E-02, .44093E-04,\n .18836E+03, .91152E+02, -.31071E-01, .23789E-03))\n elif (gasid == 5):\n abcd = num.array((\n .31591E+00, .36205E+00, -.22603E-05, .61215E-08,\n .62120E+00, .75758E+00, -.59190E-05, .15232E-07,\n .30985E+00, .38025E+00, -.29998E-05, .76646E-08,\n .18757E+01, .22289E+01, -.15793E-04, .41607E-07,\n .60693E+00, .79754E+00, -.78021E-05, .19200E-07))\n elif (gasid == 6):\n abcd = num.array((\n -.17475E+02, .95375E+00, .39758E-02,-.81837E-06,\n -.27757E+02, .17264E+01, .93304E-02,-.48181E-05,\n -.89810E+03, .44451E+02, .17474E+00,-.22469E-04))\n elif (gasid == 7):\n abcd = num.array((\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00,\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00,\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00))\n elif (gasid == 8):\n abcd = num.array((\n -.17685E+03, .28839E+02, .87413E-01,-.92142E-04,\n -.61157E+02, .13304E+02, .40161E-01,-.42247E-04,\n -.18775E+03, .30428E+02, .92040E-01,-.96827E-04))\n elif (gasid == 9):\n abcd = num.array((\n -.17187E+03, .94104E+01, .34620E-01, .25199E-04,\n -.17263E+03, .94528E+01, .34777E-01, .25262E-04))\n elif (gasid == 10):\n abcd = num.array((\n -.89749E+03, .44718E+02, .15781E+00, .43820E-04))\n elif (gasid == 11):\n abcd = num.array((\n -.48197E+02, .27739E+01, .11492E-01,-.18209E-05,\n -.32700E+02, .18444E+01, .77001E-02,-.12388E-05))\n elif (gasid == 12):\n abcd = num.array((\n -.74208E+04, .34984E+03, .89051E-01, .39356E-02))\n elif (gasid == 13):\n abcd = num.array((\n .76510E+02, .11377E+01, .39068E-02,-.42750E-05,\n .76140E+02, .11508E+01, .39178E-02,-.42870E-05,\n .14493E+03, .47809E+01, .15441E-01,-.16217E-04))\n elif (gasid == 14):\n abcd = num.array((\n .15649E+01, .13318E+00, .80622E-05,-.83354E-08))\n elif (gasid == 15):\n abcd = num.array((\n .28877E+01, .53077E+00, .99904E-05,-.70856E-08,\n .28873E+01, .53157E+00, .99796E-05,-.70647E-08))\n elif (gasid == 16):\n abcd = num.array((\n .28329E+01, .66462E+00, .83420E-05,-.30996E-08,\n .28329E+01, 
.66483E+00, .83457E-05,-.31074E-08))\n elif (gasid == 17):\n abcd = num.array((\n .41379E+01, .12977E+01, .61598E-05, .10382E-07))\n elif (gasid == 18):\n abcd = num.array((\n .15496E+04, .11200E+03, .19225E+00, .40831E-04,\n .15728E+04, .11393E+03, .19518E+00, .43308E-04))\n elif (gasid == 19):\n abcd = num.array((\n .18600E+02, .31185E+01, .30405E-03, .85400E-05,\n .19065E+02, .31965E+01, .31228E-03, .87535E-05,\n .42369E+02, .61394E+01, .13090E-02, .16856E-04,\n .21643E+02, .32816E+01, .57748E-03, .90034E-05))\n elif (gasid == 20):\n abcd = num.array((\n -.44663E+02, .23031E+01, .95095E-02,-.16965E-05,\n -.91605E+02, .47223E+01, .19505E-01,-.34832E-05,\n -.44663E+02, .23031E+01, .95095E-02,-.16965E-05))\n elif (gasid == 21):\n abcd = num.array((\n -.62547E+03, .31546E+02, .11132E+00, .32438E-04,\n -.60170E+03, .31312E+02, .11841E+00, .23717E-04))\n elif (gasid == 22):\n abcd = num.array((\n .73548E+00, .78662E+00, -.18282E-05, .68772E-08))\n elif (gasid == 23):\n abcd = num.array((\n -.97107E+00, .29506E+01, -.16077E-02, .61148E-05,\n -.16460E+01, .60490E+01, -.32724E-02, .12632E-04,\n -.40184E+00, .20202E+01, -.10855E-02, .42504E-05))\n elif (gasid == 24):\n abcd = num.array((\n -.89695E+03, .40155E+02, .82775E-01, .13400E-03,\n -.91113E+03, .40791E+02, .84091E-01, .13611E-03))\n elif (gasid == 25):\n abcd = num.array((\n -.95255E+03, .49483E+02, .21249E+00,-.35489E-04))\n elif (gasid == 26):\n abcd = num.array((\n .25863E+01, .11921E+01, -.79281E-03, .46225E-05,\n .20722E+02, .95361E+01, -.63398E-02, .36976E-04))\n elif (gasid == 27):\n abcd = num.array((\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00))\n elif (gasid == 28):\n abcd = num.array((\n -.11388E+03, .69602E+01, .17396E-01, .65088E-05))\n elif (gasid == 29):\n abcd = num.array((\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00))\n elif (gasid == 30):\n abcd = num.array((\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00))\n elif (gasid == 31):\n abcd = num.array((\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00))\n elif (gasid == 32):\n abcd = num.array((\n -.10000E+01, .00000E+00, .00000E+00, .00000E+00))\n else:\n raise ValueError('gasid not in range or unkown')\n a = num.reshape(abcd[0::4],(-1,1))\n b = num.reshape(abcd[1::4],(-1,1))\n c = num.reshape(abcd[2::4],(-1,1))\n d = num.reshape(abcd[3::4],(-1,1))\n return [a,b,c,d,g]", "title": "" }, { "docid": "d3c09752bc12ede669e676b3352b5edf", "score": "0.45648846", "text": "def test_wrong_flag(self):\n with self.assertRaises(MinorServoErrorsEx):\n self.boss.setElevationTracking('foo')", "title": "" }, { "docid": "0cfd799d60c7ec816f5ff3c647f78ee0", "score": "0.4562964", "text": "def test_find_single_noisy(self):\n self.atol = 5\n radius = np.random.random() * 15 + 15\n generated_image = self.generate_image(radius, 1, noise=0.2)\n\n fits = find_disks(generated_image.image, (radius / 2.0,\n radius * 2.0),\n maximum=1)\n\n y_coord, x_coord = generated_image.coords[0]\n if len(fits) != 1: # Particle number mismatch\n r, y, x = np.nan, np.nan, np.nan\n else:\n r, x, y = fits[['r', 'x', 'y']].values[0]\n\n return (r, y, x), (radius, y_coord, x_coord)", "title": "" }, { "docid": "65b073cddc1da31870a2e12388abcaa7", "score": "0.4560828", "text": "def unreliable_assessment_taken_notifications(self):\n pass", "title": "" }, { "docid": "8a8a6ab0d297b091fc38a6ddffc27de5", "score": "0.45602554", "text": "def __printDetails(self, signal: u.Quantity, background: u.Quantity, read_noise: u.Quantity,\n dark: u.Quantity, prefix: str = \"\"):\n # Calculate the total collected electrons per 
pixel\n total = signal + background + dark\n # Check for overexposed pixels\n overexposed = total > self.__well_capacity\n if np.any(overexposed):\n # Calculate number of overexposed pixels\n n_overexposed = np.count_nonzero(overexposed)\n # Show a warning for the overexposed pixels\n if n_overexposed == 1:\n logger.warning(prefix + str(n_overexposed) + \" pixel is overexposed.\")\n else:\n logger.warning(prefix + str(n_overexposed) + \" pixels are overexposed.\")\n logger.info(\"--------------------------------------------------------------------------\")\n logger.info(prefix + \"Collected electrons from target: %1.2e electrons\" % signal.sum().value)\n logger.info(prefix + \"Collected electrons from background: %1.2e electrons\" % background.sum().value)\n logger.info(prefix + \"Electrons from dark current: %1.2e electrons\" % dark.sum().value)\n logger.info(prefix + \"Read noise: %1.2e electrons\" % (read_noise ** 2).sum().value)\n logger.info(prefix + \"Total collected electrons: %1.2e electrons\" % total.sum().value)\n logger.info(\"--------------------------------------------------------------------------\")", "title": "" }, { "docid": "b27057a7e838d9b9eefe06bf8b511bf1", "score": "0.4557791", "text": "def get_hint(self):\n message = \"(^.^) Getting colder!\"\n if self.distance[-1] == 0:\n message = \"(;.;) You found me!\"\n elif self.distance[-1] < self.distance[-2]:\n message = \"(>.<) Getting warmer!\"\n elif self.distance[-1] > self.distance[-2]:\n message = \"(^.^) Getting colder!\"\n return message", "title": "" }, { "docid": "3429747561e0e99536828d93b30ea922", "score": "0.4550547", "text": "def _check_gain_correction(self):\n from pyraf import iraf\n from pyraf.irafpar import IrafPar,IrafParI,IrafParS, IrafParList, makeIrafPar\n\n print('')\n print('AXEPREP: Non-NICMOS images such as: %s usually are already gain corrected!' % self.grisim)\n\n idec = IrafParS([' Correct it nevertheless?[(y)es/(n)o/(q)uit] :(q)','string','h'],'whatever')\n idec.getWithPrompt()\n dec = idec.value.strip()\n if dec.upper() == 'Y':\n print(' Continue!')\n print('')\n return 1\n elif dec.upper() == 'N':\n print(' Not correcting image %s!' % self.grisim)\n print('')\n return 0\n else:\n err_msg = 'AXEPREP: No gain correction for non-NICMOS images such as: %s!' 
% self.grisim\n raise aXeError(err_msg)", "title": "" }, { "docid": "b82168da13833ee05901237c511f3645", "score": "0.45488554", "text": "def checkPosition(self, cmd):\n self.coords = [np.nan] * 6\n\n try:\n self.coords = self._getCurrentPosition()\n self.declareNewHexapodPosition(cmd)\n\n finally:\n genKeys = cmd.inform if np.nan not in self.coords else cmd.warn\n genKeys('slit=%s' % ','.join(['%.5f' % p for p in self.coords]))\n genKeys('slitPosition=%s' % self.slitPosition(self.coords, config=self.controllerConfig))", "title": "" }, { "docid": "07ffdbde7420524af430106740980132", "score": "0.45486036", "text": "def check_bl():\n print('checking beamline for beam available...')\n #diode_IN() \n att2.set_T(0) \n fe_sh.open()\n foe_sh.open()\n fast_sh.open()\n current_T=att.get_T()\n att.set_T(1)\n time.sleep(2)\n\n #expected_feedback_voltage_A=3.67 # Dont't drive the beamline into the wall!!!\n #expected_feedback_voltage_B=4.91\n\n #if abs(caget('XF:11IDB-BI{XBPM:02}CtrlDAC:ALevel-I')-expected_feedback_voltage_A)>0.4:\n # print('Feedback voltage A seems wrong, setting it to '+str(expected_feedback_voltage_A))\n # caput('XF:11IDB-BI{XBPM:02}CtrlDAC:ALevel-SP',expected_feedback_voltage_A)\n #if abs(caget('XF:11IDB-BI{XBPM:02}CtrlDAC:BLevel-I')-expected_feedback_voltage_B)>0.4:\n # print('Feedback voltage B seems wrong, setting it to '+str(expected_feedback_voltage_B))\n # caput('XF:11IDB-BI{XBPM:02}CtrlDAC:BLevel-SP',expected_feedback_voltage_B)\n \n time.sleep(2) \n\n RE(feedback_ON())\n time.sleep(2)\n if caget('XF:11IDB-BI{XBPM:02}Fdbk:BEn-SP')==1 and caget('XF:11IDB-BI{XBPM:02}Fdbk:AEn-SP')==1 and abs(caget('XF:11IDB-BI{XBPM:02}Pos:X-I'))+abs(caget('XF:11IDB-BI{XBPM:02}Pos:Y-I'))<.8:\n bl_ok=1\n print('################################\\n')\n print('checked beamline: beam on DBPM, all ok!')\n else:\n bl_ok=0\n print('################################\\n')\n print('checked beamline: NO beam on DBPM, not ready for experiment....')\n att.set_T(current_T)\n print('Setting back transmission to '+str(current_T))\n return bl_ok", "title": "" }, { "docid": "7a425ab9c79e4a086dcda2f02ca9a9df", "score": "0.45471254", "text": "def updateMessages(self, params):\n\n srcImg = params[0]\n\n # Custom messaging related to the fact that we need at least 3 bands\n try:\n bandCount = arcpy.Describe(srcImg).bandCount\n except:\n bandCount = -1\n srcImg.setErrorMessage(\"Could not identify image band information. Is this a multiband raster?\")\n if bandCount < 3 and bandCount >= 1:\n srcImg.setErrorMessage(\"Your image should have at least three bands, only detected %s\" % bandCount)\n else:\n srcImg.clearMessage()\n\n # Custom messaging to make the alpha band \"required\" if checked.\n # parameterType is read only, but we can add an error message if useAlpha = True & bandAlpha = False\n if params[4].value and not params[5].value:\n params[5].setErrorMessage(\"An opacity band was indicated above. 
Please select opacity band.\")\n else:\n params[5].clearMessage()\n\n # Make sure that regardless of the way that someone pointed to their summary areas,\n # the data that they pointed to is polygon\n if params[6].altered:\n summaryLayerShapeType = arcpy.Describe(params[6].valueAsText).shapeType\n if summaryLayerShapeType.upper() != \"POLYGON\":\n params[6].setErrorMessage(\"Summary Areas Must be Polygons, not %s\" % summaryLayerShapeType)\n else:\n params[6].clearMessage()\n\n # If the noise method is being used, check for spatial analyst\n if params[9].value:\n if \"Add Noise\" in params[9].valueAsText:\n if arcpy.CheckExtension(\"Spatial\") == \"Available\":\n params[9].clearMessage()\n else:\n params[9].setErrorMessage(\"Adding noise requires a spatial analyst license. A spatial analyst license is not available.\")\n \n return", "title": "" }, { "docid": "9c7c16c67bc7f36bc0e6d2ad0fbf3162", "score": "0.45415694", "text": "def prmt_searchagain(self):\n self.invalidate_screen()\n if not self.searchagain():\n if self.isMark():\n self.mark_span()\n message(self.parent,\"Search\",\"Pattern not found.\")", "title": "" }, { "docid": "a5e83fd1a69e6f839caf60dc6fe6d909", "score": "0.45411322", "text": "def snap_and_get_bleach_location(exposure, cutoff):\n p_exposure = projector_device.get_exposure()\n c_exposure = mmc.get_exposure()\n\n # set analyze channel\n mmc.set_config(\"Channels\", cell_detect_channel)\n\n test_img = mm.live().snap(True).get(0)\n test_np_img = np.reshape(test_img.get_raw_pixels(), newshape=[test_img.get_height(), test_img.get_width()])\n location = central_pixel_without_cells(test_np_img)\n if location:\n auto_shutter = mm.shutter().get_auto_shutter()\n mm.shutter().set_auto_shutter(False)\n projector.set_exposure(projector_device, exposure)\n mmc.set_exposure(exposure)\n projector.enable_point_and_shoot_mode(True)\n pre_img = mm.live().snap(True).get(0)\n pre_np_img = np.reshape(pre_img.get_raw_pixels(), newshape=[pre_img.get_height(), pre_img.get_width()])\n projector.add_point_to_point_and_shoot_queue(int(location[1]), int(location[0]))\n post_img = mm.live().snap(True).get(0)\n post_np_img = np.reshape(post_img.get_raw_pixels(), newshape=[post_img.get_height(), post_img.get_width()])\n measured_location = bleach_location(pre_np_img, post_np_img, location, [100, 100])\n offset = (measured_location[0] - location[0], measured_location[1] - location[1])\n print(offset)\n cal = False\n if offset[0] * offset[0] + offset[1] * offset[1] > cutoff:\n projector.calibrate(True)\n cal = True\n print(\"Calibrated\")\n projector.set_exposure(projector_device, p_exposure)\n mmc.set_exposure(c_exposure)\n mm.shutter().set_auto_shutter(auto_shutter)\n return cal, offset[0] * offset[0] + offset[1] * offset[1]\n return False, -1", "title": "" }, { "docid": "079e994e6f1523a9e6e0ddb77e9eb3df", "score": "0.4541002", "text": "def ensureOverscansAreInRange(overscan, ampsConfig):\n warnings = []\n\n for level, rms, ampId, ampConfig in zip(overscan.level, overscan.noise, ampsConfig.keys(), ampsConfig.values()):\n minLevel, maxLevel = ampConfig['serialOverscanLevelLim']\n minRMS, maxRMS = ampConfig['serialOverscanNoiseLim']\n\n if (not minLevel < level < maxLevel) or (not minRMS < rms < maxRMS):\n warnings.append((ampId, int(level), round(rms, 1)))\n\n # this would be too long let's keep it short for STS sake\n # warnings.append(f'amp{ampId} overscan level({level}) out of range({minLevel}:{maxLevel})')\n # if not minRMS<noise<maxRMS:\n # warnings.append(f'amp{ampId} overscan noise({noise}) out of 
range({minRMS}:{maxRMS})')\n\n if not warnings:\n status = \"OK\"\n else:\n status = \"overscan out of range ! \" + \\\n \" \".join([f'amp{ampId}(level={level} RMS={rms})' for ampId, level, rms in warnings])\n\n return status", "title": "" }, { "docid": "abe13aea6f86ff527c81ee96a25356fe", "score": "0.45409036", "text": "def _guide_labels(self,stamp):\n center = stamp.gpCtr\n boxWidth = stamp.image.shape[0] / 2.0\n probeName = makeGProbeName(stamp.gpNumber, stamp.gpBits)\n fwhm = stamp.fwhmArcSec\n loc1 = center-(boxWidth+4,-5)\n loc2 = center-(0,boxWidth+17)\n if not stamp.gpEnabled:\n disabled = [np.array([center+(boxWidth,boxWidth),center-(boxWidth,boxWidth)]).T,\n np.array([center+(boxWidth,-boxWidth),center-(boxWidth,-boxWidth)]).T]\n else:\n disabled = None\n return loc1,probeName,loc2,'%4.2f\"'%fwhm,disabled", "title": "" }, { "docid": "4f3ee339c9511b5898519bfff03a5e71", "score": "0.45392963", "text": "def test_star_mask_in_sim(draw_stars):\n rng = np.random.RandomState(234)\n\n bands = ['r']\n coadd_dim = 100\n buff = 0\n star_density = 100\n psf = make_fixed_psf(psf_type='moffat')\n\n some_were_bright = False\n some_were_saturated = False\n for i in range(1000):\n galaxy_catalog = make_galaxy_catalog(\n rng=rng,\n gal_type='fixed',\n coadd_dim=coadd_dim,\n buff=buff,\n layout='random',\n )\n star_catalog = StarCatalog(\n rng=rng,\n coadd_dim=coadd_dim,\n buff=buff,\n density=star_density,\n )\n sim_data = make_sim(\n rng=rng,\n galaxy_catalog=galaxy_catalog,\n star_catalog=star_catalog,\n draw_stars=draw_stars,\n coadd_dim=coadd_dim,\n bands=bands,\n psf=psf,\n g1=0, g2=0,\n star_bleeds=True,\n )\n\n nbright = sim_data['bright_info'].size\n if nbright > 0:\n some_were_bright = True\n\n for bi in sim_data['bright_info']:\n\n assert 'ra' in bi.dtype.names\n assert 'dec' in bi.dtype.names\n\n assert 'radius_pixels' in bi.dtype.names\n assert bi['radius_pixels'] >= 0\n\n assert 'has_bleed' in bi.dtype.names\n\n exp = sim_data['band_data'][bands[0]][0]\n\n mask = exp.mask.array\n image = exp.image.array\n\n wsat = np.where((mask & get_flagval('SAT')) != 0)\n if (\n wsat[0].size > 0 and\n np.all(image[wsat] == BAND_SAT_VALS['r'])\n ):\n\n some_were_saturated = True\n break\n\n assert some_were_bright and some_were_saturated", "title": "" }, { "docid": "746d53c7bb7b488da622e01e51592da1", "score": "0.45392954", "text": "def qc_BeamLogs():\n \n file_dir = 'SpectralCube_BeamLogs'\n basename = '/beamlog.image.restored.' + imagebase + '.' + field\n\n # use different basename - latest sbids\n if not glob.glob(file_dir + basename +'*.txt'): \n basename = '/beamlog.image.restored.' + 'i.' + field + '.SB' + sbid + '.cube.' 
+ field \n\n QC_BEAMS_LABEL = []\n MEDIAN_BMAJ = []\n MEDIAN_BMIN = []\n\n for i in range(0, 36):\n infile = file_dir + basename + '.beam%02d.txt' % (i)\n if os.path.isfile(infile):\n beamlog_file = np.loadtxt(infile)\n bmaj = beamlog_file[:,1]\n bmin = beamlog_file[:,2]\n bmaj = bmaj[bmaj > 0]\n bmin = bmin[bmin > 0]\n bmaj_median=np.median(bmaj)\n bmin_median=np.median(bmin)\n tolerance_maj=[bmaj_median - bmaj_median*0.3, bmaj_median + bmaj_median*0.3]\n tolerance_min=[bmin_median - bmin_median*0.3, bmin_median + bmin_median*0.3]\n MEDIAN_BMAJ.append(bmaj_median)\n MEDIAN_BMIN.append(bmin_median)\n\n # check bmaj\n outliers_bmaj = (bmaj < tolerance_maj[0]) | (bmaj > tolerance_maj[1])\n\n if np.count_nonzero(outliers_bmaj) > 50:\n qc_BMAJ_label = 'fail'\n else:\n qc_BMAJ_label = 'pass'\n\n # check bmin\n outliers_bmin = (bmin < tolerance_min[0]) | (bmin > tolerance_min[1])\n\n if np.count_nonzero(outliers_bmin) > 50:\n qc_BMIN_label = 'fail'\n else:\n qc_BMIN_label = 'pass'\n\n # check both bmaj and bmin\n if (qc_BMAJ_label == 'pass') and (qc_BMIN_label == 'pass'):\n QC_BEAMS_LABEL.append('pass')\n else:\n QC_BEAMS_LABEL.append('fail')\n\n # no beamlogs text files - missing beams\n else:\n QC_BEAMS_LABEL.append('missing')\n MEDIAN_BMAJ.append(0)\n MEDIAN_BMIN.append(0)\n\n return QC_BEAMS_LABEL, MEDIAN_BMAJ, MEDIAN_BMIN", "title": "" }, { "docid": "aaaefe21b70d8006c0bc75624a9fda37", "score": "0.45300284", "text": "def testAntibodyMarkerWildSearch(self):\n driver = self.driver\n #finds the antibody alias J number field and enters a J number, tabs out of the field then clicks the Search button\n driver.find_element(By.ID, \"markerSymbol-0\").send_keys('Sfp%')\n time.sleep(2)\n actions = ActionChains(driver) \n actions.send_keys(Keys.TAB)\n actions.perform()\n time.sleep(2)\n driver.find_element(By.ID, 'searchButton').click()\n time.sleep(4)\n #find the search results table\n results_table = self.driver.find_element(By.ID, \"resultsTable\")\n table = Table(results_table)\n #Iterate and print the search results headers\n cell1 = table.get_row_cells(0)\n cell2 = table.get_row_cells(1)\n symbol1 = iterate.getTextAsList(cell1)\n symbol2 = iterate.getTextAsList(cell2)\n print(symbol1)\n #Assert the correct antibodies are returned\n self.assertEqual(symbol1, ['anti-PSF (clone B92)'])\n self.assertEqual(symbol2, ['anti-SFPQ'])", "title": "" }, { "docid": "84111076d5d6be89af5ca5e65fae60bd", "score": "0.4520859", "text": "def log_warning_and_above(self) -> \"PyMarkdownApi\":\n return self.log(ApplicationLogging.log_level_warning)", "title": "" }, { "docid": "c3bdacffc1523b8824b24903fbd79ce6", "score": "0.45023543", "text": "def spiral_search():\n #spiral inward to outward making a larger circle each pass (currently squares)\n #------------check the RSSI readings as it spins------------------\n #replace max rssi with new largest and record degrees coordinates\n rssi_max = -120\n max_x = 0\n max_y = 0\n\n count = 0\n while (count < 5):\n move_ccw()\n time.sleep((.1+count))\n move_up_ccw()\n time.sleep((.05+count))\n move_up()\n time.sleep((.05+count))\n move_up_cw()\n time.sleep((.05+count))\n move_cw()\n time.sleep(2*(.1+count))\n move_down_cw()\n time.sleep((.05*count))\n move_down()\n time.sleep(2*(.05+(.05*count)))\n move_down_ccw()\n time.sleep(.05*count)\n count+=1\n #this method isn't really ideal with using timer to determine movement length", "title": "" }, { "docid": "d5098d5be0aa5b37e9710862cde1762a", "score": "0.44982144", "text": "def log(msg):\n cutter.message(f\"[capa explorer]: 
{msg}\")", "title": "" }, { "docid": "7a668a5d49ac8b0cf9904f08ad344d97", "score": "0.44955093", "text": "def scan_area(_angleSearch, CameraIndex):\r\n # Search angle where angle of rotation is [-maxAngleScan;+maxAngleScan]\r\n names = \"HeadYaw\"\r\n useSensors = False\r\n motionAngles = []\r\n maxAngleScan = _angleSearch\r\n\r\n #motion.angleInterpolationWithSpeed(\"Head\", [-maxAngleScan, 0.035], 0.1)\r\n #pic(path + 'camImage0.png', CameraIndex)\r\n #commandAngles = motion.getAngles(names, useSensors)\r\n #motionAngles.append(commandAngles)\r\n #print str(commandAngles)\r\n motion.angleInterpolationWithSpeed(\"Head\", [-2*maxAngleScan/3, 0.035], 0.1)\r\n pic(path + 'camImage0.png', CameraIndex)\r\n commandAngles = motion.getAngles(names, useSensors)\r\n motionAngles.append(commandAngles)\r\n print str(commandAngles)\r\n motion.angleInterpolationWithSpeed(\"Head\", [-maxAngleScan/3, 0.035], 0.1)\r\n pic(path + 'camImage1.png', CameraIndex)\r\n commandAngles = motion.getAngles(names, useSensors)\r\n motionAngles.append(commandAngles)\r\n print str(commandAngles)\r\n motion.angleInterpolationWithSpeed(\"Head\", [0, 0.035], 0.1)\r\n pic(path + 'camImage2.png', CameraIndex)\r\n commandAngles = motion.getAngles(names, useSensors)\r\n motionAngles.append(commandAngles)\r\n print str(commandAngles)\r\n motion.angleInterpolationWithSpeed(\"Head\", [maxAngleScan/3, 0.035], 0.1)\r\n pic(path + 'camImage3.png', CameraIndex)\r\n commandAngles = motion.getAngles(names, useSensors)\r\n motionAngles.append(commandAngles)\r\n print str(commandAngles)\r\n motion.angleInterpolationWithSpeed(\"Head\", [2*maxAngleScan/3, 0.035], 0.1)\r\n pic(path + 'camImage4.png', CameraIndex)\r\n commandAngles = motion.getAngles(names, useSensors)\r\n motionAngles.append(commandAngles)\r\n print str(commandAngles)\r\n #motion.angleInterpolationWithSpeed(\"Head\", [maxAngleScan, 0.035], 0.1)\r\n #pic(path + 'camImage6.png', CameraIndex)\r\n #commandAngles = motion.getAngles(names, useSensors)\r\n #motionAngles.append(commandAngles)\r\n #print str(commandAngles)\r\n centers = analyze_img()\r\n return [centers, motionAngles]", "title": "" }, { "docid": "b3a40a03fced40e7408091a6f7e6dc01", "score": "0.44927022", "text": "def issue_2023_01_03():\n import psana.pyalgos.generic.PSUtils as psu\n from psana.detector.NDArrUtils import info_ndarr\n from time import time\n\n #ds, orun, det = ds_run_det(exp='rixx45619',run=121, detname='epixhr', dir='/cds/data/psdm/prj/public01/xtc')\n ds, orun, det = ds_run_det(exp='ueddaq02',run=569, detname='epixquad', dir='/cds/data/psdm/prj/public01/xtc')\n\n print('common mode parameters from DB', det.raw._common_mode())\n\n from psana.detector.UtilsGraphics import gr, fleximage, arr_median_limits\n flimg = None\n for nevt,evt in enumerate(orun.events()):\n print('== Event %03d ==' % nevt)\n t0_sec_tot = time()\n arr = det.raw.calib(evt, cmpars=(0,7,300,10))\n logger.info('calib consumed time = %.6f sec' % (time()-t0_sec_tot))\n\n #arr = det.raw.calib(evt, cmpars=0)\n #arr = det.raw.calib(evt)\n #arr = det.raw.raw(evt)\n if nevt>29: break\n if arr is None: continue\n\n #print(info_ndarr(arr,'arr:'))\n #sh = img.shape # shape:(1, 288, 384)\n #img = arr[0,144:,:192] # cut off a single ASIC with meaningfull data\n img = psu.table_nxn_epix10ka_from_ndarr(arr)\n print(info_ndarr(img,'img:'))\n\n if flimg is None:\n flimg = fleximage(img, arr=None, h_in=8, w_in=11, nneg=1, npos=3)\n gr.set_win_title(flimg.fig, titwin='Event %d' % nevt)\n flimg.update(img, arr=None)\n gr.show(mode='DO NOT HOLD')\n 
gr.show()", "title": "" }, { "docid": "3efd7831efc9b8575363202441e166c7", "score": "0.4488036", "text": "def test_computing_frequent_patterns_with_mismatches_check_kmers_not_Presented_in_text(self):\r\n self.assertSequenceEqual(sorted([\"AA\", \"AC\", \"AG\", \"CA\", \"AT\", \"GA\", \"TA\"]),\r\n sorted(computing_frequent_patterns_with_mismatches(\"AAAAAAAAAA\", 2, 1)))", "title": "" }, { "docid": "b557fce75bb08d9e16772658fa5d26be", "score": "0.44860032", "text": "def test_silence_direct_notifications(self):\n pass", "title": "" }, { "docid": "0a57b8f14c7af3393cc6f38a7abe8783", "score": "0.44782406", "text": "def errs_warnings():\n return 30", "title": "" }, { "docid": "2250b9adf939a7e9cb6a80b682fd30aa", "score": "0.44780713", "text": "def add_missing_in_calculate():\r\n screen_state = SCREEN.get()\r\n # add closing )\r\n for _ in range(screen_state.count(\"(\") - screen_state.count(\")\")):\r\n update_screen(\")\")\r\n # add * if missing with parenthesis\r\n screen_state = SCREEN.get()\r\n pos = 0\r\n for n in screen_state:\r\n if n is \"(\" and screen_state[pos - 1] in \"0123456789\":\r\n screen_state = screen_state[:pos]+\"*\"+screen_state[pos:]\r\n SCREEN.set(screen_state)\r\n pos += 1", "title": "" }, { "docid": "88c5c2bc7d281af288f28b3c1f11fa01", "score": "0.44741243", "text": "def qc_Bad_Chans(infile, mad_rms, med_rms):\n\n data = np.loadtxt(infile)\n data_rms = data[:,3]\n data_median_rms=np.median(data_rms)\n data_std_rms=np.std(data_rms)\n threshold = np.std(data_rms)*3.0 # threshold = 3 * standard deviation from med_rms\n\n BAD_CHAN = []\n\n stat_file = open(infile, 'r')\n LINES = stat_file.readlines()[2:]\n stat_file.close()\n\n for i in range(len(LINES)):\n line = LINES[i]\n TOKS = line.split()\n chan = TOKS[0]\n freq = TOKS[1]\n rms = float(TOKS[3])\n madfm = float(TOKS[5])\n \n if abs(rms-data_median_rms) > threshold:\n BAD_CHAN.append(chan)\n elif rms == 0:\n BAD_CHAN.append(chan)\n\n if BAD_CHAN == []:\n BAD_CHAN.append('none')\n QC_badchan_id = 'good'\n QC_badchan_keyword = 'n= '+str(len(BAD_CHAN))\n else:\n QC_badchan_id = 'bad'\n QC_badchan_keyword = 'n= '+str(len(BAD_CHAN))\n\n\n mosaic_bad_chan = 'mosaic_bad_chan.txt'\n print ('\\n'.join(BAD_CHAN), file=open(fig_dir + '/' + mosaic_bad_chan,'w'))\n\n n_bad_chan = len(BAD_CHAN)\n\n # Check if number of bad channel recorded is 1. 
If yes, check if is it a none keyword.\n # If yes, number of bad channel should be 0.\n \n if n_bad_chan == 1:\n with open(fig_dir + '/' + mosaic_bad_chan) as f:\n if 'none' in f.read():\n n_bad_chan = 0\n print ('yes')\n \n return n_bad_chan, mosaic_bad_chan, QC_badchan_id, QC_badchan_keyword", "title": "" }, { "docid": "5326817e8ba43d8260c71c364a6813df", "score": "0.44733787", "text": "def _cv2_show_wrong_tracks(img,\n bboxes,\n ids,\n error_types,\n thickness=2,\n font_scale=0.4,\n text_width=10,\n text_height=15,\n show=False,\n wait_time=100,\n out_file=None):\n assert bboxes.ndim == 2, \\\n f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'\n assert ids.ndim == 1, \\\n f' ids ndim should be 1, but its ndim is {ids.ndim}.'\n assert error_types.ndim == 1, \\\n f' error_types ndim should be 1, but its ndim is {error_types.ndim}.'\n assert bboxes.shape[0] == ids.shape[0], \\\n 'bboxes.shape[0] and ids.shape[0] should have the same length.'\n assert bboxes.shape[1] == 5, \\\n f' bboxes.shape[1] should be 5, but its {bboxes.shape[1]}.'\n\n bbox_colors = sns.color_palette()\n # red, yellow, blue\n bbox_colors = [bbox_colors[3], bbox_colors[1], bbox_colors[0]]\n bbox_colors = [[int(255 * _c) for _c in bbox_color][::-1]\n for bbox_color in bbox_colors]\n\n if isinstance(img, str):\n img = mmcv.imread(img)\n else:\n assert img.ndim == 3\n\n img_shape = img.shape\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n\n for bbox, error_type, id in zip(bboxes, error_types, ids):\n x1, y1, x2, y2 = bbox[:4].astype(np.int32)\n score = float(bbox[-1])\n\n # bbox\n bbox_color = bbox_colors[error_type]\n cv2.rectangle(img, (x1, y1), (x2, y2), bbox_color, thickness=thickness)\n\n # FN does not have id and score\n if error_type == 1:\n continue\n\n # score\n text = '{:.02f}'.format(score)\n width = (len(text) - 1) * text_width\n img[y1:y1 + text_height, x1:x1 + width, :] = bbox_color\n cv2.putText(\n img,\n text, (x1, y1 + text_height - 2),\n cv2.FONT_HERSHEY_COMPLEX,\n font_scale,\n color=(0, 0, 0))\n\n # id\n text = str(id)\n width = len(text) * text_width\n img[y1 + text_height:y1 + text_height * 2,\n x1:x1 + width, :] = bbox_color\n cv2.putText(\n img,\n str(id), (x1, y1 + text_height * 2 - 2),\n cv2.FONT_HERSHEY_COMPLEX,\n font_scale,\n color=(0, 0, 0))\n\n if show:\n mmcv.imshow(img, wait_time=wait_time)\n if out_file is not None:\n mmcv.imwrite(img, out_file)\n\n return img", "title": "" }, { "docid": "6694c1b24dc9c7383fda15fc79568852", "score": "0.44725356", "text": "def _warning():\n raise ValueError(\"\"\"WARNING: The algorithm was not able to generate \\\n the desired number of points.\\n\n Change the search region or refine resolution vector dx\"\"\")\n sys.exit(1)", "title": "" }, { "docid": "dd3ff959adb8531dc0f53f406c9633ae", "score": "0.4471385", "text": "def provide_feedback(self, message):\n print(message + '\\n')", "title": "" }, { "docid": "d9b609c879f9b7271c111f5c254a86a3", "score": "0.44671145", "text": "def _log_not_found(not_found):\n\n with open(\"PersistentData/not_founds.txt\", \"a\") as file:\n file.write(f\"{datetime.now()}:\\n\")\n for gene in not_found:\n file.write(str(gene) + \"\\n\")", "title": "" }, { "docid": "672a45d72e44ffa54d7b5cf6336d44b3", "score": "0.44654462", "text": "def test_badGSAFixType(self):\n sentenceData = {'type': 'GPGSA',\n 'altitude': '545.4',\n 'dataMode': nmea.GPGLLGPRMCFixQualities.VOID,\n 'fixType': nmea.GPGSAFixTypes.GSA_2D_FIX}\n self._invalidFixTest(sentenceData)", 
"title": "" }, { "docid": "94318b7eb6a2400a238a7137bba7bd85", "score": "0.4464759", "text": "def check_mpii_face_gaze_not_on_screen(input_path: str, output_path: str) -> None:\n\n data = {'file_name': [], 'on_screen_gaze_position': [], 'monitor_pixels': []}\n\n for person_file_path in sorted(glob.glob(f'{input_path}/p*')):\n person = person_file_path.split('/')[-1]\n\n screen_size = scipy.io.loadmat(f'{input_path}/{person}/Calibration/screenSize.mat')\n screen_width_pixel = screen_size[\"width_pixel\"].item()\n screen_height_pixel = screen_size[\"height_pixel\"].item()\n\n df = pd.read_csv(f'{person_file_path}/{person}.txt', sep=' ', header=None)\n df_idx = 0\n\n for day_file_path in sorted(glob.glob(f'{person_file_path}/d*')):\n day = day_file_path.split('/')[-1]\n\n for image_file_path in sorted(glob.glob(f'{day_file_path}/*.jpg')):\n row = df.iloc[df_idx]\n on_screen_gaze_target = row[1:3].to_numpy().reshape(-1).astype(int)\n\n if not (0 <= on_screen_gaze_target[0] <= screen_width_pixel and 0 <= on_screen_gaze_target[1] <= screen_height_pixel):\n file_name = f'{person}/{day}/{image_file_path.split(\"/\")[-1]}'\n\n data['file_name'].append(file_name)\n data['on_screen_gaze_position'].append(list(on_screen_gaze_target))\n data['monitor_pixels'].append([screen_width_pixel, screen_height_pixel])\n\n df_idx += 1\n\n pd.DataFrame(data).to_csv(f'{output_path}/not_on_screen.csv', index=False)", "title": "" }, { "docid": "c9a73d7be121684080f5d3838cc84728", "score": "0.4464356", "text": "def issue_2023_01_10():\n import psana.detector.utils_calib_components as ucc\n import psana.detector.UtilsEpix10kaChargeInjection as ueci\n\n from psana.detector.NDArrUtils import info_ndarr\n from time import time\n import numpy as np\n # dir='/cds/data/psdm/asc/ascdaq18/xtc/' # default\n # dir='/cds/data/psdm/prj/public01/xtc') # preserved\n ds, orun, det = ds_run_det(exp='ascdaq18', run=171, detname='epixhr', dir='/cds/data/psdm/asc/ascdaq18/xtc/')\n\n config = det.raw._config_object()\n calibc = det.raw._calibconst\n\n logger.debug('calibc: %s' % str(calibc))\n\n cc = ucc.calib_components_epix(calibc, config)\n data_bit_mask = cc.data_bit_mask() # 0o77777 for epixhr\n pedestals = cc.pedestals()\n\n ones = np.ones_like(pedestals, dtype=np.float32)\n\n print('calib_types: ', cc.calib_types())\n print('config - number of panels: ', cc.number_of_panels())\n print('dettype: ', cc.dettype())\n print('calib_metadata: ', cc.calib_metadata(ctype='pedestals'))\n print(info_ndarr(pedestals,'pedestals:'))\n print('data_bit_mask:', oct(data_bit_mask))\n\n #sys.exit('TEST EXIT')\n\n from psana.detector.UtilsGraphics import gr, fleximage, arr_median_limits\n flimg = None\n\n nstep_sel = 2\n space = 5\n databitw = 0o037777\n\n for nstep, step in enumerate(orun.steps()):\n #if nstep<nstep_sel: continue\n #elif nstep>nstep_sel: break\n if nstep>10: break\n\n irow, icol = ueci.injection_row_col(nstep, space)\n\n s = '== Step %02d irow %03d icol %03d ==' % (nstep, irow, icol)\n print(s)\n\n\n for nevt,evt in enumerate(step.events()):\n #if nevt>1000: break\n if nevt%100: continue\n\n #print('== Step %02d Event %03d irow %03d icol %03d ==' % (nstep, nevt, irow, icol))\n\n #t0_sec_tot = time()\n raw = det.raw.raw(evt)\n if raw is None: continue\n\n peds = cc.event_pedestals(raw)\n #arr = peds\n arr = np.array(raw & data_bit_mask, dtype=np.float32) - peds\n\n #gmaps = cc.gain_maps_epix(raw)\n #arr = ucc.event_constants_for_gmaps(gmaps, ones, default=0)\n #arr = ucc.map_gain_range_index_for_gmaps(gmaps, default=10) # stack 
bits...\n #arr = np.array(raw & 0o100000, dtype=np.int) # 0o77777 # behaves ok\n arr1 = np.array(arr[0,irow,100:120], dtype=np.int16) & databitw\n print(info_ndarr(arr1,'%s arr1:' % s, first=0, last=10), ' v[col]=%5d' % arr1[icol])\n\n #logger.info('time consumption to make 3-d array for imaging = %.6f sec' % (time()-t0_sec_tot))\n #pedestals: shape:(7, 1, 288, 384)\n #img = cc.pedestals()[1,0,:150,:200]\n #img = arr[0,:150,:200] # cut off a single ASIC with meaningfull data\n img = arr[0,:144,:192] # cut off a single ASIC with meaningfull data\n #img = arr[0,60:144,110:192] # cut off a single ASIC with meaningfull data\n #img = arr[0,0:20,100:120] # cut off a single ASIC with meaningfull data\n #img = arr[0,:,:] # cut off a single ASIC with meaningfull data\n #img = ucc.psu.table_nxn_epix10ka_from_ndarr(arr, gapv=0)\n #print(info_ndarr(img,'img:'))\n\n if flimg is None:\n flimg = fleximage(img, arr=None, h_in=8, w_in=11, nneg=1, npos=3)\n gr.set_win_title(flimg.fig, titwin='Step %02d Event %d' % (nstep,nevt))\n flimg.update(img, arr=None, amin=0, amax=databitw)\n gr.show(mode='DO NOT HOLD')\n gr.show()", "title": "" }, { "docid": "006d2ad68f9e883e7a89ca3fe87e776b", "score": "0.446172", "text": "def test_locate_single_noisy(self):\n self.atol = 0.5\n radius = np.random.uniform(15, 30)\n generated_image = self.generate_image(radius, 1, noise=0.2)\n\n fits = locate_disks(generated_image.image, (radius / 2.0,\n radius * 2.0),\n maximum=1)\n\n y_coord, x_coord = generated_image.coords[0]\n if len(fits) != 1: # Particle number mismatch\n r, y, x = np.nan, np.nan, np.nan\n else:\n r, y, x = fits[['r', 'y', 'x']].values[0]\n\n return (r, y, x), (radius, y_coord, x_coord)", "title": "" }, { "docid": "8177927cad3ac11601fe734494ad42ae", "score": "0.44595575", "text": "def test_allow_nan_durations():\n raw = RawArray(\n data=np.empty([2, 10], dtype=np.float64),\n info=create_info(ch_names=2, sfreq=1.0),\n first_samp=0,\n )\n raw.set_meas_date(0)\n\n ons = [1, 2.0, 15.0, 17.0]\n dus = [np.nan, 1.0, 0.5, np.nan]\n descriptions = [\"A\"] * 4\n onsets = np.asarray(ons, dtype=float)\n durations = np.asarray(dus, dtype=float)\n annot = mne.Annotations(onset=onsets, duration=durations, description=descriptions)\n with pytest.warns(RuntimeWarning, match=\"Omitted 2 annotation\"):\n raw.set_annotations(annot)", "title": "" }, { "docid": "e3284a4710e7d238f2d5b29fd93f879b", "score": "0.4457433", "text": "def end_episode(self):\n super().end_episode()\n \n total_num_rooms = self.env.total_num_rooms # this is taken from the NH bottom line (R: %d)\n \n non_secret_map_positions = dfs(start=self.env.nh.initial_player_pos,\n passable_func=lambda x, y: self.secret_grid[x][y] == 0 and self.env.nh.basemap_char(x, y) in PASSABLE_CHARS,\n neighbor_func=lambda x, y, diag: self.env.nh.get_neighboring_positions(x, y, diag),\n min_neighbors=0, diag=True)\n \n total_nonsecret_rooms = 0\n num_discovered_secret_rooms = 0\n\n for room in self.visited_room_pos:\n if room in non_secret_map_positions:\n verboseprint(\"Room\",room,\"in non secret map positions\")\n total_nonsecret_rooms += 1\n else:\n verboseprint(\"Room\",room,\"NOT in non secret map positions\")\n num_discovered_secret_rooms += 1\n \n total_secret_rooms = total_num_rooms - total_nonsecret_rooms\n percent_secret_discovered = (num_discovered_secret_rooms / total_secret_rooms) if total_secret_rooms > 0 else 1\n \n num_discovered_sdoors_scorrs = 0\n for i in range(ROWNO):\n for j in range(COLNO):\n if self.secret_grid[i][j] == 1:\n num_discovered_sdoors_scorrs += 1\n 
percent_secret_sdoors_scorrs_explored = (num_discovered_sdoors_scorrs/self.env.total_sdoors_scorrs) if self.env.total_sdoors_scorrs > 0 else 1\n \n verboseprint(\"Num secret rooms discovered:\", num_discovered_secret_rooms, \"and total:\", total_secret_rooms)\n verboseprint(\"Num secret spots discovered:\", num_discovered_sdoors_scorrs, \"and total:\", self.env.total_sdoors_scorrs)\n \n self.env.num_discovered_secret_rooms = num_discovered_secret_rooms\n self.env.total_secret_rooms = total_secret_rooms\n self.env.num_discovered_sdoors_scorrs = num_discovered_sdoors_scorrs", "title": "" }, { "docid": "05c730d5822627d105250cfcdd227317", "score": "0.44569728", "text": "def _alert_if_anomalies(anomalies: anomalies_pb2.Anomalies, output_path: str):\n\n if list(anomalies.anomaly_info):\n logging.warn(\"Anomalies detected. The anomaly report uploaded to: {}\".format(output_path))\n else:\n logging.info(\"No anomalies detected.\")\n \n return anomalies", "title": "" }, { "docid": "2539e41cfa26c2eefe707645d1096aa2", "score": "0.4455192", "text": "def com_google_fonts_check_outline_alignment_miss(ttFont, outlines_dict, config):\n alignments = {\n \"baseline\": 0,\n \"x-height\": ttFont[\"OS/2\"].sxHeight,\n \"cap-height\": ttFont[\"OS/2\"].sCapHeight,\n \"ascender\": ttFont[\"OS/2\"].sTypoAscender,\n \"descender\": ttFont[\"OS/2\"].sTypoDescender,\n }\n warnings = []\n for glyph, outlines in outlines_dict.items():\n codepoint, glyphname = glyph\n for p in outlines:\n for node in p.asNodelist():\n if node.type == \"offcurve\":\n continue\n for line, yExpected in alignments.items():\n # skip x-height check for caps\n if line == \"x-height\" and (\n len(glyphname) > 1 or glyphname[0].isupper()\n ):\n continue\n if close_but_not_on(yExpected, node.y, ALIGNMENT_MISS_EPSILON):\n warnings.append(f\"{glyphname} (U+{codepoint:04X}):\"\n f\" X={node.x},Y={node.y}\"\n f\" (should be at {line} {yExpected}?)\")\n if len(warnings) > FALSE_POSITIVE_CUTOFF:\n # Let's not waste time.\n yield PASS, (\"So many Y-coordinates of points were close to\"\n \" boundaries that this was probably by design.\")\n return\n\n if warnings:\n formatted_list = \"\\t* \" + pretty_print_list(config,\n warnings,\n sep=\"\\n\\t* \")\n yield WARN,\\\n Message(\"found-misalignments\",\n f\"The following glyphs have on-curve points which\"\n f\" have potentially incorrect y coordinates:\\n\"\n f\"{formatted_list}\")\n else:\n yield PASS, \"Y-coordinates of points fell on appropriate boundaries.\"", "title": "" } ]
e63245e828429092ba71c86e1d1ee3ad
Get histogram counts per each processed CCD frame as a 2D array. Use setHistBins to configure histogram bins. Bin edges and centers are available in self.hedges and self.hcenters. Return a 2D array of histogram counts per each frame.
[ { "docid": "4f01216566be8d30ec6cbf2945acbf80", "score": "0.6916845", "text": "def ahistogram(self):\n import functools\n from py15sacla.utils import eqbinhistogram\n self._ensureHistBinsExist()\n lo, hi, bins = self.chistbins\n fnc = functools.partial(eqbinhistogram, bins=bins, range=(lo, hi))\n histcounts = map(fnc, self.generate())\n rv = numpy.array([c for c, e in histcounts])\n return rv", "title": "" } ]
[ { "docid": "4bcc28038117f26ba6811756bf804fdb", "score": "0.66184413", "text": "def ToHistogram(self):\n return np.array([bin[3] for bin in self.bins])", "title": "" }, { "docid": "9e7fd3f24c8b8f0b511422fe75da5fec", "score": "0.65742964", "text": "def histogram(self):\n rv = self.ahistogram().sum(axis=0)\n return rv", "title": "" }, { "docid": "5e04f9fe9c92471d67c12e578ed9493e", "score": "0.6559818", "text": "def get_all_histograms(self):\n out = np.zeros([self.nstreams, 256])\n for stream in range(self.nstreams/2):\n x, out[stream,:] = self.get_input_histogram(stream)\n return x, out", "title": "" }, { "docid": "2a8977d7f77550b2fcc6022af663f20f", "score": "0.6473845", "text": "def get_num_bins(self):\n return _wxgui_swig.histo_sink_f_sptr_get_num_bins(self)", "title": "" }, { "docid": "e1b769372f0703e068cc03fe8fc06f06", "score": "0.64644384", "text": "def histogram(self):\n out = CentrallyBin([c for c, v in self.bins], self.quantity, Count(), self.nanflow.copy())\n out.entries = self.entries\n for i, v in self.bins:\n out.bins[i] = Count.ed(v.entries)\n return out.specialize()", "title": "" }, { "docid": "bc429a6db57bfabf2c2b80969146aa99", "score": "0.6448279", "text": "def getHistogramCount(X, Y, xmin, xmax, ymin, ymax, bins, Hp, Hn):\r\n \r\n row = (np.round(((bins-1)*(X-xmin)/(xmax-xmin)))).astype('int32')\r\n col = (np.round(((bins-1)*(Y-ymin)/(ymax-ymin)))).astype('int32') \r\n counts = Hp[row, col], Hn[row, col]\r\n return counts", "title": "" }, { "docid": "ffc91638877a9192f19b319f93bfb5b6", "score": "0.64006627", "text": "def histogram(self):\n\n if self.empty:\n return\n\n batch = self.batch\n boxed = self.boxed\n stages = self.stages\n rotated = self.stages[\"rotated\"]\n self.histX = cv2.reduce(rotated, 0, cv2.REDUCE_AVG).reshape(-1)\n self.histY = cv2.reduce(rotated, 1, cv2.REDUCE_AVG).reshape(-1)\n\n if not batch or boxed:\n normalizedC = stages[\"normalizedC\"]\n if not batch:\n histogram = normalizedC.copy()\n\n for (hist, vert) in ((self.histY, True), (self.histX, False)):\n for (i, val) in enumerate(self.histY):\n color = (int(val), int(2 * val), int(val))\n index = (0, i) if vert else (i, 0)\n value = (val, i) if vert else (i, val)\n cv2.line(histogram, index, value, color, 1)\n stages[\"histogram\"] = histogram", "title": "" }, { "docid": "39bfe40f225580fc6ad00976aacb1e24", "score": "0.6382733", "text": "def hist_n_per_bin(self):\n return copy.deepcopy(self._hist_n_per_bin)", "title": "" }, { "docid": "312b043ffab7f0a62866e0f8e6450b88", "score": "0.63606656", "text": "def get_num_bins(self):\n return _wxgui_swig.histo_sink_f_get_num_bins(self)", "title": "" }, { "docid": "da0292ece20b6e24c5f47c2e4436e915", "score": "0.63437766", "text": "def histogramize(self, bins):\n cdf_vals = reshape_to_pdf_size(self.cdf(bins), -1)\n bin_vals = cdf_vals[:,1:] - cdf_vals[:,0:-1]\n return (bins, reshape_to_pdf_shape(bin_vals, self._shape, bins.size-1))", "title": "" }, { "docid": "da0292ece20b6e24c5f47c2e4436e915", "score": "0.63437766", "text": "def histogramize(self, bins):\n cdf_vals = reshape_to_pdf_size(self.cdf(bins), -1)\n bin_vals = cdf_vals[:,1:] - cdf_vals[:,0:-1]\n return (bins, reshape_to_pdf_shape(bin_vals, self._shape, bins.size-1))", "title": "" }, { "docid": "9f7fc1ae4fd8a2e58bdfff1621cb7751", "score": "0.62537414", "text": "def hist(self, idx, tod):\n\n map = np.zeros((self.nsb, self.nfreq, self.nbin), dtype = ctypes.c_float) # Array to be filled with TOD values at each pixel.\n nhit = np.zeros((self.nsb, self.nfreq, self.nbin), dtype = ctypes.c_int) # Array to be 
filled with hits at each pixel.\n\n maputilslib = ctypes.cdll.LoadLibrary(\"histutils.so.1\") # Load shared C utils library.\n \n float32_array3 = np.ctypeslib.ndpointer(dtype=ctypes.c_float, ndim=3, flags=\"contiguous\") # 3D array 32-bit float pointer object.\n #float64_array3 = np.ctypeslib.ndpointer(dtype=ctypes.c_double, ndim=3, flags=\"contiguous\") # 3D array 64-bit float pointer object.\n int32_array3 = np.ctypeslib.ndpointer(dtype=ctypes.c_int, ndim=3, flags=\"contiguous\") # 3D array 32-bit integer pointer object.\n int32_array1 = np.ctypeslib.ndpointer(dtype=ctypes.c_int, ndim=1, flags=\"contiguous\") # 1D array 32-bit integer pointer object.\n\n maputilslib.histogram.argtypes = [int32_array1, float32_array3, float32_array3, int32_array3, # Specifying input types for C library function.\n ctypes.c_int, ctypes.c_int, ctypes.c_int]\n maputilslib.histogram(idx, tod, map, nhit, self.nsb, self.nfreq, self.nsamp, self.nbin) # Filling map and nhit by call-by-pointer.\n \n return map, nhit", "title": "" }, { "docid": "f57624a32064109e4bf614b27b8a9cb6", "score": "0.6225647", "text": "def compute_histograms(frame, hist_func=frame_to_hs_hist, grid_size=2, bins=[180, 180]):\n \n # Convert image to HSV\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \n # Initialize array with main histogram\n histograms = [hist_func(hsv_frame, bins)]\n \n # Split frame into grids and calculate histograms\n # TODO: why save these at all and not just 'generate' and check them only for the best matches?\n if grid_size and grid_size > 1:\n for sub_frame in split_frame(hsv_frame, grid_size):\n histograms.append(hist_func(sub_frame, bins))\n\n return histograms", "title": "" }, { "docid": "c20e468de35952a17efb4db56db1addb", "score": "0.61758035", "text": "def _histograms(self, cell_shape: (int, int)) -> np.ndarray:\n # Raise a warning if the image can't be evenly divided and some cells have to be smaller\n celled_shape = self.matrix.shape[0] / cell_shape[0], self.matrix.shape[1] / cell_shape[1]\n if not celled_shape[0].is_integer() or not celled_shape[0].is_integer():\n warnings.warn(f\"The cell's shape {cell_shape} cannot perfectly tile the image's shape {self.matrix.shape}\",\n stacklevel=3)\n celled_shape = tuple(int(math.ceil(dim)) for dim in celled_shape)\n # List that every (n, n+1) couple represents the indices range of a cell\n rows = [None, *range(cell_shape[0], self.matrix.shape[0], cell_shape[0]), None] # [None 16 32 ... 
496 None]\n cols = [None, *range(cell_shape[1], self.matrix.shape[1], cell_shape[1]), None]\n histograms = np.array([[\n np.histogram(self.matrix[rows[i]:rows[i + 1], cols[j]:cols[j + 1]],\n bins=self.hist_bins, range=(0, self.hist_bins), density=True)[0]\n for j in range(0, celled_shape[1])]\n for i in range(0, celled_shape[0])])\n return histograms", "title": "" }, { "docid": "c4279e448bc6746de538be6e7161bcdb", "score": "0.6117699", "text": "def get_age_class_histogram_data(self):\n\n hist_data = [0]*15\n\n for i in range(self.size[0]):\n for j in range(self.size[1]):\n\n #convert age into bin index\n bin = math.floor((self.get_age([i,j]) / 10))\n\n #cap at index 14\n bin = min(bin,14)\n\n #count it\n hist_data[ bin ] += 1\n\n return hist_data", "title": "" }, { "docid": "bd7366730ab2089dd2d2d86d07dedded", "score": "0.61151487", "text": "def calc_hist(data):\n counts, bins = exposure.histogram(data)\n\n return counts, bins", "title": "" }, { "docid": "243fd51cda230155cd2262464aa5d2e9", "score": "0.6087128", "text": "def image_counts(self):\n return self.array_from_electrons_per_second_to_counts(self.data)", "title": "" }, { "docid": "6e60e22258653b41b4886598552d79b8", "score": "0.60773844", "text": "def HOG_hists(X, dic, n_bins):\n # build all histograms\n hists = np.zeros((X.shape[0], dic.shape[0]))\n for i in range(0, X.shape[0]):\n # retrieve the first histogram\n im_i = build_image(X[i,:])\n hists[i,:] = HOG_hist(im_i, dic, n_bins)\n\n return hists", "title": "" }, { "docid": "67d7e5376a371e4cc2ce4d2f1b601183", "score": "0.6024093", "text": "def _ComputeHistograms(self, start, stop):\n if stop <= start:\n return []\n\n (width, height) = self._dimension\n if self._is_dual:\n width = width / 2\n\n # Modify the memory offset to match the field.\n PAGE_SIZE = 4096\n PIXEL_LEN = 3\n field_size = width * height * PIXEL_LEN\n field_size = ((field_size - 1) / PAGE_SIZE + 1) * PAGE_SIZE\n offset_args = ['-g', self._GRID_NUM, '-s', self._GRID_SAMPLE_NUM]\n # The histogram is computed by sampled pixels. 
Getting one band is enough\n # even if it is in dual pixel mode.\n offset_addr = fpga.VideoDumper.GetPixelDumpArgs(self._input_id, False)[1]\n\n max_limit = fpga.VideoDumper.GetMaxFieldLimit(width, height)\n for i in xrange(start, stop):\n offset_args += ['-a', offset_addr + field_size * (i % max_limit)]\n\n result = system_tools.SystemTools.Output(\n 'histogram', width, height, *offset_args)\n # Normalize the histogram by dividing the maximum.\n return [[float(v) / self._GRID_SAMPLE_NUM / self._GRID_SAMPLE_NUM\n for v in l.split()]\n for l in result.splitlines()]", "title": "" }, { "docid": "c56dc2df353f9000629580c67fbb16c8", "score": "0.59929955", "text": "def compute_histograms(self, descriptors, vocabulary, k):\n # Histograms generation array of (images count, number of words)\n histograms = np.zeros((len(self.images_path), k))\n for i in range(len(self.images_path)):\n if isinstance(descriptors[i], np.ndarray):\n # Assign codes from a code book to observations.\n words, distance = vq(descriptors[i], vocabulary)\n for w in words:\n histograms[i][w] += 1\n return histograms", "title": "" }, { "docid": "bc4e45f420b4e339f6929d9d6ef99cbb", "score": "0.5987241", "text": "def histogram_helper(x, cmin, cmax, bins=100):\n if x.ndim == 1:\n y, _ = np.histogram(x, bins=bins, range=[cmin, cmax])\n return y.astype(float)\n else:\n y = np.empty((len(x), bins), float)\n for i in range(len(x)):\n y[i], _ = np.histogram(x[i], bins=bins, range=[cmin, cmax])\n return y", "title": "" }, { "docid": "cecefdf554e5241ae0077f289bf45298", "score": "0.5982804", "text": "def find_histogram(self,clt):\n\n numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)\n\n (hist, _) = np.histogram(clt.labels_, bins=numLabels)\n\n hist = hist.astype(\"float\")\n\n hist /= hist.sum()\n\n return hist", "title": "" }, { "docid": "4fca12f61a0d748376aa05400db07c95", "score": "0.5942056", "text": "def cal_hist(channels):\n return [single_channel_hist(channel) for channel in channels]", "title": "" }, { "docid": "6866a7f349cff6ba2b50d9643298193d", "score": "0.5938347", "text": "def hist(self, bins=10, ax=None):\n return self._handy.hist(self._colnames, bins, ax)", "title": "" }, { "docid": "3d8869b6296e0c10d4c4d2149e5c2c77", "score": "0.5867579", "text": "def getHist(self, img: 'Image') -> 'histogram':\n px = img.load()\n w, h = img.size\n hst = [0] * 256\n\n for i in range(w):\n for j in range(h):\n if type(px[i, j]) != int:\n r, g, b = px[i, j]\n res = (r + g + b) // 3\n else:\n res = px[i, j]\n hst[res] += 1\n\n return hst", "title": "" }, { "docid": "b1b286e0285e2e1cb5a360f621c99934", "score": "0.58632475", "text": "def calc_histogram(chain, nbins):\n # Histogram\n x_range = chain.max() - chain.min()\n bin_spacing = np.round(x_range,-int(np.floor(np.log10(abs(x_range))))) / nbins\n bin_start = np.floor(chain.min()*(1/bin_spacing)) * bin_spacing\n bin_end = np.ceil(chain.max() * (1/bin_spacing)) * bin_spacing\n bins = np.arange(bin_start, bin_end + bin_spacing, bin_spacing)\n hist, bins = np.histogram(chain, bins=bins)\n return hist, bins, bin_spacing", "title": "" }, { "docid": "c520961e15b65a104e4d439a263600ea", "score": "0.5853736", "text": "def bins(self):\n return self._bins", "title": "" }, { "docid": "94f00ac94d2c948c9f7ea5ca538c37a8", "score": "0.58214724", "text": "def _compute_histogram_from_mask(mask, image, num_bins, range):\n\n # Apply binary mask to your array, you will get array with shape (N, C)\n region = image[mask]\n\n hist, _ = np.histogram(region, bins=num_bins, range=range)\n return hist", "title": "" 
}, { "docid": "b975f0553dc2f760c95d6b7a57d839ac", "score": "0.5793443", "text": "def histogram(dist):\n network_size = 1902 # Chengdu network\n hist = np.zeros(network_size)\n x_hist = np.arange(network_size)\n for i in dist: hist[i-1]+=1\n return x_hist, hist", "title": "" }, { "docid": "2201403effcb0da83e2842df66e81bb3", "score": "0.574582", "text": "def compute_histogram(mask, image):\r\n # Apply binary mask to your array, you will get array with shape (N, C)\r\n region = image[mask]\r\n\r\n red = np.histogram(region[..., 0].ravel(), bins=256, range=[0, 256])\r\n green = np.histogram(region[..., 1].ravel(), bins=256, range=[0, 256])\r\n blue = np.histogram(region[..., 2].ravel(), bins=256, range=[0, 256])\r\n\r\n return [red, green, blue]", "title": "" }, { "docid": "6b262366624cd3c0c027135b5ee9bb2f", "score": "0.5742153", "text": "def _calc_spark_categorical_hist_and_stats(self, column_df_list):\n max_buckets = 50\n value_counts = []\n for column_df in column_df_list:\n if self.config.spark_cache_dataframe_column:\n column_df_cached = True\n column_df.cache()\n else:\n column_df_cached = False\n try:\n column_name = column_df.schema.names[0]\n self._calc_categorical_column_stats(column_df, column_name)\n if column_name not in self._histogram_column_names:\n continue\n column_value_counts = (\n column_df.groupby(column_name)\n .count()\n .orderBy(desc(\"count\"))\n .withColumn(\"column_name\", lit(column_name))\n .limit(max_buckets - 1)\n )\n column_value_counts = column_value_counts.collect()\n value_counts.extend(column_value_counts)\n finally:\n if column_df_cached:\n column_df.unpersist()\n return value_counts", "title": "" }, { "docid": "f125217d19f659abc4e705b37989d262", "score": "0.5709805", "text": "def color_hist(img, mask):\n histograms = np.array([])\n for ch in range(3):\n channel = img[:,:,ch]\n ch_histogram,_ = np.histogram(channel[np.where(mask>0)],\n np.arange(0, 255, 255/11)) / np.sum(mask.ravel())\n histograms = np.hstack((histograms, ch_histogram))\n return histograms", "title": "" }, { "docid": "fe5d9d6cceb24ddd5d5f0c3e71eb60de", "score": "0.5706466", "text": "def hist(self, ra, dec):\n return np.array([np.sum(f.coord_in_field(ra, dec)) \n for f in self.fields])", "title": "" }, { "docid": "18b0913747e651b4dd76b024dfa304c9", "score": "0.57056445", "text": "def compute_img_hist(img):\r\n blocks = view_as_blocks(img.astype(np.float32)/255.0, block_shape=(20,20))\r\n hists = [np.histogram(block, bins=np.linspace(0,1,10))[0] for block in blocks]\r\n return np.concatenate(hists)", "title": "" }, { "docid": "532149aec9b3a3abbe574be4aae4e783", "score": "0.56878775", "text": "def color_histogram(img, bins=64):\n if img.ndim == 2:\n img = np.stack([img, img, img], -1) # Gray to RGB\n if img.max() > 1:\n img = img / 255.0\n img = (img * (bins - 1)).astype(int)\n rhist = np.bincount(img[:, :, 0].ravel(), minlength=bins)\n ghist = np.bincount(img[:, :, 1].ravel(), minlength=bins)\n bhist = np.bincount(img[:, :, 2].ravel(), minlength=bins)\n hist = np.stack([rhist, ghist, bhist], 0)\n hist = hist / (img.shape[0] * img.shape[1])\n return hist", "title": "" }, { "docid": "f3336cfb07f565b2cdb7e0fbdd2f17cb", "score": "0.56876516", "text": "def extract_1d_hist(samples, nbins=100, density=True):\n\n # Obtain histogram\n count, bins = np.histogram(trace, bins=nbins, density=density)\n\n # Make the bins into the bin centers, not the edges\n x = (bins[:-1] + bins[1:]) / 2.0\n\n return count, x", "title": "" }, { "docid": "7d503fcba266a06f9e864cb07186e938", "score": "0.5681561", "text": 
"def calculate_histogram(magnitudes, angles, bin_count=9, is_signed=False):\n cell_size_x, cell_size_y = magnitudes.shape\n orientation_size = 360 if is_signed else 180\n bin_width = orientation_size // bin_count\n cell_histogram = np.zeros(bin_count)\n for row in range(cell_size_x):\n for col in range(cell_size_y):\n orientation = angles[row][col]\n histogram_bin = int(orientation // bin_width)\n cell_histogram[histogram_bin] += magnitudes[row][col]\n return cell_histogram / np.product(magnitudes.shape)", "title": "" }, { "docid": "61f7d60e5352a22ae60b2db1d7d05027", "score": "0.5645347", "text": "def calculate_histograms(df):\n histograms = {\"total_count\": len(df.index), \"features\": {}}\n for colname in df:\n histogram = calculate_single_histogram(df[colname])\n histograms[\"features\"][\n str(colname)\n ] = histogram # TODO: directly store non-str column names\n\n return histograms", "title": "" }, { "docid": "b02d45e92e33e31bc8f38a3812f12b2a", "score": "0.5638103", "text": "def data_for_histogram(self, request):\n # Usage: chrom = indicate the chromosome(s) of interest ('all' or any chromosome),\n # region_width = indicate the size of the bins for which to count hits should be passed under the form,\n # threshold = indicate the type of threshold to look at (FDR, Bonferroni, permutation or none, default='FDR')\n # region = indicate a specific window in which to aggregate for, default = ('','') looks at entire chromosome\n # maf = indicate a minimum maf (default=0)\n # mac = indicate a minimum mac (default=0)\n # Get bin size\n filters = dict()\n filters['region_width'] = max(1,int(request.query_params.get('region_width')))\n recompute = request.query_params.getlist('recompute')\n if recompute != []:\n filters['chrom'] = request.query_params.get('chromosome')\n region = request.query_params.getlist('region')\n filters['region'] = (int(region[0]), int(region[1]))\n # get the rest of the data\n results = elastic.get_gwas_overview_bins_data(filters)\n return Response(results)", "title": "" }, { "docid": "c1276d6a4e75e8ce9c84e9520917bb82", "score": "0.56326735", "text": "def get_histogram(self, input, sum_cores=True):\n self.set_input(input)\n time.sleep(0.1)\n v = np.array(struct.unpack('>512H', self.read('bit_stats_histogram_output', 512*2)))\n a = v[0:256]\n b = v[256:512]\n a = np.roll(a, 128) # roll so that array counts -128, -127, ..., 0, ..., 126, 127\n b = np.roll(b, 128) # roll so that array counts -128, -127, ..., 0, ..., 126, 127\n vals = np.arange(-128,128)\n if sum_cores:\n return vals, a+b\n else:\n return vals, a, b", "title": "" }, { "docid": "5513be234ffc6093ad5637325ef9e431", "score": "0.56070524", "text": "def find_histogram(clt):\n numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)\n (hist, _) = np.histogram(clt.labels_, bins=numLabels)\n\n hist = hist.astype(\"float\")\n hist /= hist.sum()\n\n return hist", "title": "" }, { "docid": "967b72f01563d6dd95ad1cf0dbada164", "score": "0.5602975", "text": "def get_histogram(self, X, y, channel=0):\n classes = self.classes.copy()\n if self.background:\n classes.append(\"Background\")\n X = X[:,channel].astype('int16')\n try:\n bins = np.linspace(np.amin(X), np.amax(X), np.amax(X)-np.amin(X))\n except:\n bins = np.linspace(0, 100, 1)\n pyplot.title(\"Channel \"+str(channel))\n for key, value in enumerate(classes):\n _x = X[y[:,key] == 1]\n pyplot.hist(_x, bins, alpha=0.5, density = True, label=value, log=True)\n pyplot.legend(loc='upper right')\n pyplot.ylabel('Probability')\n pyplot.xlabel('Intensity')\n pyplot.show()", 
"title": "" }, { "docid": "f09da651b9262e25e7eb8f46b10296f4", "score": "0.55917436", "text": "def GetHistograms(self, start, stop):\n return [self._saved_histograms[i : i + self._HISTOGRAM_SIZE]\n for i in xrange(start * self._HISTOGRAM_SIZE,\n stop * self._HISTOGRAM_SIZE,\n self._HISTOGRAM_SIZE)]", "title": "" }, { "docid": "5b3a92ccfdf3dc242b58a03c1f36142b", "score": "0.5589602", "text": "def __call__(self, features):\n row_number, col_number = features.shape\n histogram = np.zeros((self._intervals_number+2,col_number), dtype=int)\n hist_min = np.min(features, axis=0)\n hist_max = np.max(features, axis=0)\n hist_half_step = 1./self._intervals_number\n hist_min = hist_min - hist_half_step\n hist_max = hist_max + hist_half_step\n histogram[0,:] = np.floor(hist_min)\n histogram[1,:] = np.ceil(hist_max)\n for j in range(col_number):\n ibins = np.linspace(start=histogram[0,j],\n stop=histogram[1,j],\n num=self._intervals_number+1,\n endpoint=True)\n for i in range(self._intervals_number):\n if i == 0:\n histogram[i+2,j] += np.sum(np.logical_and(\n features[:,j]>=ibins[i],\n features[:,j]<=ibins[i+1]))\n else:\n histogram[i+2,j] += np.sum(np.logical_and(\n features[:,j]>ibins[i],\n features[:,j]<=ibins[i+1]))\n return histogram", "title": "" }, { "docid": "c38156d7110bfd990b158732b3afb15b", "score": "0.55843127", "text": "def _cooccurrence_matrix_dir(values, bins, di, dj):\n m, n = values.shape\n codes = values[:m - di, :n - dj] + bins * values[di:, dj:]\n entries = np.bincount(codes.ravel(), minlength=bins ** 2)\n return entries.reshape(bins, bins)", "title": "" }, { "docid": "b6abc134f20a244636bb05c70cecd396", "score": "0.5577731", "text": "def histogram(self, bin_scale=10, pix_x=3448, pix_y=2574, clear_mem=False): \n x=self.rf[0,:]\n y=self.rf[2,:]\n\n x=x[~np.isnan(x)]\n y=y[~np.isnan(y)]\n\n self.H, self.xedges, self.yedges = np.histogram2d(x, y, \n bins=[pix_x//bin_scale, pix_y//bin_scale], \n range=[[-self.Lx/2, self.Lx/2],[-self.Ly/2,self.Ly/2]])\n self.H = self.H.T\n\n # Optional - clear ray attributes to save memory\n if(clear_mem):\n self.clear_rays()", "title": "" }, { "docid": "54fe8bae76ae6925ba6407e7a6a97099", "score": "0.5555838", "text": "def _bincount_2d(self, rhos):\n # 2-dimensional bincount\n num_rows = tf.cast(tf.shape(rhos)[0], dtype=self.threshold.dtype)\n\n # round and typecast rho values\n rhos = tf.cast(tf.math.round(rhos), dtype=self.threshold.dtype)\n\n # convert the values in each row to a consecutive range of ids that will not\n # overlap with the other rows.\n row_values = rhos - tf.reduce_min(rhos) + \\\n (tf.expand_dims(tf.range(num_rows, dtype=self.threshold.dtype), axis=1)) * \\\n (tf.reduce_max(rhos) - tf.reduce_min(rhos) + 1)\n\n # flatten the tensor\n values_flat = tf.reshape(row_values, [-1])\n\n # bincount\n bins_length = tf.multiply(num_rows, tf.reduce_max(rhos) - tf.reduce_min(rhos) + 1)\n bins = tf.reshape(\n tf.math.bincount(values_flat, minlength=bins_length, maxlength=bins_length, dtype=self.threshold.dtype),\n [num_rows, -1]\n )\n\n return rhos, bins", "title": "" }, { "docid": "45421a18a776b3f8d438fd35dd95a4bb", "score": "0.55548126", "text": "def color_hist(img, nbins=32):\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:, :, 0], bins=nbins)\n channel2_hist = np.histogram(img[:, :, 1], bins=nbins)\n channel3_hist = np.histogram(img[:, :, 2], bins=nbins)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], 
channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features", "title": "" }, { "docid": "b4c659efc63986fd7573b7920f52c217", "score": "0.5550383", "text": "def n_bins(self):\n if self.isempty:\n return 0\n return utils.PrettyInt(len(self.bin_centers))", "title": "" }, { "docid": "7cd5a29de86cb0ebbd779eb297c64808", "score": "0.5538113", "text": "def calculate_counts(self, hLength, marginals=True):\n data = self.data\n alphabet = self.alphabet\n\n if hLength > len(data) - self.fLength:\n msg = \"`hLength` is too large.\"\n raise Exception(msg)\n\n if marginals:\n hLengths = {L for L in range(hLength + 1)}\n # Discard everything from before\n self.counts = None\n self.histories = []\n self.index = {}\n self.hLengths = set([])\n else:\n hLengths = {hLength}\n self.hLengths.update(hLengths)\n\n kwargs = {\n 'data': data,\n 'hLength': hLength,\n 'fLength': self.fLength,\n 'marginals': marginals,\n 'alphabet': alphabet\n }\n out = dit.inference.counts_from_data(**kwargs)\n histories, cCounts, hCounts, alphabet = out\n\n if self.counts is not None:\n self.counts = np.vstack([self.counts, cCounts])\n else:\n self.counts = cCounts\n\n # Assign alphabet in case it was None was passed in.\n self.alphabet = alphabet\n\n # Extend the list of histories and the dictionary mapping them to rows.\n prev = len(self.histories)\n row_ids = range(prev, prev + len(histories))\n self.index.update(zip(histories, row_ids))\n self.histories.extend(histories)", "title": "" }, { "docid": "e8b1cd20d4141d4d0520819bda65dc3f", "score": "0.5529651", "text": "def histories(self):\n return self.__histories", "title": "" }, { "docid": "1833af3e1aeb77e29e99baa6bd65a80b", "score": "0.5522894", "text": "def histogram(x):\n if not x.dtype=='uintp':\n raise ValueError('input array should have uintp data type')\n\n cdef np.npy_uintp xv\n cdef np.npy_uintp nbins = <np.npy_uintp>x.max() + 1\n cdef np.flatiter it = x.flat\n cdef np.ndarray h = np.zeros(nbins, dtype='uintp')\n cdef np.npy_uintp* hv\n\n while np.PyArray_ITER_NOTDONE(it):\n xv = (<np.npy_uintp*>np.PyArray_ITER_DATA(it))[0]\n hv = <np.npy_uintp*>np.PyArray_DATA(h) + xv\n hv[0] += 1\n np.PyArray_ITER_NEXT(it)\n \n return h", "title": "" }, { "docid": "cd754180771f617122a3f8e7b4306350", "score": "0.5521748", "text": "def color_hist(img, nbins=32):\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:, :, 0], bins=nbins)\n channel2_hist = np.histogram(img[:, :, 1], bins=nbins)\n channel3_hist = np.histogram(img[:, :, 2], bins=nbins)\n\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n\n # Return the individual histograms, bin_centers and feature vector\n return hist_features", "title": "" }, { "docid": "53a178c0dd6f8434644679e4008a3ba4", "score": "0.5516417", "text": "def update_histogram_dimensions(self):\n self.number_histograms = int((self.bin_end - self.bin_start) / self.bin_width)\n # spectra_bins dataset\n payload = '{\"dims\": [%s], \"chunks\": [1, %s]}' % \\\n (self.number_histograms, self.number_histograms)\n command = \"config/hdf/dataset/\" + \"spectra_bins\"\n request = ApiAdapterRequest(str(payload), content_type=\"application/json\")\n self.adapters[\"fp\"].put(command, request)\n\n # pixel_spectra dataset\n payload = '{\"dims\": [%s, %s], \"chunks\": [1, %s, %s]}' % \\\n (self.pixels, self.number_histograms, self.pixels, self.number_histograms)\n command = 
\"config/hdf/dataset/\" + \"pixel_spectra\"\n request = ApiAdapterRequest(str(payload), content_type=\"application/json\")\n self.adapters[\"fp\"].put(command, request)\n\n # summed_spectra dataset\n payload = '{\"dims\": [%s], \"chunks\": [1, %s]}' % \\\n (self.number_histograms, self.number_histograms)\n command = \"config/hdf/dataset/\" + \"summed_spectra\"\n request = ApiAdapterRequest(str(payload), content_type=\"application/json\")\n self.adapters[\"fp\"].put(command, request)", "title": "" }, { "docid": "5c89186ed05a5d43d4bcd637cd026648", "score": "0.55034864", "text": "def calculate_hue_histograms(self, instance_ids, masks, image):\n\n dict_hue_histogram = {}\n num_bins = 36\n hue_range = [0,180] # for opencv\n for i in instance_ids:\n mask = masks[:, :, i]\n contour_indexes = np.where(mask == 1)\n b = image[:,:,0]\n g = image[:,:,1]\n r = image[:,:,2]\n b = b[contour_indexes].reshape(-1,1)\n g = g[contour_indexes].reshape(-1,1)\n r = r[contour_indexes].reshape(-1,1)\n bgr = np.stack((b, g, r), axis=-1)\n hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)\n hist, bins = np.histogram(hsv[:,:,0].ravel(),bins=num_bins, range=hue_range, density=True)\n dict_hue_histogram[i] = hist * ((hue_range[1] - hue_range[0]) / num_bins)\n return dict_hue_histogram", "title": "" }, { "docid": "1592b5f64ebb14d991cc67a36730f605", "score": "0.5494215", "text": "def n_bins(self):\n return len(self.bins)", "title": "" }, { "docid": "230d33e4cdcab8e0fc8349070a6002b5", "score": "0.54929197", "text": "def histeq(arr, nbr_bins=256):\n imhist, bins = np.histogram(arr.flatten(), nbr_bins, normed=True)\n cdf = imhist.cumsum() # cumulative distribution function\n cdf = 255 * cdf / cdf[-1] # normalized\n\n new_arr = np.interp(arr.flatten(), bins[:-1], cdf)\n new_arr = new_arr.reshape(arr.shape)\n return new_arr, cdf", "title": "" }, { "docid": "cfd8a41ff510d62d1c14819a19340d2a", "score": "0.548459", "text": "def getCountMat(tupleMat,nBinMat,sX,sY,NoDataCode): # Faster version\n \n dim1Edge = np.r_[0,np.arange(nBinMat[sX])+1.5]\n dim2Edge = np.r_[0,np.arange(nBinMat[sY])+1.5]\n dim3Edge = np.r_[0,np.arange(nBinMat[sY])+1.5]\n \n tupleMat = tupleMat.astype(float)\n tupleMat[tupleMat == NoDataCode] = np.nan\n tupleMat = np.delete(tupleMat, np.argwhere(np.isnan(np.sum(tupleMat,1))==True), axis=0)#delete nan\n \n #print(tupleMat)\n nCounts = np.shape(tupleMat)[0]\n \n C1,cedge = np.histogramdd(tupleMat,bins=(dim1Edge,dim2Edge,dim3Edge))\n C = np.moveaxis(C1, 2, 0)\n\n \n return C.astype(int), nCounts", "title": "" }, { "docid": "54d804f62c81772c53e78cfef841e4ad", "score": "0.5484171", "text": "def histogram(*digitized):\n if not digitized:\n raise ValueError(\n \"Must provide at least one 'digitized' field construct\"\n )\n\n f = digitized[0].copy()\n f.clear_properties()\n\n return f.bin(\"sample_size\", digitized=digitized)", "title": "" }, { "docid": "f67b09c180582cd97526b2fa00c5fea4", "score": "0.5483535", "text": "def histogram(x, cmin, cmax, bins=100, multiprocessing=False):\n if multiprocessing:\n num_workers = mp.cpu_count()\n with mp.Pool(processes=num_workers) as pool:\n func = functools.partial(\n histogram_helper, cmin=cmin, cmax=cmax, bins=bins)\n results = pool.map(func, x)\n return np.stack(results)\n else:\n return histogram_helper(x, cmin, cmax, bins)", "title": "" }, { "docid": "dc9083c3353aa1a19060a8a82aaf440b", "score": "0.54763937", "text": "def hist(self, ra, dec):\n pixels = hp.ang2pix(self.nside,(90 - dec) * _d2r, ra * _d2r, \n nest=self.nest)\n binned = np.histogram(pixels, 
bins=range(hp.nside2npix(self.nside) + 1))\n \n return binned[0]", "title": "" }, { "docid": "24e60a7779557a185935f2ef2b98345f", "score": "0.5474355", "text": "def get_histogram(self, prop, bin_edges=None):\r\n\t\tif isinstance(prop, (list, np.ndarray)):\r\n\t\t\tar = prop\r\n\t\telse:\r\n\t\t\ttry:\r\n\t\t\t\tar = getattr(self, prop)\r\n\t\t\texcept AttributeError:\r\n\t\t\t\tar = np.ma.array(self.get_prop_values(prop))\r\n\t\tif bin_edges is None:\r\n\t\t\tbin_edges = self.bins.get(prop, None)\r\n\t\tif bin_edges is None:\r\n\t\t\tbin_edges = 10\r\n\t\tcounts, bin_edges = np.histogram(ar[np.isfinite(ar)], bins=bin_edges)\r\n\t\treturn bin_edges, counts", "title": "" }, { "docid": "81ad820fd8c16adc9431e880d3694f9b", "score": "0.54535973", "text": "def color_hist(img, nbins=32, bins_range=(0, 256)):\n\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n\n # Return the individual histograms, bin_centers and feature vector\n return hist_features", "title": "" }, { "docid": "f7fa10355d2116cc5f61eaecd48007ab", "score": "0.5449193", "text": "def compute_1D_histogram(img, mask):\n gray = transform_color(img, \"Gray\")\n hist= cv2.calcHist([gray], [0], mask, [256], [0, 256]) # compute the histogram\n cv2.normalize(hist, hist, norm_type=cv2.NORM_L2, alpha=1.)\n\n return hist", "title": "" }, { "docid": "7be4593ae71610b2045edd2aa9aa8960", "score": "0.5438208", "text": "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "title": "" }, { "docid": "7f26556e7ec1514ec31745f54cb50763", "score": "0.54327536", "text": "def ffd(self):\n bins = []\n\n for item in self.boxes:\n bins = self.add_to_bins(item, bins)\n return bins", "title": "" }, { "docid": "ac2fd87d3a033f47b7de5c42c2479c9d", "score": "0.54159665", "text": "def nhits(self, idx):\n\n nhit = np.zeros((self.nsb, self.nfreq, self.nbin), dtype = ctypes.c_int) # Array to be filled with hits at each pixel.\n\n maputilslib = ctypes.cdll.LoadLibrary(\"histutils.so.1\") # Load shared C utils library.\n \n int32_array3 = np.ctypeslib.ndpointer(dtype=ctypes.c_int, ndim=3, flags=\"contiguous\") # 3D array 32-bit integer pointer object.\n int32_array1 = np.ctypeslib.ndpointer(dtype=ctypes.c_int, ndim=1, flags=\"contiguous\") # 1D array 32-bit integer pointer object.\n maputilslib.nhits.argtypes = [int32_array1, int32_array3, # Specifying input types for C library function.\n ctypes.c_int, ctypes.c_int, ctypes.c_int]\n maputilslib.nhits(idx, nhit, self.nsb, self.nfreq, self.nsamp, self.nbin) # Filling map and nhit by call-by-pointer.\n \n return nhit", "title": "" }, { "docid": "550c4f048395a7ff03c564c7da1c45af", "score": "0.5414532", "text": "def histogram(self):\n # setting plot parameters\n canvas = toyplot.Canvas(width=600, height=400) \n # making sure axes are cartesian coordinates and labelling axes\n axes = canvas.cartesian(xlabel= self.column_name, ylabel=\"Frequency\") \n # show axes ticks\n axes.x.ticks.show = True\n axes.y.ticks.show = True\n # Binning values using np.histogram\n self.mark = axes.bars(np.histogram(self.arr,range=(0,1), bins=20))\n return self.mark", "title": "" }, { "docid": 
"55daef210d6cf937c2ed2d8c3d22e8a4", "score": "0.54038525", "text": "def getHist(graphInput, outputDir):\n hist = graphInput.degree()\n print (\"-------------Original count------------------\")\n \n size = len(hist)\n print (hist)\n print (len(hist))\n print (\"-------------Outliers count------------------\")\n hist = list(removeOutliers(hist))\n print (hist)\n print (len(hist))\n print (\"-------------Histogram count------------------\")\n bins = np.arange(1, np.max(hist)+2)\t\n weightsNumpy = np.ones_like((hist))/float(len(hist))\n histogram, bin_edges = np.histogram(hist, bins=bins)\n pdf = histogram/size\n print(pdf)\n\t#print (bin_edges)\n\t#print (len(pdf))\n\t#print (len(bins))\n print (\"-------------Saving PDF------------------\")\n xaxis = range(1,len(pdf)+1)\n plt.bar(xaxis, pdf)\t\n output_file = outputDir + \"/PDF.png\"\n plt.savefig(output_file, bbox_inches='tight')\n print (\"-------------Preparing CDF------------------\")\n cdf = np.cumsum(pdf)\n print (cdf)\n plt.bar(xaxis, cdf)\n output_file = outputDir + \"/CDF.png\"\n plt.savefig(output_file, bbox_inches='tight')", "title": "" }, { "docid": "a41f79b04182bfcf36b5bc96106444c5", "score": "0.5401612", "text": "def get_colorhist(img, nbins=64, tcmap='HLS', bins_range=(0, 256), vis=False):\n assert img.max() > 1, \"Pixel value range is not (0, 255), it's {}\".format((img.min(), img.max()))\n\n img = color_xform(img, tcmap)\n ch1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n ch2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n ch3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n hist_features = np.concatenate((ch1_hist[0], ch2_hist[0], ch3_hist[0]))\n\n if vis is False:\n return hist_features\n\n bin_edges = ch1_hist[1]\n bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges)-1])/2\n\n assert len(hist_features) > 0, 'Got no color historgram for image'\n\n return ch1_hist, ch2_hist, ch3_hist, hist_features, bin_centers", "title": "" }, { "docid": "5afa4496db136eeb146bd744ce79f14f", "score": "0.53974646", "text": "def hls_hist(hls, rect):\r\n # loop on the channels\r\n for channel in range(3):\r\n values = []\r\n # loop over the rectangles\r\n n_rect = len(rect)\r\n for i in range(n_rect):\r\n temp_list = hls[rect[i][0]:rect[i][1], rect[i][2]:rect[i][3] , channel].ravel().tolist()\r\n values.append(temp_list)\r\n # concatenate all pixel values from each channel in one long list \r\n values = reduce(lambda x,y:x+y,values)\r\n if SHOW_COLOR_HISTOGRAMS:\r\n figure(3) ; subplot(3,1,channel+1) ; hist(values, bins = 100) ; title('pixel values histogram on HLS color space, channel: ' + str(channel) )\r\n show()\r\n return 0", "title": "" }, { "docid": "7d5626eae7f89eb7406d39c2552553f8", "score": "0.5395598", "text": "def create_temp_histograms(qsr_path, accu_path):\n\n global_codebook = np.array([])\n all_graphlets = np.array([])\n\n for d_cnt, dir_ in sorted(enumerate(os.listdir(qsr_path))):\n directory = os.path.join(qsr_path, dir_)\n print \"dir: \", directory, d_cnt, dir_\n\n for e_cnt, event_file in sorted(enumerate(os.listdir(directory))):\n e = utils.load_e(directory, event_file)\n\n if len(e.qsr_object_frame.qstag.graphlets.histogram) == 0:\n print \"removed:\", e_cnt, event_file\n continue\n e.temp_histogram = np.array([0] * (global_codebook.shape[0]))\n\n print \" \", d_cnt, e_cnt, e.uuid, \"len:\", len(e.qsr_object_frame.qstag.graphlets.histogram) #, len(e.qsr_joints_frame.qstag.graphlets.histogram)\n # feature_spaces = [e.qsr_object_frame.qstag.graphlets, 
e.qsr_joints_frame.qstag.graphlets]\n feature_spaces = [e.qsr_object_frame.qstag.graphlets]#, e.qsr_joints_frame.qstag.graphlets]\n\n for cnt, f in enumerate(feature_spaces):\n for freq, hash in zip(f.histogram, f.code_book):\n try:\n ind = np.where(global_codebook == hash)[0][0]\n e.temp_histogram[ind] += freq\n # If the hash doesn't exist in the global codebook yet - add it\n except IndexError:\n global_codebook = np.append(global_codebook, hash)\n e.temp_histogram = np.append(e.temp_histogram, freq)\n all_graphlets = np.append(all_graphlets, f.graphlets[hash])\n # print \"\\n>\", hash, f.graphlets[hash]\n\n # print global_codebook, e.temp_histogram, all_graphlets\n utils.save_event(e, \"Learning/Histograms\")\n\n print \"Code book shape:\", global_codebook.shape\n f = open(os.path.join(accu_path, \"code_book_all.p\"), \"w\")\n pickle.dump(global_codebook, f)\n f.close()\n\n f = open(os.path.join(accu_path, \"graphlets_all.p\"), \"w\")\n pickle.dump(all_graphlets, f)\n f.close()\n\n return global_codebook.shape[0]", "title": "" }, { "docid": "3ea2697f7a9748d2f9a25ff9fd23c93a", "score": "0.5379015", "text": "def counts(self, loader, num_bins=100, save_path=None, rewrite=False):\n print(\"\\nCounting in-distribution and out-distribution pixels\")\n if save_path is None:\n save_path = self.save_path_data\n if not os.path.exists(save_path) or rewrite:\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n print(\"Create directory\", save_dir)\n os.makedirs(save_dir)\n bins = np.linspace(start=0, stop=1, num=num_bins + 1)\n counts = {\"in\": np.zeros(num_bins, dtype=\"int64\"), \"out\": np.zeros(num_bins, dtype=\"int64\")}\n inf = inference(self.params, self.roots, loader, self.dataset.num_eval_classes)\n for i in range(len(loader)):\n probs, gt_train, _, _ = inf.probs_gt_load(i)\n ent = entropy(probs, axis=0) / np.log(self.dataset.num_eval_classes)\n counts[\"in\"] += np.histogram(ent[gt_train == self.dataset.train_id_in], bins=bins, density=False)[0]\n counts[\"out\"] += np.histogram(ent[gt_train == self.dataset.train_id_out], bins=bins, density=False)[0]\n print(\"\\rImages Processed: {}/{}\".format(i + 1, len(loader)), end=' ')\n sys.stdout.flush()\n torch.cuda.empty_cache()\n pickle.dump(counts, open(save_path, \"wb\"))\n print(\"Counts data saved:\", save_path)", "title": "" }, { "docid": "b9899a5ef63acec61dad9e1cc747c3bc", "score": "0.5364067", "text": "def make_histograms(self):\n\t\tself.suits = Hist()\n\t\tself.ranks = Hist()\n\n\t\tfor c in self.cards:\n\t\t\tself.suits.count(c.suit)\n\t\t\tself.ranks.count(c.rank)\n\n\t\tself.sets = list(self.ranks.values())\n\t\tself.sets.sort(reverse=True)\n\n\t\tprint(self.ranks)", "title": "" }, { "docid": "966695f63e2386cc7d07044bc75c62a3", "score": "0.5360983", "text": "def extract_2d_hist(samples_x, samples_y, bins=100, density=True,\n meshgrid=False):\n # Obtain histogram\n count, x_bins, y_bins = np.histogram2d(samples_x, samples_y, bins=bins,\n normed=density)\n\n # Make the bins into the bin centers, not the edges\n x = (x_bins[:-1] + x_bins[1:]) / 2.0\n y = (y_bins[:-1] + y_bins[1:]) / 2.0\n\n # Make mesh grid out of x_bins and y_bins\n if meshgrid:\n y, x = np.meshgrid(x, y)\n\n return count.transpose(), x, y", "title": "" }, { "docid": "94359c80fc00efd8b9c0981baa73e9bf", "score": "0.5360516", "text": "def histogram(self):\n from django.db import connection\n cursor = connection.cursor()\n cursor.execute(HISTOGRAM, [self.pk])\n Day = namedtuple('Day', 'count date start end')\n days = map(Day._make, 
cursor.fetchall())\n # TODO (rmyers): This should be moved to view or templatetag?\n # return just the totals for now and condense and pad the results\n # so that there are 31 days. The games start noon UTC time the last\n # day of the previous month and end noon the 1st of the next month.\n # This, is, really, ugly, don't look!\n results = [int(day.count) for day in days]\n if len(results) >= 2:\n results[1] += results[0]\n results = results[1:] # trim the first day\n if len(results) == 32:\n results[30] += results[31]\n results = results[:31] # trim the last day\n padding = [0 for day in xrange(31 - len(results))]\n results += padding\n return results", "title": "" }, { "docid": "f10b9c688e01e9b745ce3d0c35b2be58", "score": "0.5347997", "text": "def cluster_count_hist(labels, figure_dir=\"figures\"):\n col_scl = MinMaxScaler()\n unique_labels = np.unique(labels)\n col_trans = col_scl.fit(unique_labels.reshape(-1, 1))\n scl_vals = col_trans.transform(unique_labels.reshape(-1, 1))\n color = plt.cm.nipy_spectral(scl_vals)\n # mx = np.max(labels)\n # cmap = sns.color_palette(\"Spectral\", mx + 1, as_cmap=True)\n # color = cmap(scl_vals)\n\n fig = plt.Figure()\n plt.bar(*np.unique(labels, return_counts=True), color=color)\n plt.xlabel(\"cluster ID\")\n plt.ylabel(\"number of compounds\")\n plt.tight_layout()\n plt.savefig(join(figure_dir, \"cluster-count-hist\"))\n plt.show()\n return fig", "title": "" }, { "docid": "a69ce710e626d5fc748800b02c584d9f", "score": "0.5345074", "text": "def histogram(self, search_id):\r\n\r\n response = self._post(\r\n '/server/search/histogram', data=dict(\r\n search_session_id=search_id,\r\n user_session_id=self.token\r\n ))\r\n return response.json()", "title": "" }, { "docid": "732bae10f3d6b82fd56003979d6aae4c", "score": "0.5338475", "text": "def values(self):\n return [v for c, v in self.bins]", "title": "" }, { "docid": "015c6f0925ff62bd12450eedac5ed185", "score": "0.5329546", "text": "def getFrequencyArray(self) -> np.ndarray:\n return np.linspace(0, self.getSampleFreq() / 2.0, self.getDataSize())", "title": "" }, { "docid": "ffafc3ceb98e2e08dc4dad2a5af8fdda", "score": "0.53222513", "text": "def img2histogram(img,vocabulary_size,centers):\n kp,des=vlfeat.sift.dsift(img,step=8,fast=True,float_descriptors=True)\n distances=cdist(des,centers,'euclidean')\n counter=collections.Counter(np.argmin(distances,axis=1))\n re=np.zeros(vocabulary_size)\n for i in counter:\n re[i]=counter[i]\n\n return re", "title": "" }, { "docid": "0fada3b3f57e8156da8d5f414629f01f", "score": "0.5320726", "text": "def histeq(img, nbr_bins=256):\n\n imhist, bins = np.histogram(img.flatten(), nbr_bins, normed=True)\n\n\n cdf = imhist.cumsum() # cumulative distribution function\n cdf = 255 * cdf / cdf[-1]\n\n\n result = np.interp(img.flatten(), bins[:-1], cdf)\n\n return result.reshape(img.shape), cdf", "title": "" }, { "docid": "c2d55aeaae6613994a90f56bc8063920", "score": "0.53164566", "text": "def _cooccurrence_matrix_dir(values, bins, di, dj):\n m, n = values.shape\n if dj > 0:\n codes = values[:m + di, :n - dj] + bins * values[-di:, dj:] # di è <= 0 per costruzione, dj > 0\n else:\n codes = values[:m + di, :n + dj] + bins * values[-di:, - dj:]\n entries = np.bincount(codes.ravel(), minlength=bins ** 2)\n return entries.reshape(bins, bins)", "title": "" }, { "docid": "bc171f0656f09f317b079e5d1d743a8d", "score": "0.53057235", "text": "def _calc_colour_hist(img):\n\n BINS = 25\n hist = numpy.array([])\n\n for colour_channel in (0, 1, 2):\n\n # extracting one colour channel\n c = img[:, 
colour_channel]\n\n # calculate histogram for each colour and join to the result\n hist = numpy.concatenate(\n [hist] + [numpy.histogram(c, BINS, (0.0, 255.0))[0]])\n\n # L1 normalize\n hist = hist / len(img)\n\n return hist", "title": "" }, { "docid": "e4851e5c53f57c82da629e53e7c80285", "score": "0.5305564", "text": "def spiketimes_to_spikecounts(self, binEdges):\n if len(self.spikeTimesFromEventOnsetAll) == 0:\n msg = 'You need to run eventlocked_spiketimes() for this object first'\n raise RuntimeError(msg)\n nBins = len(binEdges) - 1\n nTrials = len(self.eventOnsetTimes)\n nClusters = len(self.celldb)\n spikeCountMatAll = np.empty((nClusters, nTrials, nBins), dtype=int)\n for indc in range(nClusters):\n cMat = spikesanalysis.spiketimes_to_spikecounts(self.spikeTimesFromEventOnsetAll[indc],\n self.indexLimitsEachTrialAll[indc],\n binEdges)\n spikeCountMatAll[indc,:,:] = cMat\n return spikeCountMatAll", "title": "" }, { "docid": "57fcf81413bcd43955dfb80c62ef7344", "score": "0.5293354", "text": "def get_cls_counts(self, with_diff=False, with_trun=True):\n counts = np.zeros(len(self.classes))\n objects = self.get_objects(with_diff, with_trun)\n if objects.shape[0] > 0:\n cls_inds = objects.subset_arr('cls_ind').astype('int')\n bincount = np.bincount(cls_inds)\n # need to pad this with zeros for total length of num_classes\n counts[:bincount.size] = bincount\n return counts", "title": "" }, { "docid": "975f649dad45299f4a224c3f2ef0431b", "score": "0.5291467", "text": "def _calc_texture_hist(img):\n BINS = 10\n\n hist = numpy.array([])\n\n for colour_channel in (0, 1, 2):\n\n # mask by the colour channel\n fd = img[:, colour_channel]\n\n # calculate histogram for each orientation and concatenate them all\n # and join to the result\n hist = numpy.concatenate(\n [hist] + [numpy.histogram(fd, BINS, (0.0, 1.0))[0]])\n\n # L1 Normalize\n hist = hist / len(img)\n\n return hist", "title": "" }, { "docid": "1c37e5e6f9925f4a8fbaf29c2492c0a2", "score": "0.528792", "text": "def make_time_slice_histograms(data, spectrum_rate_hz):\n max_channel = _next_highest_power_of_two(data.channel.max())\n time_edges = np.arange(0, data.timestamp.max(), 1/spectrum_rate_hz)\n upper_indices = np.searchsorted(data.timestamp, time_edges[1:])\n lower_indices = np.insert(upper_indices[:-1], 0, 0)\n ranges = np.vstack((lower_indices, upper_indices)).T\n all_counts = data.channel.values\n hists = np.zeros([len(ranges), max_channel])\n for i, limits in enumerate(ranges):\n hists[i, :] = np.bincount(all_counts[limits[0]:limits[1]], minlength=max_channel)\n return hists, time_edges", "title": "" }, { "docid": "9fc79a9ed4f5126b3374a9466b76d4ae", "score": "0.52808625", "text": "def compute_histogram_1d(img, bins=256, mask=None, sqrt=False, concat=False):\n\n if mask is not None:\n mask = mask.astype(\"bool\")\n\n if len(img.shape) == 3:\n if concat:\n if img.shape[2] == 3:\n hist = np.array(\n [\n np.histogram(\n img[..., i][mask], bins=bins, density=True\n )[0]\n for i in range(3)\n ]\n )\n hist = hist.ravel()\n else:\n raise Exception(\"Image should have more channels\")\n else:\n hist = np.histogram(img[mask], bins=bins, density=True)[0]\n else:\n hist = np.histogram(img[mask], bins=bins, density=True)[0]\n\n return np.sqrt(hist) if sqrt else hist", "title": "" }, { "docid": "963dd2385151872c1bf1008b5b49500e", "score": "0.5278245", "text": "def num_src_bins(self):\n return self.num_bins * self.num_radii", "title": "" }, { "docid": "66dbd52ec7344a31415cb2e1bf7ceee6", "score": "0.527806", "text": "def cum_freq(data, bins=None, 
norm=True):\r\n\r\n # set the bin edges\r\n if bins is None:\r\n bins = np.concatenate(([0.1, 1], np.arange(2, 10, 1),\r\n np.arange(10, 152, 2)))\r\n\r\n # mask the missing values and change negative to zero\r\n data = data[np.isfinite(data)]\r\n\r\n # calculate the cumulative frequency distribution\r\n cfd_array = np.full(bins.size, np.nan)\r\n for ib, b in enumerate(bins):\r\n cfd_array[ib] = np.count_nonzero(data >= b) * 1.0 / data.size\r\n\r\n # return the bin edges and CFD\r\n return cfd_array, bins", "title": "" }, { "docid": "f11ddc8070690b85c3aa14d4e54ede6d", "score": "0.5273581", "text": "def freqArray(self) -> np.ndarray:\n\n return getFrequencyArray(self.sampleFreq, self.dataSize)", "title": "" }, { "docid": "dc3d7a789ee34647c9c988201b99224d", "score": "0.5273548", "text": "def hist_from_array(self, *args):\n assert False, \"\"\"This function has been replaced by hist_from_arrays which takes a dictionary of arrays.\n Please use this function.\"\"\"", "title": "" }, { "docid": "5b591ce03a788210e9a6e9792315fa81", "score": "0.52684766", "text": "def calculate_histogram(self, abstract_features_1, abstract_features_2):\n scores = torch.mm(abstract_features_1, abstract_features_2).detach()\n scores = scores.view(-1, 1)\n hist = torch.histc(scores, bins=self.args.bins)\n hist = hist/torch.sum(hist)\n hist = hist.view(1, -1)\n return hist", "title": "" }, { "docid": "5b591ce03a788210e9a6e9792315fa81", "score": "0.52684766", "text": "def calculate_histogram(self, abstract_features_1, abstract_features_2):\n scores = torch.mm(abstract_features_1, abstract_features_2).detach()\n scores = scores.view(-1, 1)\n hist = torch.histc(scores, bins=self.args.bins)\n hist = hist/torch.sum(hist)\n hist = hist.view(1, -1)\n return hist", "title": "" }, { "docid": "9dbd3fdce5965cfc4e07787065d78633", "score": "0.52646834", "text": "def dict_of_counts(self):\n with self.lock:\n return dict((key, ss.count()) for (key, ss) in self.stats_dict.iteritems())", "title": "" }, { "docid": "50bf6040112451d074791371e9c113b6", "score": "0.52642655", "text": "def get_histogram(struc, box, n_bins=100):\n # Extract x/y/z positions only\n x, y, z = struc[:, 0], struc[:, 1], struc[:, 2]\n\n histograms = []\n for dimension in (0, 1, 2):\n bins = np.linspace(0, box[dimension], n_bins)\n hist, bins = np.histogram(struc[:, dimension], bins=bins, density=True)\n # Normalize the histogram for all values to sum to 1\n hist /= sum(hist)\n\n histograms += [(hist, bins)]\n return histograms", "title": "" }, { "docid": "3927cfc4495c1327985d81b873bb376a", "score": "0.5260398", "text": "def get_histogram(self):\n self.subtotals = self.wiki.groupby('items').sum()\n self.his = pd.DataFrame()\n cols=[\"0\",\"1\",\"2\",\"3\",\">4\"]\n for i in range(len(self.subtotals.columns)):\n counts, bins = np.histogram(self.subtotals.iloc[:, i], bins = [0,1,2,3, 4, float('inf')])\n self.his = self.his.append(pd.Series(counts,name=self.wiki.columns[i], index=cols))\n self.his = self.his.sort_values(['1'],ascending=False)\n self.his = self.his/len(self.subtotals)\n qids = self.his.index.tolist()\n names = self.get_names(qids)\n names = [names[q] for q in qids]\n self.his[\"name\"] = names\n return self.his", "title": "" }, { "docid": "16b9219362171d695f15cc95568734fd", "score": "0.5257534", "text": "def estimate_counts(self):\n counts = []\n for dataset in self.datasets.datasets:\n mask = dataset.mask_fit\n if dataset.mask_safe is not None:\n mask &= dataset.mask_safe\n\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": 
np.array(counts, dtype=int)}", "title": "" }, { "docid": "e6c3d67057f7b682ddbaddac45c3cc6b", "score": "0.52486086", "text": "def histogram_3d(self, X, win=None, env=None, opts=None):\n\n opts = {} if opts is None else opts\n _assert_opts(opts)\n\n X = np.asarray(X)\n assert X.ndim >= 2, 'X must have atleast 2 dimensions'\n\n opts['numbins'] = opts.get('numbins', min(30, len(X[0])))\n opts['mutiplier'] = opts.get('numbins', 100)\n\n traces = []\n for i, array in enumerate(X):\n array = array.flatten()\n bins, intervals = np.histogram(array, bins=opts['numbins'])\n\n x = []\n y = []\n z = []\n prev_interv = 0.\n for b, iv in zip(bins, intervals):\n interval_middle = float((prev_interv + iv) / 2.) * opts['mutiplier']\n z.append([float(b), float(b)])\n y.append([interval_middle, interval_middle])\n x.append([i * 2, i * 2 + 0.5])\n prev_interv = float(iv)\n traces.append(dict(\n z=z,\n x=x,\n y=y,\n # colorscale=[[i, 'rgb(%d,%d,255)' % (ci, ci)] for i in np.arange(0, 1.1, 0.1)],\n # autocolorscale=True,\n showscale=False,\n type='surface',\n ))\n\n return self._send({\n 'data': traces,\n 'win': win,\n 'eid': env,\n 'layout': _opts2layout(opts),\n 'opts': opts,\n })", "title": "" } ]
5ff642964c526823046501a5c2bba226
Should be implemented by the base class (if using renderable, ensure it comes last in resolution order)
[ { "docid": "41c2be246653e727bdbe448de1a403c8", "score": "0.0", "text": "def position_intersects(self, position: Position) -> bool:\n raise NotImplementedError", "title": "" } ]
[ { "docid": "0c7c45818ff2bc1931fe2e11d314069b", "score": "0.76715225", "text": "def __init__(self):\n super(RenderBase, self).__init__()\n return", "title": "" }, { "docid": "21569fd1241d3b25a8ac06aaa8d53153", "score": "0.7640047", "text": "def render(self):\n raise NotImplementedError('Subclasses should implement this.')", "title": "" }, { "docid": "5afee2a995eb4a4941d63491d5013fd7", "score": "0.7236147", "text": "def do_render(self):\r\n pass", "title": "" }, { "docid": "5ef62b7811a31f1f104641cb0374764d", "score": "0.71822137", "text": "def render(self):\n pass", "title": "" }, { "docid": "ea68878df4817445af39ecd7b27e046e", "score": "0.7073564", "text": "def render(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "4d579f8e0e9433668b8d961ee3bd8878", "score": "0.70053846", "text": "def post_render(self):\n pass", "title": "" }, { "docid": "65509f26c8a341543f9cb3e6861dfa47", "score": "0.69845295", "text": "def _mgui_render(self):\n raise NotImplementedError()", "title": "" }, { "docid": "2e745eb555cccedb6a07191e660f8874", "score": "0.6949089", "text": "def render(self) :\n pass", "title": "" }, { "docid": "782b66a8d2fb9247eb18b9d374f308a5", "score": "0.665951", "text": "def render(cls, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "32e4517a9325a266186760b719b00900", "score": "0.6604805", "text": "def postConstructor(self):\n\t\tself.setRenderable(True)", "title": "" }, { "docid": "b5685d6f91113a8b1a4c8af6147dba40", "score": "0.65651196", "text": "def pre_render(self):\n pass", "title": "" }, { "docid": "ad9402dbf74a9f6d5c68c377b5e9493b", "score": "0.64998466", "text": "def draw(self, render):\n pass", "title": "" }, { "docid": "9674a53f41c819786d78160c50d1decc", "score": "0.64261895", "text": "def __call__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "c25af8348f8522a3cff527bba3ff3fa4", "score": "0.6380988", "text": "def render():", "title": "" }, { "docid": "2c3d91233ea37f57e1a31f6d52eb2c7f", "score": "0.6351828", "text": "def render(self, r):\n raise NotImplementedError", "title": "" }, { "docid": "18083b5740a20201bbe5192e6bb0b175", "score": "0.6259413", "text": "def test_com_day_cq_dam_core_impl_gfx_commons_gfx_renderer(self):\n pass", "title": "" }, { "docid": "b80e85359a4325d37869bb1e68fa1f00", "score": "0.6213663", "text": "def render(self, image):\n raise NotImplementedError", "title": "" }, { "docid": "49d56481f00ce30b3f854727db731d7c", "score": "0.62051743", "text": "def render(self, props={}):\n pass", "title": "" }, { "docid": "437d1530820649411e913d518e7fc002", "score": "0.6196707", "text": "def render(self, context, viewport, location):\n pass", "title": "" }, { "docid": "810e89a661864ca7ac5441c53fe1b633", "score": "0.6168228", "text": "def render(self):\n return self.__render", "title": "" }, { "docid": "dd9a7263f51fec0656b4629f76cc4174", "score": "0.61647314", "text": "def blit(self):\n raise NotImplementedError()", "title": "" }, { "docid": "dd9a7263f51fec0656b4629f76cc4174", "score": "0.61647314", "text": "def blit(self):\n raise NotImplementedError()", "title": "" }, { "docid": "cc26fa1dd7ed094f450f49fd755cd690", "score": "0.61427426", "text": "def render(self):\n raise NotImplementedError('Data category did not implement draw!')", "title": "" }, { "docid": "40dc4f895b9e4cb93d343ab8c523a484", "score": "0.6124965", "text": "def render(self, **kwargs):\n return None", "title": "" }, { "docid": "789372215c5990aa1e4f20da653d55b1", "score": "0.61183906", "text": "def render(self, obj: object) -> object:", 
"title": "" }, { "docid": "f8b6049c2874e62088a19871469e96d8", "score": "0.6115543", "text": "def render(self, context):\n pass", "title": "" }, { "docid": "998d1340f9936d76054462c494fd99db", "score": "0.6089828", "text": "def environment_render(self):\n raise NotImplementedError()", "title": "" }, { "docid": "4c318b99eab1cf5fa6da210a28f1eda9", "score": "0.60682356", "text": "def __call__(self):\n raise NotImplementedError('Override in client classes')", "title": "" }, { "docid": "19e758f87b49cbd93fefa8ff7beb309b", "score": "0.6064418", "text": "def __call__(self):\n\n return NotImplementedError", "title": "" }, { "docid": "0bd9396d4b4fc28c7c10b919b97fe923", "score": "0.60631764", "text": "def realizeImplementation(self):\n return _osg.GraphicsContext_realizeImplementation(self)", "title": "" }, { "docid": "5df81afeb2706d3214f9a659b676997e", "score": "0.6044836", "text": "def react(self) -> None:", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.60232526", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "e2add6a325d124133b02dfcc5f11a62d", "score": "0.60223997", "text": "def custom_init(self):\n raise NotImplementedError(\"PictureDrawer: attempted to call abstract method\")", "title": "" }, { "docid": "dd0c121c83e9c375da4a6ef08c057220", "score": "0.6017273", "text": "def render(self, mode=None):\n pass", "title": "" }, { "docid": "3a705b0389cca7178225bb2b4a2c06d7", "score": "0.59811", "text": "def __call__(self, *args, **kwargs):\r\n return ComponentMeta.__call__(self, *args, **kwargs)", "title": "" }, { "docid": "35d9e514e71c4e827785fa2c4fcf0387", "score": "0.5979676", "text": "def draw(self):\n raise NotImplementedError", "title": "" }, { "docid": "93d2aa5653625cf26a2600b19e44245d", "score": "0.59635675", "text": "def internalView(self):", "title": "" }, { "docid": "7160846a797db8b8dcf24eb398730e74", "score": "0.5962455", "text": "def render(self, mode='human'):\n pass", "title": "" }, { "docid": "7160846a797db8b8dcf24eb398730e74", "score": "0.5962455", "text": "def render(self, mode='human'):\n pass", "title": "" }, { "docid": "7160846a797db8b8dcf24eb398730e74", "score": "0.5962455", "text": "def render(self, mode='human'):\n pass", "title": "" }, { "docid": "405e4316e6007d1adc21cafb63e59532", "score": "0.59602123", "text": "def __init__(self):\n\t\tsuper().__init__()", "title": "" }, { "docid": "3d6ccf29505414d42c370e5fb5f3c8b5", "score": "0.5937957", "text": "def render(self, ax):\n raise NotImplementedError(\n \"The `render` method in the `Spectrogram` base class\"\n \" should be overridden by derived classes\"\n \" and is not meant to be called!\")", "title": "" }, { "docid": "d30f507518885a1c98dcd870b734a3fe", "score": "0.59313154", "text": "def __init__(self):\n\n\t\tsuper().__init__()", "title": "" }, { "docid": "81dadeb23d77ff6cbb2b001af308ca95", "score": "0.59294033", "text": "def render (self):\n\n raise NotImplementedError(\"_Record#render() must be overridden\")", "title": "" }, { "docid": "1ac4ed39b9f39dd5d6f6f27c9a4369fe", "score": "0.59119403", "text": "def render(self, mode='noop'):\n pass", "title": "" }, { "docid": "f8c1ec70202a43ca6def976ae872ad32", "score": "0.58911747", "text": "def __call__(self, *args):\n raise NotImplementedError", "title": "" }, { "docid": "f8c1ec70202a43ca6def976ae872ad32", "score": "0.58911747", "text": "def __call__(self, *args):\n raise NotImplementedError", "title": "" }, { "docid": "f8c1ec70202a43ca6def976ae872ad32", "score": "0.58911747", "text": "def __call__(self, *args):\n raise 
NotImplementedError", "title": "" }, { "docid": "f8c1ec70202a43ca6def976ae872ad32", "score": "0.58911747", "text": "def __call__(self, *args):\n raise NotImplementedError", "title": "" }, { "docid": "366cd6dd2efddedc4ffaac644544b404", "score": "0.5871371", "text": "def __panel__(self) -> Viewable:\n raise NotImplementedError", "title": "" }, { "docid": "c3e9b73e614f6c03f2077716ba37c6a3", "score": "0.5861025", "text": "def __call__(self):\n\n pass", "title": "" }, { "docid": "ee4b2f3536968e91093f5a054dfe65e1", "score": "0.58505493", "text": "def render(self, QPainter, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "title": "" }, { "docid": "c1ba7c92290309fa22c770673eb07958", "score": "0.58434284", "text": "def __init__(self) -> None:\n raise NotImplementedError(\"Subclass needed!\")", "title": "" }, { "docid": "6f7abe2903c6256152688760dbc8a3b8", "score": "0.5829047", "text": "def _render_from(self, memory: D.T_memory[D.T_state], **kwargs: Any) -> Any:\n raise NotImplementedError", "title": "" }, { "docid": "1caae80999371fe3cb94dbd1a75adc61", "score": "0.5820991", "text": "def __call__(self, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "651973e64b41a018d58af4366df4ed0a", "score": "0.5820138", "text": "def renderer_finished(self):\n raise NotImplementedError()", "title": "" }, { "docid": "3e6086ff5e0b02439b0a648d2e22fb48", "score": "0.58172524", "text": "def _build_display(self):\n raise NotImplementedError", "title": "" }, { "docid": "7e86727d51f2c1752789c9c7012714e1", "score": "0.5808815", "text": "def render_impl(self):\n return \"\"", "title": "" }, { "docid": "7ffe7da32dbf0adc116dd35d29c0efe8", "score": "0.5800587", "text": "def render(self, render):\n self.p_render = render\n return self", "title": "" }, { "docid": "79c2f6adedbf31147b877e0b64bef4b7", "score": "0.57889533", "text": "def __call__(self, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "33f4281a21d1a02b232c2b8863ac85c2", "score": "0.57734174", "text": "def draw(self):\n\t\tpass", "title": "" }, { "docid": "377d4d2c0e0a8c7c5ee5034785a13037", "score": "0.5772862", "text": "def render(self, pickingMode=False):\n pass", "title": "" }, { "docid": "c7dced819f4455a415bb918dcc470591", "score": "0.5757149", "text": "def provide(self):\n raise NotImplementedError", "title": "" }, { "docid": "8d69e83ccf9d5c7a54b512db1e1f4a9a", "score": "0.57490337", "text": "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "67c47245063e0ebbe914ed8038e62b84", "score": "0.5731896", "text": "def Render( self, mode = None):\n ### Put your rendering code here", "title": "" }, { "docid": "5063c430e6805c10bf972c3bfb1fd1f9", "score": "0.57214", "text": "def render(self):\n if self._patcher:\n self._patcher.render()\n\n self.patcher = self._patcher.to_dict()", "title": "" }, { "docid": "ad31110c9820781b95bb8f73233012fd", "score": "0.57189834", "text": "def render(self, **kwargs):\n\n render_fn = getattr(self, 'render_%s' % self.region, None)\n\n if render_fn:\n return render_fn(**kwargs)\n\n raise NotImplementedError", "title": "" }, { "docid": "4719a498b0e14f32c6b7aa707aa5d480", "score": "0.5716758", "text": "def load_view(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "c46d797aa6b70ce3f38c1a363443cf26", "score": "0.57002497", "text": "def __init__(self):\n\t\traise NotImplementedError()", "title": "" }, { "docid": "239b034983c06ac169e9e177e5ab0e1f", "score": "0.5683766", "text": "def __init__(self):\n 
super().__init__", "title": "" }, { "docid": "92fefc5cefa3c9b2dc594ff04efd7ef6", "score": "0.5681814", "text": "def framework(self):\n raise NotImplementedError", "title": "" }, { "docid": "d13f73224a5112286c2d7ba374dc4ae7", "score": "0.5678158", "text": "def run(self):\n raise NotImplementedError('The Subclass must implement this')", "title": "" }, { "docid": "c3480bd8e34694e29e352035588716e8", "score": "0.5672975", "text": "def RenderToImage(self):\r\n pass", "title": "" }, { "docid": "5ce78b9632354ba3937648a8e1b2e034", "score": "0.5671841", "text": "def drawImplementation(self, *args):\n return _osg.Drawable_drawImplementation(self, *args)", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "4eb840517ddf27ae7a7bdf09e62cdf7f", "score": "0.5653528", "text": "def __init__(self):\n super().__init__()", "title": "" }, { "docid": "268835bf51f02d5529f59320adbc932b", "score": "0.5651195", "text": "def renderer(self) -> RendererBase:\n if not self._renderer:\n from .utils.custom_exceptions import RendererException\n\n try:\n from smarts.p3d.renderer import Renderer\n\n self._renderer = Renderer(self._sim_id)\n except 
ImportError as e:\n raise RendererException.required_to(\"use camera observations\")\n except Exception as e:\n self._renderer = None\n raise RendererException(\"Unable to create renderer.\")\n if not self._renderer.is_setup:\n if self._scenario:\n self._renderer.setup(self._scenario)\n self._vehicle_index.begin_rendering_vehicles(self._renderer)\n return self._renderer", "title": "" }, { "docid": "8a8ed958928f5514fd8ea8a8c84ed239", "score": "0.56494635", "text": "def design(self):\n pass", "title": "" }, { "docid": "71b2ec27562cc4625bf0703272a9ed3f", "score": "0.56463706", "text": "def blitme(self):", "title": "" }, { "docid": "c3bf3544ae6fcaf132184a697f372d78", "score": "0.5643445", "text": "def _render(self):\n global env\n self.image = pygame.transform.rotate(self.base_image, -self.direction.angle) \n w,h = self.image.get_size()\n self.scaled_image = pygame.transform.smoothscale(self.image, (int(w*env.scale), int(h*env.scale)))\n self._set_rect()", "title": "" }, { "docid": "e67772d98d88ec91f797566839e91da3", "score": "0.5642218", "text": "def render(self, mode=\"human\", close=False):\n ...", "title": "" }, { "docid": "27ccaeca5f88a2288116cde0e9df229a", "score": "0.5641759", "text": "def render(self):\n return self._render()", "title": "" }, { "docid": "e0f96ca85836cb2de537203a89582a78", "score": "0.5641197", "text": "def _init_this(self):\n raise NotImplementedError", "title": "" }, { "docid": "2f8afb8d9b2a46871c22d2e95b948b36", "score": "0.5634087", "text": "def show(self):\n raise NotImplementedError(\"This is just an abstract interface\")", "title": "" } ]
5db90035f9cdea3e72db69035db905b8
Updates the marker according to the given event. All submarkers will be updated before the compound marker itself is updated.
[ { "docid": "dc13326b3e9e55f908c4e8b1abd051bc", "score": "0.63127154", "text": "def track(self, event: Event) -> None:\n for marker in self.sub_markers:\n marker.track(event)\n super().track(event)", "title": "" } ]
[ { "docid": "308e72dad6a59f68311c1f154078fc03", "score": "0.6227557", "text": "def update(self, event, annotation):\n # Get artist-specific information about the pick event\n info = self.event_info(event)\n\n if self.props_override is not None:\n info = self.props_override(**info)\n\n # Update the xy position and text using the formatter function\n annotation.set_text(self.formatter(**info))\n annotation.xy = info['x'], info['y']\n\n # Unfortnately, 3D artists are a bit more complex...\n # Also, 3D artists don't share inheritance. Use naming instead.\n if '3D' in type(event.artist).__name__:\n annotation.xy = event.mouseevent.xdata, event.mouseevent.ydata\n\n # In case it's been hidden earlier...\n annotation.set_visible(True)\n\n if self.keep_inside:\n self._keep_annotation_inside(annotation)\n\n annotation._has_been_shown = True\n self._last_event = event\n self._last_annotation = annotation\n\n event.canvas.draw()", "title": "" }, { "docid": "898ec6458224f30cd78c108a692a61f7", "score": "0.5775956", "text": "def _set_marker_event(self, evt):\n assert self.marker_event is None\n assert evt.name in self.events\n assert evt.name == 'marker'\n assert evt.controllable\n assert evt.observable\n assert evt.marker\n self.marker_event = evt", "title": "" }, { "docid": "16ce72bc0d90614b2e61dd525bfdd9cf", "score": "0.562162", "text": "def update_event(self, event):\n pass", "title": "" }, { "docid": "1adfb9f0c3fdf38e41d4c1d3e75c3f84", "score": "0.54933155", "text": "def mark(self, marker):\r\n pass", "title": "" }, { "docid": "e68d3a0342f6279c4e4afec071557781", "score": "0.5329545", "text": "def update_marker(self, iid, **kwargs):\n if iid not in self._markers:\n raise ValueError(\"Unknown iid passed as argument: {}\".format(iid))\n self.check_kwargs(kwargs)\n marker = self._markers[iid]\n marker.update(kwargs)\n self.delete_marker(iid)\n return self.create_marker(marker[\"category\"], marker[\"start\"], marker[\"finish\"], marker)", "title": "" }, { "docid": "bf75ddfde1daf9127e62fe07952a2085", "score": "0.5320121", "text": "def UpdateEntry(self, entry):\n raise NotImplementedError()", "title": "" }, { "docid": "bc6888873f5fe61082609085c3520900", "score": "0.53158635", "text": "def update(self):\n self.data.update()\n self._event = self.data.event\n self._attr_extra_state_attributes = {\n \"offset_reached\": is_offset_reached(\n self._event.start_datetime_local, self.data.offset\n )\n if self._event\n else False\n }", "title": "" }, { "docid": "e228479ecafa572902f995953e92c9b3", "score": "0.525726", "text": "def update_markers(self, *items):\n for item in items:\n self.markers[item.tag] = item", "title": "" }, { "docid": "720e6b3290f7d35ea60ac865c0513986", "score": "0.5028162", "text": "def update(self, event, annotation):\n # Decide whether or not to hide previous highlights...\n for artist in self.highlights.values():\n if self.display == 'multiple':\n continue\n if self.display == 'one-per-axes':\n if event.artist.axes is not artist.axes:\n continue\n artist.set_visible(False)\n self.show_highlight(event.artist)\n DataCursor.update(self, event, annotation)", "title": "" }, { "docid": "895117a152ca5cdd49215ccf4b1e1b50", "score": "0.50186986", "text": "def put(self, request, primary_key):\n marker = self.get_object(primary_key)\n serializer = MarkerSerializer(marker, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": 
"fb0ed1f6001b6932cef497c769d0cf1d", "score": "0.50142837", "text": "def addMarkerInfo(self, node, markerData):\n self.markerMap[node] = markerData", "title": "" }, { "docid": "e40108b83945193b63677c49cd7fbcd3", "score": "0.50020534", "text": "def custom_map_update(self):\n pass", "title": "" }, { "docid": "4a7f225a67f86aafbfc375940d032d41", "score": "0.4965763", "text": "def update_signal(self, event):\n raise NotImplementedError(\"Should implement update_signal()\")", "title": "" }, { "docid": "fc8db888c9e3db8cda6fc7abc9ecf784", "score": "0.49366045", "text": "def mark(self):\n if self._prepended_events:\n raise ValueError('mark() offsets must be monotonic')\n self._mark = self._get_offset()\n self._saved_events.clear()", "title": "" }, { "docid": "a42ae2ad3fdc6a99842f09dea6421b97", "score": "0.4887127", "text": "def test_case_markers_partial_update(self):\n pass", "title": "" }, { "docid": "09462407dc0fe9d72e51ed872abe75a4", "score": "0.4859513", "text": "def update_position_from_image(self, marker_id: int, new_x: int, new_y: int):\n self.table_widget.update_marker_position(marker_id, new_x, new_y)\n self.marker_position_update_widget.update_marker_position_from_main_window(marker_id, (new_x, new_y))\n self.markers[marker_id] = {\"position_x\": new_x, \"position_y\": new_y}\n self.settings_bearer.update_settings(Constants.SETTINGS_MARKERS, self.markers)", "title": "" }, { "docid": "8813dacaf53a908e0d49a209ba94f919", "score": "0.48329806", "text": "def place_marker(self, marker, x, y):\n self.state[x][y] = marker", "title": "" }, { "docid": "19111a6dde8c3c07244588111c21666d", "score": "0.48297894", "text": "def add(self, event, new_state,\r\n TupleType = tuple):\r\n if type(event) is TupleType:\r\n code0, code1 = event\r\n i = self.split(code0)\r\n j = self.split(code1)\r\n map = self.map\r\n while i < j:\r\n map[i + 1][new_state] = 1\r\n i = i + 2\r\n else:\r\n self.get_special(event)[new_state] = 1", "title": "" }, { "docid": "446a3e42cae9660c73b38be5867eaac3", "score": "0.48278877", "text": "def add_marker_to_map(self, marker, force_modify=False):\n if self.check_for_marker_in_map(marker):\n if force_modify:\n rospy.loginfo(\"Editing existing marker pose in map\")\n self.modify_marker_pose(marker)\n return\n else:\n rospy.loginfo(\"Not editing existing marker pose in map\")\n rospy.loginfo(\"param force_modify set to 'False'\")\n return\n\n \"\"\" Write line to add \"\"\"\n params = []\n params.append(marker.fid)\n quat = (marker.pose.orientation.x,\n marker.pose.orientation.y,\n marker.pose.orientation.z,\n marker.pose.orientation.w)\n rpy_ = euler_from_quaternion(quat)\n\n params.append(\"{:.6f}\".format(marker.pose.position.x))\n params.append(\"{:.6f}\".format(marker.pose.position.y))\n params.append(\"{:.6f}\".format(marker.pose.position.z))\n params.append(\"{:.6f}\".format(math.degrees(rpy_[0])))\n params.append(\"{:.6f}\".format(math.degrees(rpy_[1])))\n params.append(\"{:.6f}\".format(math.degrees(rpy_[2])))\n params.append(\"{:.6f}\".format(0))\n params.append('1\\n')\n new_line = \" \".join(map(str, params))\n\n \"\"\" Add line to map \"\"\"\n file = open(self.file_path, \"r\")\n lines = file.readlines()\n for idx, line in enumerate(lines):\n params = list(line.split(\" \"))\n if int(params[0]) > marker.fid:\n index = idx\n break\n\n lines.insert(index, new_line)\n file = open(self.file_path, \"w\")\n file.write(\"\".join(lines))\n file.close()", "title": "" }, { "docid": "f4772575fc0e7e75dd9bb2ab19d7bdb5", "score": "0.48173434", "text": "def 
test_case_markers_update(self):\n pass", "title": "" }, { "docid": "58552250cf265e8797de78754f57a926", "score": "0.4810462", "text": "def add_marker_cluster(self, event=\"click\", add_marker=True):\n coordinates = []\n markers = []\n marker_cluster = ipyleaflet.MarkerCluster(name=\"Marker Cluster\")\n self.last_click = []\n self.all_clicks = []\n if add_marker:\n self.add_layer(marker_cluster)\n\n def handle_interaction(**kwargs):\n latlon = kwargs.get(\"coordinates\")\n\n if event == \"click\" and kwargs.get(\"type\") == \"click\":\n coordinates.append(latlon)\n self.last_click = latlon\n self.all_clicks = coordinates\n if add_marker:\n markers.append(ipyleaflet.Marker(location=latlon))\n marker_cluster.markers = markers\n elif kwargs.get(\"type\") == \"mousemove\":\n pass\n\n # cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp\n self.default_style = {\"cursor\": \"crosshair\"}\n self.on_interaction(handle_interaction)", "title": "" }, { "docid": "6116dfb6a8ef980e86c99df75a9e925e", "score": "0.48022392", "text": "def modify_marker_pose(self, marker):\n\n if not self.check_for_marker_in_map(marker):\n rospy.loginfo(\"Cannot edit pose - marker not in map\")\n return\n\n file = open(self.file_path, \"r\")\n lines = file.readlines()\n for idx, line in enumerate(lines):\n params = list(line.split(\" \"))\n fid = int(params[0])\n if fid == marker.fid:\n quat = (marker.pose.orientation.x,\n marker.pose.orientation.y,\n marker.pose.orientation.z,\n marker.pose.orientation.w)\n rpy_ = euler_from_quaternion(quat)\n\n params[1] = \"{:.6f}\".format(marker.pose.position.x)\n params[2] = \"{:.6f}\".format(marker.pose.position.y)\n params[3] = \"{:.6f}\".format(marker.pose.position.z)\n params[4] = \"{:.6f}\".format(math.degrees(rpy_[0]))\n params[5] = \"{:.6f}\".format(math.degrees(rpy_[1]))\n params[6] = \"{:.6f}\".format(math.degrees(rpy_[2]))\n\n new_line = \" \".join(map(str, params))\n lines[idx] = new_line\n\n file = open(self.file_path, \"w\")\n file.write(\"\".join(lines))\n file.close()", "title": "" }, { "docid": "3405ccf8b9b22acff37c3b3ff64c4bb4", "score": "0.4782863", "text": "def update_map(self):\n pass", "title": "" }, { "docid": "afdc1dbbbb031223e3a78041b01d890b", "score": "0.4752266", "text": "def set_marker(self, marker_id):\n try:\n marker = self._markers[marker_id]\n except ValueError:\n self.log.warning(\"An invalid marker ID was received: %d\", marker_id)\n return\n\n # Trigger any events\n if marker['events'] is not None:\n for event in marker['events']:\n self.mc.post_mc_native_event(event, sound_instance=self, marker_id=marker_id)", "title": "" }, { "docid": "70fee478143807c3004a53f572a832f5", "score": "0.47485507", "text": "def addMarker(self, lon, lat):\n x, y = self.m(lon, lat)\n self.m.plot(x, y, 'kx', markersize=4, zorder=15)", "title": "" }, { "docid": "e41b47b9bc314223a57a1ef183fb69a9", "score": "0.47421196", "text": "def _increment_index(self, di=1):\n if self._last_event is None:\n return\n\n if not hasattr(self._last_event, 'ind'):\n return\n\n event = self._last_event\n xy = pick_info.get_xy(event.artist)\n\n if xy is not None:\n x, y = xy\n i = (event.ind[0] + di) % len(x)\n event.ind = [i]\n event.mouseevent.xdata = x[i]\n event.mouseevent.ydata = y[i]\n\n self.update(event, self._last_annotation)", "title": "" }, { "docid": "1436fc0761c4843387c5c23a40de72d6", "score": "0.47246352", "text": "def update_edge(self, edge: np.ndarray, change: float):\n raise NotImplementedError(\n 'subclasses must override update_edge()!')", "title": "" }, { "docid": 
"b683e97d293349bd67493b962ee64d67", "score": "0.47066453", "text": "def _update_event(self, student, progress, event_entity, event_key,\n direct_update=False):\n if direct_update or event_entity not in self.UPDATER_MAPPING:\n if event_entity in self.UPDATER_MAPPING:\n # This is a derived event, so directly mark it as completed.\n self._set_entity_value(\n progress, event_key, self.COMPLETED_STATE)\n else:\n # This is not a derived event, so increment its counter by one.\n self._inc(progress, event_key)\n else:\n self.UPDATER_MAPPING[event_entity](self, progress, event_key)\n\n if event_entity in self.DERIVED_EVENTS:\n for derived_event in self.DERIVED_EVENTS[event_entity]:\n self._update_event(\n student=student,\n progress=progress,\n event_entity=derived_event['entity'],\n event_key=derived_event['generate_parent_id'](event_key),\n )", "title": "" }, { "docid": "622be98bcf0994906b8be4cafa687e24", "score": "0.4700873", "text": "def update_fill(self, event):\n raise NotImplementedError(\"Should implement update_fill()\")", "title": "" }, { "docid": "bdb014f55818c4c7349ba3dc940981bb", "score": "0.46878043", "text": "def set_edge(self, event):\n current = self.edge_coords.get()\n self.edge_coords.delete(0, len(current) + 1)\n self.edge_coords.insert(0, f\"{event.x}, {event.y}\")\n self.set_radius()", "title": "" }, { "docid": "8fff1231511f8fdbf399a7727b92e1c7", "score": "0.4680266", "text": "def update_annot(ind):\n pos = sc.get_offsets()[ind[\"ind\"][0]] # Get position of curser\n annot.xy = pos\n text = \"{}\".format(\" \".join([tickers[n] for n in ind[\"ind\"]])) # The annotation content\n annot.set_text(text)\n annot.get_bbox_patch().set_alpha(0.4) # Make annotation prettier", "title": "" }, { "docid": "0d9c9ba084e2f9687adbf07db7df702a", "score": "0.467926", "text": "def update_position_from_marker_position_update_widget(self, marker_id: int, new_x: int, new_y: int):\n self.table_widget.update_marker_position(marker_id, new_x, new_y)\n self.image_widget.image.update_position_from_marker_position_update_widget(marker_id, new_x, new_y)\n self.markers[marker_id] = {\"position_x\": new_x, \"position_y\": new_y}\n self.settings_bearer.update_settings(Constants.SETTINGS_MARKERS, self.markers)", "title": "" }, { "docid": "015f3207725f88b45856fafd34c22ada", "score": "0.46693218", "text": "def _redraw_event(self, event):\n kwargs = {event.name: event.new.lower()}\n self.redraw(**kwargs)", "title": "" }, { "docid": "015f3207725f88b45856fafd34c22ada", "score": "0.46693218", "text": "def _redraw_event(self, event):\n kwargs = {event.name: event.new.lower()}\n self.redraw(**kwargs)", "title": "" }, { "docid": "4a65bf05bcd259fb386afdbc7486998d", "score": "0.4668639", "text": "def zoom_in(event: Event=None, delta: int=1) -> None:\n zoom_helper(event, delta=1)", "title": "" }, { "docid": "c5faf45b1ff88085e81f7690fb2bc044", "score": "0.46662444", "text": "def _plotClick(self, Event):\n if Event.button != 1:\n return\n # TODO: Figure out if this means I've reversed indexing somewhere.\n dataCoord = (Event.ydata, Event.xdata)\n plotCoord = (Event.xdata, Event.ydata)\n\n self.setMark.emit(Event, dataCoord, plotCoord)\n self.addMarker(plotCoord)", "title": "" }, { "docid": "e4e5b0c382b0a72330d314178f25dcca", "score": "0.46622425", "text": "def notify(self, event: TileEvent):\r\n raise NotImplementedError(\r\n \"TileListener subclass needs to override notify(TileEvent)\")", "title": "" }, { "docid": "7acee721e94fb3d662c48621a2babd5d", "score": "0.4655158", "text": "def __updateToEvent(self, depth, eventIndex, 
number):\n\n #check if we have to add or remove items\n if number > 0:\n self.__addToEvent(depth, eventIndex, number)\n else:\n self.__removeFromEvent(depth, eventIndex, abs(number))\n pass", "title": "" }, { "docid": "5273daae5fc8f9f0cb573746dd198f01", "score": "0.46427363", "text": "def add_set(self, event, new_set,\r\n TupleType = tuple):\r\n if type(event) is TupleType:\r\n code0, code1 = event\r\n i = self.split(code0)\r\n j = self.split(code1)\r\n map = self.map\r\n while i < j:\r\n map[i + 1].update(new_set)\r\n i = i + 2\r\n else:\r\n self.get_special(event).update(new_set)", "title": "" }, { "docid": "374f491590216d5641c8a1398b877a88", "score": "0.46368894", "text": "def update(self, item_name, item_event, issnapshot):", "title": "" }, { "docid": "fc7caff2d56cca05af993e9bb8d32de5", "score": "0.46055776", "text": "def notify(self, event, **kwargs):\n eventDict = kwargs.copy()\n eventDict['event'] = event\n self.sigPlotSignal.emit(eventDict)\n\n if event == 'setKeepDataAspectRatio':\n self.sigSetKeepDataAspectRatio.emit(kwargs['state'])\n elif event == 'setGraphGrid':\n self.sigSetGraphGrid.emit(kwargs['which'])\n elif event == 'setGraphCursor':\n self.sigSetGraphCursor.emit(kwargs['state'])\n elif event == 'contentChanged':\n self.sigContentChanged.emit(\n kwargs['action'], kwargs['kind'], kwargs['legend'])\n elif event == 'activeCurveChanged':\n self.sigActiveCurveChanged.emit(\n kwargs['previous'], kwargs['legend'])\n elif event == 'activeImageChanged':\n self.sigActiveImageChanged.emit(\n kwargs['previous'], kwargs['legend'])\n elif event == 'activeScatterChanged':\n self.sigActiveScatterChanged.emit(\n kwargs['previous'], kwargs['legend'])\n elif event == 'interactiveModeChanged':\n self.sigInteractiveModeChanged.emit(kwargs['source'])\n\n eventDict = kwargs.copy()\n eventDict['event'] = event\n self._callback(eventDict)", "title": "" }, { "docid": "052a8b57c6d59bea3432538ce4546c89", "score": "0.46044087", "text": "def set_chart_marker(self, marker):\n assert isinstance(marker, list), \"'marker' %r must be a list\" % type(marker)\n for m in marker:\n if isinstance(m, list):\n self._chart.marker(*m)\n else:\n self._chart.marker(*marker)\n break", "title": "" }, { "docid": "2a977198ae7f8c8eeaf83bddd75684dd", "score": "0.45891872", "text": "def updateEvent(self, eventNr, newWeight):\n difference = newWeight - self.eventWeights[eventNr]\n if difference != 0:\n #find the highest depth we have to update\n updateDepth = int(log(abs(difference),self.__base))\n\n #the difference is positive so we start at the bottom\n if difference > 0:\n for i in range(updateDepth+1):\n # get the new weight of the event on this depth\n levelWeight = difference % self.__base**(i+1)\n\n #claculate the number of items the event should have at this level\n levelItems = int(levelWeight / self.__base**i)\n\n #update the number of items on this depth\n self.__addToEvent(i, eventNr, levelItems)\n\n difference -= levelItems * self.__base**i\n\n #the diffence is negative so we start at the top\n else:\n difference = abs(difference)\n for i in range(updateDepth, -1, -1):\n # get the new weight of the event on this depth\n levelWeight = difference % self.__base**(i+1)\n\n #claculate the number of items the event should have at this level\n levelItems = int(levelWeight / self.__base**i)\n\n #update the number of items on this depth\n self.__removeFromEvent(i, eventNr, levelItems)\n\n difference -= levelItems * self.__base**i\n\n #update the total weight\n self.totalWeight -= self.eventWeights[eventNr]\n 
self.totalWeight += newWeight\n\n #update the weight of the event\n self.eventWeights[eventNr] = newWeight", "title": "" }, { "docid": "65b35917de561c32497c5ce1c8e5467b", "score": "0.4580655", "text": "def update(self, coord: Tuple[int, int], val: {0}):\n x, y = coord\n if self.mine_matrix[x][y] == -1:\n if self.env.mark(x, y, val):\n #print(f'Mark ({x}, {y}) as safe')\n self.mine_matrix[x][y] = 0\n _, mines = self.env.query(x, y)\n self.kb.append((x, y, mines))\n self.num_matrix[x][y] = mines\n else:\n self.mine_matrix[x][y] = 1\n self.fail += 1\n #print(f'Error: Mark ({x}, {y}) as safe')", "title": "" }, { "docid": "3d0c978f3ffe6b452f5e6f1118a16f22", "score": "0.4570484", "text": "def update_dataset_marker(self):\n start_time = self.parent.overview.start_time\n\n markers = []\n if self.parent.info.markers is not None:\n markers = self.parent.info.markers\n\n self.idx_marker.clearContents()\n self.idx_marker.setRowCount(len(markers))\n\n for i, mrk in enumerate(markers):\n abs_time = (start_time +\n timedelta(seconds=mrk['start'])).strftime('%H:%M:%S')\n dur = timedelta(seconds=mrk['end'] - mrk['start'])\n duration = '{0:02d}.{1:03d}'.format(dur.seconds,\n round(dur.microseconds / 1000))\n\n item_time = QTableWidgetItem(abs_time)\n item_duration = QTableWidgetItem(duration)\n item_name = QTableWidgetItem(str(mrk['name']))\n\n color = self.parent.value('marker_color')\n item_time.setForeground(QColor(color))\n item_duration.setForeground(QColor(color))\n item_name.setForeground(QColor(color))\n\n self.idx_marker.setItem(i, 0, item_time)\n self.idx_marker.setItem(i, 1, item_duration)\n self.idx_marker.setItem(i, 2, item_name)\n\n # store information about the time as list (easy to access)\n marker_start = [mrk['start'] for mrk in markers]\n marker_end = [mrk['end'] for mrk in markers]\n self.idx_marker.setProperty('start', marker_start)\n self.idx_marker.setProperty('end', marker_end)\n\n if self.parent.traces.data is not None:\n self.parent.traces.display()\n self.parent.overview.display_markers()", "title": "" }, { "docid": "95161dec9eecd0d57bc41864c96835f4", "score": "0.45466718", "text": "def update_annotation(\n self,\n dt,\n points,\n ):\n return None", "title": "" }, { "docid": "be5743c7d4dde06d27b04a4326482a62", "score": "0.45380002", "text": "def update (self, **kwargs):\n self.line = kwargs.pop ('line', self.line)\n self.markers = kwargs.pop ('markers', self.markers)\n self.errorbars = kwargs.pop ('errorbars', self.errorbars)\n self.errorcaps = kwargs.pop ('errorcaps', self.errorcaps)\n self._kwargs.update (copy.deepcopy (kwargs))", "title": "" }, { "docid": "bb024cde98efa85c196ab367178a0331", "score": "0.45184407", "text": "def addMarker(self, Coord):\n for circ in self.circles:\n circ.remove()\n self.circles.pop()\n\n self.circles.append(pch.Circle(Coord,\n radius=1,\n lw=0.25,\n ec='yellow',\n fc='none'))\n\n self.ax.add_patch(self.circles[-1])\n self.draw()", "title": "" }, { "docid": "f726945a8b94a048ad004dd9933453ce", "score": "0.45092535", "text": "def paint(self, coord, new_label):\n if self.n_dimensional or self.image.ndim == 2:\n slice_coord = tuple([slice(self._to_pix(ind-self.brush_size/2, i),\n self._to_pix(ind+self.brush_size/2, i),\n 1) for i, ind\n in enumerate(coord)])\n else:\n slice_coord = tuple(list(np.array(coord[:-2]).astype(int)) +\n [slice(self._to_pix(ind-self.brush_size/2,\n len(self.shape) - 2 + i),\n self._to_pix(ind+self.brush_size/2,\n len(self.shape) - 2 + i),\n 1) for i, ind\n in enumerate(coord[-2:])])\n\n # update the labels image\n 
self._image[slice_coord] = new_label\n\n self.refresh()", "title": "" }, { "docid": "b5327415d1e26c4c2e25de9e0287e5e9", "score": "0.4508218", "text": "def set_center(self, event):\n current = self.center_coords.get()\n self.center_coords.delete(0, len(current) + 1)\n self.center_coords.insert(0, f\"{event.x}, {event.y}\")\n self.set_radius()", "title": "" }, { "docid": "fea05eba4275c59682082d9ed75b3863", "score": "0.45042056", "text": "def update_data(self, new_data: dict = None, connection_flag: bool = False) -> None:\n if new_data is not None:\n _LOGGER.debug(\n f\"Updating data for {self.location2} {self.location} {self.name} ({self.ref})\"\n )\n self._raw_data = new_data\n\n if connection_flag and self._suppress_update_callback:\n return\n\n if self._update_callback is not None:\n self._update_callback()", "title": "" }, { "docid": "62323ce7351c6b813735fbb19f7613da", "score": "0.45031968", "text": "def SetMarkerColor(self, color):\r\n self._markercolor = Color(color)\r\n if isinstance(self, ROOT.TAttMarker):\r\n ROOT.TAttMarker.SetMarkerColor(self, self._markercolor('root'))", "title": "" }, { "docid": "b61417f478a0ba0864b6ac7a8d6eb596", "score": "0.44950494", "text": "def updateGeom(self,event): \n \n if isinstance(event, AddGeomsEvent):\n action='add'\n elif isinstance(event, DeleteGeomsEvent):\n action='delete'\n elif isinstance(event, EditGeomsEvent):\n action='edit'\n else:\n import warnings\n warnings.warn('Bad event %s for epmvAdaptor.updateGeom'%event)\n return\n nodes,options = event.objects\n if event.arg == 'iso' :\n self._isoSurface(nodes,options)\n return\n mol, atms = self.mv.getNodesByMolecule(nodes, Atom)\n #################GEOMS EVENT############################################\n if event.arg == 'lines' and action =='edit' :\n self._editLines(mol,atms)\n elif event.arg == 'cpk' and action =='edit' and not self.useLog :\n self._editCPK(mol,atms,options)\n elif event.arg == 'bs' and action =='edit' and not self.useLog :\n self._editBS(mol,atms,options)\n elif event.arg == 'trace' and action =='edit' and not self.useLog :\n print(\"displayTrace not supported Yet\")\n #displayTrace should use a linear spline extruded like _ribbon command\n elif event.arg[0:4] == 'msms' and action =='edit' and not self.useLog :\n\t\t\t#there is 2 different msms event : compute msms_c and display msms_ds\n if event.arg == \"msms_c\" : #ok compute\n self._computeMSMS(mol,atms,options)\n elif event.arg == \"msms_ds\" : #ok display\n self._displayMSMS(mol,atms,options) \n elif event.arg[:2] == 'SS' and action =='edit' and not self.useLog :\n #if event.arg == \"SSextrude\":\n # self._SecondaryStructure(mol,atms,options,extrude=True)\n if event.arg == \"SSdisplay\":\n self._SecondaryStructure(mol,atms,options)\n if event.arg == \"SSdisplayU\":\n self._SecondaryStructure(mol,atms,options,uniq=True)\n \n #the bead riibbon ?\n #('bead', [nodes,[params,redraw]],setOn=setOn, setOff=setOff)\n elif event.arg == 'bead' and action =='edit' :\n self._beadedRibbons(mol,atms,options[0])\n elif event.arg == 'beadU' and action =='edit' :\n self._beadedRibbons(mol,atms,options[0],uniq=True)\n #self.beadedRibbons(\"1crn\", redraw=0, log=1)\n #################COLOR EVENT############################################\n elif event.arg[0:5] == \"color\" : #color Commands\n #in this case liste of geoms correspond to the first options\n #and the type of function is the last options\n self._color(mol,atms,options)\n elif event.arg == 'struts' and action =='edit' :\n self._struts(mol,atms,options[0])#nodes, params", 
"title": "" }, { "docid": "e6f380641f86cb95d127436f78200711", "score": "0.44877583", "text": "def zoommove(self, event):\n\t\t# capture click data and draw dot for user feedback\n\t\tif event.xdata == None:\n\t\t\treturn\n\t\tif(self.add == None):\n\t\t\tdlog('Error: Centroid has no subplot to draw to')\n\t\t\treturn\n\t\t# set the start coords for the zooming regions\n\t\tself.zoomend = event.xdata\n\t\tself.add.cla()\n\t\tself.displayPlot()\n\t\tself.add.plot([self.zoomstart, self.zoomstart], [self.workingChiplot.ymin, self.workingChiplot.ymax], color = 'r')\n\t\tself.add.plot([self.zoomend, self.zoomend], [self.workingChiplot.ymin, self.workingChiplot.ymax], color = 'r')\n\t\tself.canvas.draw()", "title": "" }, { "docid": "736a88c578a0f2805bebf70f72c1c714", "score": "0.44737467", "text": "def update(self, entry, auth_token=None, force=False, **kwargs):\r\n raise NotImplementedError(\r\n 'GData Update operation unsupported, try update_*')", "title": "" }, { "docid": "00022168d3785cf0e7be4f459b53b408", "score": "0.44636914", "text": "def update_location(self):\n \n self.location = \"POINT(%0.8f %0.8f)\" % (self.longitude, self.latitude)", "title": "" }, { "docid": "d880bd59eeef1281c48e4a276ba7cb22", "score": "0.4456151", "text": "def update_fill(self, event):\r\n if isinstance(event, FillEvent):\r\n self.update_positions_after_fill(event)\r\n self.update_holdings_after_fill(event)", "title": "" }, { "docid": "9293c39c6a84aa1fbd92c53a0454015d", "score": "0.44549307", "text": "def __setitem__(self, key, value):\n self._coord[key] = value", "title": "" }, { "docid": "4753e6c5a1dbdb23fe34c73df41d49bf", "score": "0.444224", "text": "def update(self, chunk):", "title": "" }, { "docid": "24c53a5cf17c8e3ecf2b41ed73149c10", "score": "0.44382387", "text": "def Notify(self, event):\n if isinstance(event, TickEvent):\n self.backSprites.clear(self.window, self.background)\n self.frontSprites.clear(self.window, self.background)\n\n self.backSprites.update()\n self.backSprites.update()\n\n dirtyRects1 = self.backSprites.draw(self.window)\n dirtyRects2 = self.frontSprites.draw(self.window)\n\n dirtyRects = dirtyRects1 + dirtyRects2\n pygame.display.update(dirtyRects)\n\n elif isinstance(event, MapBuiltEvent):\n map = event.map\n self.ShowMap(map)\n elif isinstance(event, CharacterPlaceEvent):\n self.ShowCharacter(event.character)\n elif isinstance(event, CharacterMoveEvent):\n self.MoveCharacter(event.character)", "title": "" }, { "docid": "96dbacd89b52eb742e9b23cf35b36da6", "score": "0.44263393", "text": "def mark(self):\r\n\r\n raise NotImplementedError", "title": "" }, { "docid": "08177008d01853e30fec255732be59aa", "score": "0.44191065", "text": "def leaf_modify(self, func):\n for key, value in self.leaf_items():\n self[key] = func(value)", "title": "" }, { "docid": "b88ca9c5e7fd0c3e21e3ea698b82ca70", "score": "0.44160187", "text": "def notify(self, event: Event):\r\n raise NotImplementedError(\"You must override Listener.notify\")", "title": "" }, { "docid": "356eac66cb50766ae7effacc69cd6f57", "score": "0.44159484", "text": "def event_update(self):\n\n url = \"/events/%s\" % (str(self.event_id))\n data = self._conn.request(url)\n log.debug(\"Updating Event\")\n log.debug(data)\n\n data['event']['event_id'] = data['event']['id']\n data['event']['id'] = data['event']['droplet_id']\n\n self.__dict__.update(**data['event'])", "title": "" }, { "docid": "84cb8f4b719df55ef94bba57ecef778a", "score": "0.44125882", "text": "def update_indicator(self) -> None:\n self.set_position()\n self.repaint()", "title": 
"" }, { "docid": "034c513bb7557877e79d1b687e06bd89", "score": "0.44064003", "text": "def update(self, e=None, **kwargs):\n try:\n for k in e:\n self.__setitem__(k, e[k])\n except AttributeError:\n for (k, v) in e:\n self.__setitem__(k, v)\n for k in kwargs:\n self.__setitem__(k, kwargs[k])", "title": "" }, { "docid": "0ba275ee64a476d577932eafff16c12e", "score": "0.44059756", "text": "def mutateIndividual(self, individual):\n raise NotImplementedError", "title": "" }, { "docid": "b29b084cf0aec6df5c7afccebb811bbf", "score": "0.43900952", "text": "def update(self, coord, char_name):\n x, y = coord\n self.map[y][x] = NAME_TO_CHAR[char_name]", "title": "" }, { "docid": "c019700760e2ef2d77ac3e23ae3a7372", "score": "0.43655112", "text": "def update_state(self, iid, state):\n if state not in [\"normal\", \"hover\", \"active\"]:\n raise ValueError(\"Invalid state: {}\".format(state))\n marker = self._markers[iid]\n rectangle_id, text_id = marker[\"rectangle_id\"], marker[\"text_id\"]\n state = \"\" if state == \"normal\" else state + \"_\"\n colors = {}\n for color_type in [\"background\", \"foreground\", \"outline\", \"border\"]:\n value = marker[state + color_type]\n attribute = \"_marker_{}\".format(color_type)\n colors[color_type] = getattr(self, attribute) if value == \"default\" else value\n self._timeline.itemconfigure(rectangle_id, fill=colors[\"background\"], width=colors[\"border\"],\n outline=colors[\"outline\"])\n self._timeline.itemconfigure(text_id, fill=colors[\"foreground\"])", "title": "" }, { "docid": "8d1c699671a07e3497e97a47d7afacf7", "score": "0.435958", "text": "def update_with_observations(self,observation):\n raise NotImplementedError( \"Should have implemented this\" )\n pass", "title": "" }, { "docid": "8d093a3ed7a7b0bb14000e95516b98b2", "score": "0.43544", "text": "def update(self) -> None:\n self.icon.update()\n self.time_series.update()", "title": "" }, { "docid": "53922aa68457bb6d855630d90f3b7c74", "score": "0.43509215", "text": "def eventModified(self, event):\n\t\tself.logger.debug(\"Received the following modify event: %s\" % event.getString())\n\n\t\tdata = {}\n\t\tdata['type'] = 'modify'\n\t\tdata['itemID'] = event.root.metadata.objectID.text\n\t\tdata['timestamp'] = event.root.metadata.timestamp.text\n\t\tdata['event'] = event.getString()\n\n\t\t# Send to webservice\n\t\tresult = self.sendDataToWebService(data)\n\t\t\n\t\ttry:\n\t\t\tresultXml = superxml.fromString(result)\n\t\texcept:\n\t\t\tself.logger.error(\"Received invalid response from webservice. 
Event not synced correctly.\")\n\t\t\treturn \"Error\"\n\t\t\n\t\tif (resultXml.root.status.text == 'OK'):\n\t\t\tself.logger.debug(\"Item synced succesfully.\")\n\t\t\treturn \"Success\"\n\t\telse:\n\t\t\tself.logger.error(\"Remote webservice returned the following error: %s\" % resultXml.root.message.text)\n\t\t\treturn \"Error\"", "title": "" }, { "docid": "5313ff7f38481d696d8951b7853b6101", "score": "0.43463522", "text": "def update(self, to_update):\n updated_map = self.map.copy()\n updated_map.update(to_update)\n self.map = updated_map", "title": "" }, { "docid": "3a816d182cd1a03ed27a551e57174b4e", "score": "0.43359545", "text": "def set(self, loc: Location, increment: float):\n self.world[loc[0] % self.world_size][loc[1] %\n self.world_size] += increment", "title": "" }, { "docid": "2ee346046e12ca7ce050f03838d357ee", "score": "0.43260747", "text": "def update_one_segment(self, segment_ind, ptt_tuple, add_to_current=True):\n if add_to_current:\n command_list = util.create_zero_list(self.aperture.number_segments_in_pupil)\n command_list[segment_ind] = ptt_tuple\n self.add_map(command_list)\n else:\n self.data[segment_ind] = ptt_tuple", "title": "" }, { "docid": "9f232343548dbd043a4a9c6ee263513f", "score": "0.43200767", "text": "def _put_event(self, student, event_entity, event_key):\n if student.is_transient or event_entity not in self.EVENT_CODE_MAPPING:\n return\n\n progress = self.get_or_create_progress(student)\n\n self._update_event(\n student, progress, event_entity, event_key, direct_update=True)\n\n progress.updated_on = datetime.datetime.now()\n progress.put()", "title": "" }, { "docid": "404a5bd88ac3daf1f2806db223827924", "score": "0.4303512", "text": "def update_cell(self, cell, dist_to_bmu, input_vector, t):\n self.som[cell] += self.N(dist_to_bmu, t) * self.L(t) * (input_vector - self.som[cell])", "title": "" }, { "docid": "edb56f4843baefe53076e2add54097f3", "score": "0.42992508", "text": "def crises_update(self, event):\n payload = event[\"payload\"]\n self.send(payload)", "title": "" }, { "docid": "526a8dc50ff9058216d3ed7993ca8525", "score": "0.42991757", "text": "def update_locations(self, old_prefix, new_prefix):\n for mark in self:\n #year = date[0:4]\n #prefix = '/mnt/sdcard/external_sd/podcasts/beats_in_space/'\n #new_source = os.path.join(prefix, year, date, correct_name)\n relative = Path(mark.source).to_relative(old_prefix)\n new_source = os.path.join(new_prefix, relative)\n\n if new_source != mark.source:\n logging.debug(\"original source: %s\" % mark.source)\n logging.debug(\"new source: %s\" % new_source)\n\n mark.source = new_source", "title": "" }, { "docid": "4fa178d17ac1a67740acb9d96e127ebb", "score": "0.42865208", "text": "def update_plot(self, **p_kwargs):\n pass", "title": "" }, { "docid": "6e58a15d68439268e865ab3a07016441", "score": "0.42864162", "text": "def updateData(self, run):\n self.monte_carlo_data.counter_inside += self.counter_inside\n self.monte_carlo_data.counter_outside += self.counter_outside\n\n self.output(run)\n\n self.counter_inside = 0\n self.counter_outside = 0", "title": "" }, { "docid": "5c3692a91c71ba1cc2ef1bc05914a6e4", "score": "0.42759508", "text": "def SetMarkerStyle(self, style):\r\n self._markerstyle = MarkerStyle(style)\r\n if isinstance(self, ROOT.TAttMarker):\r\n ROOT.TAttMarker.SetMarkerStyle(self, self._markerstyle('root'))", "title": "" }, { "docid": "a7070f5cce168d15b486fe6455791adf", "score": "0.42688948", "text": "def update(self, measurement, val, pos):\n measurement.S.add(pos)\n l = 0\n for j in range(0, 
self.num_blocks):\n measurement.y[l + self.hlist[j](pos)] += val * self.sigmalist[j](pos)\n l += self.blist[j]\n return measurement", "title": "" }, { "docid": "d7b711d9f1af1d606fedb2315d4e5084", "score": "0.4267697", "text": "def update(self, mapping):\r\n self.frames[0].update(mapping)", "title": "" }, { "docid": "311f823ac4420c3bb818c2621b14e127", "score": "0.42646652", "text": "def update(self, mapping):\n self.frames[0].update(mapping)", "title": "" }, { "docid": "4d0b3d5550cfa5a14ccb13a892dd2755", "score": "0.42641664", "text": "def update_fill(self, event):\n if event.type == EventType.FILL:\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)", "title": "" }, { "docid": "36aebbba031fe40338195fcf588e38d8", "score": "0.4259198", "text": "def update_codon_dict(self,codon_pos):\n self.codon_pos_dict[codon_pos] += 1", "title": "" }, { "docid": "a32e20d968463f57b6aa3bdcc1c0e3f5", "score": "0.42581668", "text": "def mark_event(self, value: int = 1):\n self._inner_counter.inc(value)", "title": "" }, { "docid": "961013b9e23ca07559d4c0f2727f2fda", "score": "0.4257804", "text": "def analysis(self, ope):\n super().analysis(ope)\n self.update_coordinates_correspondance(ope)", "title": "" }, { "docid": "53f2d6f9116cc8b3c62fa611f97ee756", "score": "0.42546538", "text": "def touchEntry(self):\n self.modified = Time()\n self.occurences += 1", "title": "" }, { "docid": "17b73c36002ed0666ea1ffbfcea84866", "score": "0.42533034", "text": "def updateModel(self,event): \n \n if isinstance(event, AfterDeleteAtomsEvent):#DeleteAtomsEvent):\n action='deleteAt'\n #when atom are deleted we have to redo the current representation and \n #selection\n atom_set = event.objects\n #mol = atom_set[0].getParentOfType(Protein)\n #need helperFunction to delete Objects\n for i,atms in enumerate(atom_set):\n nameo = \"S\"+\"_\"+atms.full_name()\n o=self._getObject(nameo)\n if o is not None :\n# print nameo\n self.helper.deleteObject(o)\n #and the ball/stick\n nameo = \"B\"+\"_\"+atms.full_name()\n o=self._getObject(nameo)\n if o is not None :\n# print nameo\n self.helper.deleteObject(o)\n #and the bonds...and other geom?\n mol = atom_set[0].top#getParentOfType(Protein)\n self.mv.buildBondsByDistance(mol,log=0)\n elif isinstance(event, BeforeDeleteMoleculesEvent):\n action='deletMol'\n# print action,dir(event)\n mols = event.arg\n for mol in mols :\n# print \"delete\",mol.name\n self.delMolDic(mol.name)\n self.delGeomMol(mol)\n if self.gui is not None:\n #need to update molmenu\n self.gui.resetPMenu(self.gui.COMB_BOX[\"mol\"]) \n self.gui.restoreMolMenu()", "title": "" }, { "docid": "678eb22b2764251da4acc6013ae9c4ab", "score": "0.42514184", "text": "def _update_entry(wl_obj, entry, resort):\r\n try:\r\n # If the edited entry is part of a series (either as the parent\r\n # or a child), ask whether to apply the edits to the entire\r\n # series or only the one entry--but ONLY if the date attribute\r\n # hasn't been changed. If the date attribute HAS been changed,\r\n # apply all the edits to just the one entry, even if other\r\n # attributes have also been changed.\r\n if (\r\n (entry.recurring is True or entry.rec_parent) and\r\n (entry.date == entry.info[\"date\"])):\r\n response = io_utils.menu(\r\n [\"Edit this task only\", \"Edit all tasks in the series\"],\r\n keystroke_list=\"#\", prompt=\"This task is part of a series. 
\" +\r\n \"Do you want to apply your changes to all the tasks in this \" +\r\n \"series?\", line_length=wl_obj.line_length)\r\n # If the user chooses to back out, just return without\r\n # updating.\r\n if response == QUIT:\r\n return\r\n elif response == UPDATE_ONE:\r\n edit_series = False\r\n else: # response == UPDATE_ALL\r\n edit_series = True\r\n # end if\r\n else:\r\n edit_series = False\r\n # end if\r\n # Find the original entry (and, if applicable, any child\r\n # entries).\r\n for ndx in range(len(wl_obj.entries)):\r\n if (\r\n (wl_obj.entries[ndx].id == entry.id) or\r\n (edit_series and\r\n ((wl_obj.entries[ndx].rec_parent == entry.rec_parent) or\r\n (wl_obj.entries[ndx].id == entry.rec_parent)))):\r\n # Simpler to overwrite the values (even if unchanged).\r\n wl_obj.entries[ndx].title = entry.title\r\n wl_obj.entries[ndx].time = entry.time\r\n wl_obj.entries[ndx].duration = entry.duration\r\n wl_obj.entries[ndx].notes = entry.notes\r\n # Recalculate the datetime attribute, in case it's\r\n # changed.\r\n wl_obj.entries[ndx].datetime = (\r\n wl_add.add_datetime(wl_obj.entries[ndx]))\r\n # If title, date or time changed, need to update sort\r\n # lists.\r\n if resort:\r\n for n in range(len(wl_obj.sorts[TITLE_SORT])):\r\n # Update and re-sort the list.\r\n if wl_obj.sorts[TITLE_SORT][n][ENTRY_ID] == entry.id:\r\n wl_obj.sorts[TITLE_SORT][n] == (\r\n entry.title, entry.datetime, entry.id)\r\n wl_obj.sorts[TITLE_SORT].sort()\r\n # end if\r\n # end for\r\n for n in range(len(wl_obj.sorts[DATE_SORT])):\r\n # Update and re-sort the list.\r\n if wl_obj.sorts[DATE_SORT][n][ENTRY_ID] == entry.id:\r\n wl_obj.sorts[DATE_SORT][n] == (\r\n entry.datetime, entry.title, entry.id)\r\n wl_obj.sorts[DATE_SORT].sort()\r\n break\r\n # end if\r\n # end for\r\n # end if\r\n # end if\r\n # end for\r\n return\r\n except Exception as err:\r\n _z_exc(\"wl_viewedit.py/_update_entry\", err)\r\n # end try\r", "title": "" }, { "docid": "cb94243f835a220d21916cd6f33dc82b", "score": "0.42447966", "text": "def update_signal(self, event):\r\n if isinstance(event, SignalEvent):\r\n order_event = self.generate_naive_order(event)\r\n self.events.put(order_event)", "title": "" }, { "docid": "b76272c4ffb7a049dc34715ee808f5b2", "score": "0.4241223", "text": "def draw_marker(self, position, *, color, label, marker_symbol, size):\n raise NotImplementedError(\n \"This method is expected to be override by a plotting backend class.\"\n )", "title": "" }, { "docid": "39f34b7f7d3cc0f72ffb9825c32f3c6e", "score": "0.4240004", "text": "def change_shape(self, cpoint, val):\r\n\r\n idx = self.control_points_idx[cpoint]\r\n self.control_points[idx] += val", "title": "" }, { "docid": "7d88256dae6af826226b9ed9d4853dd8", "score": "0.42376193", "text": "def _register_marking(new_marking, version=version.DEFAULT_VERSION):\n if not version:\n version = version.DEFAULT_VERSION\n\n mark_type = new_marking._type\n _validate_type(mark_type, version)\n _validate_props(new_marking._properties, version)\n\n OBJ_MAP_MARKING = registry.STIX2_OBJ_MAPS[version]['markings']\n if mark_type in OBJ_MAP_MARKING.keys():\n raise DuplicateRegistrationError(\"STIX Marking\", mark_type)\n OBJ_MAP_MARKING[mark_type] = new_marking", "title": "" }, { "docid": "eecc4c2967044610eba8e8d647ef8a19", "score": "0.4235929", "text": "def UpdateEntryShape(self, shapes):\n raise NotImplementedError()", "title": "" }, { "docid": "fe40ae459adfb30658438c6b2082e862", "score": "0.42333382", "text": "def update_location(self, new_location: SliceLocation) -> 'Slice':\n 
self.location = new_location\n self.save()\n return self", "title": "" }, { "docid": "950c3f47466de9318be829645bb9c3df", "score": "0.42327356", "text": "def __itemUpdated(self, event):\n if event == items.ItemChangedType.DATA:\n self.sigCurrentItemChanged.emit(self.__currentItem)", "title": "" }, { "docid": "ff54a139c8ad62dff87b17f10f0d0af6", "score": "0.42296886", "text": "def mark(self, mark_name):\n self._marks[mark_name] = self._current_line", "title": "" } ]
c7872bd282a051e526f485e11f6379c8
Help to encode message. Called before message encoding.
[ { "docid": "9bf32b62df0636367e7201fcc181c3af", "score": "0.0", "text": "def before_encode(cls, func):\n\n @functools.wraps(func)\n def wrap(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrap", "title": "" } ]
[ { "docid": "c6241b38b2421d64e9fd4f34efabf7eb", "score": "0.6964077", "text": "def encode(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "2e45b8167aacfe58cba85502f8a700ab", "score": "0.6803782", "text": "def encode(message):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"2\")\n # END OF SOLUTION", "title": "" }, { "docid": "22b70984b491ee53b1d1ec1709a0e842", "score": "0.65393436", "text": "def encode(self):\n pass", "title": "" }, { "docid": "22b70984b491ee53b1d1ec1709a0e842", "score": "0.65393436", "text": "def encode(self):\n pass", "title": "" }, { "docid": "d2df5f63d6d9ab21ff3a60fd55040a95", "score": "0.6471871", "text": "def encode(self):\n \n pass", "title": "" }, { "docid": "43e05ff351da290f8ed943b47e6687e7", "score": "0.63610137", "text": "def encode(self):\r\n if self.mtype is None or self.mid is None:\r\n raise TypeError(\"Fatal Error: Message Type and Message ID must not be None.\")\r\n rawdata = chr((self.version << 6) + ((self.mtype & 0x03) << 4) + (len(self.token) & 0x0F))\r\n rawdata += struct.pack('!BH', self.code, self.mid)\r\n rawdata += self.token\r\n rawdata += self.opt.encode()\r\n if len(self.payload) > 0:\r\n rawdata += chr(0xFF)\r\n rawdata += self.payload\r\n return rawdata", "title": "" }, { "docid": "eaec35ebcc8988f048a09d7166845f10", "score": "0.6192611", "text": "def encode_message(self, key):\n\n encoded_message = ''\n for char in self.message:\n if char.isalpha():\n encoded_char = self.convert_char(char, key)\n encoded_message = encoded_message + encoded_char\n else:\n encoded_message = encoded_message + char\n return encoded_message", "title": "" }, { "docid": "0fb9988140cb30461d3b53fa3da73df6", "score": "0.6168271", "text": "def test_encode_functions(self):\n m = mido.messages\n\n # These have no type and value checks, since the data\n # is assumed to be correct already. 
(It was checked on\n # the way into the object.)\n\n # Channel should be ignored, and an empty list returned.\n # Thus, there is no reason to check for TypeError\n # and ValueError.\n self.assertEqual(m.encode_channel(channel=0), [])\n\n # Encode data\n sysex_end_byte = 0xf7\n self.assertEqual([1, 2, 3, sysex_end_byte], m.encode_data((1, 2, 3)))\n\n # Pitchwheel pitch\n self.assertEqual([0, 0], m.encode_pitch(m.MIN_PITCHWHEEL))\n self.assertEqual([127, 127], m.encode_pitch(m.MAX_PITCHWHEEL))\n self.assertEqual([0, 64], m.encode_pitch(0))\n\n # Song position\n self.assertEqual([0, 0], m.encode_pos(0))\n self.assertEqual([127, 127], m.encode_pos(m.MAX_SONGPOS))\n # Check endian\n self.assertEqual([16, 78], m.encode_pos(10000))", "title": "" }, { "docid": "40037188b5e0622a66079a39fd74aea2", "score": "0.616511", "text": "def encode(self, plain_text, senders_key):", "title": "" }, { "docid": "878af66a3bf10e4df56eedaae179d99c", "score": "0.6162301", "text": "def post_encode(msg: Message):\n return {\"Encoded message\": symmetric.encode_message(msg.text)}", "title": "" }, { "docid": "b800f2525ebc5d0c1b54721f525534af", "score": "0.616213", "text": "def encode(self):\n raise NotImplementedError()", "title": "" }, { "docid": "73f91de947ee6b415d3daaa0075133b8", "score": "0.6141686", "text": "def encode(self, msg):\n return str(msg) if sys.stdout.encoding == 'utf-8' else \\\n bytes(msg, self.encoding).decode(sys.stdout.encoding)", "title": "" }, { "docid": "8a158bba40b6bae20b848aea9501d0db", "score": "0.6135857", "text": "def encodeString():\n pass", "title": "" }, { "docid": "9f128457c2b63e0ff46e8d6775053988", "score": "0.606918", "text": "def encode(self, desc):\n raise NotImplementedError", "title": "" }, { "docid": "bc56377ead7cf8c69090c43c445c3db7", "score": "0.5978397", "text": "def encode(self) -> Dict[str, Any]:\n res = {\"mes\": self._text, \"charset\": self._charset} # type: Dict[str, Any]\n if self._format is not None and self._format != \"\":\n res[self._format] = 1\n if self._translit is not None:\n res[\"translit\"] = self._translit\n if self._tinyurl is not None:\n res[\"tinyurl\"] = self._tinyurl\n if self._maxsms is not None:\n res[\"maxsms\"] = self._maxsms\n return res", "title": "" }, { "docid": "8850336512e34edffe71722b30632a43", "score": "0.5971328", "text": "def _prepare_message(self):\n self.subject = force_unicode(self.subject, strings_only=True)\n self.message = force_unicode(self.message, strings_only=True)\n self.extra_tags = force_unicode(self.extra_tags, strings_only=True)", "title": "" }, { "docid": "d098f699fbee7f9533613b636a67b3ef", "score": "0.59426033", "text": "def encoding(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "b91f8d438b86a7caf33c678651d90521", "score": "0.59391856", "text": "def encode(self, note_seq=None):", "title": "" }, { "docid": "78c6374d9718996301aa1f57f63f66f7", "score": "0.59155643", "text": "def test_encodeMsg(self):\n slipMsg = SLIPMsg(256)\n slipMsg.encodeMsg(testMsg)\n encodedMsg = self.msgParser.encodeMsg(testMsg)\n assert(encodedMsg == slipMsg.encoded)", "title": "" }, { "docid": "73358954d24468ddbbfbdb0914c613c7", "score": "0.58803046", "text": "def encode(self, x=None):\n pass", "title": "" }, { "docid": "1641ebbbbbc15b6711ad12704a828966", "score": "0.5874669", "text": "def test_encode(self):\n pass # TODO(tlarsen)", "title": "" }, { "docid": "4f24b6d524d18f84b03877742dc22d52", "score": "0.58626115", "text": "def lua_encode(self) -> str:\n ...", "title": "" }, { "docid": "6600e8670e64cf778733b5c65bf8c044", "score": 
"0.5800443", "text": "def encode(self, data):\n raise NotImplementedError", "title": "" }, { "docid": "d870aa2885c0f469da11575d8d774f0a", "score": "0.5765854", "text": "def gen_message(self) -> str:", "title": "" }, { "docid": "fdcb5f2d4fa6e39e0468949414093264", "score": "0.5764565", "text": "def encode(self, data):\r\n return protocol.encode(data)", "title": "" }, { "docid": "ba8a1ef35627aeb59a6d7a759abeba47", "score": "0.57632166", "text": "def Message(self) -> str:", "title": "" }, { "docid": "92ebb935230eec09d1cece59c212751f", "score": "0.5732989", "text": "def message(self):", "title": "" }, { "docid": "7aed513a6fdd1c1bc2e12b705e282b4a", "score": "0.5712544", "text": "def encode(message):\n return pickle.dumps(message, 3)", "title": "" }, { "docid": "1ca62909fce1cca30fcf72cc18fcea28", "score": "0.5695107", "text": "def encode(payload):", "title": "" }, { "docid": "3a24ffb4d611a09a78a68962ae1f21c2", "score": "0.5660466", "text": "async def encode(self, ctx, *, message: str):\n confirm_letters = self.character_check.match(message)\n if not confirm_letters:\n return await ctx.send(\"You must provide only alphanumeric characters / numbers.\")\n\n encoded = self.encode_morse(message)\n if not encoded:\n return await ctx.send(\n \"Was unable to encode your text into morse code.\"\n \"\\nIf this is a recurring issue, please reach out to my support channel.\"\n \" Which is in red's cog support server.\"\n )\n for page in pagify(encoded):\n await ctx.send(page)", "title": "" }, { "docid": "bcd624347f209b2721e2232655b8316d", "score": "0.56442344", "text": "def encode_data(data):", "title": "" }, { "docid": "2462f3584c7d8ea27a2797c3ed47ca0a", "score": "0.5641589", "text": "def message(self) -> str:\n ...", "title": "" }, { "docid": "faafd768c1a0e4f2273ee96ac93d0a9f", "score": "0.56311524", "text": "def message():", "title": "" }, { "docid": "915cf17c7c082c4744a137f26cb63d53", "score": "0.56134623", "text": "def encode(self):\n called_ae_title = self.called_ae_title.encode()\n calling_ae_title = self.calling_ae_title.encode()\n return self.header.pack(self.pdu_type, self.reserved1, self.pdu_length,\n self.protocol_version, self.reserved2,\n called_ae_title, calling_ae_title,\n *self.reserved3) \\\n + b''.join([item.encode() for item in self.variable_items])", "title": "" }, { "docid": "e4f0593295fb5a475fbfd295806cae89", "score": "0.5606854", "text": "def encode(self, *args, **kwargs):\n return self.value.encode(*args, **kwargs)", "title": "" }, { "docid": "146ba62aa2c2054fe03e8e0f7f7c2417", "score": "0.55853313", "text": "def encode(self, *args, **kwargs):\n return self.encoder(*args, **kwargs)", "title": "" }, { "docid": "146ba62aa2c2054fe03e8e0f7f7c2417", "score": "0.55853313", "text": "def encode(self, *args, **kwargs):\n return self.encoder(*args, **kwargs)", "title": "" }, { "docid": "76ce55dd883512ea09cd5371db208fa5", "score": "0.5580279", "text": "def encode(self, value):", "title": "" }, { "docid": "d7d881402c5961f96002417badab231e", "score": "0.5572194", "text": "def encode(self, data):\n return data", "title": "" }, { "docid": "7a74eda9dda1a6f1ebb25955c2ad5525", "score": "0.55370593", "text": "def _encode_text(self):\n\n print(f\"Vigenere Cipher encode; received message is {self.message}\")\n\n finalKey = self._applyCipher(self.keyword)\n cipherText = \"\"\n for i in range(len(self.message)):\n encodedCharSequence = (ord(self.message[i]) + ord(finalKey[i])) % 26\n cipherText += chr(encodedCharSequence + self.upperCaseAsciiValueStart)\n\n return cipherText", "title": "" }, { "docid": 
"3867a4022104be19a4e0d8cc682a94df", "score": "0.55300665", "text": "def __init__(self, encode):\n\n self.encode = encode", "title": "" }, { "docid": "b0b15042882203bf5a3d0c532058a9c7", "score": "0.55275065", "text": "def encode(img, msg):\n\tassert type(msg) is str\n\n\tim = imgToNP(img)\n\th, w, c = np.shape(im)\n\tim = np.ravel(im)\n\n\tasciiArr = [ord(i) for i in msg]\n\n\t# Write the length of the message to the first 32 bits\n\tmsgLen = min(len(msg) * 8, len(im) - 32)\n\tfor i in range(32):\n\t\tbit = msgLen & (1 << (31 - i))\n\t\tim[i] = setLastBit(im[i], bit)\n\n\timPos = 32\n\tfor i in asciiArr:\n\t\tfor j in xrange(7, -1, -1):\n\t\t\tbit = i & (1 << j)\n\t\t\ttry:\n\t\t\t\tim[imPos] = setLastBit(im[imPos], bit)\n\t\t\texcept:\n\t\t\t\tprint('Image too small to write entire message')\n\t\t\t\tprint('Returning truncated version\\n')\n\t\t\t\tim = im.reshape(h, w, c)\n\t\t\t\tcv2.imwrite('encode_' + img, im)\n\t\t\t\treturn\n\t\t\timPos += 1\n\n\tim = im.reshape(h, w, c)\n\tcv2.imwrite('encode_' + img, im)", "title": "" }, { "docid": "ad60ca7b7ea4a44b113555c607f2c122", "score": "0.5523146", "text": "def encode_key(self, data: bytes, msg: InstantMessage) -> Any:\n raise NotImplemented", "title": "" }, { "docid": "424ae31c37572c9ae7237079c193c093", "score": "0.5500232", "text": "def encode_data(self, data: bytes, msg: InstantMessage) -> Any:\n raise NotImplemented", "title": "" }, { "docid": "1e56a0ce843988cdf64739cab24cba6f", "score": "0.5496678", "text": "def __str__(self):\n return \"%s|%s|%s|%s|%s|%s\" % (str(self.code), self.msg, str(self.sub_code), self.sub_msg, self.request_id, self.request)", "title": "" }, { "docid": "ff103f55b3fea64424376b101112fe7f", "score": "0.5495458", "text": "def __str__(self):\r\n message = self.get_header(\"Version\") + \" \" + str(self.code) + \" \" + reasondict[self.code] + \"\\r\\n\"\r\n message += \"Date: \" + str(self.get_header(\"Date\")) + \"\\r\\n\"\r\n #message += \"Content-Type: \" + \"text/html\" + \"\\r\\n\" # todo!\r\n if (self.get_header(\"Content-Encoding\") != \"\"):\r\n message += \"Content-Encoding: \" + str(self.get_header(\"Content-Encoding\")) + \"\\r\\n\"\r\n message += \"Content-Length:\" + str(self.get_header(\"Content-Length\")) + \"\\r\\n\"\r\n message += \"ETag: \" + str(self.get_header(\"ETag\")) + \"\\r\\n\"\r\n message += \"Connection: \" + self.get_header(\"Connection\") + \"\\r\\n\"\r\n message += \"\\n\"\r\n message += self.body\r\n #print(message)\r\n return message", "title": "" }, { "docid": "3733cd3f16582102d0916ec64abb262b", "score": "0.5475162", "text": "def assemble(self):\n hparams_str = self.hparams.assemble()\n if hparams_str:\n hparams_str = f';{hparams_str}'\n ret_val = f'{self.sent_protocol} {self.sent_by}{hparams_str}'\n return ret_val", "title": "" }, { "docid": "4512447f1562c7cdd1fc8c1061c35715", "score": "0.5466789", "text": "def encode(self):\n # Encoding invalid questions is unsupported.\n if not self.is_valid:\n raise Exception(\"Encoding invalid questions is unsupported\")\n\n # Encode the question type into \"question_type(args)\"\n return encode_function(self.question_type, self.encode_to_args())", "title": "" }, { "docid": "92021bc82e6b687a87b665c5cfd103c9", "score": "0.54553777", "text": "def _encoder(self):\n raise NotImplementedError(\"Encoder has not yet been set!\")", "title": "" }, { "docid": "2b004b780803199028cc6cf3c0f2ba73", "score": "0.5431375", "text": "def post_asymmetric_encode_message(msg: Message):\n encoded_message = asymmetric.encode_message(msg.text)\n return {\"Encoded 
message\": encoded_message}", "title": "" }, { "docid": "5d9ee3414ccada1e7dc072046138a40a", "score": "0.54078645", "text": "def message(self) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "56db767d711d12316d4d72377f9df655", "score": "0.53627014", "text": "def encode(msg: Message) -> bytes:\n msg = cast(TProtocolMessage, msg)\n message_pb = ProtobufMessage()\n dialogue_message_pb = DialogueMessage()\n t_protocol_msg = t_protocol_pb2.TProtocolMessage()\n\n dialogue_message_pb.message_id = msg.message_id\n dialogue_reference = msg.dialogue_reference\n dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]\n dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]\n dialogue_message_pb.target = msg.target\n\n performative_id = msg.performative\n if performative_id == TProtocolMessage.Performative.PERFORMATIVE_CT:\n performative = t_protocol_pb2.TProtocolMessage.Performative_Ct_Performative() # type: ignore\n content_ct = msg.content_ct\n DataModel.encode(performative.content_ct, content_ct)\n t_protocol_msg.performative_ct.CopyFrom(performative)\n elif performative_id == TProtocolMessage.Performative.PERFORMATIVE_PT:\n performative = t_protocol_pb2.TProtocolMessage.Performative_Pt_Performative() # type: ignore\n content_bytes = msg.content_bytes\n performative.content_bytes = content_bytes\n content_int = msg.content_int\n performative.content_int = content_int\n content_float = msg.content_float\n performative.content_float = content_float\n content_bool = msg.content_bool\n performative.content_bool = content_bool\n content_str = msg.content_str\n performative.content_str = content_str\n t_protocol_msg.performative_pt.CopyFrom(performative)\n elif performative_id == TProtocolMessage.Performative.PERFORMATIVE_PCT:\n performative = t_protocol_pb2.TProtocolMessage.Performative_Pct_Performative() # type: ignore\n content_set_bytes = msg.content_set_bytes\n performative.content_set_bytes.extend(content_set_bytes)\n content_set_int = msg.content_set_int\n performative.content_set_int.extend(content_set_int)\n content_set_float = msg.content_set_float\n performative.content_set_float.extend(content_set_float)\n content_set_bool = msg.content_set_bool\n performative.content_set_bool.extend(content_set_bool)\n content_set_str = msg.content_set_str\n performative.content_set_str.extend(content_set_str)\n content_list_bytes = msg.content_list_bytes\n performative.content_list_bytes.extend(content_list_bytes)\n content_list_int = msg.content_list_int\n performative.content_list_int.extend(content_list_int)\n content_list_float = msg.content_list_float\n performative.content_list_float.extend(content_list_float)\n content_list_bool = msg.content_list_bool\n performative.content_list_bool.extend(content_list_bool)\n content_list_str = msg.content_list_str\n performative.content_list_str.extend(content_list_str)\n t_protocol_msg.performative_pct.CopyFrom(performative)\n elif performative_id == TProtocolMessage.Performative.PERFORMATIVE_PMT:\n performative = t_protocol_pb2.TProtocolMessage.Performative_Pmt_Performative() # type: ignore\n content_dict_int_bytes = msg.content_dict_int_bytes\n performative.content_dict_int_bytes.update(content_dict_int_bytes)\n content_dict_int_int = msg.content_dict_int_int\n performative.content_dict_int_int.update(content_dict_int_int)\n content_dict_int_float = msg.content_dict_int_float\n performative.content_dict_int_float.update(content_dict_int_float)\n content_dict_int_bool = msg.content_dict_int_bool\n 
performative.content_dict_int_bool.update(content_dict_int_bool)\n content_dict_int_str = msg.content_dict_int_str\n performative.content_dict_int_str.update(content_dict_int_str)\n content_dict_bool_bytes = msg.content_dict_bool_bytes\n performative.content_dict_bool_bytes.update(content_dict_bool_bytes)\n content_dict_bool_int = msg.content_dict_bool_int\n performative.content_dict_bool_int.update(content_dict_bool_int)\n content_dict_bool_float = msg.content_dict_bool_float\n performative.content_dict_bool_float.update(content_dict_bool_float)\n content_dict_bool_bool = msg.content_dict_bool_bool\n performative.content_dict_bool_bool.update(content_dict_bool_bool)\n content_dict_bool_str = msg.content_dict_bool_str\n performative.content_dict_bool_str.update(content_dict_bool_str)\n content_dict_str_bytes = msg.content_dict_str_bytes\n performative.content_dict_str_bytes.update(content_dict_str_bytes)\n content_dict_str_int = msg.content_dict_str_int\n performative.content_dict_str_int.update(content_dict_str_int)\n content_dict_str_float = msg.content_dict_str_float\n performative.content_dict_str_float.update(content_dict_str_float)\n content_dict_str_bool = msg.content_dict_str_bool\n performative.content_dict_str_bool.update(content_dict_str_bool)\n content_dict_str_str = msg.content_dict_str_str\n performative.content_dict_str_str.update(content_dict_str_str)\n t_protocol_msg.performative_pmt.CopyFrom(performative)\n elif performative_id == TProtocolMessage.Performative.PERFORMATIVE_MT:\n performative = t_protocol_pb2.TProtocolMessage.Performative_Mt_Performative() # type: ignore\n if msg.is_set(\"content_union_1\"):\n if isinstance(msg.content_union_1, DataModel1):\n performative.content_union_1_type_DataModel1_is_set = True\n content_union_1_type_DataModel1 = msg.content_union_1\n DataModel1.encode(\n performative.content_union_1_type_DataModel1,\n content_union_1_type_DataModel1,\n )\n elif isinstance(msg.content_union_1, bytes):\n performative.content_union_1_type_bytes_is_set = True\n content_union_1_type_bytes = msg.content_union_1\n performative.content_union_1_type_bytes = content_union_1_type_bytes\n elif isinstance(msg.content_union_1, int):\n performative.content_union_1_type_int_is_set = True\n content_union_1_type_int = msg.content_union_1\n performative.content_union_1_type_int = content_union_1_type_int\n elif isinstance(msg.content_union_1, float):\n performative.content_union_1_type_float_is_set = True\n content_union_1_type_float = msg.content_union_1\n performative.content_union_1_type_float = content_union_1_type_float\n elif isinstance(msg.content_union_1, bool):\n performative.content_union_1_type_bool_is_set = True\n content_union_1_type_bool = msg.content_union_1\n performative.content_union_1_type_bool = content_union_1_type_bool\n elif isinstance(msg.content_union_1, str):\n performative.content_union_1_type_str_is_set = True\n content_union_1_type_str = msg.content_union_1\n performative.content_union_1_type_str = content_union_1_type_str\n elif isinstance(msg.content_union_1, (set, frozenset)) and all(\n map(lambda x: isinstance(x, int), msg.content_union_1)\n ):\n performative.content_union_1_type_set_of_int_is_set = True\n content_union_1 = msg.content_union_1\n performative.content_union_1_type_set_of_int.extend(content_union_1)\n elif isinstance(msg.content_union_1, (list, tuple)) and all(\n map(lambda x: isinstance(x, bool), msg.content_union_1)\n ):\n performative.content_union_1_type_list_of_bool_is_set = True\n content_union_1 = 
msg.content_union_1\n performative.content_union_1_type_list_of_bool.extend(\n content_union_1\n )\n elif isinstance(msg.content_union_1, dict) and all(\n map(\n lambda x: isinstance(x[0], str) and isinstance(x[1], int),\n msg.content_union_1.items(),\n )\n ):\n performative.content_union_1_type_dict_of_str_int_is_set = True\n content_union_1 = msg.content_union_1\n performative.content_union_1_type_dict_of_str_int.update(\n content_union_1\n )\n elif msg.content_union_1 is None:\n pass\n else:\n raise ValueError(\n f\"Bad value set to `content_union_1` {msg.content_union_1 }\"\n )\n if msg.is_set(\"content_union_2\"):\n if isinstance(msg.content_union_2, (set, frozenset)) and all(\n map(lambda x: isinstance(x, bytes), msg.content_union_2)\n ):\n performative.content_union_2_type_set_of_bytes_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_set_of_bytes.extend(\n content_union_2\n )\n elif isinstance(msg.content_union_2, (set, frozenset)) and all(\n map(lambda x: isinstance(x, int), msg.content_union_2)\n ):\n performative.content_union_2_type_set_of_int_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_set_of_int.extend(content_union_2)\n elif isinstance(msg.content_union_2, (set, frozenset)) and all(\n map(lambda x: isinstance(x, str), msg.content_union_2)\n ):\n performative.content_union_2_type_set_of_str_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_set_of_str.extend(content_union_2)\n elif isinstance(msg.content_union_2, (list, tuple)) and all(\n map(lambda x: isinstance(x, float), msg.content_union_2)\n ):\n performative.content_union_2_type_list_of_float_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_list_of_float.extend(\n content_union_2\n )\n elif isinstance(msg.content_union_2, (list, tuple)) and all(\n map(lambda x: isinstance(x, bool), msg.content_union_2)\n ):\n performative.content_union_2_type_list_of_bool_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_list_of_bool.extend(\n content_union_2\n )\n elif isinstance(msg.content_union_2, (list, tuple)) and all(\n map(lambda x: isinstance(x, bytes), msg.content_union_2)\n ):\n performative.content_union_2_type_list_of_bytes_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_list_of_bytes.extend(\n content_union_2\n )\n elif isinstance(msg.content_union_2, dict) and all(\n map(\n lambda x: isinstance(x[0], str) and isinstance(x[1], int),\n msg.content_union_2.items(),\n )\n ):\n performative.content_union_2_type_dict_of_str_int_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_dict_of_str_int.update(\n content_union_2\n )\n elif isinstance(msg.content_union_2, dict) and all(\n map(\n lambda x: isinstance(x[0], int) and isinstance(x[1], float),\n msg.content_union_2.items(),\n )\n ):\n performative.content_union_2_type_dict_of_int_float_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_dict_of_int_float.update(\n content_union_2\n )\n elif isinstance(msg.content_union_2, dict) and all(\n map(\n lambda x: isinstance(x[0], bool) and isinstance(x[1], bytes),\n msg.content_union_2.items(),\n )\n ):\n performative.content_union_2_type_dict_of_bool_bytes_is_set = True\n content_union_2 = msg.content_union_2\n performative.content_union_2_type_dict_of_bool_bytes.update(\n content_union_2\n )\n elif 
isinstance(msg.content_union_2, int):\n performative.content_union_2_type_int_is_set = True\n content_union_2_type_int = msg.content_union_2\n performative.content_union_2_type_int = content_union_2_type_int\n elif msg.content_union_2 is None:\n pass\n else:\n raise ValueError(\n f\"Bad value set to `content_union_2` {msg.content_union_2 }\"\n )\n if msg.is_set(\"content_union_3\"):\n if isinstance(msg.content_union_3, DataModel2):\n performative.content_union_3_type_DataModel2_is_set = True\n content_union_3_type_DataModel2 = msg.content_union_3\n DataModel2.encode(\n performative.content_union_3_type_DataModel2,\n content_union_3_type_DataModel2,\n )\n elif isinstance(msg.content_union_3, DataModel3):\n performative.content_union_3_type_DataModel3_is_set = True\n content_union_3_type_DataModel3 = msg.content_union_3\n DataModel3.encode(\n performative.content_union_3_type_DataModel3,\n content_union_3_type_DataModel3,\n )\n elif msg.content_union_3 is None:\n pass\n else:\n raise ValueError(\n f\"Bad value set to `content_union_3` {msg.content_union_3 }\"\n )\n t_protocol_msg.performative_mt.CopyFrom(performative)\n elif performative_id == TProtocolMessage.Performative.PERFORMATIVE_O:\n performative = t_protocol_pb2.TProtocolMessage.Performative_O_Performative() # type: ignore\n if msg.is_set(\"content_o_ct\"):\n performative.content_o_ct_is_set = True\n content_o_ct = msg.content_o_ct\n DataModel4.encode(performative.content_o_ct, content_o_ct)\n if msg.is_set(\"content_o_bool\"):\n performative.content_o_bool_is_set = True\n content_o_bool = msg.content_o_bool\n performative.content_o_bool = content_o_bool\n if msg.is_set(\"content_o_set_int\"):\n performative.content_o_set_int_is_set = True\n content_o_set_int = msg.content_o_set_int\n performative.content_o_set_int.extend(content_o_set_int)\n if msg.is_set(\"content_o_list_bytes\"):\n performative.content_o_list_bytes_is_set = True\n content_o_list_bytes = msg.content_o_list_bytes\n performative.content_o_list_bytes.extend(content_o_list_bytes)\n if msg.is_set(\"content_o_dict_str_int\"):\n performative.content_o_dict_str_int_is_set = True\n content_o_dict_str_int = msg.content_o_dict_str_int\n performative.content_o_dict_str_int.update(content_o_dict_str_int)\n t_protocol_msg.performative_o.CopyFrom(performative)\n elif (\n performative_id == TProtocolMessage.Performative.PERFORMATIVE_EMPTY_CONTENTS\n ):\n performative = t_protocol_pb2.TProtocolMessage.Performative_Empty_Contents_Performative() # type: ignore\n t_protocol_msg.performative_empty_contents.CopyFrom(performative)\n else:\n raise ValueError(\"Performative not valid: {}\".format(performative_id))\n\n dialogue_message_pb.content = t_protocol_msg.SerializeToString()\n\n message_pb.dialogue_message.CopyFrom(dialogue_message_pb)\n message_bytes = message_pb.SerializeToString()\n return message_bytes", "title": "" }, { "docid": "89ace16ff6316551b4e0ba9bd80d70d9", "score": "0.5360983", "text": "def encode(message: str) -> str:\n encoded = \"\"\n for symbol in message:\n if symbol == \" \" or symbol == \"\\n\":\n # Space between words should have length of 7 single dot, but after every symbol there is already 3 dots\n # space\n encoded += \" \" * 4\n else:\n encoded += symbols[symbol] + \" \" * 3\n return encoded", "title": "" }, { "docid": "920d847a2673ac3d823ea5228e7dd1e6", "score": "0.5359284", "text": "def send_jsonified(self, msg, stats=True):\r\n raise NotImplemented()", "title": "" }, { "docid": "4ee965b034ecd45389f4f6ae352172da", "score": "0.5357432", "text": "def 
__str__(self) -> str:\n output = str(self.code)\n if self.message:\n output += \":\" + self.message\n return output", "title": "" }, { "docid": "24d2b4739497c33b5892c9337647adc5", "score": "0.5339029", "text": "def encode_request(message):\n data = json.dumps(message)\n return data.encode('utf-8')", "title": "" }, { "docid": "b9fe4e3dd4c1f63c196254c1f771bbcc", "score": "0.53383285", "text": "def input_encoding(self) -> str:\n ...", "title": "" }, { "docid": "b487c0b9b67609ceb466410a62b772fb", "score": "0.5331605", "text": "def encode(strio, compDict = None):", "title": "" }, { "docid": "63fb2d562c5740a2ecbdb388f16a002f", "score": "0.5322433", "text": "def _make_msg(self, f, hse, key):\n return", "title": "" }, { "docid": "fb55c63c536b2c058ce79074b4a0e2b4", "score": "0.53211635", "text": "def encodeIngestMessage(supercuboid_key, message_id, receipt_handle):\n return NotImplemented", "title": "" }, { "docid": "fb55c63c536b2c058ce79074b4a0e2b4", "score": "0.53211635", "text": "def encodeIngestMessage(supercuboid_key, message_id, receipt_handle):\n return NotImplemented", "title": "" }, { "docid": "b8faee3b75c50b80b6786706e2480e2b", "score": "0.53158283", "text": "def encode(self):\n return self.format.pack(self.pdu_type, self.reserved1, self.pdu_length,\n self.reserved2, self.reserved3, self.source,\n self.reason_diag)", "title": "" }, { "docid": "5788bce9319c13385dc7788083181725", "score": "0.5300703", "text": "def define_message(self):\n pass", "title": "" }, { "docid": "ad7022b34f339606cd7a516d8c2d3974", "score": "0.528773", "text": "def encode(self):\n return self.format.pack(self.pdu_type, self.reserved1, self.pdu_length,\n self.reserved2, self.result, self.source,\n self.reason_diag)", "title": "" }, { "docid": "ca8ffd9e055cb4bee0c7e0aed1a6f7a0", "score": "0.52860594", "text": "def __repr__(self):\r\n return \"%s:%s : %s\" % (self._code, self._message, self._details)", "title": "" }, { "docid": "103233ed9b5f2f8e6177596bbb230ee5", "score": "0.5275812", "text": "def __str__(self) -> str:\n return 'Message({cmd}): {payload}'.format(\n cmd=self.command.encode('utf-8'),\n payload=self.payload)", "title": "" }, { "docid": "a57bd457a79543b6bcc25f676932088d", "score": "0.52725106", "text": "def encode(self, *args, **kwds):\n return string(keymap.encode(self, *args, **kwds), encoding=self.__type__, **self._config)", "title": "" }, { "docid": "b47b4295edf5596dae9985bf5a7511ed", "score": "0.52721864", "text": "def test_encodeMsg(self):\n self.hdlcMsg.encodeMsg(testMsg)\n assert(self.hdlcMsg.hdlc == truthHDLCMsg)", "title": "" }, { "docid": "e7b7cabff5fcb29c5fb9b58b58cc42ac", "score": "0.52701604", "text": "def __init__(self, msg: unicode):\n ...", "title": "" }, { "docid": "5ee9141ac3de55eb6c735a68aee6b3a2", "score": "0.5269771", "text": "def message_encode(type, sequence, initiator, neighbor,\n operation=0, capability=0, payload=0):\n ix, iy = initiator\n nx, ny = neighbor\n return message_format.pack(type, sequence, ix, iy, nx, ny,\n operation, capability, payload)", "title": "" }, { "docid": "09d088447a4483b70ab4041aa5036248", "score": "0.52526313", "text": "def encode_message(self, msg_code, msg_bytes=b''):\n len_msg = len(msg_bytes)\n len_msg += MSG_HEADER_LEN\n encoded_message = pack(MSG_LEN_PACKING_FORMAT, len_msg) \\\n + pack(MSG_CODE_PACKING_FORMAT, msg_code) + msg_bytes\n return encoded_message", "title": "" }, { "docid": "f114487b1988878edad120da58e9288e", "score": "0.5251703", "text": "def __str__(self):\n return (\"Message from address %d of type %s with args %s\" % \n 
(self.sender_address[1], self.message_type, self.message_data))", "title": "" }, { "docid": "c734c44c1e684c081e1202c1bb26132a", "score": "0.5251173", "text": "def __str__(self):\n return _packetEncoder().encode(self)", "title": "" }, { "docid": "c8040c4f8963821bf38b404ccf95fbac", "score": "0.52478385", "text": "def encrypt(self, msg, key):\n raise NotImplementedError", "title": "" }, { "docid": "a6ebe3b4db229e2504f9681abdc1e586", "score": "0.5234536", "text": "def encode(self, x):\n return '{}'.format(self.__encode(x))", "title": "" }, { "docid": "3377d1d8d9eb144f843cf76280e4e5a8", "score": "0.5233163", "text": "def __repr__(self):\n tp = self.TYPE_NAMES[self.mtype].lower()\n name = self.name\n if self.arguments:\n escaped_args = [self.ESCAPE_RE.sub(self._escape_match, x)\n for x in self.arguments]\n for arg in escaped_args:\n if len(arg) > 10:\n arg = arg[:10] + \"...\"\n args = \"(\" + \", \".join(escaped_args) + \")\"\n else:\n args = \"\"\n return \"<Message %s %s %s>\" % (tp, name, args)", "title": "" }, { "docid": "70aa085cbbfdd57f923ceaec452766d4", "score": "0.52306956", "text": "def _miio_msg_encode(self, data):\n if data.get(\"method\") and data.get(\"method\") == \"internal.PING\":\n msg = data\n else:\n if self._miio_id != 12345:\n self._miio_id = self._miio_id + 1\n else:\n self._miio_id = self._miio_id + 2\n if self._miio_id > 999999999:\n self._miio_id = 1\n msg = { \"id\": self._miio_id };\n msg.update(data);\n return([self._miio_id, (json.dumps(msg)).encode()])", "title": "" }, { "docid": "442bc195fe4d88f46fbf8e6a7e6d2d04", "score": "0.5223395", "text": "def encrypt_message(self):\n return ''.join([self.code_dict[i] for i in self.message_text])", "title": "" }, { "docid": "b7fc9234a2b3c0d415bdc8714efbffc0", "score": "0.52206415", "text": "def b64_encode(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "756596a019d48c6d4624f7a55aafd15e", "score": "0.52161443", "text": "def quote_encode(message):\n return quote(message, safe=_UNQUOTED, encoding=\"utf-8\")", "title": "" }, { "docid": "989b79a9d9a3862a590e641fda5bb333", "score": "0.5215969", "text": "def encode(self, strio, compDict=None):\n strio.write(\n struct.pack(self._fmt, self.code, len(self.data)) + self.data)", "title": "" }, { "docid": "61583fe9328b1434d707e2ad1a964bf3", "score": "0.52063715", "text": "def encode(self):\r\n header = b\" \".join([MAGIC_V1, self.type, self.id])\r\n return b\"\".join([header, b\"\\0\", self.body])", "title": "" }, { "docid": "18a8ace1813de03aa01955dd75b63f73", "score": "0.5206117", "text": "def encode_event(self, event):\n pass", "title": "" }, { "docid": "e6c6ebe6b82b7b6acd2642c547786f0c", "score": "0.51931405", "text": "def _Encoded(cls, value=None):", "title": "" }, { "docid": "2f756994e847b09b50842af820384a48", "score": "0.51910704", "text": "def get_encoder(self):", "title": "" }, { "docid": "308a3de68c07033b1a7f743fffaeed00", "score": "0.51862156", "text": "def format_message(self, msg, emit=False):\n # We don't want to count things like external sources as senders for\n # the purpose of constructing the message string.\n senders = [sender for sender in msg.senders if hasattr(sender, 'key')]\n if not senders:\n emit = True\n if emit:\n return msg.message\n else:\n senders = [sender.key for sender in msg.senders]\n senders = ', '.join(senders)\n return self.pose_transform(msg, senders)", "title": "" }, { "docid": "51f11f1ece9829a5f4fc3acb5175578f", "score": "0.5182676", "text": "def encode(self):\n encoded = \"\"\n\n # The first section contains 
the category names.\n for category in self.categories:\n encoded += category.name + \"\\n\"\n if len(self.categories) == 0:\n encoded += \":None\\n\"\n encoded += \"\\n\"\n\n # The remaining sections contain the questions.\n for question in self.questions:\n encoded += question.text + \"\\n\"\n encoded += question.encode() + \"\\n\"\n for category in self.categories:\n answer_spec = category.get_answer_spec(question)\n if answer_spec is None:\n encoded += \"ERROR: No answer spec for this category\\n\"\n else:\n encoded += answer_spec.scoring_function.encode() + \"\\n\"\n encoded += \"\\n\"\n\n return encoded", "title": "" }, { "docid": "f9b0c6a086a5959637ca545fbf07a104", "score": "0.51822644", "text": "def send(self, msg):\n self.encoded_message = json.dumps(self.message).encode(ENCODING)\n self.received_message = msg", "title": "" }, { "docid": "aad05536e4f49bb019a318c5667c905d", "score": "0.5181831", "text": "def wrap(self, msg):\n if isinstance(msg, basestring):\n msg = msg\n elif isinstance(msg, Message):\n fp = StringIO()\n g = Generator(fp, mangle_from_=False, maxheaderlen=0)\n g.flatten(msg)\n msg = fp.getvalue()\n return base64.b64encode(bz2.compress(msg))", "title": "" }, { "docid": "039e193c056aa9fd79f70b905741e633", "score": "0.5181517", "text": "def encode(self, sentences):\n raise NotImplementedError()", "title": "" }, { "docid": "4235ab14432cd06315a8980507ce8f1b", "score": "0.5176793", "text": "def get_encoded(self):\n # use 'ljust' func to add blank space to end of each header part, ensuring fixed length\n header_id_part = self.__client_id.ljust(self.header_part_length)\n header_type_part = self.__type.ljust(self.header_part_length)\n header_length_part = str(len(self.__body)).ljust(self.header_part_length)\n\n # build header from its 3 parts <client id><message type><message length>\n message_header = header_id_part + header_type_part + header_length_part\n\n # connect header to message body and UTF-8 ecnode\n return (message_header + self.__body).encode('utf-8')", "title": "" }, { "docid": "ec61924f51c2883576d1a5cc687f4e8f", "score": "0.5175645", "text": "def listen_encode(self, code):\n\n pass", "title": "" }, { "docid": "dec86ba0ac97378889c8775ccf737f4d", "score": "0.5174256", "text": "def pre_send_message(self, msg):\n return msg", "title": "" }, { "docid": "de4d2d2490409e3325eaa0b0cd0d1d0b", "score": "0.51690805", "text": "def help(_, message: Message):\n Msg.format_help(message)", "title": "" }, { "docid": "c4611a6329ed6ab35f56cb2b3aab29fc", "score": "0.51638347", "text": "def direct_message():", "title": "" }, { "docid": "692868a8d30ed6c2f901f223dc2561e2", "score": "0.5148127", "text": "def encode(self):\n return self._encode(self.data)", "title": "" }, { "docid": "55daca194b51aac1b83e5ba7543796e7", "score": "0.5146633", "text": "def encoding(self):\n self.binary_tree()\n self.set_dictionary()\n output = ''\n for char in self.data:\n output += self.tree.dictionary[char]\n self.encode = output\n return output", "title": "" }, { "docid": "8bc6679a1499b51ab4ed9eb81772f207", "score": "0.51438624", "text": "def _build_message(self, message, **kwargs):\n if not message:\n try:\n message = self.message_format % kwargs\n except UnicodeDecodeError:\n try:\n kwargs = {k: encodeutils.safe_decode(v)\n for k, v in kwargs.items()}\n except UnicodeDecodeError:\n message = self.message_format\n else:\n message = self.message_format % kwargs\n\n return message", "title": "" }, { "docid": "e7999432079377c7e981cb632d096394", "score": "0.51379585", "text": "def encode_to_args(self):\n 
raise NotImplementedError", "title": "" }, { "docid": "a500ca770394a11ae6d25700cbc88172", "score": "0.51366985", "text": "def encode_message(self, data, is_file=False):\n data['message_settings'] = {'message': data['message']}\n del data['message']\n pub_key = self.members[self.username].public_key\n message_settings = data['message_settings']\n data['message_settings'] = CryptoData.encode(message_settings, pub_key)\n data['message_settings']['username'] = data['username']\n if data['to'] == 'broadcast':\n data['message_settings']['type'] = 'broadcast'\n elif is_file:\n data['message_settings']['type'] = data['username']\n data['message_settings']['is_ready'] = data['is_ready']\n data['message_settings']['ext'] = data['ext']\n else:\n data['message_settings']['type'] = data['username']\n return data", "title": "" }, { "docid": "9bde3d2472536d10836918cd964682d0", "score": "0.5132731", "text": "def message(self):\n\t\treturn \"{}{}{}\".format(self.len_prefix, self.message_id, self.bitfield)", "title": "" } ]
7fe51914cbe9fe09b58be396ee2f3f9f
Check if a fault was detected.
[ { "docid": "e54a3686327db8d5b5e9171e62c207f5", "score": "0.694356", "text": "def DetectedFault(self):\n\n if self.FaultOnEarlyExit and (self.thread is None or not self.thread.is_alive()) and \\\n (self.handledFault is None or not self.handledFault.is_set()):\n print(\">>>>>> RETURNING EARLY EXIT FAULT <<<<<<<<<\")\n return True\n\n if self.handlingFault is None:\n print(\"DetectedFault: Agent was re-set, returning false\")\n return False\n\n if self.thread and self.thread.is_alive():\n time.sleep(0.15)\n\n if not self.handlingFault.is_set():\n return False\n\n print(\">>>>>> RETURNING FAULT <<<<<<<<<\")\n\n return True", "title": "" } ]
[ { "docid": "6e0f214cdbd2d2f656dcdcb35150cef2", "score": "0.72903425", "text": "def check_failure(self, fault_name):\n srch_str = '\\s{}'.format(fault_name)\n if re.findall(srch_str, self.fd_data):\n return False\n else:\n return True", "title": "" }, { "docid": "e4a871cf4efc6af913513bfa0611ecca", "score": "0.7090748", "text": "def DetectedFault(self):\n\n time.sleep(0.25)\n\n if not UnixDebugger.handlingFault.is_set():\n return False\n\n UnixDebugger.handledFault.wait()\n UnixDebugger.lock.acquire()\n\n if UnixDebugger.fault or not self.thread.isAlive():\n print(\">>>>>> RETURNING FAULT <<<<<<<<<\")\n UnixDebugger.fault = False\n UnixDebugger.lock.release()\n return True\n\n UnixDebugger.lock.release()\n return False", "title": "" }, { "docid": "1411dedf679cfc8a260c07045eba9f60", "score": "0.68796057", "text": "def check_contain_fault(self, fault):\n # Extact elements from the fault object\n fault_westend = fault.west_end\n fault_eastend = fault.east_end\n fault_length = fault.length\n\n # Create geometry Point and Segment class object\n westend = Point(fault_westend)\n eastend = Point(fault_eastend)\n line = Segment(westend, eastend)\n\n # Check whether the fault is completely included in this grid\n if self.poly.encloses_point(westend) and self.poly.encloses_point(eastend):\n self.include_fault.append(fault)\n self.f_length.append(fault_length)\n return True\n\n # Check whether the fault crosses one line of this grid\n elif len(self.poly.intersection(line)) == 1:\n self.include_fault.append(fault)\n\n # westend is included\n if self.poly.encloses_point(westend):\n grid_intersection = self.poly.intersection(line)\n margin_x = eastend[0] - grid_intersection[0].x\n margin_y = eastend[1] - grid_intersection[0].y\n margin = margin_x, margin_y\n margin_lon = margin[0]\n margin_lat = margin[1]\n margin_x = margin_lon * km_per_lon\n margin_y = margin_lat * km_per_lat\n length = math.sqrt(margin_x**2 + margin_y**2)\n self.f_length.append(length)\n return True\n # eastend is included\n else:\n grid_intersection = self.poly.intersection(line)\n margin_x = eastend[0] - grid_intersection[0].x\n margin_y = eastend[1] - grid_intersection[0].y\n margin = margin_x, margin_y\n margin_lon = margin[0]\n margin_lat = margin[1]\n margin_x = margin_lon * km_per_lon\n margin_y = margin_lat * km_per_lat\n length = math.sqrt(margin_x ** 2 + margin_y ** 2)\n self.f_length.append(length)\n\n return True\n\n # Check whether the fault crosses two lines of this grid\n elif len(self.poly.intersection(line)) == 2:\n self.include_fault.append(fault)\n grid_intersection = [intersection for intersection in self.poly.intersection(line)]\n intersection_1 = [grid_intersection[0].x, grid_intersection[0].y]\n intersection_2 = [grid_intersection[1].x, grid_intersection[1].y]\n margin = [a - b for a, b in zip(intersection_1, intersection_2)]\n margin_lon = margin[0]\n margin_lat = margin[1]\n margin_x = margin_lon * km_per_lon\n margin_y = margin_lat * km_per_lat\n length = math.sqrt(margin_x ** 2 + margin_y ** 2)\n self.f_length.append(length)\n return True\n\n # Fault is not included\n else:\n return False", "title": "" }, { "docid": "b01e94b9be6c2e39c9ec6796b8150ba4", "score": "0.6441169", "text": "def is_faulted_analysis(self, ope):\n super().is_faulted_analysis(ope)\n self.update_fault_matrix(ope)", "title": "" }, { "docid": "b42fbc9f52156278280112dede885179", "score": "0.63725877", "text": "def crash_check(self):\n task = self.db.view_task(self.task_id)\n if task.status == \"reported\":\n fuzzer_path = 
os.path.dirname(os.path.realpath(__file__))\n cuckoo_path = \"/\".join(fuzzer_path.split('/')[:-1] + [\"cuckoo\"])\n report_path = \"storage/analyses/{}/reports/report.json\".format(task.id)\n report_path = os.path.join(cuckoo_path, report_path)\n report = json.loads(open(report_path, 'r').read())\n for process in report['behavior']['processes']:\n for call in process['calls']:\n if call['api'] == 'LdrLoadDll':\n for arg in call['arguments']:\n if arg['name'] == 'FileName' and 'faultrep.dll' in arg['value']:\n self.status = FUZZ_STATUS_CRASH\n print \"Fuzzer status: {}\".format(self.status)", "title": "" }, { "docid": "d7c47bae9cf8aae34df9046380c2f9e0", "score": "0.6301094", "text": "def fault(self) -> int:\n return self._fault", "title": "" }, { "docid": "b55e14ec880384fd776cb6c26898a40d", "score": "0.6053677", "text": "def test_fault_exception(self):\n fault = faults.Fault(webob.exc.HTTPBadRequest(\n explanation='test'))\n self.assertTrue(isinstance(fault.wrapped_exc,\n webob.exc.HTTPBadRequest))", "title": "" }, { "docid": "04a30ed435eb10b3b482cc32c8b8bbbc", "score": "0.602668", "text": "def verify_device_tracking_counters_vlan_faults(device, vlanid, faults,\n max_time=10, check_interval=5):\n\n timeout = Timeout(max_time, check_interval)\n while timeout.iterate():\n output = device.parse('show device-tracking counters vlan {vlanid}'.format(vlanid=vlanid))\n target_faults = output.get('vlanid', {}).get(int(vlanid), {}).get(\"faults\", [])\n if faults:\n if target_faults:\n for fault in faults:\n if fault not in target_faults:\n log.info('Fault {fault} is not found on the target'.format(fault=fault))\n return False\n else:\n log.info('Expected faults, but there are none the target')\n return False\n else:\n if target_faults:\n log.info('Expected no faults, but there are faults the target')\n return False\n else:\n log.info('There are no faults the target as expected')\n return True\n timeout.sleep()\n\n log.info('The correct faults are found on the target'.format(fault=fault))\n return True", "title": "" }, { "docid": "41e908defc2e05e99d4ace1ce45716e6", "score": "0.59322965", "text": "def not_our_fault(exception: Exception):\n return not isinstance(exception, ShopifyCallInvalidError)", "title": "" }, { "docid": "39548bad3142eb5c0fe3a8782a5107e5", "score": "0.5893135", "text": "def check_error(route_result) -> bool:\r\n \r\n status_code = route_result[\"info\"][\"statuscode\"]\r\n if status_code == 400:\r\n print(\"\\nNO ROUTE FOUND\")\r\n check = False\r\n else:\r\n check = True\r\n return check", "title": "" }, { "docid": "b858a6e0e72f00f37e2addb6cd7776ae", "score": "0.58672476", "text": "def test_fault_exception_status_int(self):\n fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))\n self.assertEquals(fault.wrapped_exc.status_int, 404)", "title": "" }, { "docid": "950ff65b7c27fdfc214937855005c506", "score": "0.5865018", "text": "def _identify_faultplanes(self):\r\n # TODO !!!!!!\r\n pass", "title": "" }, { "docid": "595402f8c7a2559beec1a6696703fb6c", "score": "0.5853483", "text": "def GetDriveFault1(self): \n try:\n i2cRecv = self.RawRead(COMMAND_GET_DRIVE_A_FAULT, I2C_MAX_LEN)\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed reading the drive fault state for motor #1!')\n return\n\n if i2cRecv[1] == COMMAND_VALUE_OFF:\n return False\n else:\n return True", "title": "" }, { "docid": "c98913160b53f6c915d84d5e14cdd941", "score": "0.5811094", "text": "def haserror(self):\n return (self.deep_get('Status') == 'error')", "title": "" }, { "docid": 
"8a42f023c768794681a0f32bec06f655", "score": "0.5799209", "text": "def fault(self, fault: int):\n\n self._fault = fault", "title": "" }, { "docid": "1958bbe20e2a101a0691f92639a7bf37", "score": "0.57297236", "text": "def if_error(self):\r\n if \"error\" in [x for x in self.data[self.named][\"data\"]]:\r\n return True\r\n return False", "title": "" }, { "docid": "41b02753d57a19704a6fb63d0cd21df8", "score": "0.56289494", "text": "def has_error(self):\n return self._has_section('error')", "title": "" }, { "docid": "8e2674ca6a036a901f230882aa55e73b", "score": "0.5586522", "text": "def check_for_errors(request, response):\n exc = request.exception\n\n if not (exc is None or is_error_ignored(exc)):\n # extract request information to notify about\n info = get_request_info(request)\n\n # generate the notice information\n return errbit_client.notify(**info)", "title": "" }, { "docid": "1e902935bfa39595852a8a268310ce39", "score": "0.5534604", "text": "def check_success(self, fault_name, search_patterns):\n pattern_found = 0\n for p in search_patterns:\n srch_str = '{}.*?\\s{}'.format(p, fault_name)\n # print (srch_str)\n res = re.findall(srch_str, self.fd_data)\n if res:\n pattern_found = 1\n else:\n pattern_found = 0\n if pattern_found:\n return True\n else:\n return False", "title": "" }, { "docid": "9a779387b5466b485a90b3c4cfb158a0", "score": "0.5514841", "text": "def is_existent(self):\n if self.status <= 3:\n return True\n else:\n return False", "title": "" }, { "docid": "1439235f9521648874f3298c0424c66b", "score": "0.5500447", "text": "def checkCollision(self):\n\n scan_ = np.array(self.scan, dtype=np.float32)\n return self.cars[player][0].isCrashed(scan_, self.num_rays, 1)", "title": "" }, { "docid": "10ba445f15ba6818854b618cd584fa90", "score": "0.5498925", "text": "def validate_crash(uc, err, _input, persistent_round, user_data):\n if hasattr(_ql.os.heap, \"validate\"):\n if not _ql.os.heap.validate():\n # Canary was corrupted.\n verbose_abort(_ql)\n return True\n\n crash = (_ql.internal_exception is not None) or (err.errno != UC_ERR_OK)\n return crash", "title": "" }, { "docid": "89400b1a4dbdf248f6f44572a343ee48", "score": "0.54919565", "text": "def is_relocation_error(self):\n return self._tag == 'relocation_error'", "title": "" }, { "docid": "a021131f67f0bd37998cdeae71263b7c", "score": "0.54757", "text": "def should_error(self):\n return not self.on", "title": "" }, { "docid": "e84f0bee60a603d39074f596c0d89d93", "score": "0.5466067", "text": "def runToFault(self):\n\t\tcomplete = False\n\t\twhile(\"Page Fault\" not in self.status):\n\t\t\tline = self.file_contents[self.curr_ref_index]\n\t\t\tself._manage(line)\n\t\t\tself.curr_ref_index += 1\n\t\t\tif self.curr_ref_index == len(self.file_contents):\n\t\t\t\tcomplete = True\n\t\t\t\tbreak;\n\t\tif complete:\n\t\t\tself.printProgramExecution()\n\t\t\tself.status = \"Simulation Status: Simulation COMPLETE, reset and run again!\"", "title": "" }, { "docid": "90ec30642c17411284d00aac6acda084", "score": "0.5449252", "text": "def fault():\n return FaultCohesiveImpulses()", "title": "" }, { "docid": "ca95e06acc26a97542590e559459aca4", "score": "0.542098", "text": "def isgood(self):\n if self.flowcell_status is not None and \\\n self.flowcell_status.lower() == \"failed\":\n return False\n return True", "title": "" }, { "docid": "15df4529ec591d907e849232737aacd0", "score": "0.5418428", "text": "def check (self):\r\n if self.aggregate.config[\"trace\"]:\r\n trace.trace_on()\r\n try:\r\n self.local_check()\r\n except (socket.error, select.error):\r\n 
# on Unix, ctrl-c can raise\r\n # error: (4, 'Interrupted system call')\r\n etype, value = sys.exc_info()[:2]\r\n if etype == errno.EINTR:\r\n raise KeyboardInterrupt(value)\r\n else:\r\n raise", "title": "" }, { "docid": "b5050fb5f1360be05e0b376b63494cf1", "score": "0.5412405", "text": "def is_firmware_abort(self, message: Message) -> bool:\n firmware_update_abort = message.topic == self.firmware_abort_topic\n self.logger.debug(\n f\"{message.topic} is firmware abort: {firmware_update_abort}\"\n )\n return firmware_update_abort", "title": "" }, { "docid": "34f9c10acf554bac91ea2d13120fda62", "score": "0.5388818", "text": "def _check_exception(self):\n if self.cds is not None:\n if self.cds.offset is not None and self.cds.offset > 1:\n print(str(self.id) + \" : codon start > 1 --> verifier la taille du cds pour confirmation formule\")", "title": "" }, { "docid": "8d712804feffdf0bf7cd5663d25c48df", "score": "0.5359749", "text": "def isCrashed():\n return cur_state == -1", "title": "" }, { "docid": "aa15552e4a494fd4cdf364f2dc1e9881", "score": "0.53514177", "text": "def _check_request(self, what, **params):\n response = self.request(what, **params)\n return response and response.get('status') == 'success'", "title": "" }, { "docid": "6e764a3dcb4ce8696bea618236dd7827", "score": "0.5346321", "text": "def check_kb_status():\n result = minisat(agent.kb.clauses)\n if result:\n print(\"Agent KB is satisfiable\")\n else:\n print(\"Agent KB is NOT satisfiable!! There is contradiction that needs fixing!\")", "title": "" }, { "docid": "5777b4114b0dc8a173e7a12568c0f046", "score": "0.53241706", "text": "def has_error(self, exception: Type[ClientError]) -> bool:\n for r in self.reasons:\n if r is exception:\n return True\n else:\n return False", "title": "" }, { "docid": "1c45e2e4b271d301c33424f3b531268b", "score": "0.53198946", "text": "def call_soap_method_expecting_fault(self, name, *args):\n return self._call(None, None, True, name, *args)", "title": "" }, { "docid": "4cd5f0ae2cad6243346a87e0fc34cbff", "score": "0.5311064", "text": "def check_response(self, resp):\n if resp.status_code < 300:\n return\n\n try:\n resp_elmt = etree.fromstring(resp.content)\n error_elmt = resp_elmt.find('tab_ns:error', XML_NS)\n summary_elmt = error_elmt.find('tab_ns:summary', XML_NS)\n detail_elmt = error_elmt.find('tab_ns:detail', XML_NS)\n self.logger.error(\"API error %s (HTTP status code %s): %s/%s\", error_elmt.attrib['code'], resp.status_code,\n summary_elmt.text, detail_elmt.text)\n except:\n self.logger.exception(\"Error processing API error response: %s\", resp.content)\n finally:\n raise RuntimeError(\"API error {}\".format(resp.status_code))", "title": "" }, { "docid": "a05562bafcab52932c2766636951a07e", "score": "0.53088605", "text": "def check_errback(error):\r\n self.assertIsInstance(error.value, RoutingTableEmpty)", "title": "" }, { "docid": "87b4852e83077848fa008efa9571b0e3", "score": "0.53064865", "text": "def is_error(self, notification):\n raise NotImplementedError", "title": "" }, { "docid": "110b6083844c059df0b94243fe8d403f", "score": "0.5297313", "text": "def OnFault(self):\n self._StopDebugger()", "title": "" }, { "docid": "110b6083844c059df0b94243fe8d403f", "score": "0.5297313", "text": "def OnFault(self):\n self._StopDebugger()", "title": "" }, { "docid": "d3ba4a7d182d8c07afe0f09d87cdfc74", "score": "0.52918154", "text": "def check_status(self):\n raise NotImplementedError()", "title": "" }, { "docid": "20943c3e019d9e8b04395dc44cf40ab1", "score": "0.52886516", "text": "def check():", "title": "" 
}, { "docid": "b6a4307f8a29ee8006b132b3577520fa", "score": "0.5253648", "text": "def GetDriveFault2(self): \n try:\n i2cRecv = self.RawRead(COMMAND_GET_DRIVE_B_FAULT, I2C_MAX_LEN)\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed reading the drive fault state for motor #2!')\n return\n\n if i2cRecv[1] == COMMAND_VALUE_OFF:\n return False\n else:\n return True", "title": "" }, { "docid": "49577dfe72545ebf03311b298c12481a", "score": "0.52519447", "text": "def __error_check(self, code : int) -> bool:\n if self.print_errors:\n if code in (200, 204):\n print(str(code) + \": Action performed successfully.\")\n return True\n if code == 400:\n print(\"Error 400: Bad request.\")\n elif code == 401:\n print(\"Error 401: Unauthorized, invalid auth token. \" +\n \"Please generate a new one.\")\n elif code == 403:\n print(\"Error 403: Unauthorized, please hold the power \" +\n \"button on the controller for 5-7 seconds, then try again.\")\n elif code == 404:\n print(\"Error 404: Resource not found.\")\n elif code == 500:\n print(\"Error 500: Internal server error.\")\n return False\n return bool(code in (200, 204))", "title": "" }, { "docid": "2632c2794d9b1fa74415b338ed03eec2", "score": "0.5244246", "text": "def is_on(self):\n return self._device.status not in (ZappiStatus.FAULT, ZappiStatus.NOT_CONNECTED)", "title": "" }, { "docid": "74bd4996fcbe9ebc9d194b6699909676", "score": "0.5244223", "text": "def no_log_found(exc):\n if exc.args and isinstance(exc.args[0], str) and \"No such object\" in exc.args[0]:\n return True\n elif getattr(exc, \"resp\", {}).get(\"status\") == \"404\":\n return True\n return False", "title": "" }, { "docid": "43d1297db60df9ff12d1c6a3be159143", "score": "0.52422446", "text": "def is_failure(status):\r\n return status >= 13", "title": "" }, { "docid": "809859074a53c8436f2c4ab60635f2b6", "score": "0.5240108", "text": "def is_status_not_found(response: int):\n return response == 404", "title": "" }, { "docid": "245ef3402a7583ba43370480c911ca3f", "score": "0.52388704", "text": "def do_check_fusion(self):\n self.fusion.check()", "title": "" }, { "docid": "8d0ee56c415f252ad1b6e7d1d3fd0966", "score": "0.5225209", "text": "def is_error(self, data):\n\n if string.find(data, \"error\", 0, 4) == -1 :\n return False\n else:\n return True", "title": "" }, { "docid": "db619ab6fd1f68fee170636677ee732f", "score": "0.52195615", "text": "def RedoTest(self):\n\n if self.handlingFault is None:\n return False\n\n if self.thread and self.thread.is_alive():\n time.sleep(0.15)\n\n if not self.handlingFault.is_set():\n return False\n\n print(\"RedoTest: Waiting for self.handledFault...\")\n\n t = 60.0 * 3\n self.handledFault.wait(timeout=t)\n\n if not self.handledFault.is_set():\n print(\"RedoTest: Timmed out waiting for fault information\")\n print(\"RedoTest: Killing debugger and target\")\n self._StopDebugger(True)\n _DbgEventHandler.TakeStackTrace = False\n print(\"RedoTest: Attempting to re-run iteration\")\n return True\n\n return False", "title": "" }, { "docid": "8f77de8c7d6dd6a61db686cce197e037", "score": "0.5204943", "text": "def _check_api_error(response:json) -> bool:\r\n\r\n is_error = False\r\n\r\n try:\r\n if list(response.keys())[0] == 'errors':\r\n is_error = True\r\n except IndexError:\r\n pass\r\n \r\n return is_error", "title": "" }, { "docid": "eb62088b5c0c47d886e84acc59d438b8", "score": "0.52044326", "text": "def is_lookup_failed(self):\n return self._tag == 'lookup_failed'", "title": "" }, { "docid": "64fb4f0fd4ec0a220df6b5c0c25effdc", "score": "0.5201944", 
"text": "def filter_errors(exc: Exception) -> bool:\n # pylint: disable=import-outside-toplevel\n from .client import HTTPError\n\n if isinstance(exc, HTTPError):\n return exc.status_code in (\n HTTPStatus.TOO_MANY_REQUESTS,\n HTTPStatus.INTERNAL_SERVER_ERROR,\n HTTPStatus.SERVICE_UNAVAILABLE,\n HTTPStatus.GATEWAY_TIMEOUT,\n BANDWIDTH_LIMIT_EXCEEDED,\n )\n return True", "title": "" }, { "docid": "c795b9735429cd53fff07fc9ee07a17d", "score": "0.51971585", "text": "def _check_icmp(self, icmp_data):\r\n try:\r\n return isinstance(icmp_data, dpkt.icmp.ICMP) and \\\r\n len(icmp_data.data) > 0\r\n except:\r\n return False", "title": "" }, { "docid": "2a964849bb845e1ab2275d2ca8efe479", "score": "0.5185664", "text": "def sanity_check(self):\n pass_tests = True\n\n # top part of lanes should be separated by approx distance\n top_width = self.recent_rightx[-1][-1] - self.recent_leftx[-1][0]\n if top_width < (self.top_lane_width - 150) or top_width > (self.top_lane_width + 150):\n pass_tests = False\n print(f'Top lane width fail = {top_width}. resetting...')\n \n # bottom part of lanes should be separated by approx. correct horizontal distance\n width = self.recent_rightx[-1][0] - self.recent_leftx[-1][-1]\n if width < (self.lane_width - 250) or width > (self.lane_width + 250):\n pass_tests = False\n print(f'Bottom lane width fail = {width}. resetting...')\n\n if pass_tests:\n self.detected = True\n self.consecutive_bad_frames = 0\n else:\n self.detected = False\n self.consecutive_bad_frames += 1", "title": "" }, { "docid": "e8131dbec8489789a40a993bb72673c2", "score": "0.5164079", "text": "def is_error(obj):\n if obj is None:\n return False\n if 'error' in obj:\n return True\n status = obj_status(obj)\n if isinstance(status, int):\n if status < 200 or status >= 300:\n return True\n return False\n return False", "title": "" }, { "docid": "6981b61a4576e3444adfecaa70d2862d", "score": "0.51606095", "text": "def checkKernelPanic(self) -> bool:\n term_path = self.outdir / \"system.pc.com_1.device\"\n if not term_path.exists():\n return False\n\n with open(term_path, \"rb\") as f:\n try:\n f.seek(-1000, os.SEEK_END)\n except OSError:\n return False\n try:\n # There was a case where reading `term_path` resulted in a\n # UnicodeDecodeError. It is known that the terminal output\n # (content of 'system.pc.com_1.device') is written from a\n # buffer from gem5, and when gem5 stops, the content of the\n # buffer is stopped being copied to the file. The buffer is\n # not flushed as well. So, it might be a case that the content\n # of the `term_path` is corrupted as a Unicode character could\n # be longer than a byte.\n last = f.readlines()[-1].decode()\n if \"Kernel panic\" in last:\n return True\n else:\n return False\n except UnicodeDecodeError:\n return False", "title": "" }, { "docid": "5b4e467540fce0747480ba68efe5c8ea", "score": "0.51590085", "text": "def faultHandler(self, prevState):\n with self.streamLock:\n self.info_stream('Entering faultHandler')\n handledStates = [PyTango.DevState.FAULT]\n waitTime = 0.1\n retries = 0\n maxTries = 5\n\n faultProcessFlag = True\n s = ''.join(('Fault condition. 
Processing...\\n'))\n t0 = time.time()\n \n while self.stopStateThreadFlag == False:\n if self.get_state() not in handledStates:\n break\n try:\n self.device.close()\n faultProcessFlag = False\n except Exception, e:\n with self.streamLock:\n self.error_stream(''.join(('Close...', str(e))))\n self.set_state(PyTango.DevState.FAULT)\n self.set_status(''.join((s, 'Error closing connection')))\n faultProcessFlag = True\n if faultProcessFlag == False:\n try:\n self.device.connect(self.Port)\n faultProcessFlag = False\n except Exception, e:\n with self.streamLock:\n self.error_stream(''.join(('Connect...', str(e))))\n self.set_state(PyTango.DevState.UNKNOWN)\n self.set_status(''.join((s, 'Error connecting')))\n faultProcessFlag = True\n if faultProcessFlag == False:\n try:\n stat = self.device.getPower()\n faultProcessFlag = False\n except Exception, e:\n with self.streamLock:\n self.error_stream(''.join(( 'Communicate...', str(e))))\n self.set_state(PyTango.DevState.FAULT)\n self.set_status(''.join((s, 'Error receiving response')))\n faultProcessFlag = True\n if time.time() - t0 > 10:\n faultProcessFlag = False\n retries += 1\n if retries > maxTries:\n self.set_state(PyTango.DevState.UNKNOWN)", "title": "" }, { "docid": "cd269e07b91d0220afd59ea0199b620e", "score": "0.5153396", "text": "def isFailure(self):\n return self.enmStatus in self.kasBadTestStatuses;", "title": "" }, { "docid": "b73c57319feb27a76950d4713aa3ccc7", "score": "0.5133983", "text": "def wasInterrupted(self):\n return isinstance(self._getStatus()[1], Exception)", "title": "" }, { "docid": "37a93a422bc7618b56c9c1bc0283821c", "score": "0.51290244", "text": "def _check_status(cls, request_result):\n status = request_result.status_code\n if status in [200, 201, 203]:\n return\n\n err = request_result.json().get(\"error\", f\"{status}\")\n\n if status == 400:\n raise exc.MalformedRequestError(err)\n elif status == 401:\n raise exc.AuthError(err)\n elif status == 404:\n raise exc.NotFoundError(err)\n elif status == 409:\n raise exc.EtagMismatchError(err)\n elif status == 500:\n raise exc.ServerError(err)\n raise Exception(err)", "title": "" }, { "docid": "eb3816841f6c8c81ba6048207db98109", "score": "0.51280713", "text": "def _check_exists(self, message=\"Error code: 404\"):\n if not self.exists:\n raise ScreenNotFoundError(message)", "title": "" }, { "docid": "16e5a83b2eabc5e41bf8a84963821d37", "score": "0.51140654", "text": "def _check_issues(self, response):\n if response['total'] == 0:\n print('no new issues for env', self._env)\n return self._count_issues(response)", "title": "" }, { "docid": "6d3eeec6577eb04370a95a0489ce0245", "score": "0.51108676", "text": "def _check_token(self):\n try:\n ##TODO improve hacky approach below\n # if the request is correct then no message is returned and we have a TypeError\n if (\n utils.request(self.issues_url, self.headers)[\"message\"]\n == \"Bad credentials\"\n ):\n raise InvalidTokenError(\n f\"\\nError: Bad credentials. 
The OAUTH token {self.api_token} is not correct.\"\n )\n except InvalidTokenError as _e:\n sys.exit(_e)\n except:\n # we don't care about the TypeError\n pass", "title": "" }, { "docid": "2b496ce0c0a1e4dc8d87970e3f8684f3", "score": "0.5109979", "text": "def error(self):\n return self.status < 200 or self.status >= 300", "title": "" }, { "docid": "2ed3f7037739e86fa06c91d4d18d9e9e", "score": "0.5096272", "text": "def is_error(self):\n return self.type_id == STATE_ERROR", "title": "" }, { "docid": "6e94318a6005ca20b91ac71f9737f52f", "score": "0.5095777", "text": "def _is_weird_upload_error(response):\n # one known weird situation is fragmentRowCountCheckFailed (see\n # https://github.com/zmwangx/pyonedrive/issues/1)\n if response.status_code == 416:\n try:\n if ((response.json()[\"error\"][\"innererror\"][\"code\"] ==\n \"fragmentRowCountCheckFailed\")):\n return True\n except KeyError:\n pass\n\n # here are the codes with known solutions\n if response.status_code in {200, 201, 202, 404, 416, 500, 502, 503, 504}:\n return False\n\n return True", "title": "" }, { "docid": "1ddffd7d492b560eb7ee918796678b74", "score": "0.50950724", "text": "def test_flaky_thing_that_fails_then_succeeds(self):\n self._threshold += 1\n if self._threshold < 1:\n raise Exception(\"Threshold is not high enough.\")", "title": "" }, { "docid": "1ddffd7d492b560eb7ee918796678b74", "score": "0.50950724", "text": "def test_flaky_thing_that_fails_then_succeeds(self):\n self._threshold += 1\n if self._threshold < 1:\n raise Exception(\"Threshold is not high enough.\")", "title": "" }, { "docid": "e0ef558ea86ce139fbb03b9fb0f5f1af", "score": "0.50873166", "text": "def hasError(self): #$NON-NLS-1$\r\n pass", "title": "" }, { "docid": "4e9e60bdd02a7983359c2705f7f4c951", "score": "0.50846124", "text": "def is_error(status):\n return int(status.split(' ', 1)[0]) >= 400", "title": "" }, { "docid": "acd99cb091e8396b55728deb4a12225a", "score": "0.50798184", "text": "def _check_db_errors(self, response):\n if response[\"errors\"] > 0:\n self.log(\n 40,\n response[\"first_error\"]\n )\n return False\n return True", "title": "" }, { "docid": "5fa7f3125c373815dcabcf49ecf8789a", "score": "0.50788003", "text": "def fault_profile(self):\n self.faulted = True\n \n ind_fault = int(round(self.fault_position * len(self.eta))) -1\n \n self.eta[ind_fault:] = self.eta[ind_fault:] - self.fault_height", "title": "" }, { "docid": "cfd4291d36c6e7e559aa80b640bda15c", "score": "0.5075065", "text": "def assert_service_is_faultless(machine, service):\n date_since = (\n datetime.now() - timedelta(days=config.DAYS_TO_CHECK_LOGS)\n ).strftime(\"%F\")\n cmd = ['journalctl', '--unit', service, '-p', '0..4', '-S', date_since]\n testflow.step(\n \"Running command %s on machine %s\", \" \".join(cmd), machine.fqdn\n )\n rc, out, err = machine.executor().run_cmd(cmd)\n assert not int(rc), (\n \"journalctl cmd on machine %s failed with error %s\" % (\n machine.fqdn, err\n )\n )\n out = out.split('\\n', 1)[1]\n if out:\n testflow.step(\n \"Check if unit file of %s service on host %s \"\n \"contains any errors or warnings\",\n service,\n machine.fqdn\n )\n logger.warning(\n \"On machine %s there were these errors/warnings: %s\",\n machine.fqdn, out\n )\n tracebacks = []\n for match in re.finditer(\n \"((.*\\n)^.*?traceback.*?$(.*\\n)*?)[a-z]{3} [0-9]{1,2}\",\n out, re.MULTILINE | re.IGNORECASE\n ):\n tracebacks.append(match.group(1))\n testflow.step(\n \"Check if there are any tracebacks on machine %s\",\n machine.fqdn\n )\n assert not tracebacks, (\n \"On machine %s 
these tracebacks were found: %s\" % (\n machine.fqdn, '\\n'.join(tracebacks)\n )\n )\n else:\n logger.info(\n \"journalctl output was empty, \"\n \"no errors nor warnings were found\"\n )", "title": "" }, { "docid": "80727d09f564052e2325890564bbdd2b", "score": "0.5069866", "text": "def is_fibre_failed(self):\n return self._fibre_failure", "title": "" }, { "docid": "50f4455820b3e4c4983712515300f665", "score": "0.5067982", "text": "def detect(self, dir_name):\n file_names = glob.glob(\"%s/*.error\" % dir_name)\n rx = re.compile(r'segmentation', re.IGNORECASE)\n for file_name in file_names:\n with zopen(file_name, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if rx.search(line) is not None:\n return set([\"SEGFAULT\"])\n return set()", "title": "" }, { "docid": "1c3f9b6233f894c5d286f8ead910dbcd", "score": "0.50673336", "text": "def error_in_error_queue(self) -> bool:\n\t\treturn False", "title": "" }, { "docid": "969c2e0896d384ea94d47b66dcd10efa", "score": "0.5065613", "text": "def is_status_bad_request(response: int):\n return response == 400", "title": "" }, { "docid": "2ca6e65c627b61684f6f6f7f5bfcb958", "score": "0.5063531", "text": "def has_errros(self):\r\n return len(self.excel_decoration_process_errors) > 0", "title": "" }, { "docid": "f9466ca4530a82a10d8ed8db3d53a53b", "score": "0.50620216", "text": "def check_messages(self):\r\n threadprop.get_app().processEvents(QtCore.QEventLoop.AllEvents)\r\n if self._stop_requested:\r\n raise threadprop.InterruptExceptionStop()", "title": "" }, { "docid": "c04353c185fbd5e1027559eeac1d585a", "score": "0.5059788", "text": "def is_error(self, field):\n return field in self.errors", "title": "" }, { "docid": "fbc63cd3a8cee9d6cb3c5ddb0a656dab", "score": "0.50579566", "text": "def _CheckForError(self, data):\n # Errors are relatively unlikely, so it is faster\n # to check first, rather than try and catch the exception\n if 'error' in data:\n raise TiendaMobilError('Error: {0}'.format(data['error']))\n if 'errors' in data:\n errors = data['errors']\n if type(errors) == list:\n errors = ', '.join(errors)\n raise TiendaMobilError('Errors: {0}'.format(errors))", "title": "" }, { "docid": "3b38e8c874d60fde16743d7b6c6009fe", "score": "0.5051344", "text": "def api_error_flag(self, state: c_void_p) -> bool:\n if self.api.apiErrorFlag(state) == 1:\n return True\n return False", "title": "" }, { "docid": "e91ee03879e351a200bbb025670f1e90", "score": "0.50417686", "text": "def _raise_soap_errors(cls, fault):\n # Fault: See http://www.w3.org/TR/2000/NOTE-SOAP-20000508/#_Toc478383507\n faultcode = get_xml_attr(fault, 'faultcode')\n faultstring = get_xml_attr(fault, 'faultstring')\n faultactor = get_xml_attr(fault, 'faultactor')\n detail = fault.find('detail')\n if detail is not None:\n code, msg = None, ''\n if detail.find('{%s}ResponseCode' % ENS) is not None:\n code = get_xml_attr(detail, '{%s}ResponseCode' % ENS).strip()\n if detail.find('{%s}Message' % ENS) is not None:\n msg = get_xml_attr(detail, '{%s}Message' % ENS).strip()\n msg_xml = detail.find('{%s}MessageXml' % TNS) # Crazy. 
Here, it's in the TNS namespace\n if code == 'ErrorServerBusy':\n back_off = None\n try:\n value = msg_xml.find('{%s}Value' % TNS)\n if value.get('Name') == 'BackOffMilliseconds':\n back_off = int(value.text) / 1000.0 # Convert to seconds\n except (TypeError, AttributeError):\n pass\n raise ErrorServerBusy(msg, back_off=back_off)\n if code == 'ErrorSchemaValidation' and msg_xml is not None:\n violation = get_xml_attr(msg_xml, '{%s}Violation' % TNS)\n if violation is not None:\n msg = '%s %s' % (msg, violation)\n try:\n raise vars(errors)[code](msg)\n except KeyError:\n detail = '%s: code: %s msg: %s (%s)' % (cls.SERVICE_NAME, code, msg, xml_to_str(detail))\n try:\n raise vars(errors)[faultcode](faultstring)\n except KeyError:\n pass\n raise SOAPError('SOAP error code: %s string: %s actor: %s detail: %s' % (\n faultcode, faultstring, faultactor, detail))", "title": "" }, { "docid": "f335b2e4d09476f6dbd40be56bff3f56", "score": "0.5039276", "text": "def checkForError(self):\n errors = self.getErrors()\n\n if len(errors) == 1:\n code, msg = errors[0]\n raise labtronyx.DeviceError(msg.strip('\"'))\n elif len(errors) > 1:\n raise labtronyx.DeviceError(\"Multiple errors\")", "title": "" }, { "docid": "132dd75de828533bfc18a9a2ed9f27c2", "score": "0.5035912", "text": "def _check_response(self, response):\n if response.status_code == 500:\n req = response.request\n try:\n body = response.json()\n if 'traceback' in body:\n msg = ('Traceback from test hello-bottle server '\n 'when calling {m} {p}\\n{tb}')\n self.fail(\n msg.format(m=req.method,\n p=req.path_url,\n tb=body['traceback']) # fail\n )\n else:\n self.fail(pprint.pformat(body, indent=2))\n except (TypeError, ValueError):\n pass", "title": "" }, { "docid": "ab1516321fbfeb0eb9367e868ad81052", "score": "0.50247824", "text": "def _checkError(self, satellite):\n failedInx = np.where(satellite.error != 0)[0]\n if len(failedInx) > 0:\n raise ValueError('SGP4 failed!')", "title": "" }, { "docid": "209f5daee9a9a51be98ee947d53263a7", "score": "0.50242156", "text": "def test_exists(self, ray_tracer, ray_tracer2, ray_tracer3,\n bad_tracer, out_tracer):\n assert ray_tracer.exists\n assert ray_tracer2.exists\n assert ray_tracer3.exists\n assert not bad_tracer.exists\n assert not out_tracer.exists", "title": "" }, { "docid": "df7a3325af15e3bd9106982bb3699d2a", "score": "0.50185835", "text": "def check_error(self, timeout = 1):\n starttime = time.time()\n while time.time() - starttime < timeout:\n # Check if there are any error messages in the top bar\n try:\n errormessage = mws.get_top_bar_text().lower()\n if \"no main safe\" in errormessage or \"error\" in errormessage:\n self.log.error(errormessage)\n self.log.error(\"Some part of your configuration was incorrect. 
Exiting...\")\n mws.recover()\n return False\n except:\n continue\n return True", "title": "" }, { "docid": "9a7c44d60ca519a0bbec255e8803e690", "score": "0.50142425", "text": "def check_for_b904(self, node):\n if (\n node.cause is None\n and node.exc is not None\n and not (isinstance(node.exc, ast.Name) and node.exc.id.islower())\n and any(isinstance(n, ast.ExceptHandler) for n in self.node_stack)\n ):\n self.errors.append(B904(node.lineno, node.col_offset))", "title": "" }, { "docid": "b560c6f923501577b983a6bd5c7e4d7b", "score": "0.5013435", "text": "def checkOk(self):\n return True", "title": "" }, { "docid": "2a9f05ceea07c1a2fea169647e1a810a", "score": "0.5012351", "text": "def _check_hs2_rpc_status(self, status):\n if status.statusCode == TStatusCode.ERROR_STATUS:\n # Suppress the errors from cancelling a query that is in fetch state\n if self.is_query_cancelled:\n raise QueryCancelledByShellException()\n raise RPCException(\"ERROR: {0}\".format(status.errorMessage),\n RPC_EXCEPTION_SERVER)\n elif status.statusCode == TStatusCode.INVALID_HANDLE_STATUS:\n if self.is_query_cancelled:\n raise QueryCancelledByShellException()\n raise QueryStateException('Error: Stale query handle')\n else:\n # Treat all non-error codes as success.\n assert self._is_hs2_nonerror_status(status.statusCode), status.statusCode", "title": "" }, { "docid": "96a1adf673f00f1d4251df4aa506751e", "score": "0.5007236", "text": "def isError(self):\r\n if self.recptr - self.sendptr < len(SLPacket.ERRORSIGNATURE):\r\n msg = \"not enough bytes to determine packet type\"\r\n raise SeedLinkException(msg)\r\n return self.databuf[self.sendptr: self.sendptr +\r\n len(SLPacket.ERRORSIGNATURE)].lower() == \\\r\n SLPacket.ERRORSIGNATURE.lower() # @UndefinedVariable\r", "title": "" }, { "docid": "408ded07d05c8d58c21e987e0ebd9c84", "score": "0.5005469", "text": "def test_flaky_thing_that_succeeds_then_fails_then_succeeds(self):\n self._threshold += 1\n if self._threshold == 1:\n self.assertFalse(True)", "title": "" }, { "docid": "6e2b32f472c34cab5e9553f754be562a", "score": "0.50044656", "text": "def _canraise(self, op):\n if op.opname == 'pseudo_call_cannot_raise':\n return False\n try:\n if self.raise_analyzer.can_raise(op):\n if self.raise_analyzer_ignore_memoryerror.can_raise(op):\n return True\n else:\n return \"mem\"\n else:\n return False\n except lltype.DelayedPointer:\n return True # if we need to look into the delayed ptr that is\n # the portal, then it's certainly going to raise", "title": "" }, { "docid": "ec3838ccbea3e20e4d8da8a0fe69f78e", "score": "0.49974144", "text": "def check_for_errors(self):\n # Let's check for the word error, you never know #\n if \"error\" in self.paths.log.contents.lower():\n raise Exception(\"SIT did not run properly.\")\n # For some reason when appending we don't get the \"Done\" at the end #\n if not self.append:\n assert self.paths.log.contents.endswith(\"Done\\n\")", "title": "" }, { "docid": "29878aef0900feaa11d3adcd874b1ce0", "score": "0.49859738", "text": "def check(self):\n #TODO Implement\n pass", "title": "" }, { "docid": "586749d31541e13d6a847dd79cb5d254", "score": "0.49823433", "text": "def test_view_incident_not_found(self):\n # redflag does not exist\n response = self.app.get('/api/v1/incidents/1344/')\n self.assertEqual(response.status_code, 404)\n data = json.loads(response.get_data())\n self.assertEqual(data['error'], \"Redflag not found\")", "title": "" }, { "docid": "d75a3eb9b4b2535be7d2825d5c38751f", "score": "0.49777547", "text": "def has_error(self):\n raise 
NotImplementedError()", "title": "" } ]
14ac21724398d659a2adc4e56824b6a6
Return a list of strings containing all the current hostnames
[ { "docid": "25c7f310d6616a91bc15cbb5a1776bb5", "score": "0.80563426", "text": "def getListHostnames(self):\n\t\tlist = self.getSession().query(Node.name).all()\n\t\treturn [item for item, in list]", "title": "" } ]
[ { "docid": "fda32db0718449990a2b6180d66bc9bd", "score": "0.7972852", "text": "def hostnames(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"hostnames\")", "title": "" }, { "docid": "b041d5586a7c69cbc0634e494a0d123f", "score": "0.78123796", "text": "def getHostList():\n bhostsDic = getBhostsInfo()\n hostList = bhostsDic['HOST_NAME']\n return(hostList)", "title": "" }, { "docid": "da7f9df0c3a5b249c0e89f07dccf0004", "score": "0.77144074", "text": "def local_hostnames():\n return [socket.gethostname(), socket.getfqdn()]", "title": "" }, { "docid": "b0da21fae4934340a1d2831c4c636cab", "score": "0.770072", "text": "def _get_host_names(self):\n res=self.execute('vagrant status', result=True)\n if isinstance(res,Exception):\n print(res)\n return []\n \n res=res.decode('utf8')\n res=re.split('[\\r\\n]{1,2}',res)\n host_lines=res[res.index('',1)+1:res.index('',2)]\n host_names=[re.split('\\s+',x)[0] for x in host_lines]\n return host_names", "title": "" }, { "docid": "1d35a9d5fb9da6fade0c6ee7ef04f428", "score": "0.7593161", "text": "def get_host_names():\n my_host_name = socket.gethostname()\n num_hosts = jax.host_count()\n my_host_id = jax.host_id()\n max_host_char_length = 128\n all_host_names = onp.zeros([num_hosts, max_host_char_length], dtype=onp.int32)\n i = 0\n for c in my_host_name:\n all_host_names[my_host_id, i] = ord(c)\n i += 1\n all_host_names = per_host_sum_pmap(all_host_names)\n host_list = []\n for i in range(num_hosts):\n host_name = ''\n for c in all_host_names[i]:\n if c == 0:\n break\n host_name += chr(c)\n host_list.append(host_name.split('.')[0])\n return host_list", "title": "" }, { "docid": "674687073aa77571390472a23a2ccc8f", "score": "0.7519632", "text": "def rpc_hostnames_get(handler):\n\treturn list(web_tools.get_hostnames(handler.config))", "title": "" }, { "docid": "8657b019e981384e9a045180a474d53e", "score": "0.7480872", "text": "def get_hostnames(self):\n return sorted(self._hosts.keys())", "title": "" }, { "docid": "22328ecf33b39e53c93aeb1a4e9164f3", "score": "0.7438526", "text": "def hostnames(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"hostnames\")", "title": "" }, { "docid": "d12cf29d703ab1ef4e9d6b34ea60a952", "score": "0.7395814", "text": "def get_collector_host_names() -> List[str]:\n collector_host = get_collector_host()\n hosts = []\n if not collector_host:\n return hosts\n for host in re.split(r'[ ,]+', collector_host.strip()):\n if host.startswith(\"<\"):\n # Looks like `host` is not a host, it is a <sinful> string.\n # Parse alias out of it to get the actual host:port\n m = re.search(r'&alias=([^&>]+)', host)\n if m:\n host = m.group(1)\n else:\n continue\n if ':' in host: # `host` is a host:port but we just want the host\n hosts.append(host.split(':')[0])\n else:\n hosts.append(host)\n return hosts", "title": "" }, { "docid": "5f3f0c18013f439cf124de58234e7f1b", "score": "0.7294311", "text": "def getKnownHostnames(self):\n return self.__mymachines.keys()", "title": "" }, { "docid": "0dbc63918b9f13677a4e0615d6581be9", "score": "0.7236125", "text": "def get_all_hosts(self) -> [str]:\n self.connect_db()\n \n query = \"SELECT DISTINCT host from events WHERE host != 'NULL'\"\n\n cursor = self.db.cursor()\n cursor.execute(query)\n\n hosts = []\n\n for host in cursor:\n sub_hosts = host[0].split(\" \")\n for sub_host in sub_hosts:\n hosts.append(sub_host)\n\n self.db.close()\n return list(set(hosts)) #This makes sure values are unique", "title": "" }, { "docid": "7f6593b27f67a3f23e68b0e930cb5f88", "score": 
"0.7224779", "text": "def list_host():\n t = open_file()\n all_hosts = []\n for i in range(len(t)):\n all_hosts.append(t[i][0])\n return all_hosts", "title": "" }, { "docid": "c20fa85b4b15df23313237c5319f192c", "score": "0.7208347", "text": "def list():\n _LOGGER.info('fetched server list from IPA')\n return set(GLOBAL.ipaclient.get_hosts(nshostlocation=_ACCOUNT))", "title": "" }, { "docid": "d7be1756bc4a751de4d0e74046393e5c", "score": "0.71368474", "text": "def list_hosts(self):\n return list(self.hosts.values())", "title": "" }, { "docid": "a00174fd1c53b98d7e0da205691f5cff", "score": "0.7116978", "text": "def hosts(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"hosts\"),\n )", "title": "" }, { "docid": "a00174fd1c53b98d7e0da205691f5cff", "score": "0.7116978", "text": "def hosts(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"hosts\"),\n )", "title": "" }, { "docid": "7e592d77b481de2051f0e0fb40a182c5", "score": "0.7109232", "text": "def xhostList(self):\n return sorted([host.name() for host in self.hosts.values() if not host.fullInfoAvailable()])", "title": "" }, { "docid": "ac640b337b81fc3ec0ef99c8801c0817", "score": "0.7104742", "text": "def hosts(self):\n return \",\".join(self._pod_name_list)", "title": "" }, { "docid": "74a4fdb45d9f9bb20195b42be12b6852", "score": "0.7087555", "text": "def hostList(self):\n return sorted([host for host in self.hosts.values() if host.fullInfoAvailable()])", "title": "" }, { "docid": "4b0844923f9a4dfb4f2b7cad925d8f2d", "score": "0.7033015", "text": "def get_workers_list(self):\n\n\t\tsql = \"SELECT hostname from hosts\"\n\t\tself.cursor.execute(sql)\n\t\tlistOfWorkersUnparsed = self.cursor.fetchall()\n\t\tlistOfWorkers = [i[0] for i in listOfWorkersUnparsed]\n\t\treturn listOfWorkers", "title": "" }, { "docid": "729082ae25d86ffdafc5a3d849056cec", "score": "0.70318204", "text": "def get_monitored_hosts(self):\n # TODO: add ability request more custom column fields\n qs = self.__get_socket().hosts.columns('host_name', 'host_address')\n return qs.call()", "title": "" }, { "docid": "ed239f6e71fc3866a417da484c191a11", "score": "0.69708794", "text": "def hostnames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"hostnames\")", "title": "" }, { "docid": "9b8c4fd2e3902444de9291423123ceb9", "score": "0.6939435", "text": "def hosts(self):\r\n return [row.get(\"host\") for row in self._results]", "title": "" }, { "docid": "409e283bfbdbbde4afb9ae86fa9b3ea2", "score": "0.69188124", "text": "def get_hostnames(name=''):\n fqdn = socket.getfqdn(name)\n return fqdn, fqdn.split('.')[0]", "title": "" }, { "docid": "b1c0b0b5eaaac619c84a309be875f508", "score": "0.6918514", "text": "def getservernames(self):\n with self.servers_lock:\n return sorted(self.servers.keys())", "title": "" }, { "docid": "5d827a1a8f1c9ea64e98678d43dde818", "score": "0.6906763", "text": "def __hostname_list(hosts):\n logger.info(\"\\n Argument List:\" + \"\\n hosts:\" + str(hosts))\n logger.info(\"Creating host name list\")\n out_list = []\n for i in range(len(hosts)):\n name = hosts[i].get(consts.HOST).get(consts.HOST_NAME)\n if name:\n host_name = name\n out_list.append(host_name)\n\n return out_list", "title": "" }, { "docid": "e64d8dd6b3124caceb44d8e58c09468d", "score": "0.6860596", "text": "def get_all_hosts(hosts):\n return \"\\n\".join(hosts)", "title": "" }, { "docid": "d42d8fffaa7d4949225c9f78c42f038f", "score": "0.6843968", "text": "def get_hosts(self):\n if 
not self._on_hosts_page():\n sel.force_navigate('infrastructure_datastore', context=self._get_context())\n try:\n list_acc.select('Relationships', 'Show all registered Hosts')\n except sel.NoSuchElementException:\n return []\n return [q.name for q in Quadicon.all(\"host\")]", "title": "" }, { "docid": "79316c6582d01eb12f4f87f52b291104", "score": "0.68382597", "text": "def get_live_usernames(self):\n username_list = []\n for client_socket in self.clients:\n username = self.clients[client_socket][\"data\"].decode()\n username_list.append(username)\n return username_list", "title": "" }, { "docid": "165fe26795b6e39821719be3598a75b1", "score": "0.68118626", "text": "def hosts(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"hosts\")", "title": "" }, { "docid": "c992850c52b5d25ebc0813fad0aa156d", "score": "0.680192", "text": "def hosts(self):\n return [x.partition(\"ssh_\")[2] for x in self.__entries.keys() if\n x.find(\"ssh_\", 0, 4) >= 0]", "title": "" }, { "docid": "e40ee6a3d338c508b42f5cba87e0451c", "score": "0.6784646", "text": "def get_host_list(self):\n all_hosts = self.service_states.keys()\n ret = []\n for host in self.service_states:\n for svc in self.service_states[host]:\n ret.append({\"service\": svc, \"host_name\": host})\n return ret", "title": "" }, { "docid": "e325243702ddb9fee087e1bde8f1873e", "score": "0.6743791", "text": "def hosts(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"hosts\")", "title": "" }, { "docid": "598aa7706e23ce55eb739cf742a02720", "score": "0.6727066", "text": "def names():\n return list(lambda name, url: name, remotes())", "title": "" }, { "docid": "8ce9529f7712db81004ae1e79680eb1e", "score": "0.6718695", "text": "def _siteList(self):\n names = []\n for x in iter(self.db):\n names.append(x.name)\n return names", "title": "" }, { "docid": "53c88d9491daef9b1a033e596790d0df", "score": "0.67141443", "text": "def _get_arpsweep_hosts(self):\n\n hosts = []\n\n data = self._parse_arpsweep_log()\n for i in data:\n hosts.append(i['host'])\n\n return hosts", "title": "" }, { "docid": "d8d87f8451ce1ef3adc0053a4bcc521e", "score": "0.6684308", "text": "def get_host_keys(self):\n return self._host_keys", "title": "" }, { "docid": "f3528dcab1ca40df8abdb8e0ee31b973", "score": "0.6679057", "text": "def list_domain_names():\n pass", "title": "" }, { "docid": "1c133ed8cebc36dc40e89dbc91746aaf", "score": "0.6677861", "text": "def get_hosts(cls):\n return [cls.domain]", "title": "" }, { "docid": "4afe6e67f703b858bb5218cbfb58b8e0", "score": "0.6674513", "text": "def all_hosts(self):\n return self.all_options", "title": "" }, { "docid": "c8dc28c67f033e4bba7f6d31582fe478", "score": "0.66722065", "text": "def get_hosts(k8s_conf):\n out = list()\n node_confs = get_k8s_dict(k8s_conf)[consts.NODE_CONF_KEY]\n for node_conf in node_confs:\n host_conf = node_conf[consts.HOST_KEY]\n out.append(host_conf[consts.HOSTNAME_KEY])\n return out", "title": "" }, { "docid": "0f8d3b4d454e74111cd91bab56b02987", "score": "0.66663074", "text": "def __repr__(self):\n return '\\n'.join(['%s'%(host) for host in sorted(self.hosts.values()) if host.fullInfoAvailable()])", "title": "" }, { "docid": "1122276843013e0c402289ddcd0e25c6", "score": "0.6648613", "text": "def getAllActiveNames(self):\r\n return [client.prettyName() for client in self.clients if client.isActive()]", "title": "" }, { "docid": "59db45eec97f8acc66088775aee254aa", "score": "0.66280484", "text": "def find_hosts(network):\n return [str(host) for host in ipcalc.Network(network) if 
is_alive(host)]", "title": "" }, { "docid": "e10dc296f8d721d80ed5588a4bb7f698", "score": "0.6612572", "text": "def list(cls, connection):\n\n uri = connection.uri(\"hosts\")\n response = connection.get(uri)\n\n if response.status_code == 200:\n response_json = json.loads(response.text)\n host_count = response_json['host-default-list']['list-items']['list-count']['value']\n\n result = []\n if host_count > 0:\n for item in response_json['host-default-list']['list-items']['list-item']:\n result.append(item['nameref'])\n else:\n raise UnexpectedManagementAPIResponse(response.text)\n\n return result", "title": "" }, { "docid": "1182a1904db7f9f2d7184f8a7bb6f892", "score": "0.66079974", "text": "def host_list(configuration):\n username = 'cc'\n formatted = lambda l: ['{}@clarity{}'.format(username, s) for s in l]\n if configuration is None:\n configuration = 'all'\n if configuration == 'all':\n return formatted([25, 26])\n elif configuration == 'hetero':\n return formatted([24, 26])\n else:\n return [main_host(configuration)]", "title": "" }, { "docid": "b58a1bd9e538edfd4cf4ea03721493b2", "score": "0.6604774", "text": "def server_hostnames(config):\n global cached_server_hostnames\n if cached_server_hostnames != []:\n return cached_server_hostnames\n\n if config.has_option('server', 'hostname_script'):\n scriptname = config.get('server', 'hostname_script')\n try:\n osStat = subprocess32.Popen([scriptname], stdout=subprocess32.PIPE, stderr=subprocess32.PIPE)\n out, err = osStat.communicate()\n if (0 == osStat.returncode and 0 != len(out.strip())):\n cached_server_hostnames = arrayFromCsvString(out)\n logger.info(\"Read server hostname '\" + cached_server_hostnames + \"' using server:hostname_script\")\n except Exception, err:\n logger.info(\"Unable to execute hostname_script for server hostname. 
\" + str(err))\n\n if not cached_server_hostnames:\n cached_server_hostnames = arrayFromCsvString(config.get('server', 'hostname'))\n return cached_server_hostnames", "title": "" }, { "docid": "2593342f833379d1ce1138f10430e11c", "score": "0.65740573", "text": "def get_all_hosts(self):\n return set( name\n for name in self.get_dirs_inside(['']))", "title": "" }, { "docid": "cc101b3705da5381c437bb4f8d2057f5", "score": "0.65712607", "text": "def get_domains_name():\n ret = []\n domains = sudo_utils.call_output([VIRSH_CMD, 'list', '--name', '--all'])\n if domains is not None:\n for d in domains.decode('utf-8').splitlines():\n if len(d) > 0:\n ret.append(d)\n\n return ret", "title": "" }, { "docid": "c43d212c128ed7944a296f69a0fad239", "score": "0.655063", "text": "def DbGetHostList(self, argin):\n self._log.debug(\"In DbGetHostList()\")\n argin = replace_wildcard(argin)\n return self.db.get_host_list(argin)", "title": "" }, { "docid": "819c2fbeee0d86e41ff4bbb028a95f22", "score": "0.6548317", "text": "def list_vhosts():\n try:\n output = subprocess.check_output([RABBITMQ_CTL, 'list_vhosts'])\n\n # NOTE(jamespage): Earlier rabbitmqctl versions append \"...done\"\n # to the output of list_vhosts\n if '...done' in output:\n return output.split('\\n')[1:-2]\n else:\n return output.split('\\n')[1:-1]\n except Exception as ex:\n # if no vhosts, just raises an exception\n log(str(ex), level='DEBUG')\n return []", "title": "" }, { "docid": "1495f050e1f4b6002a6ac40c1aa840a6", "score": "0.6545292", "text": "def hosts(self):\n if self._hosts is None:\n self._hosts = []\n for line in self._vagrant(\"status --machine-readable\"):\n bits = line.split(\",\")\n if bits[2] == \"state\":\n self._hosts.append(bits[1])\n self._hosts = tuple(self._hosts)\n return self._hosts", "title": "" }, { "docid": "8add78b869a789a98864bc1850b7f676", "score": "0.653591", "text": "def nameservers(self) -> Optional[List[str]]:\n return self.__nameservers", "title": "" }, { "docid": "72aaee88ace00acd9b8c2a2e7c28c09f", "score": "0.65294427", "text": "def storage3par_get_hosts(self):\n return [v.get('name') for v in self._get_members(self.client.getHosts())]", "title": "" }, { "docid": "58afc9b0ce06dcec00da52ac911d7d0b", "score": "0.65274554", "text": "def gethosts(self, cluster=None):\n res = self.getclusters()\n\n hosts = []\n for c in res:\n if cluster is not None and c[\"id\"] != cluster:\n continue\n uri = c[\"mongoURI\"]\n for url in uri.split(\",\"):\n tup = urllib3.util.url.parse_url(url)\n hosts += [tup.host]\n\n return hosts", "title": "" }, { "docid": "3ca4a634980bd97ff6c572a4f1e542fa", "score": "0.65171635", "text": "def get_hostnames(path):\n return [hostname for hostname, ip in path if hostname is not None]", "title": "" }, { "docid": "4289b6f0a6726260c5c4d27b3957e3a2", "score": "0.64965564", "text": "def get_hostnames(self):\n print \"Updating minions with hostnames...\"\n resp = self.run('hostname', func='cmd.run_all')\n for resp_id, hostname in resp.iteritems():\n if hostname and (resp_id in self.minions):\n #Version 2014.1.1 - Output was changed to dict\n if isinstance(hostname, dict):\n value = hostname['stdout']\n # Versuib 2014.1.0\n else:\n value = hostname\n self.minions[resp_id].update(hostname=value)", "title": "" }, { "docid": "e67a035c918cc6f7567b632882dfca2c", "score": "0.64622533", "text": "def getHostList(self, includeExpansionSegs = False):\n hostList = []\n hostList.append(self.coordinator.getSegmentHostName())\n if (self.standbyCoordinator and\n self.coordinator.getSegmentHostName() != 
self.standbyCoordinator.getSegmentHostName()):\n hostList.append(self.standbyCoordinator.getSegmentHostName())\n\n dbList = self.getDbList(includeExpansionSegs = includeExpansionSegs)\n for db in dbList:\n if db.getSegmentHostName() in hostList:\n continue\n else:\n hostList.append(db.getSegmentHostName())\n return hostList", "title": "" }, { "docid": "382f9bb4f418a4b203cd3406dbe75b82", "score": "0.6454314", "text": "def hosts(self):\n return self._hosts", "title": "" }, { "docid": "382f9bb4f418a4b203cd3406dbe75b82", "score": "0.6454314", "text": "def hosts(self):\n return self._hosts", "title": "" }, { "docid": "4999dadcd8fa6d215bbc4505cf2d97d5", "score": "0.64525765", "text": "def scanned_hosts(self):\n return [ip for ip in self._result]", "title": "" }, { "docid": "c79dc2d44830a0832157155e69a103e8", "score": "0.6450231", "text": "def get_dev_names():\n devices = nr.filter(F(site=\"central\")).inventory.hosts.keys()\n return devices", "title": "" }, { "docid": "d7de7f728bc1be2faaaccf073dc20669", "score": "0.6447246", "text": "def unique_hosts(self):\n return set(list(map(lambda x: x.split(':')[0], self._connections.keys())))", "title": "" }, { "docid": "894ce9ab3125678d25c353ba0ebd8912", "score": "0.6429284", "text": "def GetAssignedNameServers():\n if sys.platform == 'darwin':\n return _GetNameServersFromMacIpConfig()\n else:\n return _GetNameServersFromDhclient()", "title": "" }, { "docid": "39e48a59132476b91b3cd127427b42ef", "score": "0.64216954", "text": "def cmd_hostname(self, args):\n fog_host = self.app.settings[\"fog_host\"]\n return [self._client_hostname(fog_host, mac) for mac in get_macs()]", "title": "" }, { "docid": "9fbfaa7790645eb396988749e0ca03e8", "score": "0.64149714", "text": "def hosts(to_write=False):\n return settings.server_manager.active_hosts(to_write=to_write)", "title": "" }, { "docid": "f8fd4992689b10975a129e932617f71e", "score": "0.64149123", "text": "def listhostsid():\n\n for host in api.hosts.list():\n print \"host %s id is -> %s\" %(host.get_name(),host.get_id())", "title": "" }, { "docid": "2e1c7cd87983727c12db28d107591766", "score": "0.6414428", "text": "def get_ip_list(hostname):\r\n ip_list = []\r\n ais = socket.getaddrinfo(hostname,0,0,0,0)\r\n for result in ais:\r\n ip_list.append(result[-1][0])\r\n ip_list = list(set(ip_list))\r\n return ip_list", "title": "" }, { "docid": "1d35404627492601ac70546ce813d97d", "score": "0.6413877", "text": "def GetCurrentNameServers():\n try:\n servers = dns.resolver.Resolver().nameservers\n except:\n print \"Unable to get list of internal DNS servers.\"\n servers = []\n\n # dnspython does not always get things right on Windows, particularly in\n # versions with right-to-left languages. 
Fall back to ipconfig /all\n if not servers and sys.platform[:3] == 'win':\n return _GetNameServersFromWinIpConfig()\n return servers", "title": "" }, { "docid": "3210ba59c47a5ea1a8224c1b165578ec", "score": "0.6400173", "text": "def hostsOnNetwork():\n os.system('net view > conn.tmp')\n f = open('conn.tmp', 'r')\n f.readline();f.readline();f.readline()\n \n conn = []\n host = f.readline()\n while host[0] == '\\\\':\n conn.append(host[2:host.find(' ')])\n host = f.readline()\n \n f.close()\n return conn", "title": "" }, { "docid": "0a86712ae9f8d2820583b127ad95c0ff", "score": "0.6393474", "text": "def host(self):\n\n return '|'.join(\n split_host_port(host, self.DEFAULT_PORT)[0]\n for host in self.hosts\n )", "title": "" }, { "docid": "e523ffe77811897dcf0d8ec35bdc508f", "score": "0.6385692", "text": "def get():\n with open('/etc/resolv.conf', 'r') as f:\n lines = f.readlines()\n ns = [x for x in lines if x.strip().startswith('nameserver')]\n ips = map(lambda x: x.split()[1], ns)\n local_dns_servers = __salt__['search.resolve_ips'](__salt__['search.mine_by_host']('roles:mesos.dns'))\n return [ip for ip in ips if ip not in local_dns_servers]", "title": "" }, { "docid": "93cbd46f346b580dbea5bd5b883bea11", "score": "0.636342", "text": "def get_hosts(ip):\n\n hosts = []\n if CONFIG[\"do_reverse_dns\"].lower() == \"true\":\n try:\n with open(\"/etc/hosts\") as f:\n entries = f.read().split(\"\\n\")\n for entry in entries:\n entry = entry.strip()\n if entry.startswith(ip + \" \"):\n hosts.append(entry[entry.rfind(\" \")+1:])\n except FileNotFoundError:\n pass\n\n if not hosts:\n try:\n hosts.append(socket.gethostbyaddr(ip)[0])\n except socket.herror:\n hosts.append(ip)\n\n else:\n hosts = [ip]\n\n return hosts", "title": "" }, { "docid": "c38df341d77a40b61292506d29942323", "score": "0.6340111", "text": "def getGhosts(self):\n\t\treturn self.ghosts", "title": "" }, { "docid": "df8dab915152dbfecc218e1deb133e6f", "score": "0.63361317", "text": "def active_domains(self):\n\n if not hasattr(self, '_active_domains'):\n self._active_domains = []\n for domain in self.allowed_domains:\n try:\n socket.gethostbyname(domain)\n self._active_domains.append(domain)\n except (socket.gaierror,):\n pass\n return self._active_domains", "title": "" }, { "docid": "c1bb6b090af2174da74c85d0f79882a4", "score": "0.6335734", "text": "def _list(self):\n conn = self._connect()\n try:\n names = conn.listDefinedDomains()\n except libvirt.libvirtError:\n raise CuckooMachineError(\"Cannot list domains\")\n finally:\n self._disconnect(conn)\n return names", "title": "" }, { "docid": "8b0b547d12211d05629c9224b9e9a88f", "score": "0.63320756", "text": "def serverlist(self):\n\t\treturn self.send_command('serverlist')", "title": "" }, { "docid": "8a7ff47348ac4889641f92993efb7a61", "score": "0.6328612", "text": "def getServers():\n\n return serverList", "title": "" }, { "docid": "9670af82b6c176a7f2460c89a2aeec47", "score": "0.6328393", "text": "def GetFullHostName():", "title": "" }, { "docid": "84f3829af52617b09d03b5fc6be5921d", "score": "0.6323131", "text": "def get_clients_list(self):\n\n res = []\n for hit in self.all_results:\n host = hit[\"_source\"][\"host\"]\n if host not in res:\n res.append(host)\n log.info(f\"The pods names used in this test are {res}\")\n return res", "title": "" }, { "docid": "175dd482b3e589a926ad7db1e6a9d5b2", "score": "0.63036466", "text": "def get_nodes():\n hostfile = os.getenv('ARL_HOSTFILE', None)\n if hostfile is None:\n print(\"No hostfile specified\")\n return None\n \n import socket\n with 
open(hostfile, 'r') as file:\n nodes = [line.replace('\\n', '') for line in file.readlines()]\n print(\"Nodes being used are %s\" % nodes)\n nodes = [socket.gethostbyname(node) for node in nodes]\n print(\"Nodes IPs are %s\" % nodes)\n return nodes", "title": "" }, { "docid": "bfb1ec5cf0f045aa9e239b6982a69d43", "score": "0.62972957", "text": "def getMachineNames():\n\tnames = []\n\tfor key in getMachineDict():\n\t\tnames.append(key)\n\treturn ' '.join(names)", "title": "" }, { "docid": "246b5230e922ff5d953e458ec2857c6f", "score": "0.6296767", "text": "def get_nameservers(fqdn):\n\n print('Retrieving nameservers ...', end='')\n ret = {name: [] for name in livedns_ns(fqdn)}\n\n print('\\b\\b\\b' + ', '.join(ret))\n\n fmt = '\\rRetrieving IP addresses %%d/%d' % len(ret)\n print(fmt % 0, end='')\n\n for i, name in enumerate(ret, 1):\n ret[name] = resolve_name(name)\n print(fmt % i, end='')\n\n print('\\n')\n\n return ret", "title": "" }, { "docid": "52c3964a631607463e5c571802214ebe", "score": "0.6293744", "text": "def _read_connections(self) -> List[str]:\n self._update_connection_list()\n return [ch.short_name for ch in self.connection_list]", "title": "" }, { "docid": "350d0840ba03c7b202ff2428c331c9dd", "score": "0.626931", "text": "def get_node_hostnames(cm_host):\n # logger.info('Querying CM API for node hostnames')\n # api = ApiResource(cm_host, username=\"admin\", password=\"admin\", version=15)\n # hosts = api.get_all_hosts()\n # return [host.hostname for host in hosts[1:]] # Skip hosts[0], the CM itself\n raise NotImplementedError(\"cm_api module is deprecated: need to use cm_client\")", "title": "" }, { "docid": "63d3736c039d0c4d0b220c76f3ce72bc", "score": "0.62687737", "text": "def serverlist(self):\n return self.send_command('serverlist')", "title": "" }, { "docid": "0677289f789ebfc78c97903c893c7343", "score": "0.62674475", "text": "def servers(self):\n servers = self.run_command('ismaster').get('hosts', [])\n return [member for member in self.members() if member['host'] in servers]", "title": "" }, { "docid": "42e6317c863328a7173708631b91fe9d", "score": "0.6249068", "text": "def get_molvis_hosts(self):\n\n resources = self._parse_resources_file()\n hosts = resources['molvis_server'].split(',')\n\n self.logger.debug('molvis_server: %s' % (hosts))\n\n return hosts", "title": "" }, { "docid": "683e04172e445a1b5a8dbcd4e51d1472", "score": "0.62431705", "text": "def local_servers(self):\n return self._local_server_controller.local_servers", "title": "" }, { "docid": "514e28d0958e199b32dae67ba894a79f", "score": "0.6242789", "text": "def servers():\n return running.working_servers", "title": "" }, { "docid": "61b68da897d633ded0e8440a8d3e18f1", "score": "0.62411755", "text": "def stack_hostnames(client, stack_title):\n stack_id = get_stack_id(client, stack_title)\n hosts = client.get_stack_hosts(stack_id)\n\n click.echo('Hostnames:')\n for host in hosts:\n click.echo(' - {0} ({1})'.format(host['fqdn'], host['state']))", "title": "" }, { "docid": "b51b8a5d7b7ccc28bd81e9d2ab523c99", "score": "0.624094", "text": "def get_nodes(self):\n res = subprocess.check_output(\n [os.path.dirname(os.path.abspath(__file__)) +'/nodes.sh'], shell=True\n )\n res = res.decode('utf8').strip().split(\"\\n\")\n return {keyval[0]: socket.gethostbyname(keyval[1]) for keyval in [line.split(\" - \") for line in res]}", "title": "" }, { "docid": "3112aca6e72987e5554cc10976245c32", "score": "0.62396014", "text": "def ip_addresses(self):\n return self._resolve(\"ahosts\")", "title": "" }, { "docid": 
"30c043fac20043cb5a68cbcd59797b5d", "score": "0.6234668", "text": "def toString(self):\n entries = []\n for ip, aliases in self.items():\n sortedHostnames = sortHostnames(aliases)\n entries.append(\"{ip} {aliases}\".format(ip=ip, aliases=\" \".join(sortedHostnames)))\n return \"\\n\".join(entries) + \"\\n\"", "title": "" }, { "docid": "4acbdb24d64a8d5addeb006f3513bcea", "score": "0.6219504", "text": "def get_nodes(self):\n res = subprocess.check_output(\n [os.path.dirname(os.path.abspath(__file__)) + '/nodes.sh'], shell=True\n )\n res = res.decode('utf8').strip().split(\"\\n\")\n return {keyval[0]: socket.gethostbyname(keyval[1]) for keyval in [line.split(\" - \") for line in res]}", "title": "" }, { "docid": "75e0a212cd94eeda72407278a5afbf49", "score": "0.6212591", "text": "def get_static_hosts(self):\n config = self.get_config('DNSNameService', persisted=False)\n static_hosts = config.xml.findall(CONFIG_XPATH + '/StaticHosts')\n\n return [\n (x.find('Hostname').text, x.find('IPAddress').text)\n for x in static_hosts]", "title": "" }, { "docid": "94215dfa95be236054018c3b7fce035c", "score": "0.6208681", "text": "def get_ghosts(self):\n return self.__ghosts", "title": "" }, { "docid": "998be57799e272d39f49ac3f5eaa2b86", "score": "0.62003386", "text": "def DbGetHostServerList(self, argin):\n self._log.debug(\"In DbGetHostServerList()\")\n argin = replace_wildcard(argin)\n return self.db.get_host_server_list(argin)", "title": "" }, { "docid": "c7380ddf7b928938f1845e7835756ad2", "score": "0.61984676", "text": "def GetHostName():", "title": "" }, { "docid": "a74474b419173d6cc6080d6cf5b62526", "score": "0.61915797", "text": "def getHostsInAmbari(self):\n config = Script.get_config()\n hosts_in_ambari = config['clusterHostInfo']['mongodc_hosts'] ## Service hosts list\n return hosts_in_ambari", "title": "" } ]
35c623f30178746f5f5e8f39c70f007d
Return the DataContainer from a table row instance.
[ { "docid": "2a9b0521fe06029a4ddea1f3ce11bf0d", "score": "0.5291903", "text": "def _retrieve(self, row):\n uid = uuid.UUID(hex=row['uid'], version=4)\n return Particle(\n uid=uid, coordinates=row['coordinates'], data=self._data[uid])", "title": "" } ]
[ { "docid": "01a807f81ace83f5eeb9eb05cdcb0fa3", "score": "0.62217975", "text": "def get_row(self, row):\n return self._db.get_row(self._name, row, column_map=self._colmap)", "title": "" }, { "docid": "afeecb2a704c4bdaeb7392b3d002009b", "score": "0.6117592", "text": "def row(self):\n if self._row is None:\n self._row = Rows(self)\n return self._row", "title": "" }, { "docid": "53c0c1bfda6be92ee6542c4dd8b5c5b8", "score": "0.604547", "text": "def getRow(self, row):\n m = self.copy()\n return m.mData[row]", "title": "" }, { "docid": "fdfdd2f4318f8a693b20f412d8c97eec", "score": "0.6029019", "text": "def row(self, row_key):\n return Row(row_key, self)", "title": "" }, { "docid": "69662ab1773ce1f8440802cf9ee79bd8", "score": "0.60174733", "text": "def FromRow(cls, row):\n return Entry(*row)", "title": "" }, { "docid": "00699a09d6c6783bb457bf02134e860c", "score": "0.5991917", "text": "def _row_to_obj(self, row):\n kwargs = dict((col_info[0], val)\n for (val, col_info) in zip(row, self.columns))\n kwargs[self.key_col] = row[-1]\n logging.debug(kwargs)\n return self.obj_ctor(**kwargs)", "title": "" }, { "docid": "546487bf1790db71c489c848d9794d83", "score": "0.5936227", "text": "def from_db(cls, store, row):\n return cls(store, handle=FileHandle(row.handle_type, row.handle), ino=row.ino, type=row.type, iid=row.iid)", "title": "" }, { "docid": "6df1f90f99031773c123bb5ce8ed92cb", "score": "0.5900379", "text": "def from_sql(row):\n data = row.__dict__.copy()\n data['videoID'] = row.videoID\n data.pop('_sa_instance_state')\n return data", "title": "" }, { "docid": "2c060fbff3a9b230185db7e8283283ef", "score": "0.57909846", "text": "def construct_rows(self):\n\n def extract_sub_row(row, selectables):\n\n \"\"\"Adapt a row result to the expectation of sqlalchemy.\n :param row: a list of python objects\n :param selectables: a list entity class\n :return: the response follows what is required by sqlalchemy (if len(model)==1, a single object is fine, in\n the other case, a KeyTuple where each sub object is associated with it's entity name\n \"\"\"\n\n if len(selectables) > 1:\n\n labels = []\n\n for selectable in selectables:\n labels += [self.find_table_name(selectable._model).capitalize()]\n\n product = []\n for label in labels:\n product = product + [getattr(row, label)]\n\n # Updating Foreign Keys of objects that are in the row\n for label in labels:\n current_object = getattr(row, label)\n metadata = current_object.metadata\n if metadata and hasattr(metadata, \"_fk_memos\"):\n for fk_name in metadata._fk_memos:\n fks = metadata._fk_memos[fk_name]\n for fk in fks:\n local_field_name = fk.column._label\n remote_table_name = fk._colspec.split(\".\")[-2].capitalize()\n remote_field_name = fk._colspec.split(\".\")[-1]\n\n try:\n remote_object = getattr(row, remote_table_name)\n remote_field_value = getattr(remote_object, remote_field_name)\n setattr(current_object, local_field_name, remote_field_value)\n except:\n pass\n\n # Updating fields that are setted to None and that have default values\n for label in labels:\n current_object = getattr(row, label)\n for field in current_object._sa_class_manager:\n instance_state = current_object._sa_instance_state\n field_value = getattr(current_object, field)\n if field_value is None:\n try:\n field_column = instance_state.mapper._props[field].columns[0]\n field_default_value = field_column.default.arg\n setattr(current_object, field, field_default_value)\n except:\n pass\n\n return KeyedTuple(product, labels=labels)\n else:\n model_name = 
self.find_table_name(selectables[0]._model).capitalize()\n return getattr(row, model_name)\n\n\n request_uuid = uuid.uuid1()\n\n labels = []\n columns = set([])\n rows = []\n\n model_set = extract_models(self._models)\n\n # get the fields of the join result\n for selectable in model_set:\n labels += [self.find_table_name(selectable._model).capitalize()]\n\n if selectable._attributes == \"*\":\n try:\n selected_attributes = selectable._model._sa_class_manager\n except:\n selected_attributes = selectable._model.class_._sa_class_manager\n pass\n else:\n selected_attributes = [selectable._attributes]\n\n for field in selected_attributes:\n\n attribute = None\n if hasattr(self._models, \"class_\"):\n attribute = selectable._model.class_._sa_class_manager[field].__str__()\n elif hasattr(self._models, \"_sa_class_manager\"):\n attribute = selectable._model._sa_class_manager[field].__str__()\n\n if attribute is not None:\n columns.add(attribute)\n\n # construct the cartesian product\n list_results = []\n for selectable in model_set:\n tablename = find_table_name(selectable._model)\n objects = get_objects(tablename, request_uuid=request_uuid)\n list_results += [objects]\n\n # construct the cartesian product\n cartesian_product = []\n for element in itertools.product(*list_results):\n cartesian_product += [element]\n\n # filter elements of the cartesian product\n for product in cartesian_product:\n if len(product) > 0:\n row = KeyedTuple(product, labels=labels)\n all_criterions_satisfied = True\n\n for criterion in self._criterions:\n if not criterion.evaluate(row):\n all_criterions_satisfied = False\n if all_criterions_satisfied and not row in rows:\n rows += [extract_sub_row(row, model_set)]\n\n final_rows = []\n showable_selection = [x for x in self._models if (not x.is_hidden) or x._is_function]\n\n if self.all_selectable_are_functions():\n final_row = []\n for selection in showable_selection:\n value = selection._function._function(rows)\n final_row += [value]\n return [final_row]\n else:\n for row in rows:\n final_row = []\n for selection in showable_selection:\n if selection._is_function:\n value = selection._function._function(rows)\n final_row += [value]\n else:\n current_table_name = self.find_table_name(selection._model)\n key = current_table_name.capitalize()\n value = None\n if not is_novabase(row) and hasattr(row, key):\n value = getattr(row, key)\n else:\n value = row\n if value is not None:\n if selection._attributes != \"*\":\n final_row += [getattr(value, selection._attributes)]\n else:\n final_row += [value]\n if len(showable_selection) == 1:\n final_rows += final_row\n else:\n final_rows += [final_row]\n\n return final_rows", "title": "" }, { "docid": "2a95a3541a7f4685bb543b24baef4378", "score": "0.5757766", "text": "def get_row(self):\n return self.row", "title": "" }, { "docid": "2a95a3541a7f4685bb543b24baef4378", "score": "0.5757766", "text": "def get_row(self):\n return self.row", "title": "" }, { "docid": "0fee2a68b9ea2f5c7d2f02091951794a", "score": "0.5725369", "text": "def decode_row(self, row):\r\n return row", "title": "" }, { "docid": "2c68a2d94028db638002d184ad450275", "score": "0.5711138", "text": "def rows(self):\n return _RowCollection(self._tbl, self)", "title": "" }, { "docid": "8918d0e4da5ae088c7821b8e750dda19", "score": "0.5672764", "text": "def get_row(self):\n return self._row", "title": "" }, { "docid": "55bba12a263b2ac9a08ed31a5ede36b4", "score": "0.5649798", "text": "def get_row(self, row):\n\t\treturn self.__rows[row]", "title": "" }, { "docid": 
"5425e901821c3dc369bfd2f1249e814a", "score": "0.5649382", "text": "def get_rows_dictionary(self):\n return self.rows", "title": "" }, { "docid": "ca133bf43c3edbdc4a62f7404d6f881f", "score": "0.5628909", "text": "def __iter__(self):\n df = self.dataframe\n rows = df.query(self.rows) if self.rows else df\n series = rows[self.colnames] if self.colnames else rows\n Row = namedtuple(self.rowname, series.columns.to_list())\n\n if not self.replacenan is False:\n values = (self._replacenan(row) for row in series.values)\n elif self.dropnan:\n values = series.dropna().values\n else:\n values = series.values\n return (Row(*v) for v in values)", "title": "" }, { "docid": "b7d993b11901d9b8cf83d681500d9a9b", "score": "0.5573983", "text": "def get_table_object(self):\n return None", "title": "" }, { "docid": "969c6fb42134994eba514cbc66f06508", "score": "0.55576646", "text": "def _get_row(self, i):\n if i not in self._cached_rows:\n # If rows are from a fork, they are safe to access directly\n if isinstance(self._data[i], Row):\n self._cached_rows[i] = self._data[i]\n else:\n self._cached_rows[i] = Row(self, i)\n\n return self._cached_rows[i]", "title": "" }, { "docid": "0ee3ec117098ef27973a81be4b7be81d", "score": "0.5536246", "text": "def _from_table(t):\n table = copy.deepcopy(t)\n # Default the time index to the first column\n index_name = table.colnames[0]\n # Check if another column is defined as the index/primary_key\n if table.primary_key:\n # Check there is only one primary_key/index column\n if len(table.primary_key) == 1:\n table.primary_key[0]\n else:\n raise ValueError(\"Invalid input Table, TimeSeries doesn't support conversion\"\n \" of tables with more then one index column.\")\n\n # Extract, convert and remove the index column from the input table\n index = table[index_name]\n # Convert if the index is given as an astropy Time object\n if isinstance(index, Time):\n index = index.datetime\n index = pd.to_datetime(index)\n table.remove_column(index_name)\n\n # Extract the column values from the table\n data = {}\n units = {}\n for colname in table.colnames:\n data[colname] = table[colname]\n units[colname] = table[colname].unit\n\n # Create a dataframe with this and return\n df = pd.DataFrame(data=data, index=index)\n return df, MetaDict(table.meta), units", "title": "" }, { "docid": "f34bc445a3642b7c19bb649553ee348d", "score": "0.5522289", "text": "def transform(self, row: t.Dict) -> t.Dict:\n return row", "title": "" }, { "docid": "4f69a21b1b4173c7b434453f290e80d6", "score": "0.550745", "text": "def get_obj(self, key):\n return self._row_to_obj(self.sdb.query_one_row_always(\n \"SELECT {cols}, {key_col} \"\n \"FROM {table} \"\n \"WHERE {key_col} = {key};\".\n format(key_col=self.key_col,\n cols=', '.join(ci[0] for ci in self.columns),\n table=self.table,\n key=key)))", "title": "" }, { "docid": "edf2ee169aa128ac706fa41342362de5", "score": "0.55044746", "text": "def create_row(cls, parent):\n selection_set = SelectionSet()\n selection_set.update()\n row = SelectionRow(parent, selection_set)\n return row", "title": "" }, { "docid": "12fb09c612ba149858bdd329a2f015ac", "score": "0.54953986", "text": "def create_resource_from_db_row(row):\n parent = (\n create_resource_from_db_row(row.parent) if row.parent else None)\n\n return create_resource_from_json(row.type, parent, row.data)", "title": "" }, { "docid": "98c6b958f040e0437e3bf4cf17fe6721", "score": "0.5477457", "text": "def _fork(self, rows, column_types=[], column_names=[]):\n if not column_types:\n column_types = self._column_types\n\n 
if not column_names:\n column_names = self._column_names\n\n return Table(rows, column_types, column_names)", "title": "" }, { "docid": "57892128685aa920cea000b063897ee6", "score": "0.5429801", "text": "def get_ayah_object_from_row(self, row):\n ayah_id = row.get('ayat')\n surah_id = row.get('sura')\n\n ayah = Ayah.objects.get(\n number=ayah_id,\n surah=surah_id,\n )\n return ayah", "title": "" }, { "docid": "cd5de60a9aeb25f0ab7e4e5832860af9", "score": "0.5403882", "text": "def _patient_wrapper(row):\n from bhoma.apps.patient.models import CPatient\n data = row.get('value')\n docid = row.get('id')\n doc = row.get('doc')\n if not data or data is None:\n return row\n if not isinstance(data, dict) or not docid:\n return row\n else:\n if 'rev' in data:\n data['_rev'] = data.pop('rev')\n case = cls.wrap(data)\n case.patient = None\n if doc == None:\n # there's (I think) a bug in couchdb causing these to come back empty\n try:\n doc = CPatient.get_db().get(docid)\n except Exception, e:\n pass\n if doc and doc.get(\"doc_type\") == \"CPatient\":\n case.patient = CPatient.wrap(doc)\n return case", "title": "" }, { "docid": "6146925bfb7a528247641673b44832ff", "score": "0.53606945", "text": "def parse(self, data: Mapping) -> T:\n data = super().parse(data)\n obj: T = dacite.from_dict(self.dataclass_cls, {k: v for k, v in data.items()})\n return obj", "title": "" }, { "docid": "87fdc726e4eadcf1f89b34f88e5bf1d4", "score": "0.5355016", "text": "def __getitem__(self, idx):\n if idx < 0 or idx >= len(self._tbl_elm.tr):\n msg = \"row index [%d] out of range\" % idx\n raise IndexError(msg)\n return _Row(self._tbl_elm.tr[idx], self)", "title": "" }, { "docid": "70f5e35f165ecd9da01fc58ecaf6f2ef", "score": "0.533371", "text": "def dataclass_to_dataframe(instance) -> pd.DataFrame:\n if not is_dataclass(instance):\n raise ValueError('Input must be a dataclass')\n\n value = pd.DataFrame.from_dict(asdict(instance), orient='index', columns=['value'])\n metas = dataclass_meta_to_dataframe(instance)\n\n dataframe = pd.merge(value, metas, left_index=True, right_index=True)\n return dataframe", "title": "" }, { "docid": "e5e75a8a45b16b6321db9b125d97b22c", "score": "0.5294269", "text": "def _get_item_as_row(self, item_name):\n # \"field\" usually equals to {tg_package}.model.User.user_name\n # or {tg_package}.model.Group.group_name\n field = getattr(self.children_class, self.translations['item_name'])\n query = self.dbsession.query(self.children_class).options(eagerload(self.translations['sections']))\n try:\n# item_as_row = query.filter(field == item_name).one()\n #change by CL.Lam on 20101-12-21 , to solve the login case-insensitive problem.\n item_as_row = query.filter(field.op(\"ilike\")(item_name)).one()\n except NoResultFound:\n msg = 'Item (%s) \"%s\" does not exist in the child table'\n msg = msg % (self.translations['item_name'], item_name)\n raise SourceError(msg)\n return item_as_row", "title": "" }, { "docid": "084e44f132dd59665d6db3dbe76e8121", "score": "0.5289469", "text": "def GetDataTable(table_schema, table_rows):\r\n if not table_schema or not table_rows:\r\n return None\r\n\r\n data_table_output = gviz_api.DataTable(table_schema)\r\n data_table_output.LoadData(table_rows)\r\n\r\n return data_table_output", "title": "" }, { "docid": "152cf9fad200e51e059334005b9bfc66", "score": "0.52872825", "text": "def _row_to_dict(self, row):\n raise NotImplementedError()", "title": "" }, { "docid": "22796c9fbee0dedc08b621094846f823", "score": "0.52856624", "text": "def extract_table_entry(container: LTContainer, 
row: Tuple, columns: List, field_names: List) -> Dict:\n\n # Extract row content and assign a field to each cell\n cells = extract_row_content(container, row, columns)\n entry = {field: content for field, content in zip(field_names, cells)}\n\n return entry", "title": "" }, { "docid": "25a287222d2bce40db0fa522d81ec50c", "score": "0.52724856", "text": "def initFromDbRowEx(self, aoRow, oDb, tsNow = None):\n SchedGroupData.initFromDbRow(self, aoRow);\n return self._initExtraMembersFromDb(oDb, tsNow);", "title": "" }, { "docid": "1a4b9454464cfcce24c2219d397e4e68", "score": "0.525325", "text": "def get_r(self, row):", "title": "" }, { "docid": "7fd85e9d7064dd92f8fee772c0009cdd", "score": "0.52523226", "text": "def data_rows(table, query, orderby=None, limitby=None, fields=None):\n rows = []\n for r in table._db(query).select(limitby=limitby, orderby=orderby):\n vals = []\n for f in fields or table.fields:\n if (f in table and table[f].represent):\n vals.append(table[f].represent(r[f]))\n else:\n vals.append(r[f])\n rows.append(dict(id=r.id, cell=vals))\n return rows", "title": "" }, { "docid": "f301e713fa2347c370d185b9750d19a5", "score": "0.52393806", "text": "def get(self):\n if self.arrow_table is not None:\n return self.arrow_table\n return self.pandas_df", "title": "" }, { "docid": "9042a3d6fb7ec699dffc9f143156aa67", "score": "0.52379125", "text": "def child(self, row):\n return self._children[row]", "title": "" }, { "docid": "899b860fa0ffa15259b8579d7a7003b7", "score": "0.52308077", "text": "def child(self, row):\n return self._child_items[row]", "title": "" }, { "docid": "012caac4b10d11358cb1a8c783fc2520", "score": "0.5222286", "text": "def row2dict(row):\n\n if row is None:\n return None\n d = {}\n if hasattr(row, '__table__'):\n for column in row.__table__.columns:\n value = getattr(row, column.name)\n d[column.name] = value\n elif hasattr(row, '_fields'):\n for column in row._fields:\n d[column] = getattr(row, column)\n return d", "title": "" }, { "docid": "280c8eb3fecb5b4e5e362eb492ddf352", "score": "0.52215755", "text": "def from_row(cls, **row):\n return Proj(**row)", "title": "" }, { "docid": "5109c550c37b13516d71afc006ccad7b", "score": "0.52157116", "text": "def get_item(self, table, column, row):\n self._check_table(table)\n row = _fix_row_index(row, len(self[table]))\n column = _sanitize_colnames([column])[0]\n return self.get_column(table, column)[row]", "title": "" }, { "docid": "b7ba0c0661e2c55051da31c6e73b7b78", "score": "0.5205758", "text": "def dataset(self):\n # Create a new Tablib Dataset.\n data = tablib.Dataset()\n\n # Set the column names as headers on Tablib Dataset.\n first = self[0]\n\n data.headers = first._fields\n for row in self.all():\n row = _reduce_datetimes(row)\n data.append(row)\n\n return data", "title": "" }, { "docid": "d75ab888bab9783ee311bd907347e610", "score": "0.5201109", "text": "def getData(self):\r\n return self.tabledata", "title": "" }, { "docid": "39ce26ebe7f9ce59b3c66fb361551714", "score": "0.519904", "text": "def get_wrapped_table(self):\r\n if self.is_table_wrapper:\r\n for child in self.children:\r\n if isinstance(child, TableBox):\r\n return child\r\n else: # pragma: no cover\r\n raise ValueError('Table wrapper without a table')", "title": "" }, { "docid": "a348152ae95f96f584ba2abe37745425", "score": "0.5178131", "text": "def __getitem__(self, row_idx):\n return self.query({DAO.COL_ROW_IDX: row_idx})", "title": "" }, { "docid": "a1996721b6e04b8b203c1b4dcded1fd5", "score": "0.5177995", "text": "def rowToObject (self, rowTuple):\n if 
rowTuple:\n departmentID, \\\n deptCode, \\\n name, \\\n managerID = rowTuple\n if managerID != None:\n managerID = int (managerID)\n newDepartment = Department (int (departmentID),\n deptCode,\n name,\n managerID, 1, 0)\n return self.cachedObject (newDepartment)\n else:\n return None", "title": "" }, { "docid": "a7d8463847f74536bcbe50e1b6a10376", "score": "0.51670223", "text": "def namedtuple_factory(cursor, row):\n fields = [col[0] for col in cursor.description if col[0].isidentifier()]\n Row = namedtuple(\"Row\", fields)\n return Row(*row)", "title": "" }, { "docid": "719c09796e606ac578dce5cd0845ef9f", "score": "0.51570827", "text": "def _get_row(self, row_key):\n if row_key not in self._row_map:\n table = self._table._low_level_table\n self._row_map[row_key] = table.row(row_key)\n\n return self._row_map[row_key]", "title": "" }, { "docid": "51c17986ef45d0db942355450629c677", "score": "0.513401", "text": "def mock_query_tbl_row_2():\n\n rows = {\"rows\": [{\"last_object\": 10}, {\"last_object\": 11}]}\n return rows", "title": "" }, { "docid": "c6bfe8363d0b78d125a7b10e302a6a6d", "score": "0.51277834", "text": "def get_data(self, datum, column, row):\r\n table = row.table\r\n if column.auto == \"multi_select\":\r\n data = \"\"\r\n if row.can_be_selected(datum):\r\n widget = forms.CheckboxInput(check_test=lambda value: False)\r\n # Convert value to string to avoid accidental type conversion\r\n data = widget.render('object_ids',\r\n unicode(table.get_object_id(datum)),\r\n {'class': 'table-row-multi-select'})\r\n table._data_cache[column][table.get_object_id(datum)] = data\r\n elif column.auto == \"form_field\":\r\n widget = column.form_field\r\n if issubclass(widget.__class__, forms.Field):\r\n widget = widget.widget\r\n\r\n widget_name = \"%s__%s\" % \\\r\n (column.name,\r\n unicode(table.get_object_id(datum)))\r\n\r\n # Create local copy of attributes, so it don't change column\r\n # class form_field_attributes\r\n form_field_attributes = {}\r\n form_field_attributes.update(column.form_field_attributes)\r\n # Adding id of the input so it pairs with label correctly\r\n form_field_attributes['id'] = widget_name\r\n\r\n data = widget.render(widget_name,\r\n column.get_data(datum),\r\n form_field_attributes)\r\n table._data_cache[column][table.get_object_id(datum)] = data\r\n elif column.auto == \"actions\":\r\n data = table.render_row_actions(datum)\r\n table._data_cache[column][table.get_object_id(datum)] = data\r\n else:\r\n data = column.get_data(datum)\r\n return data", "title": "" }, { "docid": "9b1d9d3c5cfb9ccbc5f0895a07ec9a0e", "score": "0.51202184", "text": "def __dict_factory(cursor, row):\n\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "title": "" }, { "docid": "6c837f341df21f0ac2dc28bd3753de7d", "score": "0.511994", "text": "def _return_dataset(self):\n\n return self.df", "title": "" }, { "docid": "6c837f341df21f0ac2dc28bd3753de7d", "score": "0.511994", "text": "def _return_dataset(self):\n\n return self.df", "title": "" }, { "docid": "18a201e55be55c4ece8cc6e79fb4644a", "score": "0.51187474", "text": "def unpack_dataset(cls, data_item: DataItem):\n return data_item.as_df()", "title": "" }, { "docid": "04a2cc2571e735461bab3253097428fc", "score": "0.5093348", "text": "def mock_query_tbl_row_1():\n\n rows = {\"rows\": [{\"last_object\": 10}]}\n return rows", "title": "" }, { "docid": "58c0fd1f71036483691fb3dd078968ed", "score": "0.5092879", "text": "def get_data(self):\n return self.__df", "title": "" }, { "docid": 
"9787e6ecf6a1f56b026dddd1575871a5", "score": "0.50809103", "text": "def _retrieve(self, row):\n uid = uuid.UUID(hex=row['uid'], version=4)\n number_of_items = row['n_particles']\n particles = [\n uuid.UUID(bytes=buffer(value), version=4)\n for value in row['particles'][:number_of_items]]\n return Bond(\n uid=uid, particles=particles, data=self._data[uid])", "title": "" }, { "docid": "1eb706eff8fa10fa658461f9a7fa6d41", "score": "0.5074743", "text": "def row(self, row_idkey):\n fmt = id_key_format(\"Row ID/Key\", row_idkey,\n u\"Row(%s=%s)\",\n u\"Row(%s='%s')\")\n return PQLQuery(fmt % (self.name, row_idkey), self.index)", "title": "" }, { "docid": "4a305c9169d0db270a7e5b127e5ed80f", "score": "0.50744677", "text": "def get(self, rows=None, cols=None):\n\n # determine rows and cols\n if rows is None:\n rows = range(len(self))\n\n if cols is None:\n cols = self.headers\n\n tab = self.new(cols)\n\n # copy data\n for i in rows:\n row = self[i]\n row2 = {}\n for j in cols:\n row2[j] = row[j]\n tab.append(row2)\n\n return tab", "title": "" }, { "docid": "a933e7bf2bd0d086779444b3cdc4b527", "score": "0.507241", "text": "def get_as_row(self):\r\n\t\treturn self.text, self.id, self.location", "title": "" }, { "docid": "0439ef418907630a126f57c73ed5a096", "score": "0.50685966", "text": "def table(rows: Iterable[Mapping[str, Any]]) -> Table:\n return Table.from_iterable(rows)", "title": "" }, { "docid": "eef2c23b3be0ae9f3d03105e7d11e17d", "score": "0.50629777", "text": "def row_dict(self):\n return {r.name: r for r in self.rows if r.parent is None}", "title": "" }, { "docid": "507019f2224439c087b28ba4dab94ec4", "score": "0.5053642", "text": "def dict_factory(cursor, row):\n dictionary = {}\n for idx, col in enumerate(cursor.description):\n dictionary[col[0]] = row[idx]\n return dictionary", "title": "" }, { "docid": "fb78a7e9b98a8a1b0f40a2812fc25408", "score": "0.50525904", "text": "def _prepare_object_values(self, row):\n prepared_row = dict()\n prepared_row.update(row)\n self.prepare_object_values(prepared_row)\n return prepared_row", "title": "" }, { "docid": "62e9f52d9585d05ca908e4f5db2fbf2f", "score": "0.50476205", "text": "def get_data(self):\n return self.df", "title": "" }, { "docid": "ac88afe25d1200f6ca07373f683121f6", "score": "0.50443125", "text": "def build_row_object(isolate):\n mic_data = isolate['isolate_data']['mic_data'] \n drug_mic_data = list(map(lambda x: get_drug_mic_data(x), mic_data))\n row = {drug:result for drugResult in drug_mic_data for drug,result in drugResult.items()}\n row['isolate_date'] = isolate['isolate_date']\n row['species'] = isolate['isolate_data']['organism_name']\n return row", "title": "" }, { "docid": "03d60e87214d01c175f05f468712b63d", "score": "0.50437206", "text": "def get_row(self, table, index, column_map=None):\n self._check_table(table)\n index = _fix_row_index(index, len(self[table]))\n row = self._row_indexes[table][index]\n return SQLRow(self, table, row, colmap=column_map)", "title": "" }, { "docid": "ae70d6f15029b9d1f50d4e79ff460556", "score": "0.50361985", "text": "def data_class(self, T: Type[_T]) -> _T:\n assert self.data_string.startswith(T.__name__ + \"{\")\n result = dacite.from_dict(T, json.loads(self.data_string[len(T.__name__) :]))\n assert isinstance(result, T)\n return result", "title": "" }, { "docid": "6fe9b0bc12b9f22522a883810cc4bf67", "score": "0.50338614", "text": "def createRow(columns, row):\n return [voxie.Variant(column.dbusType, column.converter(row.get(column.name, column.default))) for column in columns]", "title": "" }, { 
"docid": "03c2d6211dc8b4a9705dbcf8b5340078", "score": "0.50278395", "text": "def __getitem__(self, idx):\n if idx < 0 or idx >= len(self):\n msg = \"row index [%d] out of range\" % idx\n raise IndexError(msg)\n return _Row(self._tbl.tr_lst[idx], self)", "title": "" }, { "docid": "ee0fa751c22afcd72dfa8f9593c34a8b", "score": "0.5013629", "text": "def __init__(self, row):\n super().__init__(row)\n self.period = dutils.EventPeriod(self.period)", "title": "" }, { "docid": "b1cff65c843e35f6652df6654074da2b", "score": "0.5011547", "text": "def newRow(self):\n excelobj.__init__()", "title": "" }, { "docid": "6cfc4c502111895714fe46e8ced30fb3", "score": "0.5005938", "text": "def rows(self) -> Sequence[TModel]:\n try:\n response = self.client.get(\n spreadsheetId=self.spreadsheet_id, range=self.range\n ).execute()\n except googleapiclient.errors.HttpError as ex:\n raise SheetError(str(ex))\n try:\n rows = response[\"values\"]\n data = self._convert_to_dict(rows)\n # ignoring type (mypy bug?) \"Name 'self.structure' is not defined\"\n response = self.converter.structure(\n data, Sequence[self.structure] # type: ignore\n )\n except (TypeError, AttributeError) as ex:\n raise SheetError(str(ex))\n return response", "title": "" }, { "docid": "9729319d35095bb9a9fbbaaf8db40ed9", "score": "0.50049704", "text": "def data_row(row_id, data):\n row = []\n headers = []\n for k, v in data.items():\n row.append(v[row_id])\n headers.append(k)\n\n return row, headers", "title": "" }, { "docid": "57ba7bbacf0c0274b832d854740cf2b2", "score": "0.50033236", "text": "def initFromDbRow(self, aoRow):\n if aoRow is None:\n raise TMRowNotFound('Build not found.');\n\n self.idBuild = aoRow[0];\n self.tsCreated = aoRow[1];\n self.tsEffective = aoRow[2];\n self.tsExpire = aoRow[3];\n self.uidAuthor = aoRow[4];\n self.idBuildCategory = aoRow[5];\n self.iRevision = aoRow[6];\n self.sVersion = aoRow[7];\n self.sLogUrl = aoRow[8];\n self.sBinaries = aoRow[9];\n self.fBinariesDeleted = aoRow[10];\n return self;", "title": "" }, { "docid": "d614801e230f1eb5067106438ed61316", "score": "0.49888256", "text": "def initFromDbRow(self, aoRow):\n\n if aoRow is None:\n raise TMRowNotFound('SchedGroupMember not found.');\n\n self.idSchedGroup = aoRow[0];\n self.idTestGroup = aoRow[1];\n self.tsEffective = aoRow[2];\n self.tsExpire = aoRow[3];\n self.uidAuthor = aoRow[4];\n self.iSchedPriority = aoRow[5];\n self.bmHourlySchedule = aoRow[6]; ## @todo figure out how bitmaps are returned...\n self.idTestGroupPreReq = aoRow[7];\n return self;", "title": "" }, { "docid": "048a6356b648a8e21e338397aca1989a", "score": "0.49858505", "text": "def from_row(cls, **row):\n return Ammo(**row)", "title": "" }, { "docid": "465e36255a52fc5446fe626fe595ccb6", "score": "0.49787477", "text": "def __getitem__(self, table: str) -> ir.Table:\n return self.table(table)", "title": "" }, { "docid": "ecf263544ea7b53ea4f1b019da4319cb", "score": "0.4967098", "text": "def parse_row(self, row):\n entity = Entity(row['FullName'], row['Email'])\n action = Action(row['Action'], datetime.strptime(row['ActionDate'], dtfmt).date())\n detail = Detail(row['Detail'])\n return Triple(entity, action, detail)", "title": "" }, { "docid": "ace6e889d9afa1a46d9c744cb0139d47", "score": "0.49664667", "text": "def _get_table(self, obj):\r\n if isinstance(obj, Marble):\r\n return obj\r\n else:\r\n return obj.table", "title": "" }, { "docid": "9fda10a72d985da552e042df972cb114", "score": "0.4954344", "text": "def initFromDbRow(self, aoRow):\n\n if aoRow is None:\n raise TMRowNotFound('SchedGroup 
not found.');\n\n self.idSchedGroup = aoRow[0];\n self.tsEffective = aoRow[1];\n self.tsExpire = aoRow[2];\n self.uidAuthor = aoRow[3];\n self.sName = aoRow[4];\n self.sDescription = aoRow[5];\n self.fEnabled = aoRow[6];\n self.enmScheduler = aoRow[7];\n self.idBuildSrc = aoRow[8];\n self.idBuildSrcTestSuite = aoRow[9];\n self.sComment = aoRow[10];\n return self;", "title": "" }, { "docid": "7650218d808f140ad2e6e8de165bb82b", "score": "0.4949686", "text": "def get(cls, consistent=False, **kwargs):\n item = cls.Table.get(consistent=consistent, **kwargs)\n return cls.new_from_raw(item)", "title": "" }, { "docid": "b7607e2357f9d898cad6b326d41eecc4", "score": "0.49320677", "text": "def read_row(self, keyspace, table, pk_name, pk_value):\n\n path = self.__row_url_path(keyspace, table, pk_name, pk_value)\n response = self.rest.request(path=path) \n return response.json()", "title": "" }, { "docid": "f97c40b438bdb06a13992bea046bb9cd", "score": "0.49260312", "text": "def change_rows_to_dic(data, row):\n return data.iloc[row].to_dict()", "title": "" }, { "docid": "c6fc882de5202cbd988cce7aecf03156", "score": "0.49175605", "text": "def get_row(client, instance, file_=None):\n data = {\"instance\": instance}\n if file_ is not None:\n data[\"file\"] = file_\n else:\n active_file = client.file_get_active()\n if active_file:\n data[\"file\"] = active_file[\"file\"]\n return client._creoson_post(\"familytable\", \"get_row\", data, \"columns\")", "title": "" }, { "docid": "fcc658c52737121d1a6dd61f2c046757", "score": "0.49153292", "text": "def report_row_factory(cursor, row: tuple) -> ReportRecord:\n return ReportRecord(*row)", "title": "" }, { "docid": "da54a234d59b7e1aac068ecb8cc346f2", "score": "0.49144846", "text": "def from_row(cls, row, datemode):\n return cls(\n ActivityTitle = row[0].value,\n ActivityDesc = row[1].value,\n ActivityID = int(row[2].value),\n DistrictID = int(row[3].value),\n ProviderID = int(row[4].value),\n ActivityFormatID = int(row[5].value),\n PaymentFormatID = int(row[6].value),\n CCatalogID = int(row[7].value),\n EventID = int(row[8].value),\n Type= row[9].value,\n InstructorUserID = int(row[10].value),\n InstructorName = row[11].value,\n PGDate = to_datetime(row[12], datemode),\n StartDate = to_datetime(row[13], datemode),\n DateExpired = to_datetime(row[14], datemode),\n MeetingDates= row[15].value,\n URL= row[16].value,\n EndDate = to_datetime(row[17], datemode),\n MaxUsers = int(row[18].value),\n Archived = bool(row[19].value),\n ApprovalRequired = bool(row[20].value),\n UserCanEdit = bool(row[21].value),\n ActivityHours = float(row[22].value),\n QuickApprove = bool(row[23].value),\n Credits = float(row[24].value),\n FormID = int(row[25].value),\n Comments = row[26].value,\n ConferenceLocation = row[27].value,\n Level = row[28].value,\n SubscriberFee = Decimal(row[29].value),\n NonSubscriberFee = Decimal(row[30].value),\n DateAdded = to_datetime(row[31], datemode),\n DateUpdated = to_datetime(row[32], datemode),\n ShowSubscriberFee = bool(row[33].value),\n StartShowing = to_datetime(row[34], datemode),\n StopShowing = to_datetime(row[35], datemode),\n )", "title": "" }, { "docid": "f23487020fcba6de7fd6e3dd418cb597", "score": "0.49093932", "text": "def from_db(cls, db_row):\n geocode_geo_json = spatial_utils.to_geo_json(db_row[\"geocode_geo\"])\n parcel_geo_json = spatial_utils.to_geo_json(db_row[\"parcel_geo\"])\n building_geo_json = spatial_utils.to_geo_json(db_row[\"building_geo\"])\n image_bounds_geo_json = spatial_utils.from_bbox_array(\n db_row[\"image_bounds\"])\n 
return cls(id=db_row[\"id\"],\n geocode_geo=geocode_geo_json,\n parcel_geo=parcel_geo_json,\n building_geo=building_geo_json,\n image_bounds=image_bounds_geo_json,\n image_url=db_row[\"image_url\"])", "title": "" }, { "docid": "f45b2040e1151b586779d184dffe91dc", "score": "0.4907717", "text": "def get_one(self, query):\n res = self.engine.execute(query)\n for row in res:\n return dict(row)\n\n return None", "title": "" }, { "docid": "f35e93bc5ed5e7ba24b4277b999f070c", "score": "0.49069655", "text": "def unpack_user_items(row):\n # items is a series of lists - chain will make one long iterable\n # convert this to a data frame\n idf = pd.DataFrame.from_records(row.items)\n # now fix up data types\n idf['item_id'] = idf['item_id'].astype('i8')\n idf['user_id'] = row.user_id\n return idf[['user_id', 'item_id', 'item_name']]", "title": "" }, { "docid": "3740b7e0d954ca7780d3a687c5add601", "score": "0.49045742", "text": "def row(self, row):\n return self._N.row(row)", "title": "" }, { "docid": "9618375dfdb1b00f0cf52884e1ea4b1c", "score": "0.49022734", "text": "def get_by_id(cls, row_id, table=None):\n from src.xco2 import Xco2\n if not table:\n table = Xco2\n name = table.__tablename__\n return cls._connected(\n 'SELECT * FROM ' + name + ' WHERE id = %s;',\n **{\n 'values': (row_id, ),\n 'multi': None\n }\n )", "title": "" }, { "docid": "78e2c8f4141e75f80808a209f9d59e0c", "score": "0.48977536", "text": "def data_cls(self):\n return self[0]", "title": "" }, { "docid": "2bd6d341d2bb99c196ee6c1c7ba72ab5", "score": "0.48901287", "text": "def RowToExample(self, instance: Dict[str, Any]) -> tf.train.Example:\n return utils.row_to_example(self._type_map, instance)", "title": "" }, { "docid": "30d3227802f9a952be65980bdb325b7d", "score": "0.4890081", "text": "def parse_row(row, columns_map):\n row_dict = row.to_dict()\n cells = row_dict['cells']\n result = {\n 'id': row_dict['id'],\n 'row_number': row_dict['rowNumber'],\n 'parent_id': row_dict.get('parentId'),\n 'name': cells[columns_map['name']].get('value'),\n 'date_start': cells[columns_map['start']].get('value'),\n 'date_finish': cells[columns_map['finish']].get('value'),\n }\n return result", "title": "" }, { "docid": "1455e806e29ed686ff42d410b5aaa5d7", "score": "0.48862633", "text": "def mock_query_tbl_row_0():\n\n rows = {\"rows\": []}\n return rows", "title": "" }, { "docid": "f26c1e1febc73af04f88782ba2ab543b", "score": "0.48807195", "text": "def rows(self):\n bf = self.copy()\n result = bf.query.executeQuery(format=\"soa\")\n return result[\"_rowName\"]", "title": "" }, { "docid": "56470709cb14226b550d9c63959dd617", "score": "0.4877428", "text": "def iterdata(self):\n return iter(self._data_table)", "title": "" } ]
13ba29af0c10bb044a0db3512e28328d
Convert string to int
[ { "docid": "28dd1eb62954e144c7b6c8cb573b83a1", "score": "0.76648444", "text": "def string_to_int(string):\n\n string = string.replace('.', '')\n string = string.split('(')[0].strip()\n return int(string)", "title": "" } ]
[ { "docid": "4b040796ab3ac4bb90c477eb99082b3b", "score": "0.835715", "text": "def _to_int(string):\r\n if isinstance(string, str):\r\n return ord(string[0])\r\n else:\r\n return string", "title": "" }, { "docid": "053ae9ba7134f6482eed54ec1895a44c", "score": "0.8217348", "text": "def string_to_number(s):\n return int(s)", "title": "" }, { "docid": "7dd468c118a1d6f378da6d93aacdd751", "score": "0.81715345", "text": "def str_to_int(string):\n try:\n int_ = int(string)\n except ValueError:\n return ValueError\n return int_", "title": "" }, { "docid": "e2571b0435ef907fcdcd6db9dd690a92", "score": "0.81628877", "text": "def to_int(curr_str):\n try:\n return int(curr_str)\n except ValueError:\n return -1", "title": "" }, { "docid": "f48e99d881c753942ae0aa5765c8e84e", "score": "0.8161581", "text": "def _toint(string):\n if string.isdigit():\n return int(string)\n else:\n return 0", "title": "" }, { "docid": "6aa3da314f31921af58fbaf70b947ad9", "score": "0.8044264", "text": "def _str_to_int(self, string):\n\t\tstring = string.lower()\n\t\tif string.endswith(\"l\"):\n\t\t\tstring = string[:-1]\n\t\tif string.lower().startswith(\"0x\"):\n\t\t\t# should always match\n\t\t\tmatch = re.match(r'0[xX]([a-fA-F0-9]+)', string)\n\t\t\treturn int(match.group(1), 0x10)\n\t\telse:\n\t\t\treturn int(string)", "title": "" }, { "docid": "31d1da23a1d53e7c3cc74d00bf52a6a0", "score": "0.80017704", "text": "def text_to_int(self, text):\n return int(text)", "title": "" }, { "docid": "707b73dc66ea35b8d5aee460b6da905c", "score": "0.7917471", "text": "def myAtoi(self, string: str) -> int:\n return atoi(string)", "title": "" }, { "docid": "2ac362f270b9212c6e19247612f13309", "score": "0.7876151", "text": "def to_int(s, fail=None):\n try:\n return int(s)\n except:\n return fail", "title": "" }, { "docid": "31bce49b79f1b9caf0aae8fa630c4358", "score": "0.77755475", "text": "def cast_int(s):\n try:\n return int(s)\n except ValueError:\n return s", "title": "" }, { "docid": "f50be2c121c679be7a70c70e79ccd9af", "score": "0.7656027", "text": "def cast_to_int(string):\n try:\n return int(string)\n except (ValueError, TypeError) as e:\n return None", "title": "" }, { "docid": "15905aa63e0522bd5b6cfe0267cccce8", "score": "0.76458347", "text": "def save_cast_int(int_str: str) -> int:\n try:\n return int(int_str)\n except ValueError:\n return 0", "title": "" }, { "docid": "b7a2382298e13983c191e19059db2ce7", "score": "0.7631166", "text": "def string_to_int_convert_test(string):\n try:\n return int(string)\n except ValueError:\n return None", "title": "" }, { "docid": "b7a2382298e13983c191e19059db2ce7", "score": "0.7631166", "text": "def string_to_int_convert_test(string):\n try:\n return int(string)\n except ValueError:\n return None", "title": "" }, { "docid": "ab3df445a7bbb7cf1689075c26500b75", "score": "0.76056796", "text": "def _hash_to_int(s: str) -> int:\n return int(s, _hash_size)", "title": "" }, { "docid": "18c807589e75b6291fecffdb78920bfb", "score": "0.75836504", "text": "def convert_int(string):\n\n try:\n integer = int(string)\n return integer\n except:\n print string, \" is not a valid int\"\n exit()", "title": "" }, { "docid": "e8cf1890b201917becfe625d61e6185d", "score": "0.7549631", "text": "def myInt(_str):\n try:\n val= int(_str)\n except ValueError:\n val=0\n return val", "title": "" }, { "docid": "02367442812a130216c6a63bc18a7c43", "score": "0.754107", "text": "def myAtoi(self, str):\n res = re.match(r\"^([+\\-]?\\d+).*$\", str.strip())\n if res is None:\n return 0\n return min(max(int(res.group(1)), -2147483648), 
2147483647)", "title": "" }, { "docid": "54dcbe644aec7d6aa1a0dabdd3f6f63f", "score": "0.75303733", "text": "def to_int(inputstr):\n\tif str(inputstr).lower().startswith(\"0x\"):\n\t\treturn hexStrToInt(inputstr)\n\telse:\n\t\treturn int(inputstr)", "title": "" }, { "docid": "10976bc043e9800993cabd84660ef480", "score": "0.75023335", "text": "def _str_to_int(self, s):\n i = int(s, 16)\n if i >= 2**7:\n i -= 2**8\n return i", "title": "" }, { "docid": "10976bc043e9800993cabd84660ef480", "score": "0.75023335", "text": "def _str_to_int(self, s):\n i = int(s, 16)\n if i >= 2**7:\n i -= 2**8\n return i", "title": "" }, { "docid": "c054c446a8ee2196caa2e1e928b43525", "score": "0.7475564", "text": "def toInt(self, str_int):\n\n if str_int is None:\n return None\n else:\n return int(str_int)", "title": "" }, { "docid": "fc39f4c18423b44ac92b01358085bbd4", "score": "0.7458982", "text": "def string_to_int(s):\n output_int = 0\n\n for index, char in enumerate(reversed(s)):\n if char == \"-\":\n output_int *= -1\n else:\n output_int += char_int_mapping[char] * 10**index\n\n return output_int", "title": "" }, { "docid": "759332547175fa04817543478f52d206", "score": "0.74278396", "text": "def string_to_int(s):\n result = 0\n for c in s:\n if not isinstance(c, int):\n c = ord(c)\n result = 256 * result + c\n return result", "title": "" }, { "docid": "fd2db510230cf654b34c0db89eddd245", "score": "0.7413437", "text": "def toInteger(s , iDefault = None):\n try:\n iResult = int(s)\n return iResult \n except ValueError:\n return iDefault", "title": "" }, { "docid": "0d73b6dd6c9020c9471ef0b8bdb8557c", "score": "0.7408998", "text": "def safeint(s):\n try:\n return int(force_unicode(s))\n except (ValueError, TypeError):\n return 0", "title": "" }, { "docid": "6091a7d5a5d74166ba9520355967e62e", "score": "0.74020165", "text": "def convert(string):\n return int(\"\".join(re.findall(\"\\d*\", string)))", "title": "" }, { "docid": "6c058146c98db3f87eed4d1dd3ad1b51", "score": "0.737633", "text": "def return_int(s: str):\n return int(''.join([l for l in s if l.isdigit()]))", "title": "" }, { "docid": "303e326c2e10de103e96b7efcf51205c", "score": "0.73619133", "text": "def toInteger(s):\n res = 0\n if isinstance(s, str):\n list = s.split('.')\n for i in list:\n res <<= 8\n res += int(i)\n return res", "title": "" }, { "docid": "4268895ab04dbcb40ad40497b0d87d13", "score": "0.73499113", "text": "def _to_pos_int(string):\n\n try:\n num = int(string)\n if num < 1:\n raise ValueError()\n return num\n except ValueError:\n raise ValueError(\"Invalid numerical parm \" + string)", "title": "" }, { "docid": "c3dc216d5ab76a1c98d0a158a68038bb", "score": "0.7343904", "text": "def to_int(value):\n return _es.to_int(value)", "title": "" }, { "docid": "3d510087bf646dbbbb859e972cec392f", "score": "0.7298224", "text": "def str_to_int(h):\n return _cy.str_to_int(h)", "title": "" }, { "docid": "2d8e3c3ffbd9375553a7c016357a39b8", "score": "0.72161067", "text": "def atoi(string: str) -> int:\n num_str = re.match('(-|\\+)?\\d+', string.strip())\n if num_str is None:\n return 0\n\n num = int(num_str.group())\n if num < -2147483648:\n return -2147483648 \n if num > 2147483647:\n return 2147483647 \n return num", "title": "" }, { "docid": "90fce148220ce57bf2688d59cdcd1a2d", "score": "0.72102755", "text": "def a_to_i(text):\n return int(text) if text.isdigit() else text", "title": "" }, { "docid": "90110bf8e974d7cd8686e379e4d62017", "score": "0.71757513", "text": "def str_to_int(self, bytes_str):\n result = 0\n for ch in bytes_str:\n result = result * 
256 + ord(ch)\n return result", "title": "" }, { "docid": "90110bf8e974d7cd8686e379e4d62017", "score": "0.71757513", "text": "def str_to_int(self, bytes_str):\n result = 0\n for ch in bytes_str:\n result = result * 256 + ord(ch)\n return result", "title": "" }, { "docid": "511d55507464cc3ec05893cd94726eb5", "score": "0.71701384", "text": "def myAtoi(self, s: str) -> int:\n result = 0\n is_negative = False\n i = 0\n while i < len(s) and s[i] == \" \":\n i += 1\n if i == len(s):\n return 0\n if s[i] == '-':\n is_negative = True\n i += 1\n elif s[i] == '+':\n i += 1\n elif not s[i].isdigit():\n return 0\n while i < len(s) and s[i].isdigit():\n result *= 10\n result += (ord(s[i]) - ord('0'))\n i += 1\n if is_negative:\n result *= -1\n if result < -(2**31):\n result = -(2**31)\n elif result > (2**31 - 1):\n result = 2**31 - 1\n return result", "title": "" }, { "docid": "74e58bff6c3da8909be90dfac79b8a5b", "score": "0.7162318", "text": "def convert_to_int(data):\n try:\n return int(data)\n except (ValueError, TypeError) as e:\n msg = _(\"'%s' is not an integer\") % data\n raise n_exc.InvalidInput(error_message=msg) from e", "title": "" }, { "docid": "9a78f65e837dce6f21cf7c24702aa0a6", "score": "0.71528023", "text": "def __intConvert ( self, raw, text ):\n try:\n result = int ( raw )\n return result\n except ValueError:\n raise IOError, ( \"Invalid %s field value, expecting an \"\n \"integer: [%s]\" % ( text, raw ) )", "title": "" }, { "docid": "f839e1913d53052c56104bfad846c57c", "score": "0.709246", "text": "def _bin_to_int(string):\r\n if isinstance(string, str):\r\n return struct.unpack(\"b\", string)[0]\r\n else:\r\n return struct.unpack(\"b\", bytes([string]))[0]", "title": "" }, { "docid": "80d4b40d6410d94cc26988e72e0615a8", "score": "0.706443", "text": "def convert_to_int(s):\n try:\n print('Conversion succeeded! 
for item {}'.format(s))\n return int(s)\n # exception accepts tuple of errors type\n except (ValueError, TypeError) as exp:\n print(\"Conversion failed for item {}: {}\"\n .format(s, str(exp)), file=sys.stderr) # print message to the standard error stream\n raise", "title": "" }, { "docid": "1a99efe0b1c0948d41ad58a0b4f65dcf", "score": "0.7062852", "text": "def to_number(string):\n try:\n n = int(string)\n except ValueError:\n n = float(string)\n return n", "title": "" }, { "docid": "a56561485bc54a24c0f929b40594b87e", "score": "0.7058016", "text": "def string_2_int(string):\n # Try to encode utf-8 else pass\n try:\n string = string.encode('utf-8')\n except AttributeError:\n pass\n hashed_name = hashlib.sha256(string).hexdigest()\n return int(hashed_name, 36) % 10240", "title": "" }, { "docid": "7b5cfa89791d6eb1760bdf3c05e5a7a0", "score": "0.7047307", "text": "def to_int(value, base=10):\n if not isinstance(value, str):\n return long(value)\n return long(value.replace(\" \", \"\").replace(\"\\n\", \"\").strip(), base)", "title": "" }, { "docid": "eaa6f6036ced970c35ccead70f25747b", "score": "0.7040806", "text": "def innerInt(self, s):\n try:\n int(s)\n return int(s)\n except ValueError:\n return 0", "title": "" }, { "docid": "5c2695dcc7b2c65099eedd0d29cc2798", "score": "0.7007743", "text": "def str_to_int(date):\n return int(date.replace('-', ''))", "title": "" }, { "docid": "5c2695dcc7b2c65099eedd0d29cc2798", "score": "0.7007743", "text": "def str_to_int(date):\n return int(date.replace('-', ''))", "title": "" }, { "docid": "8157f90ed97fc3a3744efd767dd73c04", "score": "0.7005789", "text": "def scrub_text2int(s):\n return only_numerics_int(s)", "title": "" }, { "docid": "dbe838c7042e46e398bba9308fc58362", "score": "0.6987354", "text": "def try_parse_int(s, base=10, val=None):\n try:\n return int(s, base)\n except ValueError:\n return val", "title": "" }, { "docid": "1985808699ef876df5ba646817e4bc24", "score": "0.69789433", "text": "def convert(self, non_decimal_string):\n\n return int(non_decimal_string, 0)", "title": "" }, { "docid": "aa8188c2c29a52d22a59496fea80acb7", "score": "0.68997014", "text": "def atoi(text):\n return int(text) if text.isdigit() else text", "title": "" }, { "docid": "f1681d65e2172108d1b912e6990ab069", "score": "0.6895015", "text": "def conversion_string_to_integer(string_number):\n result = 0\n for i, digit in enumerate(string_number):\n if ord(digit) < ord('0') or ord(digit) > ord('9'):\n raise ValueError\n result += (\n (ord(digit) - ord('0')) * (10 ** (len(string_number) - 1 - i))\n )\n return result", "title": "" }, { "docid": "25cc335b0f7f01c19e9c15f8b521e2f5", "score": "0.68840677", "text": "def str2int(s, trim='\\s*', do_eval=False):\n\n if not isinstance(s, (str,unicode)):\n raise TypeError(\"`s` must be a string (str or unicode). \"\n \"The actual type is %s.\" % type(s))\n\n # Strip specified regular expression from beginning and end of `s`:\n if trim:\n s= re.sub('^(' + trim + ')', '', s)\n s= re.sub('(' + trim + ')$', '', s)\n\n\n # 1. Compare `s` against anchored regular expression to determine whether\n # there is a match and extract useful components of the string.\n\n m= int_pat_anchored.search(s)\n\n\n # 2. 
Attempt conversion to float.\n\n if m is not None and m.group('man'):\n\n # Convert mantissa to an int:\n mantissa= int(m.group('man'))\n\n if m.group('exp'):\n # Strip off the exponent marker character ('e' or 'E') and convert the\n # rest of the exponent string to an int:\n exponent= int(m.group('exp')[1:])\n\n # Combine the mantissa and exponent and return the result:\n return mantissa * 10**exponent\n\n # There is no exponent, so we simply return the mantissa:\n return mantissa\n\n\n # 3. The string in `s` does not represent an integer, but it might contain an\n # expression whose evaluation will yield an integer.\n\n if do_eval:\n try:\n result= eval(s, namespace)\n except:\n result= None\n\n if isinstance(result, (int, numpy.int32)):\n return result\n\n raise ValueError(\"'%s' does not represent an integer.\" % s)", "title": "" }, { "docid": "5c3e42930df6191d24ac5f16e639e3f1", "score": "0.68807054", "text": "def bytes_to_int(source: bytes) -> int:\n # logging.info(\"source = %s\", source)\n # logging.info(\"result = %s\", json.loads(source.decode(\"utf8\")))\n return int(source.decode(\"utf8\"))", "title": "" }, { "docid": "a36e4dae399ceb461caaf1d5f355edb2", "score": "0.68799084", "text": "def test_cast_to_int(self) -> None:\n number = pysigdig.Number('3600')\n self.assertEqual(int(number), 3600)", "title": "" }, { "docid": "127877e9490b6dfad60339637a23745e", "score": "0.6876412", "text": "def re_to_int(regex):\n try:\n return int(regex.group(1))\n except AttributeError:\n return 0", "title": "" }, { "docid": "b6e5592fdc95389ba929fb8e57caf01d", "score": "0.687114", "text": "def int_convertable(string):\r\n try: \r\n int(string)\r\n return True\r\n except (ValueError, TypeError):\r\n return False", "title": "" }, { "docid": "4a620dc45f0ab7b878b596dfb8885406", "score": "0.685042", "text": "def readInt(self):\n strval = self.readStr()\n return 0 if strval is None else Integer.parseInt(strval)", "title": "" }, { "docid": "9d9dccc22eab6ad26a91882c8b0d310b", "score": "0.68467", "text": "def parse_number(text):\n return int(text)", "title": "" }, { "docid": "95f243ba2c324d7efda74fd7984be091", "score": "0.68434703", "text": "def to_int(value=None, hexstr=None, text=None):\n assert_one_val(value, hexstr=hexstr, text=text)\n\n if hexstr is not None:\n return int(hexstr, 16)\n elif text is not None:\n return int(text)\n elif isinstance(value, bytes):\n return big_endian_to_int(value)\n elif isinstance(value, str):\n raise TypeError(\"Pass in strings with keyword hexstr or text\")\n else:\n return int(value)", "title": "" }, { "docid": "60be069c4eeafe9ed0ddb6a04f90acf0", "score": "0.68363166", "text": "def parse_int(string: Any, base: int = 10) -> Optional[int]:\n\n if isinstance(string, int):\n return string\n\n try:\n return int(string, base=base)\n except Exception:\n pass\n\n try:\n return int(string)\n except Exception:\n pass\n\n return None", "title": "" }, { "docid": "ff7b57c440e6c263db2cb3d1e9d3f54f", "score": "0.6817568", "text": "def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:\n try:\n res: int = int(int_to_parse)\n except (TypeError, ValueError):\n raise DemistoException(err_msg)\n return res", "title": "" }, { "docid": "cc729b32511b76c1e775f32fbf67df22", "score": "0.6807431", "text": "def _get_int(p_str):\n l_int = 0\n try:\n l_int = int(p_str, 16)\n except:\n l_int = 0\n return l_int", "title": "" }, { "docid": "4709404a0d8018121c7db4d0794a7710", "score": "0.6803439", "text": "def parse_american_int(c):\n if not isinstance(c, str):\n raise TypeError\n #dirty hack; 
also what SO decided on: http://stackoverflow.com/questions/2953746/python-parse-comma-separated-number-into-int\n return int(c.replace(\",\",\"\"))", "title": "" }, { "docid": "7fe982611ba6253820755700a5549a9d", "score": "0.68009466", "text": "def to_int(val):\n try:\n return int(str(val), 0)\n except:\n return None", "title": "" }, { "docid": "8dc05f2f6974c9338133791b4d0076c4", "score": "0.6796589", "text": "def int_from_str(value: str) -> Optional[int]:\n try:\n return int(value)\n except ValueError:\n return None", "title": "" }, { "docid": "9dfaf905004900864bc6592b92255746", "score": "0.67887557", "text": "def convertStr(s):\n try:\n ret = int(s)\n except ValueError:\n #Try float.\n ret = float(s)\n return ret", "title": "" }, { "docid": "6a6a573c1fd0eded763f255f5304f9fa", "score": "0.67868733", "text": "def idx_to_int(string_idx):\n if \"PS122\" in string_idx:\n str_parts = re.split(\"_|-\", string_idx)\n # Mosaic + Leg + week + id number\n return int(\"1\" + str_parts[1] + str_parts[2].zfill(2) + str_parts[3].zfill(3))\n\n elif \"S31H\" in string_idx:\n return int(\"2\" + string_idx[-4:].zfill(6))\n elif \"S43M\" in string_idx:\n return int(\"3\" + string_idx[-4:].zfill(6))\n elif \"S49M\" in string_idx:\n return int(\"4\" + string_idx[-4:].zfill(6))\n else:\n return 0", "title": "" }, { "docid": "f1c55c8bf9a4b5df1cc993a570c2a036", "score": "0.67755693", "text": "def str_to_int(date: str):\n tokens = date.strip().split('/')\n if len(tokens) != 3:\n return None\n return int(f'{tokens[-1]}{tokens[0]}{tokens[1]}')", "title": "" }, { "docid": "61ca234f99f67c4060502a484a74fec7", "score": "0.6763571", "text": "def _convert_num(s):\n try:\n return int(s)\n except ValueError:\n return float(s)", "title": "" }, { "docid": "7253d98131f950306d85cc9388966227", "score": "0.6757862", "text": "def string_to_int( octet_string ):\n long_int = 0\n for c in octet_string:\n long_int = 256 * long_int + c\n return long_int", "title": "" }, { "docid": "bf10b0423f2fb4d7d1ffbe56050b2c68", "score": "0.6725569", "text": "def num(s):\n try:\n return int(s)\n except ValueError:\n return float(s)", "title": "" }, { "docid": "e1b9ac2447210536db1054a61bf1adaa", "score": "0.672422", "text": "def to_int(param, param_name):\n try:\n param = int(param)\n except ValueError:\n raise ParamTypeError(param_name, 'Integer')\n return param", "title": "" }, { "docid": "44289e56cb3d64dd7520080234b5f966", "score": "0.6721272", "text": "def _int(self, stri):\n try:\n int(stri)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "16c9dd2aabac5a50f81b0d57682ea3a3", "score": "0.67182815", "text": "def to_int(value):\r\n try:\r\n result = int(value)\r\n except ValueError as e:\r\n result = None\r\n return result", "title": "" }, { "docid": "bd2195920f2019b147a911e403192854", "score": "0.67175627", "text": "def elisp_string_to_number(x):\n m = re.search(r'^([0-9]+)',x)\n if not m:\n return 0\n else:\n return m.group(1)", "title": "" }, { "docid": "ed29f59cc809ca87bd48b9668173320e", "score": "0.666843", "text": "def _convert_to_integer(xml, nsmap=None):\n as_string = _get_value_as_string(xml, nsmap)\n if as_string is None or as_string.upper() == 'NONE':\n return int()\n else:\n return int(as_string)", "title": "" }, { "docid": "41594e172d379c9235a27715ec518569", "score": "0.6665975", "text": "def issn2int(issn_str):\n\n pat = r'^\\d{4}-\\d{3}[\\dxX]$'\n p = compile(pat)\n if p.match(issn_str):\n res = 0\n check = map(lambda x: int(x), issn_str[:4] + issn_str[5:8])\n check_bit = int(issn_str[-1]) if 
is_int(issn_str[-1]) else issn_str[-1]\n for pp in zip(check, range(8, 1, -1)):\n res += pp[0] * pp[1]\n\n rem = (11 - res) % 11\n rem = 'X' if rem == 10 else rem\n\n if rem == check_bit:\n return int(issn_str[0:4] + issn_str[5:8])\n else:\n logging.error(' issn2int() : in issn {0} '\n 'check bit is corrupt'.format(issn_str))\n logging.error(' equal to {0}, should be {1}'.format(check_bit, rem))\n # raise ValueError(' issn2int(): invalid check digit'.format(check_bit, rem))\n return int(issn_str[0:4] + issn_str[5:8])\n\n else:\n logging.error(' issn2int() : issn {0} : does not match '\n 'the pattern {1}'.format(issn_str, pat))\n\n raise ValueError(' issn2int(): invalid issn string')", "title": "" }, { "docid": "923136ca6b4979981b058e24ef459198", "score": "0.6663333", "text": "def int_from_bytes(bytes):\n # TODO: python < 3.2 compat\n return int.from_bytes(bytes, byteorder='big')", "title": "" }, { "docid": "1a22fd79577cdaed4598d47fd4ed268a", "score": "0.6655058", "text": "def comma_num_str_to_int(n):\n try:\n num = int(n.replace(',', ''))\n except:\n num = 0\n return num", "title": "" }, { "docid": "9edc9e0f449327b9862dc3afa360b1c0", "score": "0.6646671", "text": "def string_or_int(string):\n if is_str_int(string):\n return int(string)\n else:\n return string", "title": "" }, { "docid": "cfae8bea4fe64193e913dda68d0aafd9", "score": "0.6645774", "text": "def v_suffix_to_int(v_string):\n digits = ''.join([x for x in v_string if x.isdigit()])\n return int(digits)", "title": "" }, { "docid": "0df28677688f6b23811d07f083490886", "score": "0.6639167", "text": "def ints(s):\n s = s.strip()\n if not s:\n return \"\"\n return int(s.replace(',', ''))", "title": "" }, { "docid": "5b9d72488261f268ccdb5761113f30db", "score": "0.66380066", "text": "def get_integer(number: str) -> int:\n return int(\"\".join(re.findall(r\"\\d+\", number)))", "title": "" }, { "docid": "0ab7a007eec8793c510c4d8a6a24150c", "score": "0.663136", "text": "def to_int(self, format=None):\n return int(self.to_str(format))", "title": "" }, { "docid": "967f7b44898087290535479a2e00feb4", "score": "0.6622285", "text": "def hexStrToInt(inputstr):\n\tvaltoreturn = 0\n\ttry:\n\t\tvaltoreturn = int(inputstr, 16)\n\texcept:\n\t\tvaltoreturn = 0\n\treturn valtoreturn", "title": "" }, { "docid": "18a7ecd793556ce4b1b7778d5a761dca", "score": "0.66181713", "text": "def bytes_to_int(value):\n\n\tassert type(value) == bytes\n\treturn int.from_bytes(value, 'big')", "title": "" }, { "docid": "08176954793e19c157a1a1e47ef8bae6", "score": "0.6615962", "text": "def to_number(string):\n try:\n res = int(string)\n return res\n except:\n pass\n try:\n res = float(string)\n return res\n except:\n return string", "title": "" }, { "docid": "4c4f2187ab293f7f0a13e8aa2fb8cb8d", "score": "0.6613608", "text": "def parse_int(number: str) -> int:\n match = NUMBER_FORMAT.match(number)\n if match is None:\n raise ValueError('invalid number')\n base = {\n '0b': 2,\n '0x': 16,\n None: 10\n }[match.group('prefix')]\n return int(match.group('number'), base=base)", "title": "" }, { "docid": "db8601a6b3bce3933a61753fd8072ff0", "score": "0.660818", "text": "def convert_base_eight_str_to_base_ten_int(base_eight_str):\n # Example: '111' -> 73.\n return int(base_eight_str, 8)", "title": "" }, { "docid": "c51a7f2d8527176073da23d0c6395223", "score": "0.66030544", "text": "def base_n_to_int(string, charset):\n if len(charset) > len(set(charset)):\n raise ValueError(\"character set contains duplicate characters\")\n base = len(charset)\n integer = 0\n for index, char in 
enumerate(reversed(string)):\n integer += charset.index(char) * (base ** index)\n return integer", "title": "" }, { "docid": "0f7112ba9435b9e35cc00439f9aeaac1", "score": "0.6579142", "text": "def integer(value):\n if value.startswith('#'):\n value = value.replace('#', '0b')\n\n if value.startswith('0b'):\n value = value.replace('x', '0')\n return int(value, 0)", "title": "" }, { "docid": "f1f5d7d3a8575a232052ab1d966335d5", "score": "0.655964", "text": "def getint(string):\n n = 1\n i = 0\n while i<=len(string):\n try: n = int(string[0:i+1])\n except ValueError: return n\n except: print \"Unexpected error!\"\n i = i + 1\n return n", "title": "" }, { "docid": "f97745b21ebd5d069f8804955d5efb33", "score": "0.65388584", "text": "def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text", "title": "" }, { "docid": "073d6f52c3a54d7c852cace37107d421", "score": "0.65340453", "text": "def numify(x):\n return int(str(x).strip())", "title": "" }, { "docid": "d08397ef702663ebfb301435054065c4", "score": "0.6531244", "text": "def parse_int(number_as_string):\r\n try:\r\n if len(number_as_string) > 1:\r\n int(str(number_as_string)[:-1])\r\n else:\r\n if len(number_as_string) == 0:\r\n raise ValueError\r\n if len(number_as_string) == 1 and number_as_string.isdigit():\r\n return int(number_as_string)\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n raise ValueError\r\n last_char = str(number_as_string)[-1]\r\n if last_char.isdigit():\r\n return int(number_as_string)\r\n elif last_char == 'k':\r\n return int(number_as_string[:-1]) * 1000\r\n elif last_char == 'm':\r\n return int(number_as_string[:-1]) * 1000000\r\n elif last_char == 'b':\r\n return int(number_as_string[:-1]) * 1000000000\r\n else:\r\n raise ValueError", "title": "" }, { "docid": "b74550fe08dfb0e34e40bf68e36f028a", "score": "0.6524076", "text": "def robust_int(s):\n x = robust_float(s)\n return x if x is None else int(x)", "title": "" }, { "docid": "14dca152bd204456aed831f68ed0cd9c", "score": "0.6516271", "text": "def _try_to_int(d):\n if isinstance(d, (str, np.int32, np.int64)):\n return int(d)\n else:\n return d", "title": "" }, { "docid": "6310077c5d22ec292781e8e3849f880b", "score": "0.65159124", "text": "def to_integer(tag):\n\n return int(tag.text.replace('\\xa0', '').replace(' ', ''))", "title": "" }, { "docid": "3c0b58f0d47de93d20cfb847a793f0d5", "score": "0.6513054", "text": "def int_or_str(self, text):\n try:\n return int(text)\n except ValueError:\n return text", "title": "" } ]
0efdbe0ce685754dada5ce85c74f8460
Measure the agreement and KL divergence between the predictions produced by model trained on exact augmentation objectives vs models trained on approximate objectives.
[ { "docid": "470839c6e196ef515bb7b98ff504f674", "score": "0.67365485", "text": "def agreement_kl_difference(augmentations):\n model_variants = {'kernel': lambda model: [model, exact_to_og_model(model), exact_to_1st_order_model(model),\n exact_to_2nd_order_no_1st_model(model), exact_to_2nd_order_model(model)],\n 'lenet': lambda model: [model, exact_to_og_model(model), exact_to_1st_order_model(model),\n exact_to_2nd_order_no_1st_model(model)] +\n [exact_to_2nd_order_model_layer_avg(model, layer_to_avg) for layer_to_avg in [4, 3, 2, 1, 0]]}\n\n for model_name in ['kernel', 'lenet']:\n for augmentation in augmentations:\n for seed in range(n_trials):\n print(f'Seed: {seed}')\n torch.manual_seed(n_trials + seed)\n loader = loader_from_dataset(augmentation.dataset)\n model = model_factories[model_name]()\n models = model_variants[model_name](model)\n for model in models:\n model.to(device)\n optimizers = [sgd_opt_from_model(model) for model in models]\n for model in models:\n model.train()\n train_agreement, valid_agreement, valid_acc, valid_kl = [], [], [], []\n for epoch in range(sgd_n_epochs):\n print(f'Train Epoch: {epoch}')\n train_agreement_epoch = train_models_compute_agreement(loader, models, optimizers)\n train_agreement.append(np.array(train_agreement_epoch).mean(axis=0))\n # Agreement and KL on validation set\n valid_agreement_epoch, valid_kl_epoch, valid_acc_epoch = agreement_kl_accuracy(valid_loader, models)\n valid_agreement.append(np.array(valid_agreement_epoch).mean(axis=0))\n valid_acc.append(np.array(valid_acc_epoch).mean(axis=0))\n valid_kl.append(np.array(valid_kl_epoch).mean(axis=0))\n train_agreement = np.array(train_agreement).T\n valid_agreement = np.array(valid_agreement).T\n valid_acc = np.array(valid_acc).T\n valid_kl = np.array(valid_kl).T\n np.savez(f'saved/train_valid_agreement_kl_{model_name}_{augmentation.name}_{seed}.npz',\n train_agreement=train_agreement, valid_agreement=valid_agreement, valid_acc=valid_acc, valid_kl=valid_kl)", "title": "" } ]
[ { "docid": "c322dbaec2b331c7a9ddb961765bb690", "score": "0.64305675", "text": "def compute_accuray(predictions, y):\n\treturn np.mean(np.equal(predictions.detach().numpy(), y.numpy()))", "title": "" }, { "docid": "ba7d945e9ae06d3e616a6c0fd6cc9414", "score": "0.63229054", "text": "def prediction_comparisons():\n experiments = [Experiment.ALL, Experiment.CURR]\n # Operate on cached data/models only.\n\n experiment_data = {}\n experiment_models = {}\n\n for experiment in experiments:\n get_data(experiment, cache_check=True)\n get_experiment_split_data.check_in_store(experiment)\n X_train, X_test, y_train, y_test = get_experiment_split_data(experiment)\n get_model(X_train, y_train, cache_check=True)\n\n experiment_data[experiment] = get_endog_exog_mask(experiment)\n experiment_models[experiment] = get_model(X_train, y_train)\n\n # Ensure masks are aligned.\n check_master_masks(*(data[2] for data in experiment_data.values()))\n\n master_mask = next(iter(experiment_data.values()))[2]\n\n # Record predictions and errors.\n experiment_predictions = {}\n experiment_errors = {}\n map_experiment_predictions = {}\n map_experiment_errors = {}\n\n for experiment in experiments:\n X_train, X_test, y_train, y_test = get_experiment_split_data(experiment)\n predicted_test = threading_get_model_predict(\n X_train=X_train,\n y_train=y_train,\n predict_X=X_test,\n )\n\n print(\"Experiment:\", experiment.name)\n print(\"mean observed test:\", np.mean(y_test.values))\n print(\"mean predicted test:\", np.mean(predicted_test))\n print(\"lowest observed test:\", np.min(y_test.values))\n print(\n \"fraction of times this occurs:\",\n np.sum(y_test.values == np.min(y_test.values)) / y_test.values.size,\n )\n print(\"lowest test prediction:\", np.min(predicted_test))\n\n experiment_predictions[experiment] = predicted_test\n experiment_errors[experiment] = y_test.values - predicted_test\n\n map_experiment_predictions[experiment] = get_mm_data(\n experiment_predictions[experiment], master_mask, kind=\"val\"\n )\n map_experiment_errors[experiment] = get_mm_data(\n experiment_errors[experiment], master_mask, kind=\"val\"\n )\n\n error_mag_diff = np.abs(map_experiment_errors[experiments[1]]) - np.abs(\n map_experiment_errors[experiments[0]]\n )\n\n y_test = get_experiment_split_data(experiment)[3]\n\n rel_error_mag_diff = np.mean(error_mag_diff, axis=0) / np.mean(\n get_mm_data(y_test.values, master_mask, kind=\"val\"), axis=0\n )\n all_rel = get_unmasked(rel_error_mag_diff)\n\n print(f\"% >0: {100 * np.sum(all_rel > 0) / all_rel.size:0.1f}\")\n print(f\"% <0: {100 * np.sum(all_rel < 0) / all_rel.size:0.1f}\")\n\n fig, ax, cbar = disc_cube_plot(\n dummy_lat_lon_cube(rel_error_mag_diff),\n bin_edges=(-0.5, 0, 0.5),\n extend=\"both\",\n cmap=\"PiYG\",\n cmap_midpoint=0,\n cmap_symmetric=False,\n cbar_label=f\"<|Err({experiments[1].name})| - |Err({experiments[0].name})|> / <Ob.>\",\n cbar_shrink=0.3,\n cbar_aspect=15,\n cbar_extendfrac=0.1,\n cbar_pad=0.02,\n cbar_format=None,\n **get_aux0_aux1_kwargs(y_test, master_mask),\n loc=(0.79, 0.14),\n height=0.05,\n aspect=1.25,\n spacing=0.06 * 0.2,\n )\n cbar.ax.yaxis.label.set_size(7)\n\n map_figure_saver.save_figure(\n fig, f\"rel_error_mag_diff_{'_'.join(map(attrgetter('name'), experiments))}\"\n )", "title": "" }, { "docid": "1bcb780c1d8ed46c4bd016318e738855", "score": "0.6322648", "text": "def predict(self):\r\n total = 0\r\n for clf in self.__clf_and_test.keys():\r\n total += clf.predict(self.__clf_and_test[clf])\r\n\r\n return total/len(self.__clf_and_test)", "title": "" 
}, { "docid": "f79f6b2612ef3537e8a9c0cac5bf5f15", "score": "0.62786305", "text": "def calculate_metrics(results):\n preds = results[\"predictions\"]\n targets = results[\"targets\"]\n entropy = results[\"entropies\"]\n p = preds[range(preds.shape[0]), targets.astype(np.int)]\n ll = np.log(p + 1e-6)\n nll = -np.mean(ll)\n acc = np.mean(preds.argmax(-1) == targets)\n\n\n # calculate expected calibration error\n # use the maximum probability as probability of positive\n # and calculate prediction against a binary correct/incorrect problem\n maxprob = preds.max(-1)\n correct = preds.argmax(-1) == targets\n prob_true, prob_pred = calibration_curve(correct, maxprob, n_bins=5) # 20 used in SWAG paper\n ece = np.mean(np.abs(prob_true - prob_pred))\n return nll, acc, ece", "title": "" }, { "docid": "8fc9cd2485feddd9814bb94c222c0986", "score": "0.6270073", "text": "def evaluate(self):\n self._gender_accuracy = self._gender_correct_num / self._num_images\n self._age_accuracy = self._age_correct_num / self._num_images\n self._age_adience_accuracy = self._age_adience_correct_num / self._num_images", "title": "" }, { "docid": "05695f93c19b9459c603a54f8ce7747d", "score": "0.6195604", "text": "def model_assessment(model,x_train,x_test,y_train,y_test):\n\n model.fit(x_train,y_train)\n\n yPredict = model.predict(x_test)\n acc = accuracy_score(yPredict,y_test)\n numMistake = (1 - acc) * len(y_test)\n return numMistake", "title": "" }, { "docid": "a1d787fa1b7908bbde88a639b82bdb01", "score": "0.6183368", "text": "def compute(self):\n return float(self.total_correct_predictions / self.total_num_tokens)", "title": "" }, { "docid": "c784f7c8682b3d92f3b2487ce0bb5eb0", "score": "0.6096812", "text": "def getMetrics(predictions, ground):\r\n # [0.1, 0.3 , 0.2, 0.1] -> [0, 1, 0, 0]\r\n discretePredictions = to_categorical(predictions.argmax(axis=1))\r\n\r\n truePositives = np.sum(discretePredictions*ground, axis=0)\r\n falsePositives = np.sum(np.clip(discretePredictions - ground, 0, 1), axis=0)\r\n falseNegatives = np.sum(np.clip(ground-discretePredictions, 0, 1), axis=0)\r\n\r\n print(\"True Positives per class : \", truePositives)\r\n print(\"False Positives per class : \", falsePositives)\r\n print(\"False Negatives per class : \", falseNegatives)\r\n\r\n # ------------- Macro level calculation ---------------\r\n macroPrecision = 0\r\n macroRecall = 0\r\n # We ignore the \"Others\" class during the calculation of Precision, Recall and F1\r\n for c in range(1, NUM_CLASSES):\r\n precision = truePositives[c] / (truePositives[c] + falsePositives[c])\r\n macroPrecision += precision\r\n recall = truePositives[c] / (truePositives[c] + falseNegatives[c])\r\n macroRecall += recall\r\n f1 = ( 2 * recall * precision ) / (precision + recall) if (precision+recall) > 0 else 0\r\n print(\"Class %s : Precision : %.3f, Recall : %.3f, F1 : %.3f\" % (label2emotion[c], precision, recall, f1))\r\n\r\n macroPrecision /= 3\r\n macroRecall /= 3\r\n macroF1 = (2 * macroRecall * macroPrecision ) / (macroPrecision + macroRecall) if (macroPrecision+macroRecall) > 0 else 0\r\n print(\"Ignoring the Others class, Macro Precision : %.4f, Macro Recall : %.4f, Macro F1 : %.4f\" % (macroPrecision, macroRecall, macroF1))\r\n\r\n # ------------- Micro level calculation ---------------\r\n truePositives = truePositives[1:].sum()\r\n falsePositives = falsePositives[1:].sum()\r\n falseNegatives = falseNegatives[1:].sum()\r\n\r\n print(\"Ignoring the Others class, Micro TP : %d, FP : %d, FN : %d\" % (truePositives, falsePositives, 
falseNegatives))\r\n\r\n microPrecision = truePositives / (truePositives + falsePositives)\r\n microRecall = truePositives / (truePositives + falseNegatives)\r\n\r\n microF1 = ( 2 * microRecall * microPrecision ) / (microPrecision + microRecall) if (microPrecision+microRecall) > 0 else 0\r\n # -----------------------------------------------------\r\n\r\n predictions = predictions.argmax(axis=1)\r\n ground = ground.argmax(axis=1)\r\n accuracy = np.mean(predictions==ground)\r\n\r\n print(\"Accuracy : %.4f, Micro Precision : %.4f, Micro Recall : %.4f, Micro F1 : %.4f\" % (accuracy, microPrecision, microRecall, microF1))\r\n return accuracy, microPrecision, microRecall, microF1", "title": "" }, { "docid": "163c1a00740877fbb29e5948b9172b53", "score": "0.6069938", "text": "def evaluate_model(y, pred_probs, train_times, test_times, accuracies, classifier):\n\n levels = ['Precision at .05', 'Precision at .10', 'Precision at .2', 'Precision at .25', 'Precision at .5', 'Precision at .75', 'Precision at .85']\n recalls = ['Recall at .05', 'Recall at .10', 'Recall at .20', 'Recall at .25', 'Recall at .5', 'Recall at .75', 'Recall at .85']\n amts= [.05, .1, .2, .25, .5, .75, .85]\n res = {}\n y_range = range(0, len(y))\n res['classifier'] = classifier\n for x in range(0, len(amts)):\n #print('check 1')\n thresh = amts[x]\n #pdb.set_trace()\n preds = [np.asarray([1 if j >= thresh else 0 for j in z]) for z in pred_probs]\n prec = [metrics.precision_score(y[j], preds[j]) for j in y_range]\n rec = [metrics.recall_score(y[j], preds[j]) for j in y_range]\n prec_std = np.std(prec)\n rec_std = np.std(rec)\n #print('check 2')\n f1_score = [2*(prec[j]*rec[j])/(prec[j]+rec[j]) for j in y_range]\n f1_std = np.std(f1_score)\n\n prec_m = np.mean(prec)\n rec_m = np.mean(rec)\n f1_m = np.mean(f1_score)\n res[levels[x]] = str(prec_m) + ' (' + str(prec_std) + ')'\n res[recalls[x]] = str(rec_m) + ' (' + str(rec_std) + ')'\n res['f1 at ' + str(thresh)] = str(f1_m) + ' (' + str(f1_std) + ')'\n\n auc = [metrics.roc_auc_score(y[j], pred_probs[j]) for j in y_range]\n auc_std = np.std(auc)\n auc_m = np.mean(auc)\n train_m = np.mean(train_times)\n train_std = np.std(train_times)\n test_m = np.mean(test_times)\n test_std = np.std(test_times)\n acc_m = np.mean(accuracies)\n acc_std = np.std(accuracies)\n\n res['AUC'] = str(auc_m) + ' (' + str(auc_std) + ')'\n res['train_time (sec)'] = str(train_m) + ' (' + str(train_std) + ')'\n res['test_time (sec)'] = str(test_m) + ' (' + str(test_std) + ')'\n res['Accuracy'] = str(acc_m) + ' (' + str(acc_std) + ')' #mean_std_to_string(acc_m, acc_std)\n\n return res", "title": "" }, { "docid": "bf5617070d9b1440ed3e01fa68c19ed4", "score": "0.6054789", "text": "def evaluate():\n\n with open(TEST_SET, 'r') as fr_test, open(ENTROPY_DIC, 'rb') as fr_ent_dic, \\\n open(MAX_FREQ_DIC, 'rb') as fr_max_freq_dic, \\\n open(GLOVE, 'rb') as fr_vectors, \\\n open(SVM_DIC, 'rb') as fr_svm_dic:\n\n svm_dic = pickle.load(fr_svm_dic)\n max_freq_dic = pickle.load(fr_max_freq_dic)\n ent_dic = pickle.load(fr_ent_dic)\n glove_model = pickle.load(fr_vectors)\n\n count = 0\n correct = 0\n\n #plt.hist(list(ent_dic.values()), bins=20)\n # plt.savefig('histogram')\n #plt.hist([v for v in ent_dic.values() if v != 0], bins=20)\n # plt.savefig('histogram2')\n\n checked_words = set([])\n result_dic = {}\n total_dic = {}\n correct_dic = {}\n nword = top_nword(ent_dic, NWORD)\n for test_line in fr_test:\n test_line = test_line.replace(\"\\n\", \"\")\n test_tokens = re.split('[ ]', test_line)\n\n for index, token in 
enumerate(test_tokens):\n key = re.sub(r'__[\\d][\\d]', '', token)\n\n if not key in nword:\n continue\n\n if TKN_PTN.match(token):\n # if ent_dic.get(key, -1) >= ENTROPY_THRESHOLD:\n if ent_dic.get(key, -1) < ENTROPY_THRESHOLD:\n continue\n total_dic[key] = total_dic.get(key, 0)+1\n count = count + 1\n checked_words.add(key)\n answer_word = predict(re.split(\n '[ ]', re.sub(r'__[\\d][\\d]', '', test_line)), \\\n index, STRATEGY_FOR_HIGH, max_freq_dic, svm_dic, glove_model)\n if token == answer_word:\n correct = correct + 1\n correct_dic[key] = correct_dic.get(key, 0)+1\n\n else:\n if MFS: # if MFS option activated\n count = count + 1\n checked_words.add(key)\n answer_word = max_freq_dic.get(key, \"UNKNOWN\")\n if token == answer_word:\n correct = correct + 1\n\n with open(RESULT_FILE, 'wb') as fw_result:\n acc_result = {}\n for target_word, target_total in total_dic.items():\n target_correct = correct_dic.get(target_word, 0)\n result_dic[target_word] = (target_correct,\n target_total,\n (target_correct/target_total)*100)\n\n result_dic['TOTAL'] = (len(checked_words),\n correct,\n count,\n (correct/count)*100)\n result_dic['META'] = META\n pickle.dump(result_dic, fw_result)\n\n print(\"The number of target words : \", len(checked_words))\n print(\"The number of homograph eo-jeul : \", count)\n print(\"The number of correct answer : \", correct)\n print(\"Accuracy : \", (correct / count) * 100)", "title": "" }, { "docid": "3cf7dda14b6ba99d6538364c1de4a308", "score": "0.6036536", "text": "def evaluate(labels, predictions):\n total = 0\n sensitivity = 0\n specificity = 0\n sens_tot = 0\n spec_tot = 0\n for actual, predicted in zip(labels, predictions):\n\n # Sum actual totals for each sensitivity and specificity\n if actual == 1:\n sens_tot += 1\n elif actual == 0:\n spec_tot += 1\n\n # Calculate correctly predicted labels\n if actual == predicted == float(1):\n sensitivity += 1\n \n elif actual == predicted == float(0):\n specificity += 1\n # else:\n # print(f'No match: {actual}, {predicted}')\n\n sensitivity /= sens_tot\n specificity /= spec_tot\n\n # print(sensitivity,'\\n', specificity)\n\n return (sensitivity, specificity)", "title": "" }, { "docid": "c4eb82ac187b7fc7d583aff3d436a8b2", "score": "0.6034393", "text": "def evaluate(self):\n self.val_marginals = self.vf.val_marginals\n self.train_marginals = self.vf.train_marginals\n\n def calculate_accuracy(marginals, b, ground):\n total = np.shape(np.where(marginals != 0.5))[1]\n labels = np.sign(2*(marginals - 0.5))\n return np.sum(labels == ground)/float(total)\n \n def calculate_coverage(marginals, b, ground):\n total = np.shape(np.where(marginals != 0.5))[1]\n labels = np.sign(2*(marginals - 0.5))\n return total/float(len(labels))\n\n \n self.val_accuracy = calculate_accuracy(self.val_marginals, self.b, self.val_ground)\n self.train_accuracy = calculate_accuracy(self.train_marginals, self.b, self.train_ground)\n self.val_coverage = calculate_coverage(self.val_marginals, self.b, self.val_ground)\n self.train_coverage = calculate_coverage(self.train_marginals, self.b, self.train_ground)\n return self.val_accuracy, self.train_accuracy, self.val_coverage, self.train_coverage", "title": "" }, { "docid": "db259fbcde5058bdd9ad441fbc770ad8", "score": "0.60313016", "text": "def get_individual_accuracies(my_model,size,num,run,verbose,pkudir):\n\n directory_to_use = pkudir\n\n test_datagen = ImageDataGenerator(rescale=1./255)\n test_dir = os.path.join(directory_to_use, 'target')\n\n my_model.summary()\n\n test_generator = 
test_datagen.flow_from_directory(test_dir,target_size=(159, 75),batch_size=1,class_mode='categorical')\n\n filenames = test_generator.filenames\n\n predict_generator = test_datagen.flow_from_directory(\n test_dir,\n target_size=(159, 75),\n batch_size=1,\n class_mode=None, # only data, no labels\n shuffle=False) # keep data in same order as labels\n\n\n\n\n predict_generator.reset()\n predictions = my_model.predict_generator(predict_generator,verbose=1,steps=size )\n predictions = np.argmax(predictions, axis=-1)\n\n filenames = predict_generator.filenames\n filenames2 = []\n\n for f in filenames: filenames2.append(f.split(\"/\")[1])\n\n\n true_positives0deg = 0\n true_positives45deg = 0\n true_positives_45deg = 0\n true_positives_90deg = 0\n true_positives90deg = 0\n\n\n label_map = list(predict_generator.class_indices.keys())\n label_map_int=[]\n #label_map = dict((v,k) for k,v in label_map.items())\n\n for i in range(0,len(label_map)):\n\n action_str = str(label_map[i])\n values = action_str.split('_')\n label_map_int.append(int(values[0]))\n\n\n\n\n y_pred = predictions\t#predicted labels\n y_true = predict_generator.classes\t#true labels\n\n y_ground_truth = []\n y_prediction = []\n\n #total = 7151 #51\n #total = 1502 #11\n for i in range(0,len(y_pred)):\n y_prediction.append( label_map_int[y_pred[i]] )\n\n if verbose: print(len(y_pred),len(y_true))\n for i in range(0,len(y_true)):\n y_ground_truth.append( label_map_int[y_true[i]])\n cc=0\n for i in range(0,len(y_prediction)):\n if y_prediction[i]==y_ground_truth[i]:\n rot = kind_of_rotation(filenames2[i])\n\n if rot==2: cc+=1\n if(rot == 2 or rot == 1 or rot == 3 ): true_positives0deg+=1\n elif(rot == 6 or rot==5 or rot==15): true_positives45deg+=1\n elif(rot == 7 or rot==9 or rot==12): true_positives_45deg+=1\n elif(rot==14 or rot==10 or rot==4): true_positives90deg+=1\n elif(rot == 11 or rot ==8 or rot==13): true_positives_90deg+=1\n if verbose:\n print(cc)\n print(true_positives0deg/total)\n print(true_positives45deg/total)\n print(true_positives_45deg/total)\n print(true_positives90deg/total)\n print(true_positives_90deg/total)\n print(true_positives0deg)\n print(true_positives45deg)\n print(true_positives_45deg)\n print(true_positives90deg)\n print(true_positives_90deg)\n\n string = \"log_multi_method_version51_crossubject_crossview_\" +str(run)\n string11=\"log_multi_method_version11_crossubject_crossview_\" +str(run)\n\n if num == 51 : results = open(string,\"a\")\n else: results = open(string11,\"a\")\n\n results.write(\"---- case start -----\\n\")\n results.write(\"percentages for the model 1st test are %f \\n\" % (true_positives0deg/total))\n results.write(\"percentages for the model 2nd test are %f \\n\" % (true_positives45deg/total))\n results.write(\"percentages for the model 3d test are %f \\n\" % (true_positives_45deg/total))\n results.write(\"percentages for the model 4th test are %f \\n\" % (true_positives90deg/total))\n results.write(\"percentages for the model 5th test are %f \\n\" % (true_positives_90deg/total))\n results.write(\"---- case end -----\\n\")\n\n results.close()", "title": "" }, { "docid": "0e5d3c864040491a68367b9e1dc09382", "score": "0.5991827", "text": "def evaluate_model(test_labels, predictions):\n \n accuracy = accuracy_score(test_labels, predictions)\n hamming = hamming_loss(test_labels, predictions)\n precision = precision_score(test_labels, predictions, average='micro')\n recall = recall_score(test_labels, predictions, average='micro')\n f1 = f1_score(test_labels, predictions, 
average='micro')\n print(\"Micro-average quality numbers\")\n print(\"Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}\".format(precision, recall, f1))\n\n precision = precision_score(test_labels, predictions, average='macro')\n recall = recall_score(test_labels, predictions, average='macro')\n f1 = f1_score(test_labels, predictions, average='macro')\n\n print(\"Macro-average quality numbers\")\n print(\"Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}\".format(precision, recall, f1))\n \n print(\"Global measures\")\n print(\"Accuracy: {:.4f}, Hamming Loss: {:.4f}\".format(accuracy, hamming))", "title": "" }, { "docid": "e44d3c1b11f89a4abf2e29a5321617a4", "score": "0.5988864", "text": "def _calculate_detection(self):\n return self.incubation + self.onsetTodetection", "title": "" }, { "docid": "a6cee2307991467a1e9115f25c85d32d", "score": "0.59848684", "text": "def accuracy(self):\n ...", "title": "" }, { "docid": "c12dad49e09b9a2f81abc746ef50c120", "score": "0.5969456", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "653bca66f08f85e55c405be0d9f0ecc7", "score": "0.5954378", "text": "def experiment(ww_train, rw_train, ww_test, rw_test):\n ww_centroid = compute_centroid(ww_train)\n rw_centroid = compute_centroid(rw_train)\n \n correct_int, incorrect_int = 0, 0\n for datum in ww_test:\n if euclidean_distance(datum, ww_centroid) <= euclidean_distance(datum, rw_centroid):\n correct_int += 1\n else:\n incorrect_int += 1\n for datum in rw_test:\n if euclidean_distance(datum, rw_centroid) <= euclidean_distance(datum, ww_centroid):\n correct_int += 1\n else:\n incorrect_int += 1\n \n total_int = correct_int + incorrect_int\n accuracy_float = correct_int / total_int\n approx_str = \" approximately\"\n rounded_accuracy_float = round(accuracy_float, 6)\n if accuracy_float - rounded_accuracy_float <= 1e-10:\n approx_str = \"\"\n \n print(\"The model made {} predictions.\".format(correct_int + incorrect_int))\n print(\"{} of those predictions were correct, and {} were incorrect.\".format(correct_int, incorrect_int))\n print(\"Thus, the model has an accuracy of{} {:0.4f}%.\".format(approx_str, accuracy_float * 100)) \n return accuracy_float", "title": "" }, { "docid": "b588f7135be8aded42fcf99787517942", "score": "0.5932604", "text": "def analyze_performance(model, model_name, get_examplars=True, include_validation=False):\n print(\"Loading test data\")\n data_dir = os.path.expanduser('~/data/div_detect')\n with h5py.File('{}/test_recs.h5'.format(data_dir), 'r') as test_file:\n test_data = test_file['records'][:]\n test_labels = test_file['labels'][:]\n test_paths = list(test_file['record_paths'][:])\n\n if include_validation:\n print(\"Loading validation data as well\")\n with h5py.File('{}/valid_recs.h5'.format(data_dir), 'r') as valid_file:\n test_data = np.append(test_data, valid_file['records'][:], axis=0)\n test_labels = np.append(test_labels, valid_file['labels'][:], axis=0)\n test_paths += list(valid_file['record_paths'][:])\n\n print(\"Computing predicted labels\")\n raw_predictions = model.predict(test_data, verbose=1)\n class_predictions = (raw_predictions > 0.5).astype(int)\n correct_predictions = class_predictions == test_labels\n test_accuracy = np.sum(correct_predictions) / float(len(correct_predictions))\n n_pos_samples = np.sum(test_labels)\n 
n_neg_samples = np.sum(np.logical_not(test_labels))\n\n print(\"Achieved {} test set accuracy\".format(test_accuracy))\n print(\"Test set contains {} positive examples and {} negative examples\".format(\n n_pos_samples,\n n_neg_samples\n ))\n\n print(\"Computing precision recall curve\")\n precision, recall, thresholds = precision_recall_curve(test_labels.ravel(), raw_predictions.ravel(), pos_label=1)\n precision_recall_dict = {\n 'precision': precision,\n 'recall': recall,\n 'thresholds': thresholds\n }\n\n print(\"Computing ROC curve\")\n false_pos_rate, true_pos_rate, thresholds = roc_curve(test_labels.ravel(), raw_predictions.ravel(), pos_label=1)\n roc_dict = {\n 'false_pos_rate': false_pos_rate,\n 'true_pos_rate': true_pos_rate,\n 'thresholds': thresholds\n }\n\n if get_examplars:\n print(\"Selecting examplars\")\n examplar_dict = select_examplars(test_data, test_labels, test_paths,\n raw_predictions, model_name, save=True)\n return precision_recall_dict, roc_dict, raw_predictions, examplar_dict\n else:\n return precision_recall_dict, roc_dict, raw_predictions", "title": "" }, { "docid": "5705fa8ffac623db251f2337955f82ea", "score": "0.59296477", "text": "def accuracy_on_true_objective(augmentations):\n for model_name in ['kernel', 'lenet']:\n for augmentation in augmentations:\n for seed in range(n_trials):\n print(f'Seed: {seed}')\n torch.manual_seed(seed)\n model = model_factories[model_name]().to(device)\n optimizer = sgd_opt_from_model(model)\n loader = loader_from_dataset(augmentation.dataset)\n train_loss, train_acc, valid_acc = train_all_epochs(loader, valid_loader, model, optimizer, sgd_n_epochs)\n train_loss, train_acc, valid_acc = np.array(train_loss), np.array(train_acc), np.array(valid_acc)\n np.savez(f'saved/train_valid_acc_{model_name}_{augmentation.name}_{seed}.npz',\n train_loss=train_loss, train_acc=train_acc, valid_acc=valid_acc)", "title": "" }, { "docid": "7a34bd4da06797507e025cbd6edd19f6", "score": "0.59292406", "text": "def evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = accuracy_score(test_labels, predictions)\n print('Model Performance')\n print('Average Error: {:0.4f}'.format(np.mean(errors)))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n return accuracy", "title": "" }, { "docid": "e079b26607497e1cd4445725cd1916c3", "score": "0.5925154", "text": "def evaluate(labels, predictions):\n sensitivity = float(0)\n specificity = float(0)\n if labels.count(1)==0:\n sys.exit(\"No positve label in true labels\")\n if labels.count(0)==0:\n sys.exit(\"No negative label in true labels\")\n\n\n common_ones = [1 if i==j and j==1 else 0 for i, j in zip(labels,predictions)]\n common_ones_count=common_ones.count(1)\n labels_ones_count=labels.count(1)\n sensitivity=common_ones_count/labels_ones_count\n common_zeros=[1 if i==j and j==0 else 0 for i,j in zip(labels,predictions)]\n common_zeros_count=common_zeros.count(1)\n labels_zeros_count=labels.count(0)\n specificity=common_zeros_count/labels_zeros_count\n\n return sensitivity, specificity\n\n\n #raise NotImplementedError", "title": "" }, { "docid": "ff787089bfce66638ccf73e59e3031a7", "score": "0.5918919", "text": "def __call__(self, y_true, y_pred) -> float:\n prec = _step_detection_precision(y_true, y_pred)\n rec = _step_detection_recall(y_true, y_pred)\n\n if prec + rec < 1e-6:\n return 0.\n return (2 * prec * rec) / (prec + rec)", "title": "" }, { "docid": 
"5786ca8a892cddbdef36d7f42a3cec2f", "score": "0.5908679", "text": "def mean_kl_divergence(self, model):\n observations_tensor = torch.cat(\n [Variable(Tensor(observation)).unsqueeze(0) for observation in self.observations])\n actprob = model(observations_tensor).detach() + 1e-8\n old_actprob = self.policy_model(observations_tensor)\n return torch.sum(old_actprob * torch.log(old_actprob / actprob), 1).mean()", "title": "" }, { "docid": "2c7bb18ad3f1bdf1ab82e51e152e5fb9", "score": "0.58990055", "text": "def test_model(examples, words, senses, features):\n\tnum_correct = 0.0 \n\tsoft_score = 0.0 \n\ttotal = len(examples) \n\texample_num = 0\n\n\tfor example in examples:\n\t\texample_num += 1\n\t\tprediction, confidence = predict(example, words, senses, features, 0)\n\t\t# prediction, confidence = predict_bassline(example, words, senses, features, 0)\n\n\t\tif confidence == -1: \n\t\t\t# test example wasn't found in trained model \n\t\t\ttotal -= 1\n\t\telse: \n\t\t\t# soft-scoring approach - TODO: do we divide??? \n\t\t\tif prediction == example.sense_id:\n\t\t\t\tsoft_score += confidence\n\t\t\telse:\n\t\t\t\tsoft_score -= confidence\n\t\t\t\n\t\t\t# regular accuracy \n\t\t\tnum_correct += float(prediction == example.sense_id)\n\t\n\taccuracy = float(num_correct) / total\n\treturn accuracy, soft_score / total", "title": "" }, { "docid": "c2c4c108b248913833d1c1df6676cb6e", "score": "0.5896335", "text": "def evaluateModel(gt_result, model_result):\n\n gt_images = list(gt_result.keys())\n\n tp = tn = fp = fn = 0\n\n for img in gt_images:\n\n gt_association = gt_result[img]\n model_association = model_result[img]\n gt_list = []\n model_list = []\n\n if gt_association:\n for i in range(len(gt_association)):\n gt_list.append(gt_association[i][0])\n\n for j in range(len(model_association)):\n model_list.append(model_association[j][0])\n\n gt_copy = gt_list.copy()\n model_copy = model_list.copy()\n\n for association in gt_list:\n if association in model_list:\n if img != \"NA\":\n tp += 1\n else:\n tn += 1\n gt_copy.remove(association)\n model_copy.remove(association)\n\n else:\n fn += 1\n\n for found in model_copy:\n if found not in gt_copy:\n fp += 1\n\n elif not model_association:\n tn += 1\n\n else:\n fp += len(model_association)\n\n precision = tp / (tp + fp)\n\n recall = tp / (tp + fn)\n\n f1_score = 2* ((precision * recall) / (precision + recall))\n\n print(\"Precision: \", precision)\n print(\"recall: \", recall)\n print(\"F1 Score:\", f1_score)\n\n confusion_matrix = np.array(([tp, fp], [fn, tn]))\n\n print(\"Confusion Matrix:\", confusion_matrix)", "title": "" }, { "docid": "0d6a07112ba5fca002e529ffb7bf8add", "score": "0.5884843", "text": "def evaluate(model, test_data):\n total_frames = 0\n total_correct_frames = 0\n arco_sum = 0\n num_songs = len(test_data['labels'])\n error_matrix = np.zeros((NUM_CHORDS, NUM_CHORDS)) # correct -> prediction\n for i in range(num_songs): # for each song\n label = test_data['labels'][i]\n annotated = test_data['annotated_chromas'][i]\n chromagram = np.concatenate(test_data['annotated_chromas'][i], axis=1)\n stretched_label = match_frame(label, annotated)\n prediction = model.predict(chromagram.T).tolist()\n num_frames = chromagram.shape[1]\n total_frames += num_frames\n curr_song_correct = 0\n for i in range(len(prediction)):\n if (int(prediction[i]) == int(stretched_label[i])):\n curr_song_correct += 1\n total_correct_frames += 1\n else:\n error_matrix[int(stretched_label[i]), int(prediction[i])] += 1\n arco_sum += curr_song_correct/num_frames\n result = 
{}\n print(\"Correct: {}/{} = {}\".format(total_correct_frames, total_frames, total_correct_frames/total_frames))\n result['TRCO'] = total_correct_frames/total_frames\n result['ARCO'] = arco_sum/num_songs\n result['err_matrix'] = error_matrix\n print('TRCO: {}\\nARCO: {}'.format(result['TRCO'], result['ARCO']))\n return result", "title": "" }, { "docid": "b87bac8274f21087ccfbc3a49e5e2efa", "score": "0.58501995", "text": "def precision_m(y_true, y_pred):\n\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "8a621b7abfcd13f5465cbc7d04e68b57", "score": "0.5841447", "text": "def predict(self, X):\n\n x_train = np.array(self.x)\n y_train = np.array(self.y)\n y_train = np.reshape(y_train, (len(y_train), 1))\n x_test = np.array(X)\n\n estimated_y = np.zeros((x_test.shape[0], self.n_components))\n distance_matrix = cdist(x_train, x_test, 'euclidean')\n for test_sample_number in range(x_test.shape[0]):\n query_x_test = x_test[test_sample_number, :]\n query_x_test = np.reshape(query_x_test, (1, len(query_x_test)))\n distance = distance_matrix[:, test_sample_number]\n similarity = np.diag(np.exp(-distance / distance.std(ddof=1) / self.lambda_in_similarity))\n # similarity_matrix = np.diag(similarity)\n\n y_w = y_train.T.dot(np.diag(similarity)) / similarity.sum()\n x_w = np.reshape(x_train.T.dot(np.diag(similarity)) / similarity.sum(), (1, x_train.shape[1]))\n centered_y = y_train - y_w\n centered_x = x_train - np.ones((x_train.shape[0], 1)).dot(x_w)\n centered_query_x_test = query_x_test - x_w\n estimated_y[test_sample_number, :] += y_w\n for component_number in range(self.n_components):\n w_a = np.reshape(centered_x.T.dot(similarity).dot(centered_y) / np.linalg.norm(\n centered_x.T.dot(similarity).dot(centered_y)), (x_train.shape[1], 1))\n t_a = np.reshape(centered_x.dot(w_a), (x_train.shape[0], 1))\n p_a = np.reshape(centered_x.T.dot(similarity).dot(t_a) / t_a.T.dot(similarity).dot(t_a),\n (x_train.shape[1], 1))\n q_a = centered_y.T.dot(similarity).dot(t_a) / t_a.T.dot(similarity).dot(t_a)\n t_q_a = centered_query_x_test.dot(w_a)\n estimated_y[test_sample_number, component_number:] = estimated_y[test_sample_number,\n component_number:] + t_q_a * q_a\n if component_number != self.n_components:\n centered_x = centered_x - t_a.dot(p_a.T)\n centered_y = centered_y - t_a * q_a\n centered_query_x_test = centered_query_x_test - t_q_a.dot(p_a.T)\n estimated_y = estimated_y[:, -1]\n \n if np.isnan(estimated_y).any():\n estimated_y = np.ones(X.shape[0]) * y_train.mean()\n \n return estimated_y", "title": "" }, { "docid": "b434781abdef14e2c3282f84e8a59002", "score": "0.5833349", "text": "def get_accuracy(self):\n total_predictions = 0.0\n true_predictions = 0.0\n\n for sample_type in self.sample_type_dic:\n predictions = self.sample_type_dic[sample_type]\n\n total_predictions += predictions['trues'] + predictions['falses']\n true_predictions += predictions['trues']\n\n return true_predictions / total_predictions", "title": "" }, { "docid": "8598363031b4f2c322fc4a83a6f4cc14", "score": "0.5825132", "text": "def precision(y_true, y_pred):\n #true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n #predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n true_positives = K.sum(y_true * y_pred)\n predicted_positives = K.sum(y_pred)\n \n precision = true_positives / (predicted_positives + K.epsilon())\n return 
precision", "title": "" }, { "docid": "aa876f4d52dca5add3597acd411feb9b", "score": "0.58247685", "text": "def test_compute_gains(self):\n lqr_solver = lqr_gain_manifold.CentroidalLqr(self.data_dir)\n lqr_solver.compute_gains()\n\n feedback = lqr_solver.kfb.copy()\n time = lqr_solver.dt * np.arange(feedback.shape[0])\n norms = []\n for t in range(feedback.shape[0]):\n norms += [np.linalg.norm(feedback[t])]\n\n # plt.figure('gains')\n # plt.plot(time, norms)\n # plt.grid()\n # plt.show()", "title": "" }, { "docid": "103722290c81f472bb149e4ee4b5d9b1", "score": "0.58155197", "text": "def measure(self, model, epoch):\n if epoch % self.update_epochs:\n return\n\n self.epochs.append(epoch)\n\n torch.cuda.empty_cache()\n\n _ = model.eval()\n\n # Compute Embeddings\n with torch.no_grad():\n feature_coll, target_coll = [], []\n data_iter = tqdm(\n self.checkdata, desc=\"Estimating Data Distances...\"\n )\n for idx, data in enumerate(data_iter):\n input_img, target = data[1], data[0]\n features = model(input_img.to(self.pars.device))\n feature_coll.extend(features.cpu().detach().numpy().tolist())\n target_coll.extend(target.numpy().tolist())\n\n feature_coll = np.vstack(feature_coll).astype(\"float32\")\n target_coll = np.hstack(target_coll).reshape(-1)\n avail_labels = np.unique(target_coll)\n\n # Compute indixes of embeddings for each class.\n class_positions = []\n for lab in avail_labels:\n class_positions.append(np.where(target_coll == lab)[0])\n\n # Compute average intra-class distance and center of mass.\n com_class, dists_class = [], []\n for class_pos in class_positions:\n dists = distance.cdist(\n feature_coll[class_pos], feature_coll[class_pos], \"cosine\"\n )\n dists = np.sum(dists) / (len(dists) ** 2 - len(dists))\n # dists = np.linalg.norm(np.std(feature_coll_aux[class_pos],axis=0).reshape(1,-1)).reshape(-1)\n com = normalize(\n np.mean(feature_coll[class_pos], axis=0).reshape(1, -1)\n ).reshape(-1)\n dists_class.append(dists)\n com_class.append(com)\n\n # Compute mean inter-class distances by the class-coms.\n mean_inter_dist = distance.cdist(\n np.array(com_class), np.array(com_class), \"cosine\"\n )\n mean_inter_dist = np.sum(mean_inter_dist) / (\n len(mean_inter_dist) ** 2 - len(mean_inter_dist)\n )\n\n # Compute distance ratio\n mean_class_dist = np.mean(np.array(dists_class) / mean_inter_dist)\n self.mean_class_dists.append(mean_class_dist)\n\n self.update(mean_class_dist)", "title": "" }, { "docid": "b0e1d190a568712ef23f6473bf399632", "score": "0.5811317", "text": "def vpa(self):\n print()\n print(\"Starting Vector Prediction Accuracy on\", self.model_name, str(self.model))\n\n model = self.model\n corpus = list(self.corpus) #gets every TaggedDocument out of the iterator.\n results = []\n correct = 0\n ap = results.append\n for doc in corpus:\n inferred_vector = model.infer_vector(doc.words) #have to call .words on TaggedDoc with words (tokens) and tags(labels)\n similar_vector = model.docvecs.most_similar([inferred_vector], topn=1)\n ap(similar_vector[0][1]) \n\n if similar_vector[0][0] == doc.tags[0]: #tag of most similar vector should match tag of the document in the corpus\n correct += 1\n\n npArray = np.array(results)\n min_ = min(results) #smallest cosine simularity\n max_ = max(results) #largest = the most similar\n mean_ = np.mean(npArray, dtype=np.float64) #float64 more accurate\n median_ = np.percentile(npArray, 50)\n firstQ = np.percentile(npArray, 25)\n thirdQ = np.percentile(npArray, 75)\n\n\n\n return (correct, len(results), min_, max_, mean_, median_, firstQ, 
thirdQ)", "title": "" }, { "docid": "ee5d3ad9ab70c0a185a43af56b114212", "score": "0.5809214", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n preds = torch.max(predictions,1)[1]\n tags = torch.max(targets,1)[1]\n return (preds == tags).float().mean()\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "09e48b78328ef3f1330bd4df42244360", "score": "0.5802593", "text": "def evaluate(labels, predictions):\n sensitivity = 0.0\n specificity = 0.0\n positive = 0.0\n negative = 0.0\n \n length = len(labels)\n for pos in range(length):\n if predictions[pos] == 1:\n positive += 1\n if labels[pos] == predictions[pos]:\n sensitivity += 1\n elif predictions[pos] == 0:\n negative += 1\n if labels[pos] == predictions[pos]:\n specificity += 1\n sensitivity /= positive\n specificity /= negative\n\n return sensitivity, specificity", "title": "" }, { "docid": "aa1136f29fd25914ca6e33143672a89c", "score": "0.58006704", "text": "def metrics(model,x,y):\n yhat = model.predict(x)\n ols = sum(numpy.square((y-yhat)))\n rmse = (ols/len(y))**0.5\n corr = numpy.corrcoef(y,yhat)\n absME = sum(numpy.absolute(y-yhat))/len(y)\n return absME ,rmse,corr", "title": "" }, { "docid": "bce70704802774330b9319478601eb4e", "score": "0.5797814", "text": "def predict_credibility(text):\n\n # feature_result = text_features_model(text)\n # domain_result = domain_model(title)\n # return int(0.5*feature_result + 0.5*domain_result)\n model_count_result = predict_count(text)\n model_tfidf_result = predict_tfidf(text)\n return round(100 * (0.5 * model_count_result + 0.5 * model_tfidf_result),\n 2)", "title": "" }, { "docid": "7f48d7747c3e32238a6d20409a52fa26", "score": "0.5790421", "text": "def examine_local_fidelity(model, X, y, epsilon=5, resolution=10, count_per_step=5, framework='shap', from_zero=False,\n proportionality_mode=0, categorical_cols=[], probability_multiplier=1):\n\n baseline_predictions = model.predict(X)\n baseline_accuracy = accuracy_score(y, baseline_predictions)\n available_frameworks = ['shap', 'lime']\n\n accuraties = []\n\n if framework == 'shap':\n explainer = shap.TreeExplainer(model)\n all_importances = explainer.shap_values(X)\n\n # If is multiclass, choose explanation for the correct class\n if isinstance(all_importances, list):\n right_imps = []\n for idx, label in enumerate(y):\n right_imps.append(all_importances[label][idx])\n all_importances = right_imps\n # explainer = shap.KernelExplainer(model, data=X)\n # all_importances = explainer.shap_values(X)\n\n elif framework == 'lime':\n all_importances = []\n explainer = lime.lime_tabular.LimeTabularExplainer(X.values, feature_names=X.columns)\n\n for index, row in X.iterrows():\n correct_label = y[index]\n if np.shape(correct_label):\n correct_label = correct_label.iloc[0]\n # If is multiclass, choose explanation for the correct class\n exp = explainer.explain_instance(row, model.predict_proba, num_features=len(X.columns),\n labels=(correct_label,))\n imps = dict()\n\n for feat in exp.local_exp[correct_label]:\n imps[feat[0]] = feat[1]\n imp_vals = []\n for i in range(len(imps)):\n imp_vals.append(imps[i])\n\n all_importances.append(imp_vals)\n\n else:\n raise ValueError(\"Framework not found\")\n\n abs_imps = [list(map(abs, row)) for row in all_importances]\n imp_shares = [list(map(lambda x: x / sum(row), row)) for row in abs_imps]\n\n if proportionality_mode == 0:\n reversed_importances = [list(map(lambda x: 
max(row) - x, row)) for row in abs_imps]\n reversed_imp_shares = [list(map(lambda x: x / sum(row), row)) for row in reversed_importances]\n elif proportionality_mode == 1:\n reversed_imp_shares = []\n indexes = [i for i in range(len(imp_shares[0]))]\n for single_obs_imp in imp_shares:\n importances_df = pd.DataFrame([*zip(*[indexes, single_obs_imp])])\n importances_df.sort_values(by=1, axis=0, inplace=True)\n flipped_importances = pd.Series(np.flip(importances_df[1].values))\n importances_df.reset_index(inplace=True)\n importances_df[1] = flipped_importances\n importances_df.sort_values(by=0, axis=0, inplace=True)\n reversed_imp_shares.append(importances_df[1])\n\n if from_zero:\n imps_matrix = np.linspace(np.zeros(np.shape(reversed_imp_shares)), reversed_imp_shares, resolution)\n else:\n imps_matrix = np.linspace(reversed_imp_shares, imp_shares, resolution)\n\n accuraties = []\n\n # One hot encoded category handling to be added\n # all_categorical_cols = list(chain(*categorical_cols))\n\n for step_importances in imps_matrix:\n this_step_accuraties = []\n for i in range(count_per_step):\n perturbed_dataset = X.copy()\n for index, (skip, row) in enumerate(perturbed_dataset.iterrows()):\n for idx in range(len(row)):\n # perturb numerical data\n if idx not in categorical_cols:\n row[idx] = row[idx] + step_importances[index][idx] * np.random.normal(0, epsilon)\n # change randomly categorical data\n else:\n if np.random.random() < probability_multiplier * step_importances[index][idx]:\n row[idx] = 1 - row[idx]\n\n predictions = model.predict(perturbed_dataset)\n this_step_accuraties.append(accuracy_score(y, predictions))\n accuraties.append(baseline_accuracy - np.mean(this_step_accuraties))\n\n plt.plot(np.linspace(0, 100, resolution), accuraties)\n plt.xlabel('Percentile of perturbation range', fontsize=13)\n plt.ylabel('Loss of accuracy', fontsize=13)\n return accuraties", "title": "" }, { "docid": "bbc2f00c24507ebb724dad24c3dcc836", "score": "0.57879347", "text": "def hill_predict(self, X):\n\n # Average the in-sample predictions from each model and take a majority\n # vote.\n y_pred = np.zeros(X.shape[0], dtype=float)\n for m in self.models:\n y_pred += m.hill_predict(X)\n y_pred /= len(self.models)\n return (y_pred >= 0.5).astype(int)", "title": "" }, { "docid": "d1d0c3f8c3868b7c1a35fb36ae872ec7", "score": "0.5785669", "text": "def estimated_exercise_accuracies(self, history):\n pass", "title": "" }, { "docid": "1263493b490dd9f8fb6aea872579576a", "score": "0.57767177", "text": "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "title": "" }, { "docid": "65b6fa0ded1743625516a62e12e6f028", "score": "0.57749295", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n return true_positives / (predicted_positives + K.epsilon())", "title": "" }, { "docid": "65b6fa0ded1743625516a62e12e6f028", "score": "0.57749295", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n return true_positives / (predicted_positives + K.epsilon())", "title": "" }, { "docid": "28a533a23e61a63d571dc4bcad189d41", "score": "0.5768082", "text": "def compute(cls, observation, prediction):\n\n 
p_value_I_maxNumAP = prediction['model_I_maxNumAP']\n p_value_I_below_depol_block = prediction['model_I_below_depol_block']\n o_mean_Ith = observation['mean_Ith']\n o_std_Ith = observation['Ith_std']\n p_value_Veq = prediction['model_Veq']\n o_mean_Veq = observation['mean_Veq']\n o_std_Veq = observation['Veq_std']\n\n try:\n result_I_maxNumAP = abs(p_value_I_maxNumAP - o_mean_Ith)/o_std_Ith\n result_I_maxNumAP = assert_dimensionless(result_I_maxNumAP)\n if not math.isnan(p_value_I_below_depol_block):\n result_I_below_depol_block = abs(p_value_I_below_depol_block - o_mean_Ith)/o_std_Ith\n else:\n result_I_below_depol_block = float('NaN')\n result_I_below_depol_block = assert_dimensionless(result_I_below_depol_block)\n if not math.isnan(p_value_Veq):\n result_Veq = abs(p_value_Veq - o_mean_Veq)/o_std_Veq\n else:\n result_Veq = float('NaN')\n result_Veq = assert_dimensionless(result_Veq)\n\n except (TypeError,AssertionError) as e:\n result_I_maxNumAP = e\n result_I_below_depol_block = e\n result_Veq = e\n\n if p_value_I_maxNumAP != p_value_I_below_depol_block and not math.isnan(result_I_below_depol_block): # according to the experiment thesetwo should be equal\n I_diff_penalty = 200.0*(abs(p_value_I_maxNumAP - p_value_I_below_depol_block)/(1*nA)) # divided be (1*nA) to make it dimensionless\n I_diff_penalty = assert_dimensionless(I_diff_penalty)\n else:\n I_diff_penalty = 0\n\n if math.isnan(result_I_below_depol_block) or math.isnan(result_Veq) :\n final_score = 100.0\n else:\n final_score = numpy.nanmean([result_I_maxNumAP, result_I_below_depol_block, result_Veq]) + I_diff_penalty\n\n return final_score, result_I_maxNumAP, result_I_below_depol_block, result_Veq, I_diff_penalty", "title": "" }, { "docid": "82a7c80cc528292aeaa219e3f35a9cff", "score": "0.5767316", "text": "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n pred_index = np.argmax(predictions,axis=1)\n target_index = np.argmax(targets, axis=1)\n correct = np.count_nonzero(np.equal(pred_index,target_index),axis=0)\n accuracy = correct/targets.shape[0]\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "title": "" }, { "docid": "fa96bc76f05b45256582d695b3ae7e39", "score": "0.5766702", "text": "def test_marginal_gain_calculation(self):\n V = set(range(self.cov_kernel.shape[0]))\n A_random = set([1])\n V_minus_A_random = V - A_random\n y = 2\n I_A_random = _compute_mi(self.cov_kernel, A_random, V_minus_A_random)\n A_aug = deepcopy(A_random)\n A_aug.add(y)\n V_minus_A_aug = V - A_aug\n I_A_aug = _compute_mi(self.cov_kernel, A_aug, V_minus_A_aug)\n diff_via_direct = abs(I_A_aug - I_A_random)\n print(f\"MI(A) {I_A_random}, MI(A U y) {I_A_aug}, diff {diff_via_direct}\")\n\n diff = aa.compute_marginal_gain(y, A_random, V, self.cov_kernel)\n # the marginal_gain leaves out 0.5 * log term as it does not\n # matter for ranking elements\n half_log_diff = 0.5 * np.log(diff)\n print(f\"Diff via aa.compute {half_log_diff}\")\n self.assertAlmostEqual(diff_via_direct, half_log_diff, delta=0.01)", "title": "" }, { "docid": "07929642a5ec433cba6e97170ff9f453", "score": "0.57659584", "text": "def objective_difference(augmentations):\n for model_name in ['kernel', 'lenet']:\n for augmentation in augmentations:\n for seed in range(n_trials):\n print(f'Seed: {seed}')\n torch.manual_seed(seed)\n model = model_factories[model_name]().to(device)\n optimizer = sgd_opt_from_model(model)\n loader = loader_from_dataset(augmentation.dataset)\n 
model.train()\n losses = []\n losses.append(all_losses(loader, model).mean(axis=0))\n train_loss, train_acc, valid_acc = [], [], []\n for epoch in range(sgd_n_epochs):\n train_loss_epoch, train_acc_epoch = train(loader, model, optimizer)\n train_loss += train_loss_epoch\n train_acc += train_acc_epoch\n print(f'Train Epoch: {epoch}')\n correct, total = accuracy(valid_loader, model)\n valid_acc.append(correct / total)\n print(\n f'Validation set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)'\n )\n losses.append(np.array(all_losses(loader, model)).mean(axis=0))\n train_loss, train_acc, valid_acc = np.array(train_loss), np.array(train_acc), np.array(valid_acc)\n np.savez(f'saved/train_valid_acc_{model_name}_{augmentation.name}_{seed}.npz',\n train_loss=train_loss, train_acc=train_acc, valid_acc=valid_acc)\n losses = np.array(losses).T\n np.save(f'saved/all_losses_{model_name}_{augmentation.name}_{seed}.npy', losses)", "title": "" }, { "docid": "4ebd1cd962e7fa70f433ff16816e72f9", "score": "0.5756597", "text": "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "title": "" }, { "docid": "48f5b25e0d9a53cdfd5c7f54770050c8", "score": "0.5748814", "text": "def compute_model_metrics(y, preds):\n fbeta = fbeta_score(y, preds, beta=1, zero_division=1)\n precision = precision_score(y, preds, zero_division=1)\n recall = recall_score(y, preds, zero_division=1)\n return precision, recall, fbeta", "title": "" }, { "docid": "84c300622478632d70f689a7e1e3a1e3", "score": "0.57438976", "text": "def test_all_models(self, clf, non_param_model):\n y = self.y.to_numpy()\n metrics = {}\n predictions_df, metrics = pred_fncs.baseline_OR(X=self.X, y=y, metrics=metrics)\n\n np_df = pred_fncs.get_nonparametric_preds(X=self.X, model=non_param_model)\n np_yhat = np_df['Predictions'].to_numpy()\n np_results, np_metrics = my_metrics.get_model_metrics(y_true=y, y_hat=np_yhat)\n metrics['Nonparametric'] = np_metrics\n predictions_df['prob np'] = np_df['Probability']\n predictions_df['pred np'] = np_df['Predictions']\n\n gt_df = pred_fncs.test_with_GT(logit_clf=clf, X_df=self.X)\n gt_yhat = gt_df['Predictions'].to_numpy()\n gt_results, gt_metrics = my_metrics.get_model_metrics(y_true=y, y_hat=gt_yhat)\n metrics['ground truth AR'] = gt_metrics\n predictions_df['prob gt-AR'] = gt_df['Probability']\n predictions_df['pred gt-AR'] = gt_df['Predictions'] \n\n self.df = pred_fncs.test_with_predictions(logit_clf=clf, X=self.X)\n self.yhat = self.df['Predictions'].to_numpy()\n results, pred_metrics = my_metrics.get_model_metrics(y_true=y, y_hat=self.yhat)\n metrics['AR predictions (aie)'] = pred_metrics\n predictions_df['prob AR'] = self.df['Probability']\n predictions_df['pred AR'] = self.df['Predictions']\n\n predictions_df['ground truth'] = self.y\n \n # print(predictions_df)\n\n self.metrics = pd.DataFrame(metrics).transpose()\n # print(self.metrics)\n self.predictions = predictions_df", "title": "" }, { "docid": "e945888fdbf377dde973d7baa1effeb4", "score": "0.5735963", "text": "def evaluate(self, testRatings: interactions.ClassificationInteractions, K: int, output_ranking = False, **kargs):\n all_labels = []\n all_final_probs = []\n list_error_analysis = []\n for query, evidences_info in testRatings.dict_claims_and_evidences_test.items():\n evd_ids, labels, evd_contents, evd_lengths = evidences_info\n assert len(set(labels)) 
== 1, \"Must have only one label due to same claim\"\n all_labels.append(labels[0])\n claim_content = testRatings.dict_claim_contents[query]\n claim_source = np.array([testRatings.dict_claim_source[query]] * len(labels)) # (len(labels), 1)\n evd_sources = np.array([testRatings.dict_evd_source[e] for e in evd_ids]) # (len(labels), 1)\n query_len = np.array([testRatings.dict_claim_lengths[query]] * len(labels))\n\n # doc_lens = [testRatings.dict_doc_lengths[d] for d in docs]\n\n claim_content = np.tile(claim_content, (len(labels), 1)) # len(labels), query_contnt_leng)\n evd_contents = np.array(evd_contents) # shape = (real_num_evd, R) or = (len(labels), R)\n # claim_content = my_utils.gpu(claim_content)\n # evd_contents = my_utils.gpu(evd_contents)\n\n claim_content = my_utils.gpu(my_utils.numpy2tensor(claim_content, dtype=torch.int), self._use_cuda)\n evd_contents = my_utils.gpu(my_utils.numpy2tensor(evd_contents, dtype=torch.int), self._use_cuda)\n # for evidences\n evd_lengths = np.array(evd_lengths)\n d_new_indices, d_old_indices = torch_utils.get_sorted_index_and_reverse_index(evd_lengths)\n q_new_indices, q_old_indices = torch_utils.get_sorted_index_and_reverse_index(query_len)\n evd_lengths = my_utils.gpu(my_utils.numpy2tensor(evd_lengths, dtype=torch.int), self._use_cuda)\n query_len = my_utils.gpu(my_utils.numpy2tensor(query_len, dtype=torch.int), self._use_cuda)\n\n # for sources\n claim_source = my_utils.gpu(my_utils.numpy2tensor(claim_source, dtype=torch.int), self._use_cuda)\n evd_sources = my_utils.gpu(my_utils.numpy2tensor(evd_sources, dtype=torch.int), self._use_cuda)\n\n query_id_tsr = torch_utils.gpu(torch.from_numpy(np.array([query])), self._use_cuda)\n additional_information = {\n KeyWordSettings.Query_lens: query_len,\n KeyWordSettings.Doc_lens: evd_lengths,\n KeyWordSettings.DocLensIndices: (d_new_indices, d_old_indices, evd_lengths),\n KeyWordSettings.QueryLensIndices: (q_new_indices, q_old_indices, query_len),\n KeyWordSettings.QuerySources: claim_source,\n KeyWordSettings.DocSources: evd_sources,\n KeyWordSettings.QueryIDs: query_id_tsr,\n KeyWordSettings.DocIDs: torch_utils.gpu(torch.from_numpy(np.array(evd_ids)), self._use_cuda)\n }\n probs = self._net.predict(claim_content, evd_contents, **additional_information) # shape = (len(labels), )\n all_final_probs.append(float(my_utils.cpu(probs).detach().numpy().flatten()))\n\n\n results = self._computing_metrics(true_labels = all_labels, predicted_probs = all_final_probs)\n if output_ranking: return results, [] # sorted(list_error_analysis, key=lambda x: x[\"qid\"])\n\n return results", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = 
true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def 
precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.5733504", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "1403d4c3f702901ee0efdac6975a82ef", "score": "0.57321113", "text": "def run_recognizer():\n training_data, training_labels, prediction_data, prediction_labels = make_sets()\n\n print(\"size of training set is:\", len(training_labels), \"images\")\n fishface.train(training_data, np.asarray(training_labels))\n\n print(\"predicting classification set\")\n correct = sum(1 for id, image in enumerate(prediction_data) if fishface.predict(image)[0] == prediction_labels[id])\n\n return ((100 * correct) / len(prediction_data))", "title": "" }, { "docid": "1403d4c3f702901ee0efdac6975a82ef", "score": "0.57321113", "text": "def run_recognizer():\n training_data, training_labels, prediction_data, prediction_labels = make_sets()\n\n print(\"size of training set is:\", len(training_labels), \"images\")\n fishface.train(training_data, np.asarray(training_labels))\n\n print(\"predicting classification set\")\n correct = sum(1 for id, image in enumerate(prediction_data) if fishface.predict(image)[0] == prediction_labels[id])\n\n return ((100 * correct) / len(prediction_data))", "title": "" }, { "docid": "d66d6e62ef4110d5cadd2df83abf724b", "score": "0.5728083", "text": "def get_acc(clf, X, y):\n\n acc = sum((yi_hat == yi for yi_hat, yi in zip(clf.predict(X), 
y))) / len(y)\n print(\"Test accuracy is %.3f%%!\" % (acc * 100))\n return acc", "title": "" }, { "docid": "edfb30492cfbc464a11a67a7d1ae896c", "score": "0.57234704", "text": "def precision(y_true, y_pred):\n print type(y_true)\n print y_true.shape\n print y_pred.shape\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "a515fa1ef3c0bf2a1ff4bcaaf0f28bb5", "score": "0.5720133", "text": "def precision(y_true: np.ndarray, y_pred: np.ndarray):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "3486fe7731b1f44e1c96a366b0a44259", "score": "0.5716885", "text": "def precision(y_true, y_pred):\n\ttrue_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n\tpredicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n\tprecision = true_positives / (predicted_positives + K.epsilon())\n\treturn precision", "title": "" }, { "docid": "f380d42e20be2514beaca3e3fc8661e0", "score": "0.5715378", "text": "def test_predictions_match(self):\n # Load model from path\n loaded_model = load_model(self.model_path_v1)\n sequence_length = self.estimator.get_sequence_length()\n\n # Obtain sample data\n random_data = sample_test_data()\n\n # Preprocess sample data\n random_data = pd.DataFrame(self.preprocessor.transform(random_data))\n len_random_data = len(random_data)\n\n # Obtain sequences from sample data\n random_data_sequences = np.array([random_data[start:stop].values for start, stop in zip(range(0, len_random_data - sequence_length), range(sequence_length, len_random_data))])\n\n # Assert prediction of keras model is the same as the estimator\n np.testing.assert_equal(self.estimator.predict(random_data_sequences), loaded_model.predict(random_data_sequences))", "title": "" }, { "docid": "62319fc8499a17f0f6772f6fb7627f94", "score": "0.5709459", "text": "def calculate_accuracy(oracle_output, sut_output):\n accuracy = (oracle_output == real_test).sum()/real_test.shape[0]\n print(f'Models accuracy: {accuracy*100}%')\n return accuracy", "title": "" }, { "docid": "337b737a0ff549656dc5a8b446fb8bf8", "score": "0.5705842", "text": "def _mv_cbound(self, data = 'train'):\n\n if data == 'train':\n predictions = self.train_predictions_classifiers\n labels = self.y_train\n\n errors_t = []\n disaggrement_t = []\n # example_weights = np.ones(self.num_train_examples) / self.num_train_examples # to not to consider example weights.\n # Computing View-Specific Error and disagreement on weighted training data.(Line 11-12)\n for name_of_view in self.all_views:\n\n classifier_errors = []\n paired_disagreements = []\n\n # compute view-specific error (Line 11)\n for classifier_output in predictions[name_of_view]:\n error = [int(x) for x in (classifier_output != labels[name_of_view])]\n weighted_error = np.mean(error)\n classifier_errors.append(weighted_error)\n\n classifier_errors = np.array(classifier_errors)\n classifier_weights = np.array(self.weights_classfiers[name_of_view]) / sum(np.array(self.weights_classfiers[name_of_view]))\n errors_t.append(sum(classifier_errors * classifier_weights))\n\n # compute view-specific disagreement (Line 12)\n for index_1, classifier_output_1 in enumerate(predictions[name_of_view]):\n for index_2, classifier_output_2 in 
enumerate(predictions[name_of_view]):\n disagreement = [int(x) for x in (classifier_output_1 != classifier_output_2)]\n weighted_disagreement = np.mean(disagreement)\n classifier_weights = np.array(self.weights_classfiers[name_of_view]) / sum(np.array(self.weights_classfiers[name_of_view]))\n\n weight_1 = classifier_weights[index_1]\n weight_2 = classifier_weights[index_2]\n\n paired_disagreements.append(weighted_disagreement * weight_1 * weight_2)\n\n disaggrement_t.append(sum(paired_disagreements))\n\n rho = np.array(self.rho)\n risk_total = sum(np.array(errors_t) * rho)\n disagreement_total = sum(np.array(disaggrement_t) * rho)\n c_bound = self._compute_Cbound(risk_total,disagreement_total)\n\n return c_bound", "title": "" }, { "docid": "d7ff1e0c4aa4289d9775affd9c342515", "score": "0.56976247", "text": "def criterion(self, model: Autoencoder, seq_pred: list, seq_true: list) -> float:\n\n # Get reconstruction loss as mean squared error\n reconstruction_loss = nn.MSELoss()(seq_pred, seq_true)\n\n # Create spare representation and fit k-means on it\n data = np.array(seq_true)\n encode_data = autoencoder_functions.encode_data(model, data, batch_size=len(data))\n self.kmeans.fit(encode_data)\n\n # Get KL-Divergence\n kl_addition = np.min(data) * -1 + 0.00001\n kl_per_cluster = []\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n for label in set(self.kmeans.labels_):\n cluster = []\n\n for j, spike in enumerate(data):\n if self.kmeans.labels_[j] == label:\n cluster.append(spike)\n\n if len(cluster) > 1:\n kl_in_cluster = []\n for i1, spike1 in enumerate(cluster):\n spike1 = spike1 + kl_addition\n for i2, spike2 in enumerate(cluster):\n if i1 != i2:\n spike2 = spike2 + kl_addition\n kl_in_cluster.append(entropy(spike1, spike2))\n kl_per_cluster.append(np.mean(kl_in_cluster))\n else:\n kl_per_cluster.append(0)\n\n cluster_loss = np.mean(kl_per_cluster)\n\n # Combine losses\n loss = reconstruction_loss + cluster_loss\n # print(float(reconstruction_loss), (cluster_loss), float(cluster_loss) / float(reconstruction_loss))\n return loss", "title": "" }, { "docid": "903451092f64ca167fda6f6e8003cbee", "score": "0.56962675", "text": "def error(self):\n accs = []\n for i in range(self.X_test.shape[0]):\n pred = self.predict(i)\n acc = np.count_nonzero(pred == self.y_test_labels[i]) / self.num_patches\n accs.append(acc)\n return np.mean(acc)", "title": "" }, { "docid": "21e57b9f1b11ebcf91f16e21f02f3039", "score": "0.5692943", "text": "def precision_k(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "8f70398d42502a7f1fdf7ea484425859", "score": "0.56918263", "text": "def get_metrics(self):\n # Standard metrics (TP,TN,FP,FN) for edge prediction\n binary_gamma = self.get_binary_adjmatrix()\n false_positives = torch.logical_and(binary_gamma, ~self.true_adj_matrix)\n false_negatives = torch.logical_and(~binary_gamma, self.true_adj_matrix)\n TP = torch.logical_and(binary_gamma, self.true_adj_matrix).float().sum().item()\n TN = torch.logical_and(~binary_gamma, ~self.true_adj_matrix).float().sum().item()\n FP = false_positives.float().sum().item()\n FN = false_negatives.float().sum().item()\n TN = TN - self.gamma.shape[-1] # Remove diagonal as those are not being predicted\n recall = TP / max(TP + FN, 1e-5)\n precision = TP / max(TP + FP, 1e-5)\n # Structural Hamming Distance score\n 
rev = torch.logical_and(binary_gamma, self.true_adj_matrix.T)\n num_revs = rev.float().sum().item()\n SHD = (false_positives + false_negatives + rev + rev.T).float().sum().item() - num_revs\n\n # Get details on False Positives (what relations have the nodes of the false positives?)\n FP_elems = torch.where(torch.logical_and(binary_gamma, ~self.true_adj_matrix))\n FP_relations = self.true_node_relations[FP_elems]\n FP_dict = {\n \"ancestors\": (FP_relations == -1).sum().item(), # i->j => j is a child of i\n \"descendants\": (FP_relations == 1).sum().item(),\n \"confounders\": (FP_relations == 2).sum().item(),\n \"independents\": (FP_relations == 0).sum().item()\n }\n\n # Details on orientation prediction of theta, independent of gamma\n orient_TP = torch.logical_and(self.true_adj_matrix == 1, self.theta.cpu() > 0.0).float().sum().item()\n orient_FN = torch.logical_and(self.true_adj_matrix == 1, self.theta.cpu() <= 0.0).float().sum().item()\n orient_acc = orient_TP / max(1e-5, orient_TP + orient_FN)\n orient_dict = {\n \"TP\": int(orient_TP),\n \"FN\": int(orient_FN),\n \"acc\": orient_acc\n }\n\n # Summarizing all results in single dictionary\n metrics = {\n \"TP\": int(TP),\n \"TN\": int(TN),\n \"FP\": int(FP),\n \"FN\": int(FN),\n \"SHD\": int(SHD),\n \"reverse\": int(num_revs),\n \"recall\": recall,\n \"precision\": precision,\n \"FP_details\": FP_dict,\n \"orient\": orient_dict\n }\n\n if self.graph.num_latents > 0:\n metrics[\"confounders\"] = self.get_confounder_metrics()\n return metrics", "title": "" }, { "docid": "563a3661e382757e395d0dab2330f396", "score": "0.5688863", "text": "def predict(X, y, parameters):\n\tPERF_FORMAT_STRING = \"\\\n\t\\tAccuracy: {:>0.{display_precision}f}\\tPrecision: {:>0.{display_precision}f}\\t\\\n\tRecall: {:>0.{display_precision}f}\\tF1: {:>0.{display_precision}f}\\tF2: {:>0.{display_precision}f}\"\n\tRESULTS_FORMAT_STRING = \"\\tTotal predictions: {:4d}\\tTrue positives: {:4d}\\tFalse positives: {:4d}\\\n\t\\tFalse negatives: {:4d}\\tTrue negatives: {:4d}\"\n\t\n\tm = X.shape[1]\n\tn = len(parameters) // 2\n\tpred = np.zeros((1,m))\n\t\n\t# Forward propagation\n\tprobab, caches = L_model_forward(X, parameters)\n\n\t# convert probability to 'Benign/Malignant' predictions\n\tfor i in range(0, probab.shape[1]):\n\t\tif probab[0,i] > 0.5:\n\t\t\tpred[0,i] = 1\n\t\telse:\n\t\t\tpred[0,i] = 0\n\t\n\t#print (\"predictions: \" + str(p))\n\t#print (\"true labels: \" + str(y))\n\ttrue_negatives = 0\n\tfalse_negatives = 0\n\ttrue_positives = 0\n\tfalse_positives = 0\n\t\n\tfor prediction, truth in zip(np.squeeze(pred), np.squeeze(y)):\n\t\tif prediction == 1 and truth == 1:\n\t\t\ttrue_negatives += 1\n\t\telif prediction == 1 and truth == 0:\n\t\t\tfalse_negatives += 1\n\t\telif prediction == 0 and truth == 1:\n\t\t\tfalse_positives += 1\n\t\telif prediction == 0 and truth == 0:\n\t\t\ttrue_positives += 1\n\n\ttry:\n\t\ttotal_predictions = true_negatives + false_negatives + false_positives + true_positives\n\t\taccuracy = 1.0*(true_positives + true_negatives)/total_predictions\n\t\tprecision = 1.0*true_positives/(true_positives+false_positives)\n\t\trecall = 1.0*true_positives/(true_positives+false_negatives)\n\t\tf1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)\n\t\tf2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)\n\t\tprint(PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5))\n\t\tprint(RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, 
true_negatives))\n\t\tprint(\"\")\n\texcept:\n\t\tprint(\"Got a divide by zero when trying out:\", clf)\n\t\tprint(\"Precision or recall may be undefined due to a lack of true positive predicitons.\")\n\n\tprint(\"False positive rate: \" + str((false_positives/(false_positives+true_negatives))*100))\n\tprint(\"False negative rate: \" + str((false_negatives/(false_negatives+true_positives))*100))\n\t\n\t## other methods to calculate accuracy\n\t# print(\"accuracy: \" + str(np.sum((p == y)/m)))\n\t# print(\"accuracy: {} %\".format(100 - np.mean(np.abs(pred - y)) * 100))\n\t\t\n\treturn\taccuracy", "title": "" }, { "docid": "fa58f48dac3100a4fd9b58d4029a3b8e", "score": "0.56871206", "text": "def assess(lvq, input_data, label_data):\n correct = 0.0\n incorrect = 0.0\n for lab, vec in zip(label_data, input_data):\n if lvq.classify(vec) == lab:\n correct += 1\n else:\n incorrect += 1\n\n return 100*correct/(correct+incorrect)", "title": "" }, { "docid": "e58b3342345122c6d11d27a60493ee73", "score": "0.5677028", "text": "def evaluate(self, x_test, w_test):\n w_est = self.predict(x_test)\n num_correct_predictions = np.sum(w_est == np.array(w_test))\n accuracy = num_correct_predictions/float(len(w_est))\n return (num_correct_predictions, accuracy)", "title": "" }, { "docid": "7e5ca971494ba5e4fff244b23a95f433", "score": "0.5676851", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n \n return precision", "title": "" }, { "docid": "dc7dc38318c1f1e359ba43d6d9b6c652", "score": "0.56761193", "text": "def test_predict(self):\n rc = ResComp(**RES)\n t, U = make_data()\n rc.train(t, U)\n pre = rc.predict(t[500:], U[500, :])\n error = np.max(np.linalg.norm(pre - U[500:, :], ord=np.inf, axis=0))\n assert error < 0.5", "title": "" }, { "docid": "649fbe7c7101c01f986ae510b39c8f55", "score": "0.5668619", "text": "def evaluate_model(self, x, y, **kwargs):\n acc = {}\n if len(self._predictors) > 0:\n y_hat = self.predict(x)\n correct = 0\n for i in range(x.shape[0]):\n if y_hat[i] == y[i]:\n correct += 1\n\n acc = {'acc': correct/float(len(y))}\n additional_metrics = fl_metrics.get_eval_metrics_for_classificaton(\n y, y_hat)\n\n acc = {**acc, **additional_metrics}\n return acc\n else:\n logger.info('No models have been trained yet.')\n return {}", "title": "" }, { "docid": "8a7b1cf96308dd5a9e43f81e8d233bd7", "score": "0.5666677", "text": "def test_doc_stats(verbose):\n epsilon = 0.001\n true_positives = epsilon\n true_negatives = epsilon\n false_positives = epsilon\n false_negatives = epsilon\n\n for fn in TEST_CAT_LIST:\n # concatenating to file path\n path = os.path.join(TEST_CAT_DIR + fn)\n img = image.load_img(path, target_size=(150, 150))\n\n xs = image.img_to_array(img)\n xs = np.expand_dims(xs, axis = 0)\n\n images = np.vstack([xs])\n classes = model.predict(images, batch_size = 20)\n \n if classes[0] < 0.5:\n if verbose:\n class_log = \"\\n\" + fn + \" is a cat\"\n logging.info(class_log)\n true_negatives += 1\n else:\n if verbose:\n class_log = \"\\n\" + fn + \" is a dog\"\n logging.info(class_log)\n false_negatives += 1\n\n for fn in TEST_DOG_LIST:\n # concatenating to file path\n path = os.path.join(TEST_DOG_DIR + fn)\n img = image.load_img(path, target_size=(150, 150))\n\n xs = image.img_to_array(img)\n xs = np.expand_dims(xs, axis = 0)\n\n images = np.vstack([xs])\n classes = model.predict(images, batch_size = 20)\n \n if 
classes[0] < 0.5:\n if verbose:\n class_log = \"\\n\" + fn + \" is a cat\"\n logging.info(class_log)\n false_positives += 1\n else:\n if verbose:\n class_log = \"\\n\" + fn + \" is a dog\"\n logging.info(class_log)\n true_positives += 1\n\n # Calculating Precision and Recall\n precision = true_positives / (true_positives + false_positives)\n recall = true_positives / (true_positives + false_negatives)\n F1_Score = 2 * ((precision * recall) / (precision + recall))\n precision = '%.3f'%precision\n recall = '%.3f'%recall\n F1_Score = '%.3f'%F1_Score\n\n precision_log = \"\\nPrecision = \" + str(precision)\n recall_log = \"\\nRecall = \" + str(recall)\n f1_log = \"\\nF1 Score = \" + str(F1_Score)\n message = \"\\nTest Set results on Model as a Dog classifier\"\n\n logging.info(message)\n logging.info(precision_log)\n logging.info(recall_log)\n logging.info(f1_log)", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.5664239", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.5664239", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.5664239", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.5664239", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.5664239", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "7edf223f89dcd8708a437d281ee3d4cb", "score": "0.5659671", "text": "def calculate_intrinsic_pka(self):\n back_bone = 0.0\n for determinant in self.determinants['backbone']:\n value = determinant.value\n back_bone += value\n\n side_chain = 0.0\n for determinant in self.determinants['sidechain']:\n if determinant.label[0:3] not in ['ASP','GLU','LYS','ARG','HIS','CYS','TYR','C- ','N+ ']:\n value = determinant.value\n side_chain += value\n\n self.intrinsic_pKa = self.model_pka + self.Emass + self.Elocl + back_bone + side_chain\n\n return", "title": "" }, { "docid": "b66b5e32d63660fa5e206e08b16c42de", "score": "0.56567746", "text": "def evaluate(self, X_test, y_test):\n y_pred=self.pipeline.predict(X_test)\n return np.sqrt(((y_pred - y_test)**2).mean())", "title": "" }, { "docid": "5a21b300f6a481dac143a6405c4caf47", "score": "0.56545943", "text": "def acc_score(self):\n if 0 == self.total_labels:\n return 0.0\n accuracy = float(self.correct_labels) / 
self.total_labels\n return accuracy", "title": "" } ]
5bcc2d1e5631452b6d04a2d77bd3b135
This tests the functionality of raising the TerminateSimulation exception in handle_result.
[ { "docid": "c5e35f86415dfcf81cf24b4452ab42fc", "score": "0.6437277", "text": "def test_terminate_simulation(self):\r\n class Extended_Problem(Implicit_Problem):\r\n def __init__(self):\r\n pass\r\n def handle_event(self,solver, event_info):\r\n if solver.t > 1.5:\r\n raise TerminateSimulation\r\n res = lambda self,t,y,yd,sw: N.array([y[0]-1.0])\r\n state_events = lambda self,t,y,yd,sw: N.array([t-1.0, t-2.0])\r\n y0 = [1.0]\r\n yd0 = [1.0]\r\n sw0 = [False]\r\n\r\n prob = Extended_Problem()\r\n \r\n sim = IDA(prob)\r\n sim.simulate(2.5)\r\n \r\n nose.tools.assert_almost_equal(sim.t, 2.000000, 4)", "title": "" } ]
[ { "docid": "a5e7d92dcfa627fb50dc50d429c88849", "score": "0.70890814", "text": "def test_handle_stop_and_exit(self) -> None:\n self.aea._skills_exception_policy = ExceptionPolicyEnum.stop_and_exit\n self.handler.handle = self.raise_exception # type: ignore # cause error: Cannot assign to a method\n self.aea_tool.put_inbox(self.aea_tool.dummy_envelope())\n\n with pytest.raises(\n AEAException, match=r\"AEA was terminated cause exception .*\"\n ):\n self.aea.start()\n\n assert not self.aea.is_running", "title": "" }, { "docid": "f9f1ce7f7b082bf65ebaadca364bcbca", "score": "0.69294435", "text": "def test_simulation_termination(self):\n DESTest.sim.reset()\n\n SimulationTermination(DESTest.sim, 0).process()\n self.assertEqual(DESTest.sim.sim_state.stop, True,\n msg=\"Error in SimulationTermination. Stopping simulation doesn't work as expected.\")", "title": "" }, { "docid": "1465777afd120b8c866e99ae5357485d", "score": "0.6790665", "text": "def test_handle_exception_v2():\n with pytest.raises(MyError):\n wait_for(raise_(MyError), handle_exception=False)", "title": "" }, { "docid": "74f3c1d6678a591a977515137806670b", "score": "0.6790592", "text": "def test_act_stop_and_exit(self) -> None:\n self.aea._skills_exception_policy = ExceptionPolicyEnum.stop_and_exit\n self.behaviour.act = self.raise_exception # type: ignore # cause error: Cannot assign to a method\n\n with pytest.raises(\n AEAException, match=r\"AEA was terminated cause exception .*\"\n ):\n self.aea.start()\n\n assert not self.aea.is_running", "title": "" }, { "docid": "762960285f014a4cb882a6c38886538f", "score": "0.66955596", "text": "def test_not_handle_unexpected_exception():\n with pytest.raises(AnotherError):\n wait_for(raise_(AnotherError), handle_exception=MyError, num_sec=0.1)", "title": "" }, { "docid": "4b95a8d7f581ebebda967136cb3dad98", "score": "0.66822463", "text": "def raise_StopSimulation(sim):\n raise StopSimumulation()\n return\n yield", "title": "" }, { "docid": "390dae415d5def05db85e6a1ddb8e8d9", "score": "0.6664345", "text": "def test_stoppedRaises(self):\n self.fail()", "title": "" }, { "docid": "343570460a8338be09ea9679de6513ec", "score": "0.659098", "text": "def test_not_handle_unexpected_exceptions():\n with pytest.raises(AnotherError):\n wait_for(raise_(AnotherError), handle_exception=(ValueError, RuntimeError,), num_sec=0.1)", "title": "" }, { "docid": "5f07505e4925d298e9eb6a9b73d01815", "score": "0.65743524", "text": "def test_handle_exception_v3():\n with pytest.raises(TimedOutError):\n wait_for(raise_(MyError), handle_exception=True, num_sec=0.1)", "title": "" }, { "docid": "3bd04bb6d773cd5c42ac6d181a20fdef", "score": "0.655613", "text": "def test_handle_exception_v1():\n with pytest.raises(MyError):\n wait_for(raise_(MyError))", "title": "" }, { "docid": "e27d45912b1abf0d9a740bfea2256cec", "score": "0.6511515", "text": "def test_reraise_exception():\n with pytest.raises(MyError):\n wait_for(raise_(MyError), handle_exception=True, num_sec=0.1, raise_original=True)", "title": "" }, { "docid": "c61ceb4b3fc9761e538d5d68f81ab201", "score": "0.64436936", "text": "def _raise_and_terminate(what, exception):\n Logger().fatal('Unable to instantiate the {0}.'.format(what))\n Logger().fatal('exception message was:')\n Logger().fatal(str(exception))\n raise exception", "title": "" }, { "docid": "45a6163c367b70a0720f4296c5e94ca0", "score": "0.64372176", "text": "def give_up(exception):\n print(exception)\n exit(1)", "title": "" }, { "docid": "075235dfeb1da146ab0b660a25576fa3", "score": "0.6414346", "text": "def 
test_handle_specific_exception():\n with pytest.raises(TimedOutError):\n wait_for(raise_(MyError), handle_exception=MyError, num_sec=0.1)", "title": "" }, { "docid": "c360f19b5eb29275905be69e51a04711", "score": "0.6362716", "text": "def test_simulation_failed(self):\n mocked_backend = FakeFailureQasmSimulator(time_alive=0)\n qr = QuantumRegister(1)\n cr = ClassicalRegister(1)\n failed_circuit = QuantumCircuit(qr, cr)\n quantum_circuit = transpile(failed_circuit, mocked_backend)\n qobj = assemble(quantum_circuit)\n job = mocked_backend.run(qobj)\n self.assertRaises(AerError, job.result)", "title": "" }, { "docid": "137c43ef4914a7729e534c57a803bebc", "score": "0.63588595", "text": "def test_terminate_simulation(self):\r\n class Extended_Problem(Explicit_Problem):\r\n def __init__(self):\r\n pass\r\n def handle_event(self, solver, event_info):\r\n if solver.t > 1.5:\r\n raise TerminateSimulation\r\n rhs = lambda self,t,y,sw: N.array([1.0])\r\n y0 = [1.0]\r\n sw0 = [False,True]\r\n state_events = lambda self,t,y,sw: N.array([t-1.0, t-2.0])\r\n\r\n exp_mod = Extended_Problem()\r\n simulator = CVode(exp_mod)\r\n simulator(3.)\r\n \r\n nose.tools.assert_almost_equal(simulator.t, 2.000000, 4)", "title": "" }, { "docid": "fb9441740ee973a7032793eae0d47d76", "score": "0.6349003", "text": "def test_handle_specific_exception_from_general_one():\n with pytest.raises(TimedOutError):\n wait_for(raise_(MyError), handle_exception=(Exception,), num_sec=0.1)", "title": "" }, { "docid": "e0363dfedcd4ff8a61e8774fbdd1d4af", "score": "0.6344183", "text": "def test_event_reporter_graceful_exit_unexpected_error(self):\n error = test.TestingException('uh oh')\n self.assertRaises(test.TestingException,\n self._test_event_reporter_graceful_exit, error)", "title": "" }, { "docid": "bc77d8aede32aed4df8770c1de815a10", "score": "0.63286835", "text": "def test_teardown_exception(self):\n self.executed = False\n\n class MyService(service.Service):\n def do_teardown(self):\n raise RuntimeError\n\n def check_error(exc_type, exc_value, exc_tb):\n self.assertIsInstance(exc_value, RuntimeError)\n self.executed = True\n\n my_service = MyService()\n mock_service = mock.Mock()\n\n my_service.add_service(mock_service)\n\n my_service.start()\n\n my_service.on('error', check_error)\n\n # stopping the service will not raise the exception but will dump it\n # to the logger\n my_service.stop()\n\n mock_service.stop.assert_called_once_with()\n self.assertTrue(self.executed)", "title": "" }, { "docid": "c91718298d65433a16775e80c844f45c", "score": "0.6323919", "text": "def test_exception():", "title": "" }, { "docid": "32bc1db14eb184c0db2179f7269f91fe", "score": "0.6298236", "text": "def test_handle_exception_silent_failure():\n _, num_sec = wait_for(raise_(MyError), handle_exception=True, num_sec=0.1, silent_failure=True,)\n assert isinstance(num_sec, float)", "title": "" }, { "docid": "89faa616199a8c5c543c0456f3c0cb29", "score": "0.6177874", "text": "def sigterm_exception(signum, stackframe):\n raise Terminate()", "title": "" }, { "docid": "00cd130d02fdfe44ed61a4846abb5ded", "score": "0.61646813", "text": "def test_handle_exception_in_iterable_containing_not_exception_types_are_interpreted_as_True(\n handle_exception\n):\n with pytest.raises(TimedOutError):\n wait_for(\n raise_(\n MyError, AnotherError, MyError(), AnotherError(), RuntimeError, RuntimeError('Foo')\n ),\n handle_exception=handle_exception,\n num_sec=1,\n delay=0.1\n )", "title": "" }, { "docid": "e700b921cd5ca30b301dd3a13a57881f", "score": "0.6147867", "text": "def 
terminate(self):\n self.raise_exc(SystemExit)", "title": "" }, { "docid": "e700b921cd5ca30b301dd3a13a57881f", "score": "0.6147867", "text": "def terminate(self):\n self.raise_exc(SystemExit)", "title": "" }, { "docid": "e700b921cd5ca30b301dd3a13a57881f", "score": "0.6147867", "text": "def terminate(self):\n self.raise_exc(SystemExit)", "title": "" }, { "docid": "e700b921cd5ca30b301dd3a13a57881f", "score": "0.6147867", "text": "def terminate(self):\n self.raise_exc(SystemExit)", "title": "" }, { "docid": "e700b921cd5ca30b301dd3a13a57881f", "score": "0.6147867", "text": "def terminate(self):\n self.raise_exc(SystemExit)", "title": "" }, { "docid": "3a7c98d892a43af6fc7d9a035b0d0bba", "score": "0.61446005", "text": "def test_ask_exception_handling(r):\n resp = ResponseError()\n resp.message = \"ASK 1337 127.0.0.1:7000\"\n assert r.handle_cluster_command_exception(resp) == {\n \"name\": \"127.0.0.1:7000\",\n \"method\": \"ask\",\n }", "title": "" }, { "docid": "981f5ca4c365a77c3ffd0cd35cac1d72", "score": "0.611749", "text": "def test_destroy_call():\n pytest.raises(SaltCloudSystemExit, gce.destroy, vm_name=VM_NAME, call=\"function\")", "title": "" }, { "docid": "a288d46fca671469b16f239dd62a3e0e", "score": "0.60988957", "text": "def test_cleanup_failure(self):\n self.secondary_setup(manager_start=None,\n manager_end=None)\n with self.assertRaises(RuntimeError):\n self.client.cleanup_storage()", "title": "" }, { "docid": "6b604a98da81bfc6c0b762c23da91322", "score": "0.60755634", "text": "def test_error(self):\n raise Exception", "title": "" }, { "docid": "15b03b2f9b2061c457c04ee55f4c6a09", "score": "0.60745317", "text": "def test_returnQuit(self):\n cmdLine = InteractiveCommandLine()\n quitCmd = CmdQuit(cmdLine)\n try:\n quitCmd.call()\n # quit command did not throw, test failure\n self.assertEqual(True, False)\n except Exception as e:\n self.assertEqual(str(e), 'quit')", "title": "" }, { "docid": "97fe9b3a2bdf5aeb42e896eacdf60d89", "score": "0.6072547", "text": "def test_handle_propagate(self) -> None:\n self.aea._skills_exception_policy = ExceptionPolicyEnum.propagate\n self.handler.handle = self.raise_exception # type: ignore # cause error: Cannot assign to a method\n self.aea_tool.put_inbox(self.aea_tool.dummy_envelope())\n\n with pytest.raises(ExpectedExcepton):\n self.aea.start()\n\n assert not self.aea.is_running", "title": "" }, { "docid": "3ac77355fd053359f450f00a056c8679", "score": "0.6065583", "text": "def test_result_can_fail_after_teardown(self):\n input = self.test_modify_input()\n input.fail_after = State.TEARDOWN\n job = launch(input)\n self.assertEqual(False, job.wait_for_result())", "title": "" }, { "docid": "33fe7b40a97b52b918aac836d4636d45", "score": "0.60540634", "text": "def test_wait_timeout_w_error(mock_agent):\n mock_agent.register_task('test_task', tfunc_raise)\n mock_agent.start('test_task')\n res = yield mock_agent.wait('test_task', timeout=1)\n print('result:', res)\n # Hmm, I thought maybe this would hit the except FirstErorr as e line, but\n # it doesn't.", "title": "" }, { "docid": "a38e062815494d6c2a115ad2eaa324f3", "score": "0.6048849", "text": "def terminate(self):\n raise NotImplementedError()", "title": "" }, { "docid": "2eb45221787d38f2680cce7cf583e5dc", "score": "0.60471094", "text": "def test_fb_exception(self):\n @exception_handler\n def get_val():\n \"\"\"\n Do something...\n \"\"\"\n raise FittingProblemError\n\n self.assertRaises(SystemExit, get_val)", "title": "" }, { "docid": "0bea12a89fa43d8995c2f664dbbc0d6f", "score": "0.60460085", "text": "def 
exception_quit(self, e: Exception, err_message: str = None):\n self.driver.quit()\n\n if len(self.outcomes) > 0:\n self.save_results()\n\n if err_message is None:\n raise e\n else:\n raise Exception(err_message) from e", "title": "" }, { "docid": "2a54c97fcb70b5210c1331bf9ca1d602", "score": "0.6044891", "text": "async def test_raising_error_trait(hass: HomeAssistant) -> None:\n hass.states.async_set(\n \"climate.bla\",\n HVACMode.HEAT,\n {\n ATTR_MIN_TEMP: 15,\n ATTR_MAX_TEMP: 30,\n ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS,\n },\n )\n\n events = async_capture_events(hass, EVENT_COMMAND_RECEIVED)\n await hass.async_block_till_done()\n\n result = await sh.async_handle_message(\n hass,\n BASIC_CONFIG,\n \"test-agent\",\n {\n \"requestId\": REQ_ID,\n \"inputs\": [\n {\n \"intent\": \"action.devices.EXECUTE\",\n \"payload\": {\n \"commands\": [\n {\n \"devices\": [{\"id\": \"climate.bla\"}],\n \"execution\": [\n {\n \"command\": \"action.devices.commands.\"\n \"ThermostatTemperatureSetpoint\",\n \"params\": {\"thermostatTemperatureSetpoint\": 10},\n }\n ],\n }\n ]\n },\n }\n ],\n },\n const.SOURCE_CLOUD,\n )\n\n assert result == {\n \"requestId\": REQ_ID,\n \"payload\": {\n \"commands\": [\n {\n \"ids\": [\"climate.bla\"],\n \"status\": \"ERROR\",\n \"errorCode\": \"valueOutOfRange\",\n }\n ]\n },\n }\n\n assert len(events) == 1\n assert events[0].event_type == EVENT_COMMAND_RECEIVED\n assert events[0].data == {\n \"request_id\": REQ_ID,\n \"entity_id\": [\"climate.bla\"],\n \"execution\": [\n {\n \"command\": \"action.devices.commands.ThermostatTemperatureSetpoint\",\n \"params\": {\"thermostatTemperatureSetpoint\": 10},\n }\n ],\n \"source\": \"cloud\",\n }", "title": "" }, { "docid": "3f02ad5b4cc781089436fe79f6c8c481", "score": "0.60397714", "text": "def __exit__(self, exc_type, exc_val, exc_tb):\n\t\tself.shutdown()\n\t\treturn False # Reraise the exception", "title": "" }, { "docid": "1c140b7c5a6abc596611f97ed6a43c51", "score": "0.60038525", "text": "def test_handle_exception_raises_TimedOutError_from_occured_exception():\n try:\n wait_for(raise_(MyError), handle_exception=True, num_sec=0.1)\n except TimedOutError as timeout_exception:\n assert isinstance(timeout_exception.__cause__, MyError)\n else:\n assert False, \"Wasn't raised\"", "title": "" }, { "docid": "f2645a21524ab0bf7b44de095d231457", "score": "0.5993989", "text": "def crash():\n raise Exception(\"Test crash\")", "title": "" }, { "docid": "ca9c4958adf2460bd04965f6342fb6af", "score": "0.59796345", "text": "def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n # handle exceptions with those variables\n self.stop()", "title": "" }, { "docid": "ce39a1284cec8589279ff99deaecf89f", "score": "0.5971375", "text": "def exit_fail(self, log_handler=None):\n \n if log_handler:\n print('FAILURE DETECTED. 
Log entries:\\n', \n log_handler.handlers[0].printMessages())\n else:\n print 'FAILURE DETECTED.\\n'\n try:\n fpga.stop()\n except: pass\n raise\n sys.exit()", "title": "" }, { "docid": "bd891a720a10fb345ed7dc6c9acdcab0", "score": "0.59673756", "text": "def test_systemExit_is_raised(self):\n test_instance = LoadDataFromPostSQl('NONVALID', \"2021-08-01\")\n with self.assertRaises(SystemExit):\n test_instance.load_data()", "title": "" }, { "docid": "fd0eaefb72a496b3c93c441c3f20e194", "score": "0.59627676", "text": "def test_non_fb_exception(self):\n @exception_handler\n def get_val():\n \"\"\"\n Do something...\n \"\"\"\n raise RuntimeError\n\n self.assertRaises(SystemExit, get_val)", "title": "" }, { "docid": "1312f8f12ebff1696ee61f068ae9644b", "score": "0.59553856", "text": "def test_handle_exceptions_in_empty_iterable_are_interpreted_as_False(handle_exception, _):\n with pytest.raises(MyError):\n wait_for(raise_(MyError), handle_exception=handle_exception, num_sec=1, delay=0.1)", "title": "" }, { "docid": "8fa4b965fc7bcda3e035487f55dee482", "score": "0.59462786", "text": "def run_with_exception(self):\n raise NotImplementedError", "title": "" }, { "docid": "1b782129af243afc90b12683c920dac7", "score": "0.59404516", "text": "def test_handle_specific_exception_in_iterable():\n with pytest.raises(TimedOutError):\n wait_for(raise_(MyError), handle_exception=(MyError,), num_sec=0.1)", "title": "" }, { "docid": "08807ca80a6c8ca7cf1ee9330efa7ef2", "score": "0.5939741", "text": "def __exit__(self, _exc_type, _exc_val, _exc_tb):\n self._executor.shutdown()", "title": "" }, { "docid": "359c119acf5f392b232ea6f643493b76", "score": "0.5922703", "text": "def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):\n self.StopServer()", "title": "" }, { "docid": "fff2590abcdfc3c0a3438ede99594b35", "score": "0.5913824", "text": "def error_test_task():\n raise Exception(\"I'm sorry Dave, I'm afraid I can't do that.\")", "title": "" }, { "docid": "c4d7623622f3c437b16b387ecfae549e", "score": "0.589941", "text": "def test_raise_regular_exception(r):\n e = Exception(\"foobar\")\n with pytest.raises(Exception) as ex:\n r.handle_cluster_command_exception(e)\n assert unicode(ex.value).startswith(\"foobar\")", "title": "" }, { "docid": "e604ff8334dd0f80cce5002251f7b147", "score": "0.5898073", "text": "def test_action_throws_exception():\n errors = ProcessErrors()\n output = SendOptions(['Throws Exception'], keep=True)\n\n start_process(__name__, 'create_exception_test_menu', output, errors)\n\n assert errors.hasError is False, 'The Action exception was incorrectly propagated out of the menu call: {}'.format(errors.error)\n\n assert 'Action is throwing an exception' in output, 'The Action command did not throw an exception'", "title": "" }, { "docid": "9d811eaf686fe1dec4b4b7fdc76ea058", "score": "0.5890969", "text": "def test_is_unexpected_exit(self) -> None:\n self.assertTrue(is_unexpected_exit(ret_code=1))\n self.assertFalse(is_unexpected_exit(ret_code=0))\n self.assertFalse(is_unexpected_exit(ret_code=-2))\n self.assertFalse(is_unexpected_exit(ret_code=-15))", "title": "" }, { "docid": "108b5ffbd5dc30acf7579234bd002b37", "score": "0.5876311", "text": "def terminate(self):", "title": "" }, { "docid": "108b5ffbd5dc30acf7579234bd002b37", "score": "0.5876311", "text": "def terminate(self):", "title": "" }, { "docid": "108b5ffbd5dc30acf7579234bd002b37", "score": "0.5876311", "text": "def terminate(self):", "title": "" }, { "docid": "b8935e1e827cb9f1562a0c8c872f34f5", "score": "0.58755904", "text": "def 
terminate(self):\n ...", "title": "" }, { "docid": "b8935e1e827cb9f1562a0c8c872f34f5", "score": "0.58755904", "text": "def terminate(self):\n ...", "title": "" }, { "docid": "b8935e1e827cb9f1562a0c8c872f34f5", "score": "0.58755904", "text": "def terminate(self):\n ...", "title": "" }, { "docid": "5c409102bab05725fcfca09bc6fba28c", "score": "0.5869322", "text": "def test_throw(self):\n self.instance.throw()\n self.assertNotEqual(self.instance.side, 0)", "title": "" }, { "docid": "9e1606e348183b987608c7c9e7308dc6", "score": "0.5861405", "text": "def test_spawn_exit(self):\n my_service = make_service()\n\n self.executed = False\n\n def trap_error(exc_type, exc_value, exc_traceback):\n raise AssertionError('Whoops the error was trapped')\n\n def raise_error():\n self.executed = True\n raise gevent.GreenletExit\n\n my_service.on('error', trap_error)\n\n my_service.spawn(raise_error)\n\n gevent.sleep(0.0)\n\n self.assertTrue(self.executed)", "title": "" }, { "docid": "738f4faba3ad3a729c078976b7975031", "score": "0.58585787", "text": "def raise_exception(*args, **kwargs) -> None:\n raise ExpectedExcepton(\"we wait it!\")", "title": "" }, { "docid": "18c00af4e8194fd3c436e9beea78d5e8", "score": "0.58539295", "text": "def test_wait_re_raise(self):\n self.assertRaises(TestException, self.manager.submitTask, TestTask())", "title": "" }, { "docid": "0f949f33fb8b52c8484be202b34085f0", "score": "0.5849599", "text": "def __exit__(self, exc_type, exc_val, exc_tb):\n self._stop()\n return False", "title": "" }, { "docid": "269221333b28f9382e5a9c80147aa180", "score": "0.58490443", "text": "def testExceptionHandler(self):\n stage = self.ConstructStage()\n e = ValueError('foo')\n try:\n raise e\n except ValueError:\n ret = stage._HandleStageException(sys.exc_info())\n self.assertTrue(isinstance(ret, tuple))\n self.assertEqual(len(ret), 3)\n self.assertEqual(ret[0], e)", "title": "" }, { "docid": "6d1c7beffce17cd51237b986ec992a65", "score": "0.5843405", "text": "def exit_sample(msg_or_exception):\n if isinstance(msg_or_exception, Exception):\n logger.error(\"Exiting sample due to exception.\")\n traceback.print_exception(msg_or_exception.__class__, msg_or_exception, sys.exc_info()[2])\n else:\n logger.info(\"Exiting: %s\", msg_or_exception)\n\n if not mqtt_connection:\n logger.info(\"Disconnecting...\")\n mqtt_connection.disconnect()\n sys.exit(0)", "title": "" }, { "docid": "5b2ab6430d629df75022533be0b88495", "score": "0.58409774", "text": "def __exit__(self, exc_type, exc_val, exc_tb):\n print('error type: {}'.format(exc_type))\n print('error value: {}'.format(exc_val))\n print('error traceback: {}'.format(exc_tb))\n print('we are leaving \"with\"')", "title": "" }, { "docid": "9f3e4c0b7d75484ca77e7d806db2aaec", "score": "0.58377504", "text": "def test_subthrow():\n ThrowsError = PyTorchHelpers.load_lua_class('test/test_throw.lua', 'ThrowsError')\n throwsError = ThrowsError()\n try:\n throwsError.insub_anteater()\n except Exception as e:\n print('error', e)\n assert 'test_throw.lua:18' in str(e)", "title": "" }, { "docid": "66276f44066684334499e982b7518052", "score": "0.5809587", "text": "def test_spawn_error(self):\n my_service = make_service()\n\n self.executed = False\n\n def trap_error(exc_type, exc_value, exc_traceback):\n self.assertIsInstance(exc_value, RuntimeError)\n self.executed = True\n\n def raise_error():\n raise RuntimeError\n\n my_service.on('error', trap_error)\n\n my_service.spawn(raise_error)\n\n gevent.sleep(0.0)\n\n self.assertTrue(self.executed)", "title": "" }, { "docid": 
"9c931a20dcb35620c19b02e3c56d6ede", "score": "0.5805395", "text": "def test_shutdown_zap_daemon_exception(self, helper_mock):\n helper_mock.side_effect = ZAPError('error')\n result = self.runner.invoke(cli.cli, ['--boring', '--api-key', '', 'shutdown'])\n helper_mock.assert_called_with()\n self.assertEqual(result.exit_code, 1)", "title": "" }, { "docid": "84aa8cc15cba9f5dd1fba2f07c32aaa4", "score": "0.58017087", "text": "def __exit__(self, exc_type, exc_val, exc_tb):\n print('Exit')", "title": "" }, { "docid": "427f9a2d20d7248d9069b5aad30c0d2b", "score": "0.57758284", "text": "async def test_desy_move_verifier_run_exception(config: TestConfig, mocker: MockerFixture) -> None:\n logger_mock = mocker.MagicMock()\n p = DesyMoveVerifier(config, logger_mock)\n p.last_work_end_timestamp = \"\"\n p._do_work = AsyncMock() # type: ignore[method-assign]\n p._do_work.side_effect = [Exception(\"bad thing happen!\")]\n await p.run()\n p._do_work.assert_called()\n assert p.last_work_end_timestamp", "title": "" }, { "docid": "b579d8a30721e1e03c260c6805e834f1", "score": "0.5773396", "text": "def _raise_command_exception(args, returncode, output):\n message = ('Command failed with status {}: {}\\n'\n 'Output:-----------------------------------------\\n{}\\n'\n '------------------------------------------------\\n').format(\n returncode, args, output)\n raise Exception(message)", "title": "" }, { "docid": "9cf762cb8429b422073c1c10c9034066", "score": "0.575852", "text": "def die(string):\r\n\ttraceback.print_exc(sys.exc_info()[2])\r\n\tsys.exit(\"TestManager: \"+string)", "title": "" }, { "docid": "976dec2f227fa3e87a078a622507495c", "score": "0.5752984", "text": "def test_handle_error_ii(self):\n # setup\n oef_dialogue = self.prepare_skill_dialogue(\n dialogues=self.oef_dialogues,\n messages=self.list_of_messages_unregister[:1],\n )\n incoming_message = self.build_incoming_message_for_skill_dialogue(\n dialogue=oef_dialogue,\n performative=OefSearchMessage.Performative.OEF_ERROR,\n oef_error_operation=OefSearchMessage.OefErrorOperation.SEARCH_SERVICES,\n )\n\n # operation\n with patch.object(self.oef_search_handler.context.logger, \"log\") as mock_logger:\n self.oef_search_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(0)\n\n mock_logger.assert_any_call(\n logging.INFO,\n f\"received oef_search error message={incoming_message} in dialogue={oef_dialogue}.\",\n )\n\n assert self.service_registration_behaviour.failed_registration_msg is None", "title": "" }, { "docid": "3b144581f8cfbec5ac80358929484cb3", "score": "0.574976", "text": "def test_act_propagate(self) -> None:\n self.aea._skills_exception_policy = ExceptionPolicyEnum.propagate\n self.behaviour.act = self.raise_exception # type: ignore # cause error: Cannot assign to a method\n\n with pytest.raises(ExpectedExcepton):\n self.aea.start()\n\n assert not self.aea.is_running", "title": "" }, { "docid": "ccfef33ad62d6bf530fda7ba1d5389eb", "score": "0.57484573", "text": "def test_context_exit_no_error(self):\n\n error = Error()\n assert error.__exit__(None, None, None) is None", "title": "" }, { "docid": "62753d6459fa827002d3b229354d9110", "score": "0.5743312", "text": "def test_error_handling_stop_pipeline(sdc_builder, sdc_executor):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n raw_data = dict(a='a', b='b')\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(raw_data)\n 
dev_raw_data_source.stop_after_first_batch = True\n\n field_remover = pipeline_builder.add_stage('Field Remover').set_attributes(\n preconditions=[\"${record:value('/a') != 'a'}\"],\n on_record_error='STOP_PIPELINE',\n action='REMOVE',\n fields=['/a']\n )\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> field_remover >> wiretap.destination\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n\n # Check the pipeline stops as soon as it receives the first record with a RunError\n sdc_executor.dump_log_on_error = False\n with pytest.raises(sdc_api.RunError) as exception_info:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n sdc_executor.dump_log_on_error = True\n\n # Check the error arisen corresponds to \"CONTAINER_0051 - Unsatisfied precondition(s)\"\n assert(\"CONTAINER_0051\" in exception_info.value.message)", "title": "" }, { "docid": "2ec94d7afa8b649ac06849fa17911046", "score": "0.57385755", "text": "def __exit__(self, exc_type, exc_value, traceback):\n self.stop()", "title": "" }, { "docid": "2f2acbcd473b0a10ca39d6b6ca210de5", "score": "0.57369524", "text": "def Exit(excode=0):\n # future mem tracking logic will need to clear engine cache:\n #ClearCacheForAllEngines()\n rcode = None\n if TestEnv.params[\"interactive\"] == 0:\n if (excode):\n rcode = excode\n if rcode is None and TestEnv.results[\"maxds\"] == 0:\n if TestEnv.results[\"numskip\"] == 0:\n rcode = 111\n else:\n rcode = 119\n if rcode is None and TestEnv.results[\"maxds\"] == 1:\n if TestEnv.results[\"numskip\"] == 0:\n rcode = 112\n else:\n rcode = 120\n if rcode is None and TestEnv.results[\"maxds\"] == 2:\n rcode = 113\n if rcode is None:\n rcode = 114\n LogTestExit(rcode)\n # finalize results.\n open(\"returncode.txt\",\"w\").write(\"%d\\n\" % rcode)\n sys.exit(rcode)", "title": "" }, { "docid": "e3c0b131761ed11a965aa725d2400dc5", "score": "0.5735313", "text": "def test_expected_errors(self, args, expected_message, capsys):\n with pytest.raises(SystemExit):\n main(args=args)\n out, err = capsys.readouterr()\n assert expected_message in out", "title": "" }, { "docid": "2cb60def89300274302d05734363c68f", "score": "0.57318425", "text": "def test_dummy_in_dummy(self):\n\n def outer():\n t = DummyProcess(target=start_w3af_core,\n args=(self._exception_handler,))\n t.start()\n t.join()\n\n t = DummyProcess(target=outer)\n t.start()\n t.join()\n\n self.assertEqual(self._exceptions, [])", "title": "" }, { "docid": "d9bbfdef32bee36a084b410a01024b3f", "score": "0.5729676", "text": "def test_create_api_executor_negative(self,\n fake_basic_info, expect_result):\n fake_login_response = fakes.FakeLoginResponse()\n mock_connection = http_client.HTTPSConnection\n mock_connection.return_value.getresponse.side_effect = [\n fake_login_response,\n fake_basic_info,\n fake_login_response]\n self.assertRaises(\n exception.ShareBackendException,\n self._do_setup,\n 'https://1.2.3.4:443',\n '1.2.3.4',\n 'admin',\n 'qnapadmin',\n 'Storage Pool 1')", "title": "" }, { "docid": "1e520fac3a3b095ef8a1679875a5a495", "score": "0.5727923", "text": "def run(self, result=None):\n if result.errors:\n raise Exception(result.errors[0][1])\n elif result.failures:\n raise Exception(result.failures[0][1])\n else:\n super().run(result)", "title": "" }, { "docid": "a34bc36f355841ddb97b5f381d53a622", "score": "0.57270557", "text": "def __exit__(self, exc_type, exc_val, exc_tb):\r\n print(\"Exit !\")", "title": "" }, { "docid": "80b2ab539f65883d01c175aca7f11123", "score": "0.57268995", "text": "def 
abort_test( _ ):\n raise KeyboardInterrupt('abort_my_test')", "title": "" }, { "docid": "546d83c439ab6f665f0f17897da40ede", "score": "0.5722085", "text": "def test_trigger_runner_exception_stops_triggerer(session):\n\n class MockTriggerException(Exception):\n pass\n\n class TriggerRunner_(TriggerRunner):\n async def create_triggers(self):\n raise MockTriggerException(\"Trigger creation failed\")\n\n # Use a trigger that will immediately succeed\n trigger = SuccessTrigger()\n create_trigger_in_db(session, trigger)\n\n # Make a TriggererJobRunner and have it retrieve DB tasks\n job = Job()\n job_runner = TriggererJobRunner(job)\n job_runner.trigger_runner = TriggerRunner_()\n thread = Thread(target=job_runner._execute)\n thread.start()\n\n # Wait 4 seconds for the triggerer to stop\n try:\n for _ in range(40):\n time.sleep(0.1)\n if not thread.is_alive():\n break\n else:\n pytest.fail(\"TriggererJobRunner did not stop after exception in TriggerRunner\")\n\n if not job_runner.trigger_runner.stop:\n pytest.fail(\"TriggerRunner not marked as stopped after exception in TriggerRunner\")\n\n finally:\n job_runner.trigger_runner.stop = True\n # with suppress(MockTriggerException):\n job_runner.trigger_runner.join()\n thread.join()", "title": "" }, { "docid": "e20e079a5ffdc9b447e5621837b43cd2", "score": "0.5720799", "text": "def __exit__(self, _exc_type, _exc_val, _exc_tb):\n raise NotImplementedError", "title": "" }, { "docid": "3299b600bce7589359112926aa1d91f9", "score": "0.5718674", "text": "async def __aexit__(self, exc_type, exc_value, traceback):\n return None", "title": "" }, { "docid": "4f94c6e756bf070c19318abb0607098e", "score": "0.5716095", "text": "def test_error_execution(self):\n # Create the utility and register a command\n utility = ConsoleProgram()\n GreetingCommand.handle = mock.MagicMock(side_effect=ValueError(\"bad\"))\n utility.register(GreetingCommand)\n\n # Create fake arguments for the parser\n args = utility.parser.parse_args([\"greeting\", \"bob\"])\n\n # Mock the exit and parser\n utility.exit = mock.MagicMock()\n utility.parser.parse_args = mock.MagicMock(return_value=args)\n\n # Execute the utility\n utility.execute()\n\n # Check the execution status\n utility.exit.assert_called_once_with(1, '\\x1b[31mbad\\x1b[39m')\n self.assertEqual(\"\", self.read(\"stdout\"))\n self.assertEqual(\"\", self.read(\"stderr\"))", "title": "" }, { "docid": "83df3a7944490c87dabe63248edf9273", "score": "0.5715751", "text": "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.stop()", "title": "" }, { "docid": "d96898aea33a170544fa4e872efae98b", "score": "0.57003057", "text": "def test_exit(self):\n \n with shell() as sh:\n pass\n self.assertEqual(sh.shell.returncode, -15)", "title": "" }, { "docid": "f3b5dadc2737bba4b9259bd05ba460d9", "score": "0.5694409", "text": "def abort(self, outcome):\n raise NotImplementedError()", "title": "" }, { "docid": "f3b5dadc2737bba4b9259bd05ba460d9", "score": "0.5694409", "text": "def abort(self, outcome):\n raise NotImplementedError()", "title": "" }, { "docid": "f55571c9b61cadbcad8fc8becb97dc20", "score": "0.5693719", "text": "def test_abort(mock_agent):\n mock_agent.register_task('test_task', tfunc)\n res = mock_agent.abort('test_task')\n print('result:', res)\n assert res[0] == ocs.ERROR\n assert isinstance(res[1], str)\n assert res[2] == {}", "title": "" }, { "docid": "175dea9d6c28cbd38fc53bb69f388e0a", "score": "0.56926095", "text": "def test_terminate_mpibackend(self, run_hnn_core_fixture):\n hnn_core_root = op.dirname(hnn_core.__file__)\n 
params_fname = op.join(hnn_core_root, 'param', 'default.json')\n params = read_params(params_fname)\n params.update({'N_pyr_x': 3,\n 'N_pyr_y': 3,\n 'tstop': 40,\n 't_evprox_1': 5,\n 't_evdist_1': 10,\n 't_evprox_2': 20,\n 'N_trials': 2})\n net = Network(params, add_drives_from_params=True)\n\n with MPIBackend() as backend:\n event = Event()\n # start background thread that will kill all MPIBackends\n # until event.set()\n kill_t = Thread(target=_terminate_mpibackend,\n args=(event, backend))\n # make thread a daemon in case we throw an exception\n # and don't run event.set() so that py.test will\n # not hang before exiting\n kill_t.daemon = True\n kill_t.start()\n\n with pytest.warns(UserWarning) as record:\n with pytest.raises(\n RuntimeError,\n match=\"MPI simulation failed. Return code: 1\"):\n simulate_dipole(net)\n\n event.set()\n expected_string = \"Child process failed unexpectedly\"\n assert expected_string in record[0].message.args[0]", "title": "" }, { "docid": "984b56fab3ae73b86274c7e184af35a1", "score": "0.56886417", "text": "async def test_sign_out_unknown_error(\n hass: HomeAssistant, config_entry, controller, caplog: pytest.LogCaptureFixture\n) -> None:\n await setup_component(hass, config_entry)\n controller.sign_out.side_effect = HeosError()\n\n await hass.services.async_call(DOMAIN, SERVICE_SIGN_OUT, {}, blocking=True)\n\n assert controller.sign_out.call_count == 1\n assert \"Unable to sign out\" in caplog.text", "title": "" } ]
33d5f69c80edac04532e759092ff0904
Get all axes offset voltages `offsets` is a list of offset voltags, whose length is equal to the number of active (connected) axes.
[ { "docid": "a9105f97237b4bd2c4c935723e01c677", "score": "0.6659708", "text": "def set_all_offsets(self, offsets):\n self._set_all_axes_data(self.set_offset,offsets)\n return self.get_all_offsets()", "title": "" } ]
[ { "docid": "1adc973b87fe44a565cce97d6ac19eb5", "score": "0.66350055", "text": "def get_all_offsets(self):\n return self._get_all_axes_data(self.get_offset)", "title": "" }, { "docid": "1adc973b87fe44a565cce97d6ac19eb5", "score": "0.66350055", "text": "def get_all_offsets(self):\n return self._get_all_axes_data(self.get_offset)", "title": "" }, { "docid": "d8257c2a28f85331a1ea7eaa00b5ad2c", "score": "0.605971", "text": "def equatorial_offsets_to_detector_coordinates(self, offsets):\n coordinates = offsets.copy()\n coordinates.invert_x()\n rotation = self.sky_angle\n if rotation != 0:\n coordinates.rotate(-rotation)\n return coordinates", "title": "" }, { "docid": "e4af77d406c2fd03fe5734af423125fd", "score": "0.55787504", "text": "def get_offsets(self, theta, phi, offsets=None):\n if offsets is None:\n offsets = Coordinate2D(unit='degree')\n\n phi, theta = self.phi_theta_to_radians(phi, theta)\n r = self.y0 - theta\n a = phi * np.cos(theta) / r\n a = a.decompose().value * units.Unit('radian')\n\n x = r * np.sin(a)\n y = self.y0 - r * np.cos(a)\n offsets.set([x, y])\n return offsets", "title": "" }, { "docid": "b69c445cfc89e8fcfe13261f8672f260", "score": "0.55150944", "text": "def xoffsets(self):\n \n return [lay.xoffset for lay in self.layers]", "title": "" }, { "docid": "e3f40936e7396a438988747f60252819", "score": "0.54636747", "text": "def _create_transformation_vectors_for_pixel_offsets(\n self,\n ) -> Optional[List[QVector3D]]:\n try:\n units = self.get_field_attribute(X_PIXEL_OFFSET, CommonAttrs.UNITS)\n unit_conversion_factor = calculate_unit_conversion_factor(units, METRES)\n x_offsets = (\n np.array(self.get_field_value(X_PIXEL_OFFSET)) * unit_conversion_factor\n )\n y_offsets = (\n np.array(self.get_field_value(Y_PIXEL_OFFSET)) * unit_conversion_factor\n )\n x_offsets = x_offsets.tolist()\n y_offsets = y_offsets.tolist()\n except AttributeError:\n logging.info(\n \"In pixel_shape_component expected to find x_pixel_offset and y_pixel_offset datasets\"\n )\n return None\n try:\n z_offsets = self.get_field_value(Z_PIXEL_OFFSET)\n except AttributeError:\n z_offsets = np.zeros_like(x_offsets)\n if not isinstance(x_offsets, list):\n x_offsets = x_offsets.flatten()\n else:\n x_offsets = self.__flatten_list(x_offsets)\n if not isinstance(y_offsets, list):\n y_offsets = y_offsets.flatten()\n else:\n y_offsets = self.__flatten_list(y_offsets)\n if not isinstance(z_offsets, list):\n z_offsets = z_offsets.flatten()\n else:\n z_offsets = self.__flatten_list(z_offsets)\n # offsets datasets can be 2D to match dimensionality of detector, so flatten to 1D\n return [QVector3D(x, y, z) for x, y, z in zip(x_offsets, y_offsets, z_offsets)]", "title": "" }, { "docid": "994bbc52c79ef48ee004b11e305f5595", "score": "0.5433342", "text": "def print_offsets(self):\n\n offs = set(zip(self.xoffsets, self.yoffsets)) \n if len(offs) == 1:\n print((\"print_offsets: In total {} layer{} \"\n \"with\").format(\n self.numlay, \"s\" if self.numlay != 1 else \"\") + \\\n \" {}offsets: (xoffset, yoffset) = {} {}\".format(\n \t\t\"identical \" if self.numlay > 1 else \"\",\n \t\toffs.pop(), UNITS[ELEV]))\n else:\n print(\"print_offsets: In total {} layers{}\".format(\n self.numlay, \".\" if self.numlay == 0 else \\\n \" with offsets (xoffset, yoffset):\"))\n for i, lay in enumerate(self.layers):\n print(\"\\tLayer no. 
{:<3}: {}, {} {}\".format(\n i, lay.xoffset, lay.yoffset, UNITS[ELEV]))", "title": "" }, { "docid": "4f6f0292f400f6b9554acf4c38054b87", "score": "0.52846414", "text": "def _calculate_cumulative_offsets(offsets):\n\n assert(len(offsets) >0), \"offsets can't be empty\"\n cumulative_offset = 0\n cumulative_offsets = []\n for column in range(0, len(offsets)):\n offset = offsets[column]\n assert(isinstance(offset, int)), \"offsets must be integers\"\n assert(offset >0), \"offsets must be postive\"\n cumulative_offset += offset\n cumulative_offsets.append(cumulative_offset)\n return cumulative_offsets", "title": "" }, { "docid": "1f46b28673e6136a4773811279cce392", "score": "0.52371657", "text": "def fetch_offsets(zk, consumer_group, topics):\n source_offsets = defaultdict(dict)\n for topic, partitions in topics.iteritems():\n for partition in partitions:\n offset, _ = zk.get(\n \"/consumers/{groupid}/offsets/{topic}/{partition}\".format(\n groupid=consumer_group,\n topic=topic,\n partition=partition,\n )\n )\n source_offsets[topic][partition] = offset\n return source_offsets", "title": "" }, { "docid": "d87c74837bbe81d423b358119948574d", "score": "0.52254134", "text": "def offset(self):\n return [obs.offset for obs in self.observations]", "title": "" }, { "docid": "bd6220bbbf6799b69feb8a8ea58cb620", "score": "0.5186373", "text": "def get_all_voltages(self):\n return self._get_all_axes_data(self.get_voltage)", "title": "" }, { "docid": "bd6220bbbf6799b69feb8a8ea58cb620", "score": "0.5186373", "text": "def get_all_voltages(self):\n return self._get_all_axes_data(self.get_voltage)", "title": "" }, { "docid": "c145f86643e8bf609a6a9cf91aa97730", "score": "0.51811343", "text": "def get_ee_points(offsets, ee_pos, ee_rot):\n return ee_rot.dot(offsets.T) + ee_pos.T", "title": "" }, { "docid": "a051b9d8238f9824eb3f6f467040d1ee", "score": "0.5148968", "text": "def get_axis_vector(axis_name, offset=1):\n\n if axis_name in ['X', 'x']:\n return offset, 0, 0\n elif axis_name in ['Y', 'y']:\n return 0, offset, 0\n elif axis_name in ['Z', 'z']:\n return 0, 0, 1", "title": "" }, { "docid": "771cb509327bdd619bb36241092494d2", "score": "0.5127983", "text": "def tz_offsets(self):\n self.tz_resp = self._get_timezones()\n return [(self.tz['id']['value'], self.tz['name'], self.tz['offset']) \n for self.tz in self.tz_resp]", "title": "" }, { "docid": "f9d2c1054a2cf3007eb114c2d9a9ad6e", "score": "0.5063232", "text": "def _uhrig_single_axis_offsets(duration: float, offset_count: int) -> np.ndarray:\n\n # prepare the offsets for delta comb\n constant = 1.0 / (2 * offset_count + 2)\n deltas = np.array(\n [(np.sin(np.pi * k * constant)) ** 2 for k in range(1, offset_count + 1)]\n )\n offsets = duration * deltas\n\n return offsets", "title": "" }, { "docid": "351ba34731718cab6bbb40125a41fbf3", "score": "0.5048727", "text": "def detector_coordinates_to_equatorial_offsets(self, coordinates):\n rotation = self.sky_angle\n offsets = coordinates.copy()\n if rotation != 0:\n offsets.rotate(rotation)\n offsets.invert_x()\n return offsets", "title": "" }, { "docid": "b31e7e47b21b4814dcaed58a68b7c72e", "score": "0.49876514", "text": "def yoffsets(self):\n\n return [lay.yoffset for lay in self.layers]", "title": "" }, { "docid": "ea8d57ed52b2ebbe11b112143f968a80", "score": "0.4864682", "text": "def get_latest_offsets(brokers, topic, group=None, scanner_cls=KafkaScannerDirect, **kwargs):\n scanner = scanner_cls(brokers, topic, group=group, keep_offsets=False, ssl_configs=kwargs)\n return scanner.latest_offsets", "title": "" }, { 
"docid": "9d44676e479462515f55afc9337946c4", "score": "0.48626024", "text": "def calibrateOffsets(self) -> str:\n return self.executeRemoteCommand(\"CALOFFSETS\")", "title": "" }, { "docid": "58e44630a7f27910baaf62c9868f42e6", "score": "0.4850631", "text": "def read_offset_and_trigger_index(self):\n rf = open(self.resource_dir + \"offset_id.txt\", 'r', encoding='utf-8')\n while True:\n line = rf.readline()\n if line == \"\":\n break\n line = line.strip(\"\\n\").split(\"#\")\n\n for i in range(5):\n line[i] = line[i].split()\n\n self.offsets.append(line[0])\n\n self.trigger_offsets_ids.append([line[1], line[2]])\n # for offset, tri_id in zip(line[1], line[2]):\n # self.trigger_offsets_ids.append([offset, tri_id])\n\n self.entity_offsets_ids.append([line[3], line[4]])\n # for offset, entity_id in zip(line[3], line[4]):\n # self.entity_offsets_ids.append([offset, entity_id])\n\n rf.close()", "title": "" }, { "docid": "f7dbb6c58e7aaadd1cf146926e812ffa", "score": "0.4773164", "text": "def _offset_edge_map(shape, offsets):\n indices = np.indices(shape) # indices.shape = (n,)+shape\n\n #get the distance from each index to the upper or lower edge in each dim\n pos_edges = (shape - indices.T).T\n neg_edges = -1 - indices\n # now set the distances to zero if none of the given offsets could reach\n offsets = np.asarray(offsets)\n maxes = offsets.max(axis=0)\n mins = offsets.min(axis=0)\n for pos, neg, mx, mn in zip(pos_edges, neg_edges, maxes, mins):\n pos[pos > mx] = 0\n neg[neg < mn] = 0\n return pos_edges.astype(EDGE_D), neg_edges.astype(EDGE_D)", "title": "" }, { "docid": "1b53a790b4d04990d3da1b0ffcfe2988", "score": "0.47348234", "text": "def update_available_axes(self):\n axes=[]\n for ax in range(1,8):\n try:\n self.query(\"getm {}\".format(ax))\n axes.append(ax)\n except AttocubeError:\n pass\n self.axes=list(axes)\n return axes", "title": "" }, { "docid": "1493a3ec63d69eb05e97b9bf4370a4b7", "score": "0.46762797", "text": "def compute_offsets(coords, width, **kwargs):\n line = LineString(coords)\n dilated = line.buffer(width, **kwargs)\n return np.array(list(dilated.exterior.coords))", "title": "" }, { "docid": "60ea0cafa10ea4df382e1256d375b17d", "score": "0.4672736", "text": "def suboffsets(self):\n return self._to_ssize_tuple(self._view.suboffsets)", "title": "" }, { "docid": "5121c264c54db873e2ecf7b82b0d5049", "score": "0.46722507", "text": "def calculate_group_sizes_array(offsets: np.array, total_size: int) -> np.array:\n\n # Does not include the LAST one\n group_sizes = [x - y for x, y in zip(offsets[1:], offsets[:-1])]\n group_sizes += [total_size - offsets[-1]]\n group_sizes = np.array(group_sizes, dtype=int)\n\n return group_sizes", "title": "" }, { "docid": "23efeb0cbddcc08536c9a5e9d01182d4", "score": "0.46619225", "text": "def offsets(self):\n return self.tractFile.streamlines._offsets", "title": "" }, { "docid": "b2e6d38841f54add285db75236e44e83", "score": "0.46539932", "text": "def get_indices(offset: int, dim: int, direction: int, height: int) -> np.ndarray:\n y_indices = []\n x_indices = []\n\n for i in range(0, dim):\n if direction == SE.direction:\n if offset < 0:\n y_indices.append(-offset + i)\n x_indices.append(0 + i)\n else:\n y_indices.append(0 + i)\n x_indices.append(offset + i)\n\n if direction == SW.direction:\n if offset < 0:\n y_indices.append(height + offset - i)\n x_indices.append(0 + i)\n else:\n y_indices.append(height - i)\n x_indices.append(offset + i)\n\n return np.array(y_indices), np.array(x_indices)", "title": "" }, { "docid": 
"41b9c491bf807a3b66c0b48c6cd74d5e", "score": "0.46498084", "text": "def offset(pts, off, axis):\n retval = list()\n for pt in pts:\n if axis == \"x\":\n inverse1, inverse2, inverse3 = pt[0] + off, pt[1], pt[2]\n if axis == \"y\":\n inverse1, inverse2, inverse3 = pt[0], pt[1] + off, pt[2]\n if axis == \"z\":\n inverse1, inverse2, inverse3 = pt[0], pt[1], pt[2] + off\n inverse = (inverse1, inverse2, inverse3)\n retval.append(inverse)\n return retval", "title": "" }, { "docid": "9f86473c95bb6b35e53c177e1d27366c", "score": "0.46453118", "text": "def gen_range(self, offsets):\n return set( (self+offset) for offset in offsets )", "title": "" }, { "docid": "ced9aef84498fe06824ac022adabe51a", "score": "0.4630076", "text": "def get_voltage_outputs(self, names=None):\n if names is None:\n names=self.ao_names\n else:\n names=funcargparse.as_sequence(names,allowed_type=\"array\")\n return [self.ao_values[n] for n in names]", "title": "" }, { "docid": "2b9f90d6a65e3f378cca321dd08a8a9e", "score": "0.46204805", "text": "def get_heatmap_vectors_with_offset(xs, ys, x_axis_sp, y_axis_sp):\n if x_axis_sp.__class__.__name__ == 'SemanticPointer':\n dim = len(x_axis_sp.v)\n else:\n dim = len(x_axis_sp)\n x_axis_sp = spa.SemanticPointer(data=x_axis_sp)\n y_axis_sp = spa.SemanticPointer(data=y_axis_sp)\n\n vectors = np.zeros((len(xs), len(ys), dim))\n\n for i, x in enumerate(xs):\n for j, y in enumerate(ys):\n p = encode_point(\n x=x, y=y, x_axis_sp=x_axis_sp, y_axis_sp=y_axis_sp,\n )\n vectors[i, j, :] = p.v - offset\n\n return vectors", "title": "" }, { "docid": "78e510cca90c3a4108490130d226bd28", "score": "0.45959473", "text": "def set_offsets(self, azimuth_offset, elevation_offset):\n self._send({\n 'command': 'set_azimuth_offset',\n 'position': azimuth_offset,\n })\n self._send({\n 'command': 'set_elevation_offset',\n 'position': elevation_offset,\n })", "title": "" }, { "docid": "0ad069a547bebdd8b1ba73eaead24a9b", "score": "0.45914385", "text": "def _GetAxes(self):\r\n for code in self._POSITION_CODES:\r\n for axis in self._axes.get(code, []):\r\n yield (code, axis)", "title": "" }, { "docid": "03b6eaa966b7ba98f9f0ab686510b94d", "score": "0.45550203", "text": "def plot_npointoffsets(files):\n \n azs = []\n alts = []\n dazs = []\n dalts = []\n\n for f in files:\n npoints = f.npoints\n for t,az,alt,daz,dalt in npoints:\n azs.append(az)\n alts.append(alt)\n dazs.append(daz)\n dalts.append(dalt)\n\n plt.subplot(221)\n ax1 = plt.plot(azs,dazs,'+')\n plt.xlabel('azimuthal [deg]')\n plt.ylabel('azimuth offset')\n\n plt.subplot(222)\n ax3 = plt.plot(alts,dazs,'+')\n plt.xlabel('altitude [deg]')\n plt.ylabel('azimuthal offset')\n\n plt.subplot(223)\n ax2 = plt.plot(azs,dalts,'+')\n plt.xlabel('azimuth [deg]')\n plt.ylabel('altitude offset')\n\n plt.subplot(224)\n ax4 = plt.plot(alts,dalts,'+')\n plt.xlabel('altitude [deg]')\n plt.ylabel('azimuthal offset')\n \n plt.show()", "title": "" }, { "docid": "3e33217c177947dc814a781ed4db6e48", "score": "0.45420367", "text": "def get_offset(self, axis):\n reply=self.query(\"geta {}\".format(axis))\n return self._parse_reply(reply,\"voltage\",\"V\")", "title": "" }, { "docid": "d6b342e891e463f506c715cba26ca7a5", "score": "0.454138", "text": "def get_rg_offset_from_texts(self, texts: List[str], offsets: List[List[Offset]]) -> List[List[Offset]]:\n return [current_offsets + self.get_rg_offset_from_text(text) for text, current_offsets in zip(texts, offsets)]", "title": "" }, { "docid": "e7f4453c64728ddc340a94e144198994", "score": "0.45337322", "text": "def 
get_all_positions(self):\n return self._get_all_axes_data(self.get_position)", "title": "" }, { "docid": "bae25fe570de80929e5c509cc1bcb42a", "score": "0.45147726", "text": "def normalized_locations_to_indices(offsets, height, width):\n offsets = tf.cast(offsets, dtype=tf.float32)\n\n # Compute the coordinates of the top left of each glimpse.\n indices_height = tf.cast(\n tf.round((height-1.) * (offsets[:, 0] + 1.) / 2.), tf.int32)\n indices_width = tf.cast(\n tf.round((width-1.) * (offsets[:, 1] + 1.) / 2.), tf.int32)\n\n # Clip to the correct size.\n indices_height = tf.clip_by_value(indices_height, 0, height-1)\n indices_width = tf.clip_by_value(indices_width, 0, width-1)\n return indices_height, indices_width", "title": "" }, { "docid": "427360390633c5b3993e9e9c87944de3", "score": "0.45007792", "text": "def offsets_to_mic_distance( self, atoms, ref_atom, indices, offsets ):\n mic_distance = np.zeros(offsets.shape)\n cell = atoms.get_cell()\n icell = np.linalg.pinv(cell)\n pos = atoms.get_positions()\n periodic_shift = offsets.dot(cell)\n r1 = pos[ref_atom,:]\n for i in range(offsets.shape[0]):\n indx = indices[i]\n r2 = pos[indx,:]+periodic_shift[i,:]\n mic_distance[i,:] = r2-r1\n return mic_distance", "title": "" }, { "docid": "770ec93b0ab2bb0fb5625a27eceb0887", "score": "0.44573995", "text": "def create_offsets(zk, consumer_group, offsets):\n # Create new offsets\n for topic, partition_offsets in offsets.iteritems():\n for partition, offset in partition_offsets.iteritems():\n new_path = \"/consumers/{groupid}/offsets/{topic}/{partition}\".format(\n groupid=consumer_group,\n topic=topic,\n partition=partition,\n )\n try:\n zk.create(new_path, value=bytes(offset), makepath=True)\n except NodeExistsError:\n print(\n \"Error: Path {path} already exists. 
Please re-run the \"\n \"command.\".format(path=new_path),\n file=sys.stderr,\n )\n raise", "title": "" }, { "docid": "38c1695bb05eb215adaf3108cdef97be", "score": "0.44203243", "text": "def relist_streamlines(points, offsets):\n\n streamlines = []\n\n streamlines.append(points[0: offsets[0]])\n\n for i in range(len(offsets) - 1):\n streamlines.append(points[offsets[i]: offsets[i + 1]])\n\n return streamlines", "title": "" }, { "docid": "806f95e9aca99d2e4708f77ef57887e0", "score": "0.44198242", "text": "def get_rand_offsets(num):\n offsets =[]\n while len(offsets) < num:\n x_y = np.random.uniform(-.05, 1.05, size=(2,))\n if not ( 0.1 < x_y[0] < 0.9 and 0.1 < x_y[1] < 0.9 ):\n offsets.append(x_y)\n return offsets", "title": "" }, { "docid": "19b390d4d80bee3ace3419e5724b5e50", "score": "0.44111446", "text": "def get_offsets(vocab_size: int, n_shards: int) -> np.ndarray:\n vocab_shard_size = Embedding.get_vocab_shard_size(vocab_size, n_shards)\n return np.arange(n_shards * vocab_shard_size, step=vocab_shard_size)", "title": "" }, { "docid": "613376cd8420f5a02f8e08d6dd339af5", "score": "0.4408459", "text": "def offset_beam_v(ele):\n # Let v[0] be the original ele, so the indexing looks the same.\n dummy = 0.0\n v = [ele, dummy, ele['s']] + [ele[x] for x in ['x_offset', 'px_offset', 'y_offset','py_offset','z_offset','pz_offset' ]]\n \n return v", "title": "" }, { "docid": "ea3f39fd1f822b3f26abdb7cd9c6fb44", "score": "0.43916377", "text": "def get_slices_indexes(timestamps, delta):\n N = timestamps.size\n slices = []\n cur_start = 0\n for i in range(1, N):\n cur_delta = timestamps[i] - timestamps[cur_start]\n if cur_delta > delta:\n S = range(cur_start, i) # timestamps[cur_start:i - 1]\n # print('slice: %d elements' % len(S))\n slices.append(S)\n cur_start = i\n \n S = range(cur_start, N)\n slices.append(S)\n \n # check that we partitioned the array correctly\n sequence = []\n for s in slices:\n sequence.extend(s)\n assert sequence == range(timestamps.size), sequence\n return slices", "title": "" }, { "docid": "c02e40029690ca3c76dd52969f07efe8", "score": "0.43886283", "text": "def make_events_slices(events, delta):\n timestamps = events['timestamp']\n slices = get_slices_indexes(timestamps, delta)\n xs = []\n for indices in slices:\n x = events[indices]\n xs.append(x)\n return xs", "title": "" }, { "docid": "f47d357df9d0b53b95d0123a25fdfc41", "score": "0.43674952", "text": "def events_between(self, starting_measure, starting_offset, ending_measure, ending_offset):\n output_events = []\n for i in range(starting_measure - 1, ending_measure - 1 + 1):\n for event in self.event_groups[i].events:\n if i == starting_measure - 1:\n if i == 0 and event.offset >= starting_offset:\n output_events.append(event)\n elif i != 0 and event.offset > starting_offset:\n output_events.append(event)\n elif i == ending_measure - 1:\n if event.offset < ending_offset and ending_offset != 0:\n output_events.append(event)\n else:\n output_events.append(event)\n return output_events", "title": "" }, { "docid": "dd9b58f4cecb3a3450b218f2187624c8", "score": "0.4353222", "text": "def get_measurements_from_kal_scan(kal_out):\n result = []\n for line in kal_out.splitlines():\n line = line.decode(\"utf-8\")\n if \"offset \" in line:\n p_line = line.split(' ')\n result.append(p_line[-1])\n return result", "title": "" }, { "docid": "52a991af7e911d73231cafce0663e908", "score": "0.43326357", "text": "def save_offsets(summary, offsets):\n logger.debug(\"Saving detector timing offsets for %s\" % summary)\n off = {'offset_%d' % i: 
round_in_base(o, 0.25) if not np.isnan(o) else None\n for i, o in enumerate(offsets, 1)}\n DetectorTimingOffset.objects.update_or_create(source=summary, defaults=off)\n logger.debug(\"Saved succesfully\")", "title": "" }, { "docid": "5ce935b2ce59970cbfa79385d2040f71", "score": "0.43281952", "text": "def decode_address_offsets(pd0_bytes, num_data_types, offset=6):\r\n\r\n address_data = []\r\n\r\n # Loop through each data type\r\n for bytes_start in range(offset, offset + (num_data_types * 2), 2):\r\n data = struct.unpack_from('<H', pd0_bytes[bytes_start: bytes_start + 2])[0]\r\n address_data.append(data)\r\n\r\n return address_data", "title": "" }, { "docid": "347053d4e7eb9242a64a987183c6947e", "score": "0.4325169", "text": "def get_locations_per_plane(self):\n shape = self.offsets.shape\n offsets = self.offsets.transpose().flatten()\n v = offsets.copy()\n v[0:-1] = offsets[1::]\n v[-1] = self.locations.shape[1]\n sizes = v - offsets\n return sizes.reshape(shape)", "title": "" }, { "docid": "94ff172bb88c1bc8ea100cd30443bd04", "score": "0.43073097", "text": "def volume_corners(vol_origin, vol_extent):\n corners = []\n for zi in (0,1):\n for yi in (0,1):\n for xi in (0,1):\n corners.append(vol_origin + vol_extent*np.array((xi,yi,zi)))\n return corners", "title": "" }, { "docid": "d28f40d94aaae6897f5a05980a16628c", "score": "0.43013614", "text": "def generate_vectors(axis_ids):\n axis_df = AxisGenerator.generate_star_axis(axis_ids)\n vectors_df = DFMatrixUtils.get_vectors(axis_df)\n return vectors_df", "title": "" }, { "docid": "abc64d9ef9513b8e1fbf3391e8971f55", "score": "0.42999518", "text": "def get_energies(self):\n calib_factor, offset = self.calibration\n chans = self.num_chans\n gain = self.gain\n return [ch/(calib_factor*gain*chans) + offset for ch in range(chans)]", "title": "" }, { "docid": "8226cd9bdad19f7d13d00d3233f908fa", "score": "0.42967469", "text": "def get_sentinel_extents(geolocation, offset=0.0):\n lat_min = np.min(geolocation['Latitudes']) - offset\n lon_min = np.min(geolocation['Longitudes']) - offset\n lat_max = np.max(geolocation['Latitudes']) + offset\n lon_max = np.max(geolocation['Longitudes']) + offset\n\n return lat_min, lat_max, lon_min, lon_max", "title": "" }, { "docid": "7a3d48a452d2d7da63e5b8b0fd91f37d", "score": "0.42956427", "text": "def _get_offsets(self, starting_sample, ending_sample, num_channels):\n offsets = []\n for zero_index in self.audio.zero_indexes:\n index = bisect_left(zero_index, starting_sample) - 1\n if index < 0:\n starting_offset = 0\n else:\n starting_crossing = zero_index[index]\n starting_offset = starting_crossing - starting_sample\n\n index = bisect_left(zero_index, ending_sample)\n if index >= len(zero_index):\n ending_offset = 0\n else:\n zci = min(bisect_right(zero_index, ending_sample), len(zero_index) - 1)\n ending_crossing = zero_index[zci]\n ending_offset = ending_crossing - ending_sample\n\n offsets.append((starting_offset, ending_offset))\n\n if num_channels == 1:\n results = (offsets[0], offsets[0])\n elif num_channels == 2:\n results = (offsets[0], offsets[1])\n\n return results", "title": "" }, { "docid": "10d525c8032827d50a24b46e067ddc75", "score": "0.42953596", "text": "def _all_axes(self):\n return self._custom[\"_all_axes\"]", "title": "" }, { "docid": "b8db273153871bf3bce2b34bd797f5ab", "score": "0.42888013", "text": "def generate_offsets(\n batch_size: int,\n max_seq_len: int,\n load_factor: float,\n offsets_dtype: str,\n spread_radius: float = 0.1,\n) -> torch.Tensor:\n assert 0 <= load_factor <= 1\n assert 0 <= 
spread_radius <= 1\n\n if load_factor < 1:\n spread = int(max_seq_len * spread_radius)\n mean = int(max_seq_len * load_factor)\n lengths = [\n mean + random.randint(-spread, spread + 1) for _ in range(batch_size)\n ]\n lengths = [max(min(L, max_seq_len), 0) for L in lengths]\n else:\n lengths = [max_seq_len] * batch_size\n\n offsets = [0]\n for length in lengths:\n offsets.append(offsets[-1] + length)\n\n torch_offsets_dtype = string_to_torch_dtype(offsets_dtype)\n return torch.tensor(offsets, dtype=torch_offsets_dtype).cuda()", "title": "" }, { "docid": "50aadd9d12dcbef88a5456e561e40357", "score": "0.42670888", "text": "def _get_check_offset_positions(self, protein_left_position, protein_offset_positions, valid_regions_array, protein_size, frag_len_mean):\n\t\tnew_protein_offset_positions = []\n\t\tapprox_dif = abs(frag_len_mean - protein_size)//2 #estimate shifts of fragments\n\t\tn_proteins = len(protein_offset_positions)\n\n\t\tprotein_absolute_positions = protein_left_position + np.cumsum(protein_offset_positions) #add offset positions\n\t\t\n\t\trm_list = []\n\t\trm_index = 0\n\t\tfor offset_pos in protein_absolute_positions: \n\t\t\tisgood = True\n\t\t\tstart = int(offset_pos - approx_dif)\n\t\t\tstop = int(offset_pos + approx_dif)\n\t\t\t\n\t\t\tfor i in lrange(start,stop+1):\n\t\t\t\tif(valid_regions_array[i] == False):#found overlap with blacklisted region\n\t\t\t\t\tisgood = False\n\t\t\t\t\tn_proteins -= 1\n\t\t\t\t\trm_list.append(rm_index)\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tif(isgood):#all positions are valid\n\t\t\t\tnew_protein_offset_positions.append(offset_pos)\n\t\t\trm_index += 1\n\n\t\tprotein_offset_positions = np.delete(protein_offset_positions, rm_list) #remove also from offset list\n\t\t\n\t\treturn new_protein_offset_positions, n_proteins, protein_offset_positions", "title": "" }, { "docid": "c2c66975b532b7c74084b3a51d5d8c15", "score": "0.42661026", "text": "def axes(self): # noqa: RT01, D200\n return [self.index]", "title": "" }, { "docid": "78c673dcd6407d007f6cf49aaf03895b", "score": "0.42637113", "text": "def _tf_batch_map_offsets(self, input, offsets, order=1):\n\n input_shape = tf.shape(input)\n batch_size = input_shape[0]\n input_size = input_shape[1]\n\n offsets = tf.reshape(offsets, (batch_size, -1, 2))\n grid = tf.meshgrid(\n tf.range(input_size), tf.range(input_size), indexing='ij'\n )\n grid = tf.stack(grid, axis=-1)\n grid = tf.cast(grid, 'float32')\n grid = tf.reshape(grid, (-1, 2))\n grid = self._tf_repeat_2d(grid, batch_size)\n coords = offsets + grid # 实际的采样坐标\n\n mapped_vals = self._tf_batch_map_coordinates(input, coords) # 双线性插值\n return mapped_vals", "title": "" }, { "docid": "1ae9c1408ca2d930805f17c2b960fefc", "score": "0.42593664", "text": "def next_offsets(self):\n # type: (Descriptor) -> Offsets\n resume_bytes = self._resume()\n with self._meta_lock:\n if self._chunk_num >= self._total_chunks:\n return None, resume_bytes\n if self._offset + self._chunk_size > self._ase.size:\n num_bytes = self._ase.size - self._offset\n else:\n num_bytes = self._chunk_size\n chunk_num = self._chunk_num\n range_start = self._offset\n range_end = self._offset + num_bytes - 1\n self._offset += num_bytes\n self._chunk_num += 1\n if self._ase.is_encrypted and self._offset >= self._ase.size:\n pad = True\n else:\n pad = False\n return Offsets(\n chunk_num=chunk_num,\n num_bytes=num_bytes,\n range_start=range_start,\n range_end=range_end,\n pad=pad,\n ), resume_bytes", "title": "" }, { "docid": "73fc77e742017568e939190280bf04a7", "score": "0.4253348", "text": "def 
_offset_dis_to_vol(self, offset: float) -> float:\n return offset / self.CONVERT_RATIO - self.MAX_VOL", "title": "" }, { "docid": "0b5eca049c5ac1edf1656c5b326d07fd", "score": "0.424803", "text": "def get_axes(self):\n\n return [f for f in [self.axes, self.axes2] if f.on]", "title": "" }, { "docid": "38713415ad69e09929ecabb4378d6fd4", "score": "0.4242648", "text": "def get_ovconfs(pl, confl=None):\n if confl is None:\n nframe = pl.source.num_frames\n confl = np.arange(nframe)\n boxl = []\n posl = []\n for iconf in confl:\n ovdata = pl.compute(iconf)\n axes = np.array(ovdata.cell.matrix)\n pos = np.array(ovdata.particles.positions.array)\n box = np.diag(axes)\n boxl.append(box)\n posl.append(pos)\n return boxl, posl", "title": "" }, { "docid": "b20be7cd96fec78f8fcaa8784741127b", "score": "0.42330593", "text": "def set_all_voltages(self, voltages):\n self._set_all_axes_data(self.set_voltage,voltages)\n return self.get_all_voltages()", "title": "" }, { "docid": "b20be7cd96fec78f8fcaa8784741127b", "score": "0.42330593", "text": "def set_all_voltages(self, voltages):\n self._set_all_axes_data(self.set_voltage,voltages)\n return self.get_all_voltages()", "title": "" }, { "docid": "6ee612ede4bec73adf15ec43ecf4afd7", "score": "0.42218235", "text": "def all_positions():\n\treturn AutoQuery().get_all()", "title": "" }, { "docid": "499772280a66a8ace095dfe032bdaa14", "score": "0.42154068", "text": "def get_updates(self, offset=None, timeout=30):\n params = {'timeout': timeout, 'offset': offset}\n raw_resp = requests.get(urljoin(self.api_url, 'getUpdates'), params)\n try:\n resp = raw_resp.json()\n except json.decoder.JSONDecodeError as e:\n print('Failed to parse response {}: {}.'.\n format(raw_resp.content, e))\n return []\n\n if 'result' not in resp:\n return []\n return resp['result']", "title": "" }, { "docid": "637b552a10fbddb5a0fffbb6895be7b3", "score": "0.4212921", "text": "def get_all_target_positions(self):\n return self._get_all_axes_data(self.get_target_position)", "title": "" }, { "docid": "8f7387a72d190fdf7e0a8f453cfa4ae1", "score": "0.42035267", "text": "def axes(self):\n if not self.is_valid:\n raise InvalidNXdataError(\"Unable to parse invalid NXdata\")\n\n if self._axes is not None:\n # use cache\n return self._axes\n axes = []\n for axis_name in self.axes_dataset_names:\n if axis_name is None:\n axes.append(None)\n else:\n axes.append(self.group[axis_name])\n\n # keep only good range of axis data\n for i, axis in enumerate(axes):\n if axis is None:\n continue\n if \"first_good\" not in axis.attrs and \"last_good\" not in axis.attrs:\n continue\n fg_idx = axis.attrs.get(\"first_good\", 0)\n lg_idx = axis.attrs.get(\"last_good\", len(axis) - 1)\n axes[i] = axis[fg_idx:lg_idx + 1]\n\n self._axes = axes\n return self._axes", "title": "" }, { "docid": "3968b3798fd87d991bc739962c2a4b92", "score": "0.41957304", "text": "def filter_offsets(self, ra, dec, offset_filter=None):\n try:\n filter_dict = {\n 'r': {'ra': None, 'dec': None},\n 'g': {'ra': None, 'dec': None},\n 'i': {'ra': None, 'dec': None},\n 'u': {'ra': None, 'dec': None},\n 'ifu': {'ra': None, 'dec': None}\n }\n\n if not offset_filter:\n offset_filter = {\n 'r': {'ra': 130, 'dec': 110},\n 'g': {'ra': -325, 'dec': -328},\n 'i': {'ra': -320, 'dec': 93},\n 'u': {'ra': 111, 'dec': -328},\n 'ifu': {'ra': -97, 'dec': -98}\n }\n\n obj = SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg))\n for k, v in offset_filter.items():\n offra = (Angle(offset_filter[k]['ra'], unit=u.arcsec) /\n np.cos(obj.dec.to('radian')))\n offdec = 
Angle(offset_filter[k]['dec'], unit=u.arcsec)\n\n new_pos = SkyCoord(obj.ra + offra, obj.dec + offdec,\n frame='icrs')\n filter_dict[k]['ra'] = round(new_pos.ra.value, 6)\n filter_dict[k]['dec'] = round(new_pos.dec.value, 6)\n\n return True, filter_dict\n except Exception as e:\n print(str(e))\n return False, str(e)", "title": "" }, { "docid": "ca9fad5bdb576e63346de23d11a60b5f", "score": "0.41899046", "text": "def to_vec(\n self,\n chord_onset: Union[float, Tuple[int, Fraction]] = None,\n chord_offset: Union[float, Tuple[int, Fraction]] = None,\n chord_duration: Union[float, Fraction] = None,\n measures_df: pd.DataFrame = None,\n min_pitch: Tuple[int, int] = None,\n max_pitch: Tuple[int, int] = None,\n note_onset: Fraction = None,\n dur_from_prev: Union[float, Fraction] = None,\n dur_to_next: Union[float, Fraction] = None,\n ) -> np.array:\n vectors = []\n\n # Pitch as one-hot\n pitch = np.zeros(NUM_PITCHES[self.pitch_type], dtype=np.float16)\n pitch[self.pitch_class] = 1\n vectors.append(pitch)\n\n # Octave as one-hot\n num_octaves = 127 // NUM_PITCHES[PitchType.MIDI]\n octave = np.zeros(num_octaves, dtype=np.float16)\n octave[self.octave] = 1\n vectors.append(octave)\n\n # Onset metrical level as one-hot\n onset_level = np.zeros(4, dtype=np.float16)\n onset_level[self.onset_level] = 1\n vectors.append(onset_level)\n\n # Offset metrical level as one-hot\n offset_level = np.zeros(4, dtype=np.float16)\n offset_level[self.offset_level] = 1\n vectors.append(offset_level)\n\n # onset, offset, duration as floats, as proportion of chord's range\n if (\n chord_onset is not None\n and chord_offset is not None\n and chord_duration is not None\n and measures_df is not None\n ):\n if note_onset is None:\n onset, offset, duration = get_rhythmic_info_as_proportion_of_range(\n pd.Series(\n {\n \"mc\": self.onset[0],\n NOTE_ONSET_BEAT: self.onset[1],\n \"duration\": self.duration,\n }\n ),\n chord_onset,\n chord_offset,\n measures_df,\n range_len=chord_duration,\n )\n else:\n try:\n onset = note_onset / chord_duration\n duration = self.duration / chord_duration\n offset = onset + duration\n except Exception:\n # Bugfix for chord duration 0, due to an error in the TSVs\n onset = Fraction(1)\n duration = Fraction(1)\n offset = Fraction(1)\n metrical = np.array([onset, offset, duration], dtype=np.float16)\n vectors.append(metrical)\n else:\n vectors.append(np.zeros(3, dtype=np.float16))\n\n # Duration to surrounding notes\n durations = [\n 0 if dur_from_prev is None else dur_from_prev,\n 0 if dur_to_next is None else dur_to_next,\n ]\n vectors.append(durations)\n\n # Binary -- is this the lowest note in this set of notes\n midi_note_number = self.get_midi_note_number()\n is_min = [1 if min_pitch is not None and midi_note_number == min_pitch[1] else 0]\n vectors.append(is_min)\n\n # Octave related to surrounding notes as one-hot\n relative_octave = np.zeros(num_octaves, dtype=np.float16)\n lowest_octave = 0 if min_pitch is None else min_pitch[0]\n relative_octave[self.octave - lowest_octave] = 1\n vectors.append(relative_octave)\n\n # Normalized pitch height\n norm_pitch_height = [midi_note_number / 127]\n vectors.append(norm_pitch_height)\n\n # Relative to surrounding notes\n if min_pitch is not None and max_pitch is not None:\n range_size = max_pitch[1] - min_pitch[1]\n\n # If min pitch equals max pitch, we set the range to 1 and every note will have\n # norm_relative = 0 (as if they were all the bass note).\n if range_size == 0:\n range_size = 1\n max_pitch = (max_pitch[0], max_pitch[1] + 1)\n\n 
relative_norm_pitch_height = [(midi_note_number - min_pitch[1]) / range_size]\n vectors.append(relative_norm_pitch_height)\n\n else:\n vectors.append([0])\n\n return np.concatenate(vectors).astype(np.float16)", "title": "" }, { "docid": "19dede440591816398cea07c27acd4f3", "score": "0.41794235", "text": "def point_offset(volume, point, offset):\n\n # Convert the quantities into arrays for ease of use.\n point_array = np.array(point)\n offset_array = np.array(offset)\n\n return volume.mri_data[tuple(point_array + offset_array)].reshape(1)", "title": "" }, { "docid": "cda504c0733b924b7475e79e6321b83d", "score": "0.4178159", "text": "def get_all_outputs(self):\n return self._get_all_axes_data(self.get_output)", "title": "" }, { "docid": "13ae7eb0a129db45d798b1fdb55e5c6b", "score": "0.41630244", "text": "def offsets_to_operating_expenditures(self, offsets_to_operating_expenditures):\n\n self._offsets_to_operating_expenditures = offsets_to_operating_expenditures", "title": "" }, { "docid": "0d224757293aee899a6227401b4d4e90", "score": "0.41463768", "text": "def _get_slices_deltas(series: Series) -> np.ndarray:\n slice_locations = get_slice_locations(series)\n deltas = np.abs(np.diff(sorted(slice_locations)))\n return deltas", "title": "" }, { "docid": "cbfafdb4bfbd788ff9d948615fab6118", "score": "0.41450942", "text": "def apply(self, events):\n new_events = np.copy(events)\n new_events[new_events < self.offset] = self.offset\n new_events = 1. / self.decades * (np.log10(new_events) - np.log10(self.offset))\n\n return new_events", "title": "" }, { "docid": "01c12ad8d8956c06beb9c19b44a37c0b", "score": "0.41429472", "text": "def _offset_samples(\n self, starting_sample, ending_sample, left_offsets, right_offsets, num_channels\n ):\n left_slice = (\n 0,\n slice(starting_sample + left_offsets[0], ending_sample + left_offsets[1]),\n )\n right_slice = left_slice\n\n if num_channels == 2:\n right_slice = (\n 1,\n slice(\n starting_sample + right_offsets[0], ending_sample + right_offsets[1]\n ),\n )\n\n left_channel = self.audio.raw_samples[left_slice]\n right_channel = self.audio.raw_samples[right_slice]\n return np.array([left_channel, right_channel])", "title": "" }, { "docid": "0551016b4f79898d2fa7f5fb5a92b216", "score": "0.41410762", "text": "def _event_indexes(self, pairing_seq, offset):\n count = 0\n kmer = []\n for i in range(offset, len(pairing_seq)):\n if count == self.k: break\n if pairing_seq[i] is not None:\n count += 1\n kmer.append(i)\n if len(kmer) != self.k:\n raise AlignmentEndException\n return kmer", "title": "" }, { "docid": "ce281e9678c966b35d55e6bf55e5ae31", "score": "0.4139732", "text": "def plot_trackoffsets(files, threshold=360.):\n\n sun = ephem.Sun(epoch=ephem.B1950)\n tazs = []\n talts = []\n trk_az_off = []\n trk_alt_off = []\n seps = []\n for f in files:\n \n azs = (f.data_info['az'] + f.data_info['az_off'])\n alts = (f.data_info['alt'] + f.data_info['alt_off'])\n for az in azs:\n tazs.append(az)\n for alt in alts:\n talts.append(alt)\n\n tsteps = f.tsteps\n tmax = 0\n#find (alt,az) of sun, where we were pointing, and their separation\n for ti,tv in enumerate(tsteps):\n f.obs.date = tv\n sun.compute(f.obs)\n# create object at latest telescope Alt/Az\n# so we can calculate the separation from sun\n srtbod = ephem.FixedBody(epoch=ephem.B1950)\n ra,dec = f.obs.radec_of(str(azs[ti]),str(alts[ti])) #current ra/dec in B1950\n srtbod._ra = str(ra)\n srtbod._dec = str(dec)\n srtbod.compute(f.obs)\n# trk_az_off.append(srtbod.az - sun.az) #rad\n# trk_alt_off.append(srtbod.alt - 
sun.alt) #rad\n\n# line up the objects to compute offset in each direction\n trk_az_off.append(ephem.separation((srtbod.az, srtbod.alt), (sun.az, srtbod.alt)))\n trk_alt_off.append(ephem.separation((srtbod.az, srtbod.alt), (srtbod.az, sun.alt)))\n seps.append(float(ephem.separation((srtbod.az, srtbod.alt), (sun.az, sun.alt)))) #ra sep.\n\n\n idcs = np.where(np.array(seps) < threshold*np.pi/180.)[0]\n#convert rad --> deg\n trk_az_off = np.array(trk_az_off)*180./np.pi\n trk_alt_off = np.array(trk_alt_off)*180./np.pi\n tazs = np.array(tazs)\n talts = np.array(talts)\n#plot the az and alt offsets\n plt.subplot(2,1,1)\n plt.plot(tazs[idcs],trk_az_off[idcs],'b+')\n plt.xlabel('telescope azimuth')\n plt.ylabel('aziumuthal separation [deg]')\n plt.title('telescope.az - sun.az')\n plt.subplot(2,1,2)\n plt.plot(talts[idcs],trk_alt_off[idcs],'b+')\n plt.xlabel('telescope altitude')\n plt.ylabel('altitude separation [deg]')\n plt.title('telescope.alt - sun.alt')\n\n plt.show()", "title": "" }, { "docid": "31183cb11a7603fc464b8c85a8beb122", "score": "0.41337988", "text": "def get_track_offsets(self, row_idx, col_idx):\n # type: (int, int) -> Tuple[float, ...]\n dx = self._core_offset[0] + self._core_pitch[0] * col_idx\n dy = self._core_offset[1] + self._core_pitch[1] * row_idx\n bot_layer = self.bot_layer_id\n offsets = []\n for lay in range(bot_layer, bot_layer + len(self._num_tracks)):\n dim = dx if self.grid.get_direction(lay) == 'y' else dy\n pitch = self.grid.get_track_pitch(lay, unit_mode=True)\n offsets.append(dim / pitch)\n\n return tuple(offsets)", "title": "" }, { "docid": "6d75a248bed64145a3db63ef6de1639b", "score": "0.41313195", "text": "def print_offsets_times_70(experiment, obs_type, truth_dir=TRUTH_DIR):\n subfield_index, x_offset_deg, y_offset_deg = evaluate.get_generate_variable_offsets(\n experiment, obs_type, truth_dir=truth_dir)\n print \"70. * x_offset =\"\n print x_offset_deg * 70.\n print \"70. 
* y_offset =\"\n print y_offset_deg * 70.\n return", "title": "" }, { "docid": "be9bf90d7339af7488ab219051627e8a", "score": "0.4121535", "text": "def articulation_positions(cursor):\r\n c = lydocument.cursor(cursor)\r\n if not cursor.hasSelection():\r\n # just select til the end of the current line\r\n c.select_end_of_block()\r\n rests = True\r\n partial = ly.document.OUTSIDE\r\n else:\r\n rests = False\r\n partial = ly.document.INSIDE\r\n source = lydocument.Source(c, True, partial, True)\r\n \r\n positions = []\r\n for p in ly.rhythm.music_tokens(source):\r\n if not rests and isinstance(p[0], ly.lex.lilypond.Rest):\r\n continue\r\n positions.append(source.cursor(p[-1], start=len(p[-1])))\r\n if not cursor.hasSelection():\r\n break # leave if first found, that's enough\r\n return positions", "title": "" }, { "docid": "56855d3acb59326c146e167b2f8a2b1f", "score": "0.41199803", "text": "def offset_flux(self, offset, *trace_keys):\n\n if not trace_keys:\n trace_keys = self.spectra.keys()\n \n for trace_key in trace_keys:\n spectrum = self.spectra.get(trace_key)\n if spectrum is not None:\n spectrum.object.flux_offset = offset", "title": "" }, { "docid": "55d7a1cfd39e97238092f28c915d8d22", "score": "0.4115952", "text": "def volume_distance_field_batch(vols):\n vol_edt_list = []\n for b in range(vols.shape[0]):\n vol_edt = volume_distance_field(np.squeeze(vols[b,...]))\n vol_edt_list.append(np.expand_dims(np.expand_dims(vol_edt, -1), 0))\n return np.concatenate(vol_edt_list, 0)", "title": "" }, { "docid": "ff5dbb5c849ca752aa024102eb3bf0fd", "score": "0.41069746", "text": "def _get_indexes_xaxis(self, idx):\n if idx < 0 or idx > 80:\n raise RuntimeError(\"no valid coordinate. idx = {}\".format(idx))\n row = idx // 9\n return [v for v in range(row*9, row*9+9)]", "title": "" }, { "docid": "c6bbf47b81b668620c16746698f5e2ac", "score": "0.41008052", "text": "def _carr_purcell_meiboom_gill_offsets(\n duration: float, offset_count: int\n) -> np.ndarray:\n\n spacing = 1.0 / offset_count\n start = spacing * 0.5\n\n # prepare the offsets for delta comb\n deltas = spacing * np.arange(offset_count)\n deltas += start\n offsets = deltas * duration\n\n return offsets", "title": "" }, { "docid": "f23eb02329861b196384a3f653e7ff36", "score": "0.41001177", "text": "def read_data(self, offsets):\n # type: (Descriptor, Offsets) -> Tuple[bytes, Offsets]\n newoffset = None\n if not self.local_path.use_stdin:\n if offsets.num_bytes == 0:\n return None, None\n # compute start from view\n start = self.local_path.view.fd_start + offsets.range_start\n # encrypted offsets will read past the end of the file due\n # to padding, but will be accounted for after encryption+padding\n with self.local_path.absolute_path.open('rb') as fd:\n fd.seek(start, 0)\n data = fd.read(offsets.num_bytes)\n else:\n data = blobxfer.STDIN.read(self._chunk_size)\n if not data:\n with self._meta_lock:\n self._offset -= offsets.num_bytes\n self._ase.size -= offsets.num_bytes\n self._total_chunks -= 1\n self._chunk_num -= 1\n self._outstanding_ops -= 1\n else:\n num_bytes = len(data)\n with self._meta_lock:\n self._offset -= offsets.num_bytes\n self._ase.size -= offsets.num_bytes\n newoffset = Offsets(\n chunk_num=self._chunk_num - 1,\n num_bytes=num_bytes,\n range_start=self._offset,\n range_end=self._offset + num_bytes - 1,\n pad=False,\n )\n self._total_chunks += 1\n self._outstanding_ops += 1\n self._offset += num_bytes\n self._ase.size += num_bytes\n if self.must_compute_md5 and data:\n with self._hasher_lock:\n self.md5.update(data)\n if 
self.is_resumable:\n self._md5_cache[self._chunk_num - 1] = self.md5.hexdigest()\n return data, newoffset", "title": "" }, { "docid": "6939b53d398a89b0d2c500c16fbce0a4", "score": "0.40981266", "text": "def list_axes(*args):\n # Clear previus UI list\n clear_external_axis_list()\n\n # Check viewport robot selection\n # If robots are selected, list all external axes on selected robots\n selected_robots = mimic_utils.get_robot_roots()\n if selected_robots:\n robots = selected_robots\n # If no robots are selected, list all axes in the scene\n else:\n robots_in_scene = mimic_utils.get_robot_roots(all_robots=True)\n # If there are no robots in the scene, raise an exception\n if not robots_in_scene:\n raise MimicError('No robots in scene')\n else:\n robots = robots_in_scene\n\n # Keep track of selected robots without axes for a heads-up message\n selected_robots_without_axes = []\n\n # For each robots, get a list of all its external axes\n for robot in robots:\n robots_external_axes = get_external_axis_names(robot)\n\n # If we're only looking at selected robots, check if each selected\n # robot has external axes. If not, add it to a list to display in\n # a heads up message\n if selected_robots:\n if not robots_external_axes:\n selected_robots_without_axes.append(robot)\n\n # Update Mimic UI with list of external axes\n for axis in robots_external_axes:\n append_string = robot + ': ' + axis\n pm.textScrollList('tsl_externalAxes',\n edit=True,\n append=append_string)\n\n if selected_robots_without_axes:\n robot_list_str = ''\n for each in selected_robots_without_axes:\n robot_list_str += each + ', '\n\n pm.headsUpMessage('{} has no External Axes'\n .format(robot_list_str))", "title": "" }, { "docid": "9a13f01b1aa7896744863954a010c62b", "score": "0.40884963", "text": "def velocities(self):\n velocities = []\n for atom in self._atoms:\n velocities.append(atom.v)\n self._velocities = velocities[:]\n return np.asarray(self._velocities)", "title": "" }, { "docid": "b4b5a67932d7e67d25767cb6a3e04263", "score": "0.40826753", "text": "def get_lattice_volume(axes):\r\n axes = nm.asarray(axes)\r\n\r\n dim = axes.shape[0]\r\n\r\n if dim == 2:\r\n volume = nm.abs(nm.cross(axes[0], axes[1]))\r\n\r\n elif dim == 3:\r\n volume = nm.dot(nm.cross(axes[0], axes[1]), axes[2])\r\n\r\n else:\r\n raise ValueError('wrong axes shape! 
(%s)' % axes.shape)\r\n\r\n return volume", "title": "" }, { "docid": "4ef6c5d166d831150c39fde9bad46aa3", "score": "0.40800217", "text": "def xranges(self):\n \n return [lay.xran for lay in self.layers]", "title": "" }, { "docid": "8b7cdc4648ef14cade794c55e2e4348c", "score": "0.4076538", "text": "def xy_to_xyz_v(coords, xy_axes=default_xy_axes):\n\n return np.dot(coords, xy_axes.T)", "title": "" }, { "docid": "8571013ab2c85fef93e48952c7e149b3", "score": "0.4076176", "text": "def axes(self): # noqa: RT01, D200\n return [self.index, self.columns]", "title": "" }, { "docid": "419efcdcdd8cc8577a7546d5bc4d69de", "score": "0.4074621", "text": "def get_vecs(self):\n\n return self.get_4_delta_idf_vecs()", "title": "" }, { "docid": "4be4933f11303610b4f77def993f27e3", "score": "0.4068588", "text": "def find_offset_parameter(self, x_values=None, data=None):\n # lorentzian filter\n mod, params = self.make_lorentzian_model()\n\n # Todo: exclude filter in seperate method to be used in other methods\n\n if len(x_values) < 20.:\n len_x = 5\n elif len(x_values) >= 100.:\n len_x = 10\n else:\n len_x = int(len(x_values)/10.)+1\n\n lorentz = mod.eval(x=np.linspace(0, len_x, len_x), amplitude=1, offset=0.,\n sigma=len_x / 4., center=len_x / 2.)\n data_smooth = convolve1d(data, lorentz / lorentz.sum(),\n mode='constant', cval=data.max())\n\n # finding most frequent value which is supposed to be the offset\n hist = np.histogram(data_smooth, bins=10)\n offset = (hist[1][hist[0].argmax()]+hist[1][hist[0].argmax()+1])/2.\n\n return data_smooth, offset", "title": "" }, { "docid": "9e6b9b616631469216aefc998cf6cd9c", "score": "0.40648767", "text": "def set_offset(self, axis, voltage):\n self.query(\"seta {} {}\".format(axis,voltage))\n return self.get_offset(axis)", "title": "" } ]
9567fd29f63203e11751913ca00a388a
Get work history for the last years
[ { "docid": "f7cf9bac763b508fcc011afe972f3c3d", "score": "0.7542441", "text": "def getHistoryYear(num = 0):\n\tresult=[]\n\tcurrentTime=datetime.datetime.now()\n\tlimit=datetime.datetime(currentTime.year-num, currentTime.month, currentTime.day)\n\tfor job in resume[\"work\"]:\n\t\tif \"endDate\" not in job:\n\t\t\tend=currentTime\n\t\telse:\n\t\t\ttry: \n\t\t\t\tend=datetime.datetime.strptime(job['endDate'], \"%Y-%m-%d\")\n\t\t\texcept:\n\t\t\t\tend=datetime.datetime.strptime(job['endDate'], \"%Y-%m\")\n\t\tif limit <= end:\n\t\t\tresult.append(job)\n\n\treturn jsonify(result)", "title": "" } ]
[ { "docid": "6a57c759e75570e494a30824b6c5c3f0", "score": "0.6485748", "text": "def getHistory(num = None):\n\thistory=[]\n\tif num != None:\n\t\thistory=resume[\"work\"][:int(num)]\n\telse:\n\t\thistory=resume[\"work\"]\n\treturn jsonify(history)", "title": "" }, { "docid": "0be23c49742351038eab796d41ba6c94", "score": "0.6298559", "text": "def _get_history(self):\n return self.history", "title": "" }, { "docid": "c0e87ec284ad7e899b967ff12ec493f7", "score": "0.6146807", "text": "def get_history(last=None): # noqa: E501\n #reading all previous predictions from database\n db = connect_db()\n history = []\n for prediction in db.find():\n history.append(Mushroom.from_dict(prediction))\n #mushroom_history = list(db.find())\n #print (mushroom_history)\n \n return history", "title": "" }, { "docid": "7b7b1168a7fc03cf9dc9b79a44495e18", "score": "0.6136298", "text": "def get_history(self, history_start_date, history_end_date, gsr):\n\n return self.historian.get_history(\n start_date = history_start_date,\n end_date=history_end_date,\n gsr=gsr\n )", "title": "" }, { "docid": "58de16931e207a23cdc11b7d3a109590", "score": "0.6055987", "text": "def get_history(self):\n return self.history", "title": "" }, { "docid": "58de16931e207a23cdc11b7d3a109590", "score": "0.6055987", "text": "def get_history(self):\n return self.history", "title": "" }, { "docid": "58de16931e207a23cdc11b7d3a109590", "score": "0.6055987", "text": "def get_history(self):\n return self.history", "title": "" }, { "docid": "81152b1586468c7a711d32d058e4cfac", "score": "0.60255224", "text": "def history(self):\n return self.runner.execute('history')", "title": "" }, { "docid": "80b0e6a583d5e503ee32e80485819e57", "score": "0.6022308", "text": "def get_history():\r\n history_file = open(HISTORY_FILEPATH)\r\n history = history_file.readlines()\r\n history_file.close()\r\n return history", "title": "" }, { "docid": "d711e29582edf0308a54e16f966e4ba8", "score": "0.6001252", "text": "def list_history():\n api_endpoint = \"/history\"\n print(\"\\nHere's the tasks history:\\n\")\n pprint.pprint(api_get_request(api_endpoint))\n print()", "title": "" }, { "docid": "20d9fe68851295cb9b6ad08083b162a6", "score": "0.5987665", "text": "def history(self, period: str = '1d', start: str = None, end: str = None) -> pd.DataFrame:\n return self.company.history(period=period, start=start, end=end)", "title": "" }, { "docid": "f8d7d11a406f219c7a9ec42d61c42496", "score": "0.59490705", "text": "def get_ticker_history(ticker):\n ticker_obj = yf.Ticker(ticker)\n ticker_hist = ticker_obj.history(period=\"ytd\")\n return ticker_hist", "title": "" }, { "docid": "c46550fb9d41e76af230679bb53f6b8f", "score": "0.59349465", "text": "def app_history():\n if hist == []:\n print(\"城市天气查询记录为空。\")\n else:\n print(\"城市查询历史:\")\n for item in hist:\n print(item)", "title": "" }, { "docid": "8af4b99adf352cec03ee5294f54bed56", "score": "0.59344417", "text": "def history():\n Files.read_history()", "title": "" }, { "docid": "2f4fc1796c75e7d4d963e945d308f348", "score": "0.59259576", "text": "def get_history_of_day(self):\n histories_day = app.db.get(self.key)\n if not histories_day:\n histories_day = []\n return histories_day", "title": "" }, { "docid": "02fa2c47bcbe9ace9da6686e33bd80c9", "score": "0.5918976", "text": "def get_history():\n\n response = {'history': []}\n try:\n user_id = session[\"user_id\"]\n history = User.get_history(user_id)\n except:\n try:\n searches = session[\"history\"]\n history = Search.get_data(searches)\n except:\n history = None\n if history:\n for search 
in history:\n thing = {\"start_lat\": search.a.latitude,\n \"start_lng\": search.a.longitude,\n \"start_ele\": int(search.a.elevation),\n \"end_lat\": search.p.latitude,\n \"end_lng\": search.p.longitude,\n \"end_ele\": int(search.p.elevation),\n \"min_ele\": search.min_ele,\n \"max_time\": search.max_time,\n \"travel_mode\": search.travel_mode,\n \"search_id\": search.search_id}\n response['history'].append(thing)\n return jsonify(response)", "title": "" }, { "docid": "26a3ce382e640ca31e2cd154876eb733", "score": "0.591221", "text": "async def get_season_history(self):\n if hasattr(self, \"_history\"):\n history = self._history\n else:\n history = await fetch(\n self._session, API_URLS[\"user_history\"].format(self.id))\n\n self._history = history\n return history[\"past\"]", "title": "" }, { "docid": "ac77d8b52ba1289cc186f17aee3e7f53", "score": "0.5894235", "text": "def get_history(self):\n return self._history", "title": "" }, { "docid": "8023662099e1baf2453e388f0ad210ce", "score": "0.5886394", "text": "def get_historical_data(ticker, period=\"max\"):\n today_date = datetime.date.today().replace(day=1)\n last_year_today_date = today_date - datetime.timedelta(days=365)\n today_str = today_date.strftime(\"%Y-%m-%d\")\n last_year_str = last_year_today_date.strftime(\"%Y-%m-%d\")\n year_pth = (\n Location.Home(__file__) + \"/stocks/long_term/\" + today_str + \"_\" + last_year_str\n )\n if not os.path.exists(year_pth):\n force_mkdir(year_pth)\n ticker_pth = year_pth + \"/\" + ticker + \".csv\"\n if not os.path.exists(ticker_pth):\n if period == \"max\":\n temp = yf.Ticker(ticker)\n data = temp.history(period=\"max\")\n data.to_csv(ticker_pth)\n else:\n data = yf.download(ticker, start=last_year_str, end=today_str)\n data.to_csv(ticker_pth)", "title": "" }, { "docid": "62043fb86adac3ce7134daa3b9fa8a6a", "score": "0.5875762", "text": "def history(request):\n trunk_id = request.GET.get('trunk_id')\n trunk = db.get(trunk_id)\n data = []\n revs = [i.obj_ref\n for i in models.TrunkRevisionModel.all().ancestor(trunk).order('-created')]\n for it, previous in itertools.izip(revs, revs[1:] + [None]):\n datum = {\n 'doc': db.get(it),\n 'previous': previous,\n }\n data.append(datum)\n\n return respond(request, constants.DEFAULT_TITLE,\n \"history.html\", {\n 'trunk_id': trunk_id,\n 'data': data,\n })", "title": "" }, { "docid": "734805ad0e34d0ef094eef0c0e4813c2", "score": "0.5860595", "text": "def get_past(pair, days_history, exchange):\n now = int(time.time())\n params = {\n 'fsym': pair[0],\n 'tsym': pair[1],\n 'toTs': now,\n 'limit': days_history,\n 'aggregate': 1,\n 'e': exchange\n }\n\n response = requests.get('https://min-api.cryptocompare.com/data/histoday',\n params=params)\n results = response.json()['Data']\n # print(results)\n return results", "title": "" }, { "docid": "e65b907cf64ac90ef75ff29649611be4", "score": "0.58579737", "text": "def _get_history(project_details, auth_token):\n for milestone in project_details.milestones:\n for user_story in milestone.user_stories:\n history, is_error = TaigaAPI.get_history(user_story.id,\n auth_token, \"HISTORY_US\")\n if is_error:\n user_story.history = None\n else:\n user_story.history = AnalysisPerformer._extract_history(history)\n for task in user_story.tasks:\n history, is_error = TaigaAPI.get_history(user_story.id,\n auth_token, \"HISTORY_TASK\")\n if is_error:\n task.history = None\n else:\n task.history = AnalysisPerformer._extract_history(history)\n return project_details", "title": "" }, { "docid": "18afeec104cade09819868e78e188793", "score": 
"0.5847933", "text": "def list_history(request):\n history = models.JobHistory.objects\n\n if not request.user.is_superuser:\n history = history.filter(owner=request.user)\n history = history.order_by('-submission_date')\n\n return render('list_history.mako', request, {\n 'history': history,\n })", "title": "" }, { "docid": "5560d68be2f17ae40ea8147d0d396934", "score": "0.5822669", "text": "def get_history(self, history_id=None):\n history = []\n no_history = False\n if not history_id:\n history_id = self.sql3.get_history_id()\n\n bar = self.bar(leave=True, total=10, desc='fetching changes')\n pages = 0\n try:\n for results in self.gmail.get_history_since(history_id):\n history += results\n pages += 1\n bar.update(1)\n except remote.Gmail.NoHistoryException:\n no_history = True\n for _ in range(pages, 10):\n bar.update(1)\n bar.close()\n if no_history:\n return None\n return history", "title": "" }, { "docid": "8779966421d4fb62841cf33c81ed4940", "score": "0.5811631", "text": "def get_all_years(min_year, max_year, till_now = True):\n today = date.today()\n year = str(today.year)\n month = str(today.month)\n day = str(today.day)\n if(max_year > int(year) or till_now):\n max_year == int(year)\n all_ext = []\n for y in range(min_year, max_year):\n #for q in [1, 4, 7]:\n for q in [1, 3, 5, 7, 9]:\n all_ext.append(\n '%20since%3A' + str(y) + '-0' + str(q) + '-02%20until%3A' + str(y) + '-0' + str(q + 2) + '-01')\n all_ext.append(\n '%20since%3A' + str(y) + '-' + str(11) + '-02%20until%3A' + str(y + 1) + '-0' + str(1) + '-01')\n if till_now:\n if int(year) == max_year:\n for inc in range(1, int(month)-1, 2):\n all_ext.append(\n '%20since%3A' + year + '-0' + str(inc) + '-02%20until%3A' + year + '-0' +\n str(inc+2) + '-01')\n if int(month) % 2 == 0:\n all_ext.append(\n '%20since%3A' + year + '-0' + str(int(month)-1) + '-02%20until%3A' + year + '-0' +\n month + '-0' + day)\n else:\n all_ext.append(\n '%20since%3A' + year + '-0' + month + '-02%20until%3A' + year + '-0' +\n month + '-0' + day)\n return all_ext", "title": "" }, { "docid": "accd29f7bd880ba4538679198d9f6c23", "score": "0.5801146", "text": "def getHistory(self):\n return self.__history", "title": "" }, { "docid": "f5a7cb70c0934f44fb08b69caeea0ab4", "score": "0.5797689", "text": "def history(self):\n with lock:\n return self._history", "title": "" }, { "docid": "d083c5b633d2755f528339520696e9b7", "score": "0.57482", "text": "def history(all_data, (sex, age, sline), days=30):\n start_date = None\n end_date = None\n hist_data = {}\n for row in all_data:\n admit_date = row[2]\n agef_in_years = int(row[9])\n agef = fp.split_age(agef_in_years)\n sexf = int(row[10])\n slinef = row[14]\n rlos = row[5]\n\n if slinef is None:\n continue\n if len(admit_date) == 0:\n continue\n\n if len(rlos) == 0:\n continue\n\n if (sex, age, sline) != (sexf, agef, slinef):\n continue\n datetime = fp.parse_datetime(admit_date)\n\n start_date = datetime if start_date is None or datetime < start_date else start_date\n end_date = datetime if end_date is None or datetime > end_date else end_date\n hist_data.setdefault(datetime, [])\n hist_data[datetime].append((admit_date, sex, agef_in_years, sline, int(rlos)))\n\n start_day = random.randint(0, (end_date - start_date).days)\n end_day = start_day + days\n result = []\n for day in range(start_day, end_day + 1):\n day_dt = start_date + timedelta(day)\n if day_dt in hist_data:\n result.extend(hist_data[day_dt])\n\n return result, start_date + timedelta(start_day), start_date + timedelta(end_day)", "title": "" }, { "docid": 
"2648bee3d4f95d5bbe8de1bf7d16aff1", "score": "0.5747052", "text": "def get_play_history(**kwargs):\n url = URLBuilder(\"https://www.boardgamegeek.com/xmlapi2/plays\")\n if kwargs.get(\"username\") is None:\n raise ValueError(\"username is required\")\n url.addQueryArg(\"username\", kwargs.get(\"username\"))\\\n .addQueryArg(\"mindate\", kwargs.get(\"startdate\"))\\\n .addQueryArg(\"maxdate\", kwargs.get(\"enddate\"))\n\n executor = Executor()\n first_page = yield from executor.addRequests([url.build()])\n\n # bgg api returns the total number of records and 100 records per page\n pageCount = math.ceil(int(first_page[0].attrib.get(\"total\"))/100)\n\n # new list with url strings for pages 2..n\n # if pageCount is 1, then the list is empty\n last_pages = [url.addQueryArg(\"page\", n).build() for n in range(2, pageCount + 1)]\n responseTrees = yield from executor.addRequests(last_pages)\n\n playHistory = build_play_history(responseTrees)\n return playHistory", "title": "" }, { "docid": "3cc5a699b8ee1fb096942be8b00b6586", "score": "0.574003", "text": "def pagePlayedHistory(self, year=0, month=0):\n\n if year == 0:\n today = datetime.datetime.now()\n year = today.year\n if month == 0:\n today = datetime.datetime.now()\n month = today.month\n\n ### haal jaar, maand, en dag gegevens op, en maak er dictionaries van voor weergave op webpagina\n # haal jaren op\n query = \"\"\"\n select year, played \n from played_history\n where month = 0\n order by year\n \"\"\"\n yearsdict = self._db.dbGetData(query)\n print 'yearsdict: ', yearsdict\n\n # haal maanden op\n query = \"\"\"select year, month, played from played_history\n where year = %s and month <> 0 and day = 0\"\"\" % (year)\n records = self._db.dbGetData(query)\n monthsdict = {}\n for record in records:\n key = 'month' + str(record['month'])\n monthsdict[key] = record['played']\n # voeg ook year en month toe aan dictionary\n monthsdict['year'] = year\n monthsdict['month'] = month\n # print 'monthsdict ->', monthsdict\n\n # haal dagen op\n query = \"\"\"select year, month, day, played\n , ltrim(to_char(year, '9999')) || ltrim(to_char(month, '09')) || ltrim(to_char(day, '09')) as datum\n from played_history\n where year = %s and month = %s and day <> 0\"\"\" % (year, month)\n records = self._db.dbGetData(query)\n # print query\n # print records\n daysdict = {} # per dagnr de afgespeelde records\n for record in records:\n key = 'day' + str(record['day'])\n key2 = 'dayb' + str(record['day'])\n daysdict[key] = record['played']\n daysdict[key2] = record['datum']\n # print 'daysdict', daysdict\n\n h = mymc_html.pagePlayedHistory(year, month, yearsdict, monthsdict, daysdict)\n # print h\n\n return h", "title": "" }, { "docid": "abd10442d9d09fece98c058d2b67a2b3", "score": "0.5739586", "text": "def get_info_list(ticker:str, low_year:int, high_year:int) -> list:\n info_cursor = ctx.cursor().execute(f\"SELECT * FROM US_STOCKS_DAILY.PUBLIC.STOCK_HISTORY WHERE symbol='{ticker}' AND date<'{high_year+1}-1-1' AND date>='{low_year}-1-1' ORDER BY date\")\n return info_cursor.fetchall()", "title": "" }, { "docid": "54c269455656633b2728146a8d832a16", "score": "0.5698984", "text": "def history(self):\n hmwf = max([agg*TIME_AGGS[agg] for agg in TIME_AGGS])\n return hmwf", "title": "" }, { "docid": "aa4478b1cba60e8713d9d73b6fddbe02", "score": "0.568384", "text": "def history(game_id, week_id):\n return _game_view(game_id, week_id)", "title": "" }, { "docid": "8f6cad7d0317dfc43146be1be25eaa24", "score": "0.5673654", "text": "def history(self):\n return 
_data_equalizing_swig.HYR_ofdm_sptr_history(self)", "title": "" }, { "docid": "33681e5b3f8c882993c42a3741fe1738", "score": "0.565247", "text": "def get_all_logs( savefile=None ):\n base = 'http://heracles.astro.berkeley.edu/wiki/doku.php?id='\n main = 'start'\n archive = 'past_years_logs'\n creds = urllib.urlencode({\"u\" : wiki_un, \"p\" : wiki_pw})\n\n outdict = {}\n\n # do all of the links from this year\n page = urllib.urlopen(base+main,creds).read()\n t = page.split('Night Summaries, Logs, and Quicklooks')[2]\n # process out the page IDs\n pageids = re.findall('title=\"\\d+_\\d+.+\"', t)\n pageids = [re.search('\\d+_\\d+[^\"]+', row).group() for row in pageids if 'nofollow' not in row]\n\n # now go through all of these pages and parse them\n for pid in pageids:\n try:\n o,a,f = wiki2log( pid )\n except:\n print 'Cannot open',pid\n continue\n names = [oo[-1] for oo in o]\n for n in names:\n if n not in outdict.keys():\n outdict[n] = []\n outdict[n].append(pid)\n\n # do all of the links from past years\n page = urllib.urlopen(base+archive,creds).read()\n pageids = re.findall('title=\"\\d+_\\d+.+\"', page)\n pageids = [re.search('\\d+_\\d+[^\"]+', row).group() for row in pageids if 'nofollow' not in row]\n\n # now go through all of these pages and parse them\n for pid in pageids:\n try:\n o,a,f = wiki2log( pid )\n except:\n print 'Cannot open',pid\n continue\n names = [oo[-1] for oo in o]\n for n in names:\n if n not in outdict.keys():\n outdict[n] = []\n outdict[n].append(pid)\n\n if savefile != None:\n pickle.dump(outdict, open(savefile,'w'))\n print 'pickled to',savefile\n return outdict", "title": "" }, { "docid": "8621749934b397d7121a328f0afb50ab", "score": "0.5646118", "text": "def history():\n\n # get the symbol from query string parameters\n symbol = request.args.get('symbol', default=\"AAPL\")\n period = '2y' # set default period\n interval = '1mo' # set default interval\n\n #pull the quote\n quote = yf.Ticker(symbol)\n\n #use the quote to pull the historical data from Yahoo finance\n hist = quote.history(period=period, interval=interval)\n\n #convert the historical data to JSON\n data = hist.to_json()\n\n #return the JSON in the HTTP response\n return data", "title": "" }, { "docid": "e36a5de7fa24c38db4ced8697495b953", "score": "0.56342506", "text": "def generate_recent_history(history_model, visitor, amount):\n return history_model.query.filter_by(visitor=visitor).order_by(history_model.id.desc())[:amount]", "title": "" }, { "docid": "38af4157cde7616fb1e3d3788aeed811", "score": "0.5592317", "text": "def load_history(self):\n self.history = []\n try:\n for entry in self.wiki.git_wiki.get_walker(\n paths=[self.physical_path.encode('utf-8')]):\n self.history.append(ArticleHistoryEntry(self, entry.commit))\n # A KeyError can occur on freshly created repositories (without commits)\n except KeyError as e:\n pass", "title": "" }, { "docid": "c0c525400b6e54a91ad9feba89b9b027", "score": "0.5579854", "text": "def get_game_years() -> list:\n olympic_games_years = []\n with sqlite3.connect('olympic_history.db') as conn:\n cursor = conn.cursor()\n game_years_raw = cursor.execute('select year from games').fetchall()\n for year_tuple in game_years_raw:\n olympic_games_years.append(str(year_tuple[0]))\n\n return olympic_games_years", "title": "" }, { "docid": "f047a965a1414051c5331e80a9f28529", "score": "0.55406266", "text": "def past_due_records(repo):\n import utils\n return Person.all(filter_expired=False).filter(\n 'expiry_date <=', utils.get_utcnow()).filter(\n 'repo =', repo)", "title": 
"" }, { "docid": "8cad63d46629176489588c7cd3ab4ffe", "score": "0.5521652", "text": "def get_game_history(self, request):\n game = self._get_by_urlsafe(request.urlsafe_game_key, Game)\n return game.get_history()", "title": "" }, { "docid": "30a9096057be7b2d59c2b757a44a6caa", "score": "0.5519728", "text": "def history():\n engine = get_engine()\n config, _ = get_alembic_config(engine)\n return alembic_history(config)", "title": "" }, { "docid": "dd523e71eee3717ed596bc115b084b1e", "score": "0.5497751", "text": "def history():\n url = config.get('base_uber_url_v1_1') + 'history'\n params = {\n 'offset': 0,\n 'limit': 5,\n }\n\n response = requests.get(\n url,\n #headers=generate_ride_headers(session.get('access_token')),\n headers=generate_ride_headers(getAccessToken()),\n params=params,\n )\n\n if response.status_code != 200:\n return 'There was an error', response.status_code\n return render_template(\n 'results.html',\n endpoint='history',\n data=response.text,\n )", "title": "" }, { "docid": "ab0af8661a737532753c7c5f38161a51", "score": "0.5488398", "text": "def getHistoryToDate(self, date, nb_days):\n return self.__historical_data.loc[increase_date(date, - nb_days):date]", "title": "" }, { "docid": "bd00a0f0f6d90bda10f5e09756737b7e", "score": "0.54811907", "text": "def historic_data((sex, age, sline), days=30):\n historical_data, sd, ed = history(raw_data, (sex, age, sline), days=30)\n\n id = \"H[%4d-%02d-%02d:%4d-%02d-%02d]\" % (sd.year, sd.month, sd.day, ed.year, ed.month, ed.day)\n result = []\n for t in historical_data:\n result.append((id, t[0], t[1], t[2], t[3], t[4]))\n return result", "title": "" }, { "docid": "b1b40c241c4870453f933014a48d4615", "score": "0.5472992", "text": "def get(self, list_id, month):\n return self._mc_client._get(url=self._build_path(list_id, 'growth-history', month))", "title": "" }, { "docid": "d0aa243731870c970b61d53c5fb730f9", "score": "0.54715073", "text": "def getHistory(self)->Dict[str,Log]:\n return self.logs", "title": "" }, { "docid": "e7215e62f89a8c222cfb1e19226aab34", "score": "0.5460707", "text": "def get_histories(self, begin: str = '-1m', end: str = arrow.now(),\n period: str = 'day', adjust: str = 'before',\n indicator: str = 'kline,ma,pe,pb,ps,pcf,market_capital'):\n begin = len(begin)>5 and arrow.get(begin,tzinfo=\"Asia/Shanghai\").timestamp \\\n or self._str2date(begin).timestamp\n end = arrow.get(end).timestamp\n resp = sess.get(api.stock_history % (self.symbol, begin, end, period, adjust, indicator))\n dt = resp.ok and resp.json()\n df = pd.DataFrame(\n [[arrow.get(i[0]/1000).to('UTF-8').date()]+i[1:]\n for i in dt['data']['item']],\n columns=['date']+dt['data']['column'][1:])\n df.date = pd.to_datetime(df.date)\n self.history = df.set_index('date')", "title": "" }, { "docid": "93fbd30a576c4945357da88fc1817ae1", "score": "0.5459564", "text": "def getHistoricalMaxReturn(self,start,end):\n delta = datetime.timedelta(days=-365)\n stockReturns = []\n for i in range(Stock.MAX_YEARS_LOOKBACK):\n start = start+delta\n end = end+delta\n if not self.containsDate(start):\n break\n stockReturns.append(self.getMaxReturnForPeriod(start,end))\n return stockReturns", "title": "" }, { "docid": "2fc27fc9d35c23cd873d866844a0e447", "score": "0.5431054", "text": "def get_it_by_ay(author,year, delta=2):\n con = lite.connect(meta_db)\n\n recs = []\n with con:\n cur = con.cursor()\n cur.execute(\"SELECT arxiv_id, title FROM ayit_lookup WHERE author='%s' AND '%d' <= year AND year <= '%d' \" % (author,year-delta,year))\n recs = [ (to_ascii(x_id), 
to_ascii(x_title)) for x_id,x_title in cur.fetchall()]\n return recs", "title": "" }, { "docid": "c9e01a61551a675bff7564eca58322e3", "score": "0.5430925", "text": "def history(self) -> defaultdict:\n return self._model.history_", "title": "" }, { "docid": "186153a1010aa4abd8a71ad123e288e1", "score": "0.54249907", "text": "def history(self):\r\n if not self._history:\r\n self._parse_headers_raw()\r\n return self._history", "title": "" }, { "docid": "ca80cdcbff6303ef5c25fc1b4d7255a1", "score": "0.54245335", "text": "def do_history(self,\n request,\n result,\n extractor):\n self.check_scheme(request.get_object_id())\n args = self.args_history(request)\n sql = [self.get_sql_bundle().get_sql('History', args), self.get_sql_bundle().get_sql('HistoryCount', args)]\n self.search_with_paging(request.get_paging_request(), sql, args, extractor, result)\n return result", "title": "" }, { "docid": "07e917d2a4447bb85a5d2e0248dd4dde", "score": "0.54174423", "text": "def __get_all_history(bot: Text):\n for value in TrainingDataGenerator.objects(bot=bot).order_by(\"-start_timestamp\"):\n item = value.to_mongo().to_dict()\n if item.get('document_path'):\n item['document_path'] = item['document_path'].replace(TRAINING_DATA_GENERATOR_DIR + '/', '').__str__()\n item.pop(\"bot\")\n item.pop(\"user\")\n item[\"_id\"] = item[\"_id\"].__str__()\n yield item", "title": "" }, { "docid": "82dc8e6a7d2cc8b84353b4751525642d", "score": "0.5369093", "text": "def get_hist(self):\n return list(map(self.df.shift, range(0, Config.MAX_HIST)))", "title": "" }, { "docid": "96e82fd8a78a81397fbcc44164bea167", "score": "0.5368821", "text": "def history(self) -> List[int]:\n return self._result_log", "title": "" }, { "docid": "fc31ec2e02b78819d96b1f833951d83d", "score": "0.53644365", "text": "def history():\n histories = db.execute(\"SELECT * FROM transactions WHERE id = :user_id ORDER BY date DESC\" , user_id=session['user_id'])\n return render_template(\"history.html\", histories=histories)", "title": "" }, { "docid": "4d4c4cc62bda412c1c0eaf0673056fd2", "score": "0.5354994", "text": "def _extract_history(self, env):\n history = env._get_history() # pylint: disable=protected-access\n if flags.FLAGS.validate_history:\n self._validate_history(history)\n if self._realign_fn is not None:\n return self._realign_fn(history)\n return history", "title": "" }, { "docid": "1eb11b7b1cb789ff0a13239f8773d904", "score": "0.53523344", "text": "def history(self, days: int) -> List[sqlite3.Row]:\n\n sql = \"\"\"WITH RECURSIVE calendar(days_ago, dt) AS\n (SELECT 1, date('now', '-1 day')\n UNION ALL SELECT days_ago+1, date(dt, '-1 day')\n FROM calendar LIMIT ?)\n SELECT days_ago, dt as 'date [date]',\n SUM(COALESCE(duration_seconds / 3600.0, 0)) as hours\n FROM calendar\n LEFT JOIN sleeplog ON date(start_utc)=dt\n AND end_utc IS NOT NULL\n GROUP BY dt\n ORDER BY dt\"\"\"\n\n placeholders = (days,)\n\n return self._select(sql, placeholders)", "title": "" }, { "docid": "98231986cc100d5c9f49e048770c767e", "score": "0.53505653", "text": "def history_query(url, is_secure, username, password, project, queries, dates): \n _log.debug(\"queries %s, dates %s\", queries, dates)\n mingle = MingleApi(url, is_secure, username, password, project)\n return mingle.historical_queries(queries, dates)", "title": "" }, { "docid": "1f8f7b070d2e6a405de7337a91fdadf5", "score": "0.534252", "text": "def _get_fin_months(self):\n years = []\n for h in range(self.history):\n years.append(self.cur_yr-h)\n return [ str(y) + m for y in years for m in self.months ]", "title": "" }, { 
"docid": "312093b603414e870c0c96164f515812", "score": "0.5326156", "text": "def history():\n\n # Get list length\n length = len(usr.transactions())\n\n # Check if there is transaction history available\n if(length == 0):\n return render_template(\"no-history.html\")\n\n # Keep stock_id:symbol here\n symbols = {}\n\n # Keep stock_id:company name here\n company = {}\n\n # Fill the dictionaries\n for pair in db.execute(\"SELECT * FROM stocks\"):\n symbols[pair[\"stock_id\"]] = pair[\"symbol\"]\n company[pair[\"stock_id\"]] = lookup(pair[\"symbol\"])[\"name\"]\n\n share_price = [usd(x[\"share_price\"]) for x in usr.transactions()]\n return render_template(\"history.html\", share_price=share_price, transactions=usr.transactions(), length=length, symbols=symbols, company=company)", "title": "" }, { "docid": "b735a30b3a570659519ce43c416cfade", "score": "0.53233474", "text": "def stats_history(self, query=None):\n if query is None:\n query = {}\n path = \"/stats/history\"\n return self.call_api(path=path, query=query, verb=\"GET\")", "title": "" }, { "docid": "3d6b9dd08095a552f359facc4ae0f692", "score": "0.5321415", "text": "def history():\n histories = get_histories(session['user_id'])\n return render_template(HISTORY_PAGE, histories=histories)", "title": "" }, { "docid": "8d283417ff42c32360ec3e9288af32c8", "score": "0.5316599", "text": "def DBHistories():", "title": "" }, { "docid": "7158ae2c60b88295fcad1b735ed676d6", "score": "0.5277842", "text": "def history(self, real_time=None):\n if self.__real_end__ != TIME_INF:\n raise TypeError(\"historical object can't be modified\")\n\n if real_time and not isinstance(real_time, datetime.datetime):\n raise TypeError(\"real time must be datetime object\")\n\n try:\n hist = pychronos._raw_timeseries_api_instance.app_api_timeseries_object_history(self.__coll_id__,\n real_start=real_time)\n\n except ApiException as e:\n api_exception_helper(e)\n\n out = []\n for h in hist:\n c = TimeSeries(self.__space_name__, self.__coll_name__, self.__ts_name__)\n c._fetch(obj=h)\n out += [c]\n\n return out", "title": "" }, { "docid": "a9d6fb9c37f2d778e67b3feb41ebbdda", "score": "0.52764153", "text": "def history(self):\n return _data_equalizing_swig.data_equalizing_sptr_history(self)", "title": "" }, { "docid": "874a604ffb0791abb99e318442853dfb", "score": "0.52727664", "text": "def history():\n\n # Retrieve information from the TRANSACTIONS table\n TRANSACTIONS_data = db.execute(\"SELECT symbol, shares, price, date FROM transactions WHERE person_id = :id;\",\n id=session[\"user_id\"])\n\n # Return HTML page with retreived data (looping and displaying is in index.html)\n return render_template(\"history.html\", TRANSACTIONS_data=TRANSACTIONS_data)", "title": "" }, { "docid": "bc84efc11cb503f9b2c8696cf9579389", "score": "0.5262469", "text": "def history():\n h = db.execute(\"SELECT * FROM thistory WHERE user_id=:user_id\", user_id=session[\"user_id\"])\n history = []\n for his in h:\n n = lookup(his[\"symbol\"])\n item = dict(date=his[\"timestamp\"], name=n[\"name\"], symbol=his[\"symbol\"],\n shares=his[\"shares\"], price=his[\"price\"], total=his[\"total\"], typ=his[\"typ\"])\n history.append(item)\n return render_template(\"history.html\", history=history)", "title": "" }, { "docid": "5aed6a60fbd8a9d6da5c8e45373cbd05", "score": "0.52616185", "text": "def older_issue(self):\n data_list = [\n self.project_name,\n self.sensor,\n 'Older than a week',\n convert(self.diff),\n self.last_updated\n ]\n if options.PrintOld:\n store(self.project_name, data_list)\n else:\n 
logger.debug(data_list)", "title": "" }, { "docid": "38923f38c6f3635f18753bd1016f7880", "score": "0.5261192", "text": "def test_more_than_a_year(self):\n with self.assertRaises(ValueError):\n self.stock.get_historical(self.start_date, '2017-03-03')", "title": "" }, { "docid": "b7b91498139a526e88172a14530332f1", "score": "0.5260088", "text": "def main():\n geep_str = 'grep -w date d3_commits_1yrfromtoday.txt > date_in_year'\n grep_str = 'grep -n2 message d3_commits_1yrfromtoday.txt > tmp.txt'\n os.system(grep_str)\n grep_str = 'grep -w date tmp.txt > dates_in_year'\n os.system(grep_str)\n commit_dates = []\n # open new file and append commit dates to new list\n fp = open('dates_in_year')\n # readlines in input, should contain dates from the github api d3.js curl output\n for line in fp.readlines():\n c = shlex.split(line)\n commit_date = translate_commit_date(c[-1])\n commit_dates.append(commit_date)\n fp.close()\n # Call the date histogram function and term hist values and date ranges\n week_array = create_date_hist(commit_dates)\n week_days= ['Mon','Tues','Wed','Thurs','Fri','Sat','Sun']\n # Find the day with max commits\n max_freq = max(week_array)\n max_bin_indicies = [i for i, freq in enumerate(week_array) if freq == max_freq]\n for bin in max_bin_indicies:\n print \"most frequent commit day in week\"\n print week_days[bin]\n x = range(0,7)\n plot_hist_data(x,week_array,xlabel = 'days of week',ylabel = '# commits per day of week', LABELS = week_days, title = 'Commits Per Day of Week - Year Summary', savestr = 'commits_per_weekday.png')", "title": "" }, { "docid": "fe47530149672c962ed4947ecc4f8a98", "score": "0.5254533", "text": "def get_historical_data_copy(self):\n return copy.deepcopy(self._historical_data)", "title": "" }, { "docid": "a6e7277b0d8dad6ad96398af27fb94c9", "score": "0.52523553", "text": "def stock_history(symbol):\n api_key = app.config['API_KEY']\n output = 'json'\n url = (\"https://api.worldtradingdata.com/api/v1/history?symbol=%s&sort=newest&output=%s&api_token=%s\"\n % (symbol, output, api_key))\n page = urllib.request.urlopen(url)\n rawdata = page.read()\n return json.loads(rawdata)", "title": "" }, { "docid": "65039f3da685ef5a15606526089e5695", "score": "0.52499473", "text": "def get_years():\n\n sql_years = \"\"\"SELECT DISTINCT(YEAR(time)) FROM nms_log ORDER BY YEAR(time)\"\"\"\n years = conn.get_data(sql_years)\n return years", "title": "" }, { "docid": "bb4b1f804c6aa86c783a78d41bdf6d82", "score": "0.52405196", "text": "def tobs():\n #find last date\n last_date = session.query(Measurement.date).\\\n order_by(Measurement.date.desc()).\\\n first()\n last_date = dt.datetime.strptime(last_date.date, '%Y-%m-%d')\n #find year before last date\n yr_ago = last_date - dt.timedelta(days = 365)\n\n last_yr_data = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= yr_ago).all()\n return(jsonify(last_yr_data))", "title": "" }, { "docid": "3b2c703292fdebcbecf462cc2a3f8434", "score": "0.5239264", "text": "def fill_history(self):\n raise NotImplementedError, \"Implement fetch all history for a stock\"\n pass", "title": "" }, { "docid": "85a95ac6bcefb253449bdcc253462449", "score": "0.5238", "text": "def get_last_three_months(self, today):\n entries = []\n start = today - datetime.timedelta(weeks=13)\n for ent in self.entries:\n if ent.date >= start and ent.date <= today:\n entries.append(ent)\n return entries", "title": "" }, { "docid": "71a9da56552faa7d31ba80fbb1b1463a", "score": "0.52325773", "text": "def history(self):\n return 
_data_equalizing_swig.data_test_sptr_history(self)", "title": "" }, { "docid": "7fbc1ba33e422119e8e0192e9adcb381", "score": "0.52306575", "text": "def get_history(self, history_start_date, history_end_date, **matchargs):\n\n return self.historian.get_history(db.GSR_TYPE, country=self.country,\n start_date=history_start_date,\n end_date=history_end_date, **matchargs)", "title": "" }, { "docid": "2b14f74e0c1d4b666b5e515b7533ea44", "score": "0.5229664", "text": "def history():\n story = db.execute(\"SELECT * FROM history WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n for row in story:\n\n row[\"dates\"] = time.ctime(row[\"dates\"])\n return render_template(\"history.html\", story=story) # returns table", "title": "" }, { "docid": "9b2c956b3ace3c0f469d07edf1b96ead", "score": "0.5229273", "text": "def history(self) -> keras.callbacks.History:\n if self._model is None:\n return None\n\n return self._model.model.history", "title": "" }, { "docid": "104cf41121162ac67934267150890f89", "score": "0.52267385", "text": "def last_known_year(self):\n return self._last_known_year", "title": "" }, { "docid": "792abafffaf5bde55a00b7f1fb1ea01a", "score": "0.52248806", "text": "def get_history(self, halfmove):\n return copy.deepcopy(self.history[halfmove])", "title": "" }, { "docid": "6e8ded1da9d4108ee59b7a99cc41b85a", "score": "0.52199286", "text": "def get_history(self, end_date: pd.Datetime, duration: int) -> BarDataList:\n start_date = end_date - pd.Timedelta(duration, 'D')\n if start_date < self.df.index[0]:\n self.pull_data(start_date)\n log.debug(f'history data from {start_date} to {end_date}')\n self.bars = self.get_BarDataList(self.df.loc[start_date: end_date])\n return self.bars", "title": "" }, { "docid": "9089f7bed67928805f01c1e90f27c443", "score": "0.5212771", "text": "def get_market_history(self, market, count):\n return self.api_query('getmarkethistory', {'market': market, 'count': count})", "title": "" }, { "docid": "08121317fb9844e0c5fca59280e709f4", "score": "0.52056456", "text": "def get_historical(self):\n if os.path.exists(self.data_path):\n data = pd.read_csv(self.data_path, index_col=0)\n data['Date'] = pd.to_datetime(data['Date'])\n first_date, end_date = data.iloc[0, 0], data.iloc[-1, 0]\n\n # Fetch new data if we do not have the dates required on our\n # cache yet\n if first_date > self.start or end_date < self.end:\n data = self.fetch()\n # Or simply filter to avoid calling the API again\n else:\n mask = (self.start < data['Date']) & (data['Date'] < self.end)\n data = data[mask]\n data.reset_index(drop=True, inplace=True)\n else:\n data = self.fetch()\n return data", "title": "" }, { "docid": "e6605f84b468c20fcec57fe07a4eccc6", "score": "0.5205499", "text": "def get_curr_prior_fiscal_year_end(company):\n\n current_and_prior_fye = {}\n # reverse sort, so the prior fiscal year end always comes next, after the current\n fiscal_year_end_list = FiscalYearEnd.objects.filter(company=company).order_by('-fye_start_date')\n\n print('^^^^^^^^^ got fye list')\n for index, fye in enumerate(fiscal_year_end_list):\n print('^^^^^^^^^^^^ index is ', index)\n if fye.is_active:\n print('^^^^^^^^^^^^^ setting current and prior when index is ', index)\n current_and_prior_fye[\"current\"] = fye\n current_and_prior_fye[\"prior\"] = fiscal_year_end_list[index + 1]\n break\n\n print(\"^^^^^^^^^ current is \", current_and_prior_fye[\"current\"].fye_start_date, \" to \",\n current_and_prior_fye[\"current\"].fye_end_date)\n\n print(\"^^^^^^^^^ previous is \", 
current_and_prior_fye[\"prior\"].fye_start_date, \" to \",\n current_and_prior_fye[\"prior\"].fye_end_date)\n\n return current_and_prior_fye", "title": "" }, { "docid": "3bf4eae225578c42c6283dad6f5c9d60", "score": "0.52018374", "text": "def _get_history(self, axes):\n return self.history.setdefault(axes, [])", "title": "" }, { "docid": "5304f016ac3a1b9710fc65d8fcccc781", "score": "0.51938826", "text": "def get(self, save):\n\n releases = []\n\n year = 2012\n current_url = self.START_URL + \"?tab1=\" + str(year)\n overview_page = requests.get(current_url)\n while overview_page.text.find(\"list-item list-item-even\") != -1:\n\n tree = fromstring(overview_page.text)\n\n linkobjects = tree.xpath('//*/p[@class=\"article-list-item-date\"]//a')\n links = [\n self.BASE_URL + l.attrib[\"href\"]\n for l in linkobjects\n if \"href\" in l.attrib\n ]\n\n for link in links:\n logger.debug(\"ik ga nu {} ophalen\".format(link))\n current_page = requests.get(link)\n tree = fromstring(current_page.text)\n try:\n title = \" \".join(tree.xpath('//*[@class=\"default\"]/h2/text()'))\n except:\n print(\"no title\")\n title = \"\"\n try:\n d = tree.xpath('//*/time[@class=\"date\"]//text()')[0].strip()\n jaar = int(d[-4:])\n maand = MAAND2INT[d[2:-4].strip()]\n dag = int(d[:2])\n datum = datetime.datetime(jaar, maand, dag)\n except Exception as e:\n print(\"could not parse date\")\n print(e)\n datum = None\n try:\n text = \" \".join(tree.xpath('//*[@class=\"default\"]/p//text()'))\n except:\n logger.info(\"oops - geen textrest?\")\n text = \"\"\n text = \"\".join(text)\n releases.append(\n {\n \"text\": text.strip(),\n \"title\": title.strip(),\n \"date\": datum,\n \"url\": link.strip(),\n }\n )\n\n year += 1\n current_url = self.START_URL + \"?tab1=\" + str(year)\n overview_page = requests.get(current_url)\n\n return releases", "title": "" }, { "docid": "8c5cb94cad359de209a569dde05f72f6", "score": "0.5191609", "text": "def history():\n records = []\n\n # get dictionary of symbol and shares that user owns\n rows = db.execute(\"SELECT symbol, shares, prices, tran_date FROM trans where id = :id ORDER BY tran_date;\", id = session['user_id'])\n\n # change dictionary to list and all into rows of records\n for each in rows:\n records.append([each['symbol'], each['shares'], usd(each['prices']), each['tran_date']])\n return render_template(\"history.html\", records=records)", "title": "" }, { "docid": "1093a9b7dc510c38bde8461d05301c0e", "score": "0.51911044", "text": "def getObsYears():\n return {1:1990,11:2000,17:2006,23:2012,29:2018}", "title": "" }, { "docid": "84040661264aa781c696fd722d81cda1", "score": "0.51884395", "text": "def get_recent_proxy_list(self):", "title": "" }, { "docid": "be9503e8a5d996fcfefe7e5f1fea73c7", "score": "0.5186664", "text": "def history(self):\n return _data_equalizing_swig.large_fft_sptr_history(self)", "title": "" }, { "docid": "707f0a5568beac01aebf9fa54abdb4dd", "score": "0.5180572", "text": "async def get_gameweek_history(self, gameweek=None):\n if hasattr(self, \"_history\"):\n history = self._history\n else:\n history = await fetch(\n self._session, API_URLS[\"user_history\"].format(self.id))\n\n self._history = history\n\n if gameweek is not None:\n valid_gameweek(gameweek)\n return next(gw for gw in history[\"current\"]\n if gw[\"event\"] == gameweek)\n\n return history[\"current\"]", "title": "" }, { "docid": "1ff2ead62591aeb87f6a298178c0e421", "score": "0.5178022", "text": "def history(self,key):\n self.check()\n checker = Checker(self)\n self.checkers.append(checker)\n return 
self.player.history(self.index,key,checker=checker)", "title": "" }, { "docid": "167cbd58a3281372151fc60e9b6e7e3f", "score": "0.5171644", "text": "def compute_historic_dates():\n if gosm_options.historic_data_start is not None:\n historic_start = pd.Timestamp(gosm_options.historic_data_start)\n else:\n historic_start = None\n\n if gosm_options.historic_data_end is not None:\n historic_end = pd.Timestamp(gosm_options.historic_data_end)\n else:\n historic_end = None\n\n return historic_start, historic_end", "title": "" }, { "docid": "75a62b2d25146a5534e0b59bafa3f41e", "score": "0.5169473", "text": "def get_history(stock_ticker):\n\tif (len(stock_ticker) == 4 and stock_ticker.isalpha()):\n\n\t\t#This if statment will be replaced, instead it will do a different cursor\n\t\t#select to find out if stock_id in Stock_Info then will pull the data.\n\n\t\tquery = (\"SELECT Date, High, Low, Open ,Close FROM \"+stock_ticker.upper()+\"_HIST\")\n\t\ttry:\n\t\t\tdata = execute_query(query)\n\t\texcept:\n\t\t\tdata = None\n\n\t\tdf = pd.DataFrame.from_records(data)\n\t\tdf.columns = ['Date','High','Low','Open','Close']\n\t\treturn(df)\n\n\telse:\n\t\tprint('Invalid Name')\n\t\treturn None", "title": "" }, { "docid": "26de61895e0c8e020d626b73aeadacec", "score": "0.5162718", "text": "def history():\n\n history = db.execute(\n \"SELECT date, time, symbol, price, shares, name FROM shares WHERE userid = :userid ORDER BY date ASC\", userid=session[\"user_id\"])\n\n return render_template(\"history.html\", history=history)", "title": "" }, { "docid": "4229e1e92dfb187f404eb392323888ee", "score": "0.516085", "text": "def get_history(self):\n response = self.execute_service_call(self.service.users().history().list(\n userId='me',\n startHistoryId=self.history_id,\n ))\n\n # Get the messageIds.\n history = response.get('history', [])\n\n if len(history) and self.history_id < history[-1]['id']:\n self.history_id = history[-1]['id']\n\n return history", "title": "" } ]
680bbb3f8eef67b3cf95a7ab8b726022
method returns data on a particular row from the database
[ { "docid": "6a7b364b630db5ea4e7a786272d9323e", "score": "0.0", "text": "def retrieve_one(cls, query_string):\n cls.cur.execute(query_string)\n return cls.cur.fetchone()", "title": "" } ]
[ { "docid": "a67f11b8cab53f3c4a2e0c3f3678feb8", "score": "0.73097724", "text": "def GetItem(self, row):", "title": "" }, { "docid": "a67f11b8cab53f3c4a2e0c3f3678feb8", "score": "0.73097724", "text": "def GetItem(self, row):", "title": "" }, { "docid": "f21e043b20c4889fc58c2e01f5e0ae69", "score": "0.7084692", "text": "def _fetch_single_row(self, **kwargs) -> Any:\n pass", "title": "" }, { "docid": "ee6318a3aad55756991dfd146db39de4", "score": "0.70612884", "text": "def GetRow(self, item):", "title": "" }, { "docid": "ed2b306cdbefc781c8cab9556be296a3", "score": "0.7055806", "text": "def get_row(self):\r\n return self.__row", "title": "" }, { "docid": "13aafb8213b9a414e06d2f04ae758e02", "score": "0.68389535", "text": "def get_data(self, row, column):\n uid = self.get_uid_by_row(row)\n # To save on function calls, format one whole row in one step and cache\n # the result.\n try:\n row_content = self._row_cache[uid]\n except KeyError:\n run = self._catalog[uid]\n row_content = self._row_factory(run)\n self._row_cache[uid] = row_content\n item_content = row_content[column] # content for one cell of the grid\n return item_content", "title": "" }, { "docid": "ede89607e9d5106b616e79e580919119", "score": "0.67952484", "text": "def get_rows():", "title": "" }, { "docid": "5bb274c872d2c481572a5f7fa083d396", "score": "0.67231226", "text": "def _get(self, what):\n\n with sqlite3.connect(self._path) as conn:\n try:\n cursor = conn.cursor()\n cursor.execute(self._get_query, what)\n record = cursor.fetchone()\n return record[0]\n\n except sqlite3.Error as e:\n print('_get ERROR: %s: ' % e.args[0])", "title": "" }, { "docid": "243164d9ac9e3a81fae165d23de00a25", "score": "0.6682757", "text": "def __getitem__(self, orcid_id):\n c = self._conn.cursor()\n c.execute(\"\"\"\n select orcid_id, active, last_update, person_uri, person_id, person_class, confirmed from orcid_ids where \n orcid_id=?\n \"\"\", (orcid_id,))\n row = c.fetchone()\n if not row:\n raise IndexError\n return row", "title": "" }, { "docid": "1b377672874454ea869322b52a0ec17b", "score": "0.66520226", "text": "def get(self, column, row):\n return self._data[self._data_index(column, row)]", "title": "" }, { "docid": "24c7b778d7249d3db818963368cdd22f", "score": "0.6631874", "text": "def getRecordById(connection, rowid):\n cursor = connection.cursor()\n cursor.execute(\"select time, data from ecg where rowid=?\", (rowid,))\n return cursor.fetchone()", "title": "" }, { "docid": "663b1e7a9f3075bce874c60851569a0e", "score": "0.6592978", "text": "def row_data(self, index):\n return self[index].data", "title": "" }, { "docid": "663b1e7a9f3075bce874c60851569a0e", "score": "0.6592978", "text": "def row_data(self, index):\n return self[index].data", "title": "" }, { "docid": "da0bacf0aba1bbdae01f27aa62c3c2ae", "score": "0.6575031", "text": "def select_directly(self, row_id):\n rows = self.engine_raw.execute(\n sa.select([test_table.c.data]).where(test_table.c.id == row_id))\n return rows.first()", "title": "" }, { "docid": "618d05d28cf736b3ae90546d47abedda", "score": "0.6567003", "text": "def row(self, row_index):\n return self._rows[row_index]", "title": "" }, { "docid": "710a7268a9a10e81d8d2d381ec58518a", "score": "0.6528739", "text": "def fetchone(self):\n result = self._rows[self._pos]\n self._pos = self._pos+1\n return result", "title": "" }, { "docid": "e8e7d6e9f8e8ef698b12c8d3207d88b3", "score": "0.6495013", "text": "def get_table_cell(self, table, index, param):\n script = 'SELECT %s FROM %s WHERE id=%d' % (param, table, index)\n result = 
self.__db.get(script)\n return result[0][0]", "title": "" }, { "docid": "4ae396092651659baa7af7df3a1621c4", "score": "0.64684284", "text": "def row(self, index):\n return self[index]", "title": "" }, { "docid": "4ae396092651659baa7af7df3a1621c4", "score": "0.64684284", "text": "def row(self, index):\n return self[index]", "title": "" }, { "docid": "08555d2240e3de0ae0abbccec9c285ea", "score": "0.64653116", "text": "def get_data(self, row_factory, query):\n \n with sqlite3.connect(self.db_path) as conn:\n # This handles situations where we don't need a row_factory\n if row_factory != None:\n conn.row_factory = row_factory\n db_cursor = conn.cursor()\n db_cursor.execute(query)\n response = db_cursor.fetchall()\n \n return response", "title": "" }, { "docid": "87db15e84bf195f9b86e6e824f8d10c4", "score": "0.64581555", "text": "def get(self, query, *parameters):\n #basic method query\n rows = self.query(query, *parameters)\n if not rows:\n return None\n elif len(rows) > 1:\n raise SqliteError(\"Multiple rows returned for Database.get() query\")\n else:\n return rows[0]", "title": "" }, { "docid": "1d7e72ee0a6477ba30e08e34ea814cd8", "score": "0.6455562", "text": "def get_return_row(self):\n return self._get_return_row()", "title": "" }, { "docid": "1d7e72ee0a6477ba30e08e34ea814cd8", "score": "0.6455562", "text": "def get_return_row(self):\n return self._get_return_row()", "title": "" }, { "docid": "76a04c804c5751aba7ffc09187e17b10", "score": "0.64343655", "text": "def select(self,**kwargs):\n recs = self.db.execute(self._select_sql(**kwargs)).fetchall()\n if recs:\n return self.rows_to_data_row(recs)\n return None", "title": "" }, { "docid": "30fe35265bb0182d88cea8793e796cc2", "score": "0.6419892", "text": "def __getRow(self, row):\n\n\t\tself._notifyAccess()\n\n\t\t# test if row is outside range\n\t\tif row < 0 or row >= self.__total:\n\t\t\treturn None\n\n\t\t# check if it exists in a list of already made objects\n\t\tif row > 0 and row < len(self.__objects):\n\t\t\treturn self.__objects[row]\n\n\t\t# if not empty create and return the object made from that data\n\t\tif self.__raw[row] is not None:\n\t\t\tself.__objects.insert(row, self.__mapper.createObject(self.__raw[row]))\n\t\t\treturn self.__objects[row]\n\t\telse:\n\t\t\treturn None", "title": "" }, { "docid": "5e6fa9b6759380c96178dfb4e82c421d", "score": "0.6411359", "text": "def retrieve(self):\n conn = sqlite3.connect(\"health_records.db\") # Define Database\n cursor = conn.cursor()\n sql = \"SELECT * FROM h_records WHERE id=2;\"\n cursor.execute(sql)\n self.results = cursor.fetchone()\n return self.results", "title": "" }, { "docid": "9eb6c03da596f463814fe8bab935d244", "score": "0.63964915", "text": "def SelectRow(self, row):", "title": "" }, { "docid": "70677baa29df89235a5cbb3c4cf520b8", "score": "0.6373136", "text": "def get_row(self, index):\n row = self._table.getRow(index)\n return dict(row.getData()) if row else None", "title": "" }, { "docid": "a87b8e6a0a24b9bf39e97fc0de5b3f8b", "score": "0.63695586", "text": "def select_raw(self,sql,params=''):\n return self.rows_to_data_row(self.db.execute(sql,params).fetchall())", "title": "" }, { "docid": "0fffeb3baaccdbdeafca7f5c96ea913f", "score": "0.6367452", "text": "def _single_row(self,rows): \n if rows:\n if len(rows) > 0:\n return rows[0]\n return None", "title": "" }, { "docid": "64fefc4fac0c0556f5a7c41c71cdea44", "score": "0.63611126", "text": "def getRow(table, id):\n q = \"SELECT * FROM \" + cleanTable(table) + \" WHERE id = ?\"\n return query_db(q, [id,], True)", "title": "" }, 
{ "docid": "65c6774a0adb25a76a358b663f25fbd3", "score": "0.6347156", "text": "def row(self):\n return self._row", "title": "" }, { "docid": "3b2e1fc9ab320fc451056476604e458c", "score": "0.6343751", "text": "def get(self):\n\n return rows([\"world\"])", "title": "" }, { "docid": "5ddac130e38fd53d66d82e05161e2da5", "score": "0.6340951", "text": "def fetch(self, table_name, primary_id):\n return r.db(self._name).table(table_name)\\\n .get(primary_id).run(self._conn)", "title": "" }, { "docid": "a5e89abc3c12a39d21680a33c73db925", "score": "0.6340839", "text": "def get(self, query, *parameters):\r\n rows = self.query(query, *parameters)\r\n if not rows:\r\n return None\r\n elif len(rows) > 1:\r\n raise Exception(\"Multiple rows returned for Database.get() query\")\r\n else:\r\n return rows[0]", "title": "" }, { "docid": "c8047259045a684ed7da384540458f45", "score": "0.63341814", "text": "def GetValueByRow(self, row, col):", "title": "" }, { "docid": "c8047259045a684ed7da384540458f45", "score": "0.63341814", "text": "def GetValueByRow(self, row, col):", "title": "" }, { "docid": "ba61c33765e27195bd61d5d9b4247c4e", "score": "0.63171655", "text": "def get_field_value(cls, db, META, id, field):\n try:\n db.open()\n db.curs.execute(\"SELECT {0} from {1} where {2}={3}\".format(field, META[\"table\"], META[\"id\"], id))\n row = db.curs.fetchone()\n finally:\n db.close()\n return row", "title": "" }, { "docid": "cf2b2fd3fd19c2aeac029fabb14b1658", "score": "0.63092864", "text": "def get(self,id,**kwargs):\n return self._single_row(self.select(where='{}.id = {}'.format(self.table_name,cleanRecordID(id),)))", "title": "" }, { "docid": "1497e15ed184788183f04c8b756a526a", "score": "0.6307543", "text": "def select_row(self, sql, params=()):\n if sql == \"\":\n raise Exception(\"select_row: SQL cannot be empty.\")\n return None\n \n try:\n c = self.conn.cursor()\n c.execute(sql, params)\n result = c.fetchone()\n c.close()\n except Exception as e:\n raise Exception(e)\n\n if result:\n return result\n else:\n return None", "title": "" }, { "docid": "79b57b11553edff1b8c7ee52b8abeecd", "score": "0.6301499", "text": "def get(self, cid):\n db = sqlite3.connect(self._dbfile)\n curs = db.cursor()\n sql = (\n \"SELECT id, post, user, content, datetime_int \"\n \"FROM %s \"\n % (self._table)\n )\n try:\n sql += \"WHERE id is 2;\"\n curs.execute(sql)\n db.commit()\n res = curs.fetchall()\n except sqlite3.Error as err:\n print(err)\n return False\n finally:\n db.close()\n return res", "title": "" }, { "docid": "95819ca07b04a81eec6cc0caa94a614d", "score": "0.62878305", "text": "def get_first_row(com, sql,*params):\n db = com.env.get_db_cnx()\n cur = db.cursor()\n data = None;\n try:\n cur.execute(sql, params)\n data = cur.fetchone();\n db.commit();\n except Exception, e:\n com.log.error('There was a problem executing sql:%s \\n \\\n with parameters:%s\\nException:%s'%(sql, params, e));\n db.rollback()\n try:\n db.close()\n except:\n pass\n return data;", "title": "" }, { "docid": "d82c30d107ba44094dbcade761dc4384", "score": "0.62871814", "text": "def row(self):\r\n return self._row", "title": "" }, { "docid": "7ba7ae918fd67cc33fa6056cbf5f0f32", "score": "0.6282097", "text": "def GetSelectedRow(self):", "title": "" }, { "docid": "0ce76a7ca4a65e2e046f1d1cb397fbb6", "score": "0.6240871", "text": "def value(self, row):\n raise NotImplementedError", "title": "" }, { "docid": "6c4ec4d175670092fa327f31bd6c1944", "score": "0.6228841", "text": "def row(self, id_):\n try:\n row = self.data.loc[id_]\n except KeyError:\n msg = u\"{} 
is missing under the domain '{}'\".format(id_, self.domain_name)\n raise KeyError(msg)\n\n if isinstance(row, pd.DataFrame):\n if len(row) > 1:\n raise Exception(u\"Multiple rows with id '{}' in this domain.\"\\\n .format(id_))\n\n elif isinstance(row, pd.Series):\n return row.to_dict()\n\n else:\n raise Exception(\"Unexpected error. Please debug.\")", "title": "" }, { "docid": "6a90cc3927f9b2b8905361da67c33c19", "score": "0.6211474", "text": "def row(self):\n return self._row", "title": "" }, { "docid": "6a90cc3927f9b2b8905361da67c33c19", "score": "0.6211474", "text": "def row(self):\n return self._row", "title": "" }, { "docid": "23bf7d67079b461e35e85e13d14caf53", "score": "0.62079775", "text": "def db_get(user_id,column):\n c.execute(\"SELECT {} FROM game WHERE id=?\".format(column),(user_id,))\n value = c.fetchone()\n if value == None:\n return None\n return value[0]", "title": "" }, { "docid": "55b69b3c0aefc19b0aa80fcab01166ad", "score": "0.6198264", "text": "def query(self,sql):\n # import pdb;pdb.set_trace()\n out = None\n \n data = self.db.execute(sql).fetchall()\n if data != None and len(data) > 0:\n out = self.rows_to_data_row(data)\n return out", "title": "" }, { "docid": "7ad356e91a036fda995d49b9ba9424d7", "score": "0.6189273", "text": "def process(self, row):", "title": "" }, { "docid": "a83c09aa1009ebb78078a3750f715326", "score": "0.6153561", "text": "def select_one(self,**kwargs):\n rows = self.rows_to_data_row(\n [self.db.execute(\n self._select_sql(**kwargs)\n ).fetchone()]\n )\n return self._single_row(rows)", "title": "" }, { "docid": "97dd92ec6f4a8456cffc93a7d38e035f", "score": "0.6124418", "text": "def getValue(table, column, id):\n row = getRow(table, id)\n return row[column] if row is not None else None", "title": "" }, { "docid": "bdbc5f4958a59cd73fa4f91d4db1e82f", "score": "0.6122619", "text": "def __getitem__(self, item: int):\n\n return self.rows[item]", "title": "" }, { "docid": "bdbc5f4958a59cd73fa4f91d4db1e82f", "score": "0.6122619", "text": "def __getitem__(self, item: int):\n\n return self.rows[item]", "title": "" }, { "docid": "6874e75eb29619565348459c782b415c", "score": "0.6116439", "text": "async def fetchrow(\n self,\n query: str,\n *args,\n json: bool = False,\n params: Optional[Dict[str, Any]] = None,\n query_id: str = None,\n decode: bool = True,\n ) -> Optional[Record]:\n async for row in self._execute(\n query,\n *args,\n json=json,\n query_params=params,\n query_id=query_id,\n decode=decode,\n ):\n return row\n return None", "title": "" }, { "docid": "5db4612174ac25a9fed0c88c54624c63", "score": "0.6112494", "text": "def read(table, id_):\n for record in table:\n if record[ID] == id_:\n return record", "title": "" }, { "docid": "d4cf9b6d3b3e2997321f8b9db585849b", "score": "0.6111613", "text": "def fetchone(self):\n self._check_open()\n try:\n return self._fetch_row()\n except AttributeError:\n raise ProgrammingError, \"no query executed yet\"", "title": "" }, { "docid": "62b42dc0a719716479f9260d141e1903", "score": "0.60950094", "text": "def get_data(self, sql):\n try:\n self.open_connection()\n self._cursor_.execute(sql)\n result = self._cursor_.fetchall()\n self._conn_.commit()\n self._cursor_.close()\n self._conn_.close()\n return result\n except MySQLdb.Error, message:\n print \"<br>SQL query was not successful: <i> %s </i><br>Error %s\" % (sql, message)\n return 0", "title": "" }, { "docid": "ad6bcc09953061ed27f742d652c354ef", "score": "0.6077956", "text": "def getItem(self, row, col):\n return self.rows.get(row, {}).get(col, None)", "title": "" 
}, { "docid": "9f55083bc78871fe7f1cc0ad7621f5a1", "score": "0.60753596", "text": "def get_row_by_id(cls, _id):\n db_type = Config.get_db_type()\n if db_type == 0:\n pass\n elif db_type == 1:\n return cls.sqlite_get_row_by_id(_id)\n elif db_type == 2:\n return cls.postgres_get_row_by_id(_id)", "title": "" }, { "docid": "9f55083bc78871fe7f1cc0ad7621f5a1", "score": "0.60753596", "text": "def get_row_by_id(cls, _id):\n db_type = Config.get_db_type()\n if db_type == 0:\n pass\n elif db_type == 1:\n return cls.sqlite_get_row_by_id(_id)\n elif db_type == 2:\n return cls.postgres_get_row_by_id(_id)", "title": "" }, { "docid": "00dc74a156ee9c3221dac8523992c89b", "score": "0.60692984", "text": "def fetchone(self) -> Optional[sql.Row]:\n return self.cursor.fetchone()", "title": "" }, { "docid": "e0247d9567a989f82ab876c85d9c2cf6", "score": "0.6063761", "text": "def __getitem__(self, key):\n\t\trow = self.__getRow(key)\n\n\t\tif row is not None:\n\t\t\treturn row\n\t\telse:\n\t\t\traise IndexError", "title": "" }, { "docid": "e63b69c1e2de5ea2fa6db2e1e7ab17fe", "score": "0.6060502", "text": "def select_one_raw(self,sql,params=''):\n return self._single_row(self.select_raw(sql,params))", "title": "" }, { "docid": "5c48b32aca3d80cd55b4f8efcc730280", "score": "0.60408175", "text": "def retrieveData(self, index=0):\r\n\r\n pass", "title": "" }, { "docid": "c4c0adcb14efca32bcc0164cc2a980ff", "score": "0.603671", "text": "def __getitem__(self, row: int):\r\n return self._array[row]", "title": "" }, { "docid": "f8667802ff757b80b027cbab1340ee8c", "score": "0.6031457", "text": "def database_get_records(self, authorName):\n query = self.db_query % (authorName)\n sql = text(query) # SQL Query\n rows = self.engine.execute(sql) # Get Rows\n rows = [dict(row) for row in rows] # Render as dict\n return rows[0]", "title": "" }, { "docid": "bcbc2cff1110b340982e93625fdafaa0", "score": "0.602574", "text": "def get_one(self, sql, param=()):\n try:\n cursor, conn = self.execute(sql, param)\n res = cursor.fetchone()\n return res\n except Exception as e:\n self.logger.debug(sql)\n raise e\n finally:\n self.close(cursor, conn)", "title": "" }, { "docid": "dcad24b35f3b367387a61233e9015be8", "score": "0.60247964", "text": "def fetch_single_data(self, query):\n try:\n DatabaseConnect.cursor.execute(query)\n response = DatabaseConnect.cursor.fetchone()\n return response\n except Exception as error:\n print(error)\n return False", "title": "" }, { "docid": "e1f9af44cc5d155e13424e24a8ec7ae7", "score": "0.60112745", "text": "def fetchone(self, sql, *params):\n vs = self.fetchall(sql, *params)\n return vs[0] if vs else None", "title": "" }, { "docid": "338affe12382b279e5f597777314d647", "score": "0.60077196", "text": "def get(self, query, parameters = None):\n\t\trows = self.query(query, parameters)\n\t\tif not rows:\n\t\t\treturn None\n\t\telif len(rows) > 1:\n\t\t\traise Exception('Multiple rows returned for Database.get() query')\n\t\telse:\n\t\t\treturn rows[0]", "title": "" }, { "docid": "e3a6612d24c76c2c254cfb1e6c41c7b3", "score": "0.60032344", "text": "def get_table_row(self, key_field, key_value):\n\n for row in self.table:\n if row[key_field] == key_value:\n return row\n return None", "title": "" }, { "docid": "89c9a453c4072dea59ed3b2811db4b91", "score": "0.60026413", "text": "def get_row(self, external_id):\n response = solr.query(settings.SOLR_DATA_CORE, 'dataset_slug:%s AND external_id:%s' % (self.slug, external_id), limit=1)\n\n if len(response['response']['docs']) < 1:\n return None\n\n return 
response['response']['docs'][0]", "title": "" }, { "docid": "7501d86de9262eee05dd98469971b618", "score": "0.59805685", "text": "def readRow(self, **kwargs ):\n kwargsLength = len(kwargs)\n if ( kwargsLength > 1 ):\n returnColumns = \",\".join(kwargs.get(\"returnColumns\", \"*\"))\n query = f'SELECT {returnColumns} FROM {self._tablename} WHERE {kwargs[\"whereColumn\"]}=?'\n parameters = (kwargs[\"equals\"],)\n for row in self._cursor.execute(query, parameters):\n yield row\n else:\n query = f'SELECT * FROM {self._tablename}'\n for row in self._cursor.execute(query):\n yield row", "title": "" }, { "docid": "b2cf63e023f64e70aa794de5745276af", "score": "0.597741", "text": "def select_from_test_table(self):\n\n cursor = self.conn.cursor()\n\n sql = \"SELECT test_id, test_data FROM test_table\"\n\n cursor.execute(sql)\n\n row = cursor.fetchall()\n return row", "title": "" }, { "docid": "7219e9e2b2fab61f8be12d3e391341cc", "score": "0.59676945", "text": "def getItem(self, col, row):\n\t\treturn self.matrix[col][row]", "title": "" }, { "docid": "b108821b6010079e476e705cf1114a3f", "score": "0.59603363", "text": "def check_row(self):\n\n row0 = self.row0()\n print(\"row0 : \", row0)\n row1 = self.row1()\n print(\"row1 : \", row1)\n row2 = self.row2()\n print(\"row2 : \", row2)\n if row0 is True:\n return row0[1]\n elif row1 is True:\n return row1[1]\n elif row2 is True:\n return row2[1]\n else:\n return None", "title": "" }, { "docid": "c58d244525204aef2d67173142ef1119", "score": "0.59534657", "text": "def GetColumn(self):", "title": "" }, { "docid": "c58d244525204aef2d67173142ef1119", "score": "0.59534657", "text": "def GetColumn(self):", "title": "" }, { "docid": "3340da634f6227bdda6b60f19dcbeeb9", "score": "0.5936199", "text": "def query_one(self, sql, params=(), error=b''):\n # Used as a KV store\n cursor = self.db.execute(sql, params)\n row = cursor.fetchone()\n if row is None: # zero rows\n raise ValueError(error)\n return row", "title": "" }, { "docid": "03e9b67c08c8663ba0515a14ce4c96a8", "score": "0.5929703", "text": "def get_user(row):\n user = row[0]\n id = row[2]\n return user, id", "title": "" }, { "docid": "2bda8e313a091fc64364f0f5299b853a", "score": "0.5927328", "text": "def read(self,id):\n connection = sqlite3.connect(SQLITE_DB)\n q = web.input()\n try:\n format = q.format\n except:\n format = 'html'\n if format=='json':\n connection.row_factory = dict_factory\n cursor = connection.cursor()\n rows = cursor.execute(\"SELECT * FROM Game WHERE rowid=?\", (id,))\n if format=='json':\n return cursor.fetchall()\n else:\n for row in rows:\n aGame = row\n return aGame", "title": "" }, { "docid": "7ab40876d353592f07acfec32f4e6894", "score": "0.59164697", "text": "def get_row(self) -> int:\n return self._row", "title": "" }, { "docid": "ba2fda4cc1bc97ebe6a2441ffbd7e66a", "score": "0.5910969", "text": "def get_row(self, row_number):\n return self.board[row_number]", "title": "" }, { "docid": "914d2e4c046df6056ae78bc681fa400c", "score": "0.59079474", "text": "def get_row(data_frame, row):\n return (data_frame.iloc[[row]]).values[0][0]", "title": "" }, { "docid": "e3810569cff24c90d9b1378031390e7e", "score": "0.5884903", "text": "def get_by_id(self, area_id):\n sql = \"SELECT * FROM \"+self._tablename_+\" WHERE id=:area_id;\"\n data = engine.execute(text(sql),area_id=area_id)\n #print(proxy2list(data))\n return proxy2list(data)[0]", "title": "" }, { "docid": "a9493614736bf38efd4e3c91e55e3315", "score": "0.5874293", "text": "def getItem(self, row, col):\n return self.item(row+1, col)", "title": "" 
}, { "docid": "a6bb5929c48e1e72f89fb34b13880e70", "score": "0.5867006", "text": "def __getitem__(self, item):\n return self.table[item]", "title": "" }, { "docid": "54603ab1f9dabfb10091cd76df4661d7", "score": "0.5866964", "text": "def get_user_by_row(row):\n\n # rs.search_id, rs.user_id, u.age, u.sex, u.interests, u.books, u.music,\n # rs.cnt_common_interests, rs.cnt_common_books, rs.cnt_common_music, rs.cnt_common_friends, rs.cnt_common_groups\n user_info = vk_user.get_info_dict(row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10],\n row[11], row[12], row[13])\n return user_info", "title": "" }, { "docid": "dafcb13fc179a77a103578708d4743e0", "score": "0.58656484", "text": "def get_row_by_username(cls, username):\n db_type = Config.get_db_type()\n if db_type == 0:\n pass\n elif db_type == 1:\n return cls.sqlite_get_row_by_username(username)\n elif db_type == 2:\n return cls.postgres_get_row_by_username(username)", "title": "" }, { "docid": "9d14c2e3d160172c35aee4f0d3a7930a", "score": "0.5864134", "text": "def returnData(self):\n try:\n data = []\n for row in self.db.execute('SELECT * FROM DATA'):\n data.append(list(row))\n return data\n except sqlite3.Error as e:\n print(e)", "title": "" }, { "docid": "7e3a12883f977e945fda538cd54dc578", "score": "0.5850886", "text": "def gets(self, table_name, row_id):\n key = '%s-%d' % (table_name, row_id)\n if key not in self.values:\n self.stats['cache_miss'] += 1\n return None, None, None\n self.stats['cache_hit'] += 1\n entry = self.values[key]\n return entry['version'], entry['token'], entry['value']", "title": "" }, { "docid": "e1f87e26478968b0a3c118b7b8457cb1", "score": "0.58383095", "text": "def findOne(self):\n return self.cur.fetchone()", "title": "" }, { "docid": "dc6980240a67310e388e14654c5ae1e0", "score": "0.58253485", "text": "def select_row_dict(self, sql, params=()):\n if sql == \"\":\n raise Exception(\"select_row: SQL cannot be empty.\")\n return None\n \n try:\n c = self.conn.cursor(pymysql.cursors.DictCursor)\n c.execute(sql, params)\n result = c.fetchone()\n c.close()\n except Exception as e:\n raise Exception(e)\n\n if result:\n return result\n else:\n return None", "title": "" }, { "docid": "49487c437884d952d2a1824bf663821a", "score": "0.58235", "text": "def fetch_thing(self, name):\n\n thing = (\n db.DBSession.query(self.table)\n .filter(sqla.func.lower(self.table.name) == name.lower())\n .one()\n )\n\n return (thing, thing.name)", "title": "" }, { "docid": "aab90ed0b9a42128996b9db481eb9b4f", "score": "0.5817548", "text": "def get(request, pk):\n\t\ttable_model = Table.objects.get(id=pk)\n\t\tsale_table = sqlTable(table_model.name, meta, autoload=True)\n\t\tselect_statement = sale_table.select()\n\t\tresult = connection.execute(select_statement)\n\t\tto_be_sent_result = [dict(_) for _ in list(result)]\n\t\t# return HttpResponse(json.dumps(to_be_sent_result), content_type='application/json')\n\t\treturn JsonResponse(to_be_sent_result, safe=False)", "title": "" }, { "docid": "2257cb88b66b47bee8b1fde8d1973fe8", "score": "0.5812104", "text": "def get_data_by_id(row_id):\n\n\tcon = sqlite3.connect(str(db_file))\n\tstatement = f'SELECT * FROM from_nano WHERE rowid = \"{row_id}\";'\n\tdf = pd.read_sql_query(statement, con)\n\treturn df", "title": "" }, { "docid": "d0b372002073361781ae76356b77b22e", "score": "0.5807263", "text": "def get_data_by_specify_row_col(self, row=0, col=0):\r\n all_data = self.get_all_excel_data_by_row()\r\n return all_data[row][col]", "title": "" }, { "docid": 
"60f35555c00a0bb3ff63e82baeec0421", "score": "0.58062273", "text": "def get(self, query, *parameters):\n rows = self._query(query, parameters)\n if not rows:\n return None\n elif not isinstance(rows, list):\n raise MySQLError(\"Query is not a select query\")\n elif len(rows) > 1:\n raise MySQLError(\"Multiple rows returned for Database.get() query\")\n else:\n return rows[0]", "title": "" }, { "docid": "6ebbaa7deed7752934ccb27650f67e34", "score": "0.5805839", "text": "def __db_fetch_single_value(query):\n cursor = connection.cursor()\n cursor.execute(query)\n fetch_val = cursor.fetchone()\n cursor.close()\n return fetch_val[0]", "title": "" } ]
ca76e70edbf7c76a72017aa3721eb59e
Verify email reset token. Loads the user id and the requested email and simultaneously checks token age. If not too old, get user with id and set email.
[ { "docid": "5e6d7a14706f367fa7bd5d591c364ec5", "score": "0.67778265", "text": "def verify_token(token):\n ts = URLSafeTimedSerializer(flask.current_app.config[\"SECRET_KEY\"])\n\n try:\n user_id, email = ts.loads(token, salt='verify-email', max_age=900)\n except SignatureExpired:\n flask.flash(\"Länken har gått ut, var vänlig försök igen.\", 'error')\n return flask.redirect(flask.url_for('auth.login'))\n except: # noqa: E722 (ignore 'bare except' warning)\n flask.abort(404)\n\n user = models.User.query.get_or_404(user_id)\n user.email = email\n models.db.session.commit()\n\n flask.flash(\"{} är nu verifierad!\".format(email), 'success')\n return flask.redirect(flask.url_for('auth.login'))", "title": "" } ]
[ { "docid": "1426832ab2106935047455b4d985ed59", "score": "0.7034363", "text": "def verify_reset_token(token):\r\n s = Serializer(app.config['SECRET_KEY'])\r\n try:\r\n user_id = s.loads(token)['user_id']\r\n except:\r\n return None\r\n return User.query.get(user_id)", "title": "" }, { "docid": "45226c9c9c7f0180ab79a9949354a79a", "score": "0.6752095", "text": "def verify_reset_password_token(token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except (BadSignature, SignatureExpired):\n return False\n id = data.get('reset')\n return User.query.get(id)", "title": "" }, { "docid": "9f38e0329b686ef5a11be00e0a9e60fd", "score": "0.6750285", "text": "def test_email_verification_token(self):\n user = add_user()\n token = user.encode_email_token().decode()\n user = set_user_email_token_hash(user, token)\n\n with self.client:\n response = self.client.get(\n f'{self.url}{token}',\n content_type='application/json',\n headers=[('Accept', 'application/json')]\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['message'], 'email verified')\n self.assertIsNotNone(user.email_validation_date)", "title": "" }, { "docid": "c89f351b493894b3e94e35dbdb06495b", "score": "0.6555734", "text": "def auth_verify_email(user_id, email_token):\n user = User.get_by_id(int(user_id))\n if user.is_email_verified:\n responseObject = {\n 'status': 'fail',\n 'message': 'Email already verified'\n }\n return make_response(jsonify(responseObject)), 202\n \n email_auth_data = Database.find_one(collection='email_token', query={'user_id': int(user_id)})\n if email_auth_data['email_token'] == email_token:\n Database.update_one(collection=\"users\", query=[{'user_id': int(user_id)}, {\"$set\": { \"is_email_verified\": True }} ])\n responseObject = {\n 'status': 'success',\n 'message': 'Email verified'\n }\n return make_response(jsonify(responseObject)), 201", "title": "" }, { "docid": "509eaf3dfbabe318ef0bbc635ba0dedb", "score": "0.65329397", "text": "def auth_passwordreset_request(email):\n\n u_id, secret_code = generate_reset_code(email)\n # store the reset_code (secret_code) into the users database.\n\n users[u_id]['reset_code'] = secret_code\n\n send_email(email, secret_code)\n return {}", "title": "" }, { "docid": "3e829ecda064748f899ab559aac0b3f1", "score": "0.64959383", "text": "def verify_email(token):\n user = User.validate_token(token)\n\n # If the token is invalid, redirect to 'users.send_verify_request'\n # so that the user may attempt to send the verification URL again.\n if not user:\n flash('That token is invalid.', 'bad')\n return redirect(url_for('users.send_verify_request'))\n\n # If a user is already validated, redirect to 'users.email_login'\n if user.validated:\n flash(\n 'That email address has already been verified!', 'neutral')\n return redirect(url_for('users.email_login'))\n\n # If a different user is logged in at the time the email is\n # verified, log out that user\n if current_user.is_authenticated:\n logout()\n\n user.validated = True\n db.session.add(user)\n db.session.commit()\n flash(\n 'Your email address has been verified! 
You may now log in.',\n 'good')\n\n # Redirect to 'users.email_login' so that the user may log in after\n # verification\n return redirect(url_for('users.email_login'))", "title": "" }, { "docid": "a5238094f2e9e876f2a5135bbab79633", "score": "0.6388387", "text": "def verify_reset_token(self, token):\n\n expired, invalid, data = self._verify_token(token)\n if data and data.get('id') == self.id and data.get('op') == 'reset':\n data = True\n else:\n data = False\n return expired, invalid, data", "title": "" }, { "docid": "114999cebb58b9d724b3bf46a733f30a", "score": "0.6351135", "text": "def reset_with_token(token):\n email = confirm_token(token)\n\n if not email:\n return json.dumps({\n 'message': 'Link has been expired'\n }), 400\n\n data = request.get_json()\n schema = UserSchema.reg_pass\n password = data['password']\n\n if not re.match(schema, password):\n return json.dumps({\n 'message': 'Password in invalid'\n }), 400\n\n password = generate_password_hash(data['password'])\n\n if password:\n user = User.query.filter(User.email == email).first()\n if user:\n user.password = password\n db.session.add(user)# pylint: disable=E1101\n db.session.commit()# pylint: disable=E1101\n return json.dumps({\n 'token': token\n }), 200\n\n return json.dumps({\n 'message': 'user doesnt exist'\n }), 404", "title": "" }, { "docid": "3ed5e6b35873b6ab80413fbd78fabf02", "score": "0.6223072", "text": "def test_old_token(self):\n user = get_user_model().objects.filter(email=self.user_1_data[\"email\"]).first()\n user.passwordResetToken = PasswordResetTokenGeneratorHandler.handle()\n user.passwordResetTokenExpiresAt = DateService.yesterday()\n user.save()\n self.assertTrue(user.is_password_reset_token_expired())\n\n password = faker.pystr_format()\n\n payload_user = {\"password\": password, \"password_confirm\": password}\n\n response = self.client.post(\n reverse(\"password_reset_token\", args=[user.passwordResetToken]),\n payload_user,\n )\n self.assertEqual(204, response.status_code)\n\n payload = {\"email\": self.user_1_data[\"email\"], \"password\": password}\n response = self.client.post(self.login_url, payload)\n self.assertEqual(401, response.status_code)", "title": "" }, { "docid": "1b12928c9fd18db75973b7921e076a46", "score": "0.6138936", "text": "def verify_email_view(user_id: str, email_id: str):\n try:\n verify_email(user_id, email_id, request.form.get('token'))\n return make_response('', requests.codes.ok)\n except UnknownUser as e:\n return log_error_and_make_response(e, e.msg, requests.codes.forbidden)\n except UnknownEmail as e:\n return log_error_and_make_response(e, e.msg, requests.codes.not_found)\n except UnknownToken as e:\n return log_error_and_make_response(e, e.msg, requests.codes.bad_request)", "title": "" }, { "docid": "ac8cdd97049b5ec482f47431f352ce57", "score": "0.61270976", "text": "def request_password_reset(self,email):\n user = self.find_user_by_email(email)\n\n if user is None:\n raise serializers.ValidationError(\"User with that email does not exist.\")\n\n self.receiver_email = user.email\n\n #generate user token to be used in password reset link\n token = jwt.encode({'email': user.email,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)\n },\n settings.SECRET_KEY\n ,algorithm='HS256')\n\n #render password reset email from template\n context = {\n 'username' : user.username,\n 'action_url' : self.account_recovery_endpoint+token.decode('ascii')\n }\n rendered_string = render_to_string('password_reset_email.txt', context)\n\n\n #send password reset email to 
user\n return (self.send(rendered_string),token.decode('ascii'))", "title": "" }, { "docid": "81bfe47e80e934c27c8fcf4f74be3749", "score": "0.6087834", "text": "def test_verify_expired_token(self):\n token = generate_confirmation_token('dummy@email.com')\n time.sleep(1)\n with self.assertRaises(URLTokenExpired):\n email = confirm_token(token, 0)\n self.assertEqual(email, False)", "title": "" }, { "docid": "a8bf954556d2131a4140da7a918c9f6e", "score": "0.6059575", "text": "def request_reset_user(email: str) -> str:\n if get_username(email):\n reset_token = generate_token()\n userAdapter.set_user_reset_token(email, reset_token)\n username = userAdapter.get_user_username(email)\n link = 'https://api.tracemap.info/auth/reset_password/%s/%s' % (email, reset_token)\n if mailService.send_reset_mail(username, email, link):\n return \"A reset email was successfully sent to you.\"\n else:\n return \"Something went wrong. Please try again later.\"\n else:\n return \"This email is not registered.\"", "title": "" }, { "docid": "79a620e6d49c9d901cc74d9af9f15b7a", "score": "0.6027039", "text": "def reset_with_token(token):\n timed_serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])\n try:\n email = timed_serializer.loads(token, salt='recovery-token', max_age=3600)\n except BadSignature:\n abort(404)\n form = PasswordForm()\n if form.validate_on_submit():\n user = User.select_by_email(email=email)\n user.password = form.password.data\n flash(const.RESET_PASSWORD_SUCCESS, 'success')\n return redirect(url_for('user.login'))\n return render_template('user/password.html', form=form)", "title": "" }, { "docid": "d5c313c57cd93c837e2c9b367a9c9a31", "score": "0.6023899", "text": "def get(self, request, *args, **kwargs):\n try:\n # get token and username\n uid = force_text(urlsafe_base64_decode(kwargs[\"uidb64\"]))\n token = kwargs[\"token\"]\n\n # check token and user existence\n\n try:\n user_to_verify = DbExistenceChecker().check_return_user_existence(\n pk=uid\n )\n except (TypeError, ValueError, OverflowError, CustomUser.DoesNotExist):\n raise NoneExistenceError(\n \"User\",\n create_400(\n 404,\n \"Non existence\",\n \"User associated to token does not exist\",\n \"token\",\n ),\n )\n\n try:\n email_token = DbExistenceChecker().check_return_token_existence(\n token=token\n )\n except (TypeError, ValueError, OverflowError, EmailToken.DoesNotExist):\n raise NoneExistenceError(\n \"Email Token\",\n create_400(400, \"Non existence\", \"Token does not exist\", \"token\"),\n )\n\n # check expired\n if email_token.check_expired:\n raise ExpiredError(\n \"Email Token\",\n create_400(400, \"Expired\", \"Email Token already expired!\", \"token\"),\n )\n\n # check token user and uid match\n if email_token.user.id != user_to_verify.id:\n raise UnmatchedFieldsError(\n \"User\",\n create_400(\n 400, \"Not matching\", \"Url user and Token user don't match!\"\n ),\n )\n\n user_to_verify.email_verified = True\n user_to_verify.save()\n\n if request.accepted_renderer.format == \"json\":\n\n return Response(\n create_200(\n 200,\n \"Email verified\",\n \"Email has been verified. 
Now you can login to your account\",\n )\n )\n else:\n\n return Response(\n {\"user\": user_to_verify},\n template_name=\"users/email_verified.html\",\n )\n\n except (NoneExistenceError, ExpiredError, UnmatchedFieldsError) as error:\n return Response(error.message, status=status.HTTP_400_BAD_REQUEST)\n\n except Exception as error:\n return Response(\n create_500(\n cause=error.args[0] or None,\n verbose=f\"Could not verify due to an unknown error.\",\n ),\n status=status.HTTP_500_INTERNAL_SERVER_ERROR,\n )", "title": "" }, { "docid": "ed33b6cd28a95324bada3edafe0246e4", "score": "0.6013224", "text": "def reset_with_token(token):\n try:\n email = ts.loads(token, salt=\"recover-key\", max_age=86400)\n except:\n abort(404)\n form = PasswordForm()\n if form.validate_on_submit():\n user = User.get(email)\n password = form.password.data\n user.change_password(user.set_password(password))\n login_user(user)\n flash('Password changed successfully!')\n return redirect(url_for('index', _external=True, _scheme='https'))\n return render_template('reset_with_token.html', form=form, token=token)", "title": "" }, { "docid": "c3bddd1cff016f64e94bd02522150eca", "score": "0.59992105", "text": "def new_email(token):\n user = User.validate_token(token)\n if not user:\n flash('That token is invalid.', 'bad')\n else:\n\n # Prevent users from setting 'email' to 'None', since\n # 'temp_email' becomes 'None' after a successful login\n if not user.temp_email:\n flash('Your new email address could not be verified. '\n 'Please try again.', 'bad')\n return redirect(url_for('users.settings'))\n\n user.email = user.temp_email\n user.temp_email = None\n db.session.add(user)\n db.session.commit()\n flash('Your new email address has been verified!', 'good')\n return redirect(url_for('users.email_login'))", "title": "" }, { "docid": "ec3217e5ab8aa8484fb32b4182051008", "score": "0.598913", "text": "def test_check_auth_token_expired(self):\n now = timezone.now()\n anhourago = now - datetime.timedelta(hours=1)\n\n token = 'RZ4o9v5ASVOrEr5Z4Yyi'\n accesstoken = AccessToken(user=self.data['user2'], token=token, application=self.data['application1'],\n expires=anhourago)\n accesstoken.save()\n\n self.send_and_compare_request('checkAuthToken', [], token, False)", "title": "" }, { "docid": "9b101e8213b1699fd5d31ccdacccc2a4", "score": "0.598026", "text": "def post(self, request, *args, **kwargs):\n try:\n # get token , email, password\n email = request.data.get(\"email\", False)\n token = request.data.get(\"token\", False)\n password = request.data.get(\"password\", False)\n\n if email and token and password:\n\n try:\n user_to_reset = DbExistenceChecker().check_return_user_existence(\n email=email\n )\n except (TypeError, ValueError, OverflowError, CustomUser.DoesNotExist):\n raise NoneExistenceError(\n \"User\",\n create_400(\n 400,\n \"Non existence\",\n \"User associated to token does not exist\",\n ),\n )\n try:\n email_token = DbExistenceChecker().check_return_token_existence(\n token=token\n )\n except (TypeError, ValueError, OverflowError, EmailToken.DoesNotExist):\n raise NoneExistenceError(\n \"Email Token\",\n create_400(\n 400,\n \"Non existence\",\n \"Token does not exist\",\n ),\n )\n\n # check expired\n if email_token.check_expired:\n raise ExpiredError(\n \"Email Token\",\n create_400(\n 400, \"Expired\", \"Email Token already expired or used!\"\n ),\n )\n\n # check token user and uid match\n if email_token.user.id != user_to_reset.id:\n raise UnmatchedFieldsError(\n \"User\",\n create_400(\n 400, \"Not matching\", 
\"Url user and Token user don't match!\"\n ),\n )\n\n user_to_reset.set_password(password)\n user_to_reset.save()\n\n email_token.expired = True\n email_token.save()\n\n return Response(\n create_200(\n 200,\n \"Password Resetted!\",\n \"Password has been resetted. Now you can login to your account\",\n )\n )\n else:\n raise MissingFieldsError(\n instance=request,\n message=create_400(\n status.HTTP_400_BAD_REQUEST,\n \"Missing fields\",\n \"Either of the required fileds: email, password, and/or token missing\",\n ),\n )\n except (MissingFieldsError, NoneExistenceError, ExpiredError) as error:\n return Response(error.message, status=status.HTTP_400_BAD_REQUEST)\n\n except Exception as error:\n return Response(\n create_500(\n verbose=f\"Could not reset password due to an unknown error.\",\n cause=error.args[0] or None,\n ),\n status=status.HTTP_500_INTERNAL_SERVER_ERROR,\n )", "title": "" }, { "docid": "b4676577dff357203e41eaee05a3a303", "score": "0.591417", "text": "def checkEmail():\n email = request.args.get('email')\n existing_match = db.session.query(User)\\\n .filter(User.email == email).all()\n\n if existing_match:\n msg = ('Email already in use. ' +\n 'Please sign in or recover your account information')\n return jsonify(success=False, message=msg)\n\n return jsonify(success=True)", "title": "" }, { "docid": "f0b0c1405338fd68fad6d65f0e6b2bed", "score": "0.58853006", "text": "def test_reset_with_wrong_email(self):\n\n res = self.client().post(self.base_url+self.user_reset_password_endpoint,\n data=json.dumps(self.reset_payload['wrong_email']),\n headers={'Authorization': 'Bearer {}'.format(self.tokens[0]) },\n content_type='application/json')\n\n self.assertEqual(res.status_code, 400)", "title": "" }, { "docid": "d88f7f6e7f872c0096d7cbc05cfabaaf", "score": "0.58848983", "text": "def my_expired_token_callback():\n\n log.debug(\"-@- expired token checker\")\n\n ### if user is not confirmed, delete user from DB\n ### otherwise return a link to refresh refresh_token\n\n return jsonify({\n 'msg' : 'The token has expired',\n 'status' : 401,\n 'sub_status': 42,\n }), 401", "title": "" }, { "docid": "c22ec169a43e55c32ccf6a8a8bad0422", "score": "0.58714193", "text": "def test_expired_auth_token(self):\n user = add_user('test', 'test@test.com', 'test')\n auth_token = user.encode_auth_token(user.id)\n time.sleep(3)\n self.assertEqual(str(user.decode_auth_token(auth_token)), 'Expired token, please login again.')", "title": "" }, { "docid": "6f703f816f5c377ae6d344ebe9f91bee", "score": "0.5869825", "text": "def _verify_token(self):\n token_expiration = datetime.fromtimestamp(self._decoded_token[\"exp\"])\n time_difference = datetime.now() + timedelta(hours=12) # noqa: WPS432\n LOGGER.debug(\"Token expiration time: %s\", token_expiration) # noqa: WPS323\n LOGGER.debug(\"Token comparison time: %s\", time_difference) # noqa: WPS323\n\n if token_expiration <= time_difference:\n self.refresh_token()", "title": "" }, { "docid": "1e602b68f2b1f6f958e8e65c8f65f03f", "score": "0.58425635", "text": "def reset_password(email: str, reset_token: str) -> str:\n db_reset_object = userAdapter.get_user_reset_token(email)\n if reset_token == db_reset_object['token']:\n password = __generate_random_pass()\n password_hash = generate_password_hash(password)\n userAdapter.set_user_password_hash(email, password_hash)\n username = userAdapter.get_user_username(email)\n mailService.send_new_password(username, email, password)\n return 'You received an email with a new password.'\n else:\n return 'The request token did not 
match. Please request a new passwort reset at https://tracemap.info'", "title": "" }, { "docid": "1847c94b5db4852eaa30a14bb44bee9e", "score": "0.5824096", "text": "def test_api__test_cookie_auth_token__ok__change_email_dont_break_cookie(self):\n dbsession = get_tm_session(self.session_factory, transaction.manager)\n admin = dbsession.query(User).filter(User.email == \"admin@admin.admin\").one()\n with freeze_time(\"1999-12-31 23:59:58\"):\n params = {\"email\": \"admin@admin.admin\", \"password\": \"admin@admin.admin\"}\n res = self.testapp.post_json(\"/api/v2/auth/login\", params=params, status=200)\n assert \"Set-Cookie\" in res.headers\n assert \"session_key\" in self.testapp.cookies\n user_session_key_1 = self.testapp.cookies[\"session_key\"]\n\n # change own email\n with freeze_time(\"1999-12-31 23:59:59\"):\n params = {\n \"email\": \"mysuperemail@email.fr\",\n \"loggedin_user_password\": \"admin@admin.admin\",\n }\n self.testapp.put_json(\n \"/api/v2/users/{}/email\".format(admin.user_id), params=params, status=200\n )\n assert \"Set-Cookie\" in res.headers\n assert \"session_key\" in self.testapp.cookies\n user_session_key_2 = self.testapp.cookies[\"session_key\"]\n assert user_session_key_1 == user_session_key_2\n\n # session_id should not be return before x time\n with freeze_time(\"2000-01-01 00:00:00\"):\n res = self.testapp.get(\"/api/v2/auth/whoami\", status=200)\n assert \"Set-Cookie\" not in res.headers\n assert \"session_key\" in self.testapp.cookies\n user_session_key_3 = self.testapp.cookies[\"session_key\"]\n assert user_session_key_3 == user_session_key_2\n\n # after x time session_id should be renew\n with freeze_time(\"2000-01-01 00:02:01\"):\n res = self.testapp.get(\"/api/v2/auth/whoami\", status=200)\n assert \"Set-Cookie\" in res.headers\n assert \"session_key\" in self.testapp.cookies\n user_session_key_4 = self.testapp.cookies[\"session_key\"]\n assert user_session_key_4 != user_session_key_3\n\n # after too much time, session_id should be revoked\n with freeze_time(\"2000-01-01 00:12:02\"):\n res = self.testapp.get(\"/api/v2/auth/whoami\", params=params, status=401)\n assert \"Set-Cookie\" in res.headers", "title": "" }, { "docid": "9fe87fb4830e4b1c5618fc8d8dbfbcce", "score": "0.5822107", "text": "def get_verified_user_email(self, app_config, http_client, token, user_info):\n pass", "title": "" }, { "docid": "dd26f4071ac0fb7761f6d856826c2209", "score": "0.5818941", "text": "def reset_password(self, request):\n email = request.GET['email']\n try:\n user = CurationUser.objects.get(email=email)\n user.reset_password_requested_by = request.user\n user.otp_secret_key = pyotp.random_base32()\n user.save()\n SendEmailUtil().generate_reset_password_link(\n user, EmailTemplate.Templates.reset_password_email)\n return Response({\"message\": \"forgot password email sent\"}, status=status.HTTP_200_OK)\n except CurationUser.DoesNotExist:\n return Response({\"message\": \"user details not found with this email\"}, status=status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "ddc1a845243fcbd46cf7d34d5452d4f9", "score": "0.5818537", "text": "def _verify_token(self, access_token, request):\n # First check if this request was already verified.\n # `request.bound_data` is an attribute provided by Kinto to store\n # some data that is shared among sub-requests (e.g. 
default bucket\n # or batch requests)\n if REIFY_KEY not in request.bound_data:\n settings = request.registry.settings\n hmac_secret = settings['userid_hmac_secret']\n\n cache_ttl = float(facebook_conf(request, 'cache_ttl_seconds'))\n\n hmac_token = core_utils.hmac_digest(hmac_secret, access_token)\n cache_key = 'facebook:verify:{}'.format(hmac_token)\n\n payload = request.registry.cache.get(cache_key)\n\n if payload is None:\n # Verify token from Facebook\n url = facebook_conf(request, 'userinfo_endpoint')\n params = {\n 'input_token': access_token,\n 'access_token': facebook_conf(request, 'app_access_token'),\n }\n\n # XXX: Implement token validation for Facebook\n resp = requests.get(url, params=params)\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.exception(\"Facebook Token Protocol Error\")\n raise httpexceptions.HTTPServiceUnavailable()\n else:\n body = resp.json()\n if not body['data']['is_valid']:\n payload = {}\n else:\n payload = body['data']\n\n request.registry.cache.set(cache_key, payload, ttl=cache_ttl)\n\n # Save for next call.\n request.bound_data[REIFY_KEY] = payload.get('user_id')\n\n return request.bound_data[REIFY_KEY]", "title": "" }, { "docid": "e227a5252cdc0311cd6e5bc7c0de82c7", "score": "0.57992566", "text": "def password_reset_update(self, request, uid, token):\n decoded_token = jwt_auth.decode_token(token)\n now = int(datetime.now().strftime('%s'))\n if now > decoded_token['exp']:\n # TODO: add generate new link endpoint\n return custom_reponse('error', 400, message='Link has expired')\n serializer = PasswordResetSerializer(\n data=request.data, context={'request': request})\n if serializer.is_valid():\n uid = force_text(urlsafe_base64_decode(uid))\n user = get_object(User, uid, \"User\")\n password = request.data.get('password')\n user.set_password(password)\n user.save()\n return custom_reponse(\n 'success', 200, message='Password successfully updated')\n return custom_reponse(\n 'error', 400, serializer=serializer, error_type='bad_request')", "title": "" }, { "docid": "839b960a9cad5ae85823af2c1f59dd78", "score": "0.57984406", "text": "def confirm_reset(token):\n\n email = confirm_email_token(token)\n if not email:\n flash(\"The confirmation link in invalid or has expired. 
Please try again.\", \"danger\")\n return redirect(url_for('auth.reset'))\n\n form = PasswordForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=email).first_or_404()\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash(\"Password successfully changed.\", \"success\")\n return redirect(url_for('auth.signin'))\n\n return render_template('reset_envoke.html', form=form, token=token)", "title": "" }, { "docid": "0c27ad74f876be451c9b45b6221703d3", "score": "0.5797797", "text": "def post(request):\n serializer = serializers.PasswordResetSerializer(data=request.data)\n\n if not serializer.is_valid():\n return Response(serializer.errors)\n\n user = User.objects.filter(email=serializer.validated_data['email']).first()\n\n if user is not None:\n email_password_reset.delay(\n user_pk=user.pk,\n domain=get_current_site(request).domain,\n uid_encoded=urlsafe_base64_encode(force_bytes(user.pk)),\n token=tokens.password_reset_token.make_token(user)\n )\n\n return redirect('password_reset_done')", "title": "" }, { "docid": "e38255a533040eda6afae3de97e3318a", "score": "0.5770414", "text": "def validate_email():\n incoming = request.get_json()\n password_hash = incoming[\"hash\"]\n user=get_user_by_hash(password_hash)\n if user is not None:\n return jsonify(result=True,user_id=user),200\n else:\n return jsonify(result=False),401", "title": "" }, { "docid": "8d4be1a3ab3f4750e1d983dd947b82cc", "score": "0.57698405", "text": "def test_request_token_existing_email_inhibit_error(self) -> None:\n user_id = self.register_user(\"kermit\", \"monkey\")\n self.login(\"kermit\", \"monkey\")\n\n email = \"test@example.com\"\n\n # Add a threepid\n self.get_success(\n self.hs.get_datastores().main.user_add_threepid(\n user_id=user_id,\n medium=\"email\",\n address=email,\n validated_at=0,\n added_at=0,\n )\n )\n\n channel = self.make_request(\n \"POST\",\n b\"register/email/requestToken\",\n {\"client_secret\": \"foobar\", \"email\": email, \"send_attempt\": 1},\n )\n self.assertEqual(200, channel.code, channel.result)\n\n self.assertIsNotNone(channel.json_body.get(\"sid\"))", "title": "" }, { "docid": "8e82c0b841a6ff39349166e495bce640", "score": "0.57240623", "text": "def auth_reset_password():\n # get the post data\n post_data = request.get_json()\n if post_data is None:\n post_data = request.form\n\n try:\n user = User.get_by_email(post_data.get('email'))\n if user:\n token = user.get_reset_token()\n msg = Message('Password Reset Request',\n sender='teamaerouta@gmail.com',\n recipients=[user.email])\n \n msg.body = f'''To reset your password, visit the following link:\n {url_for('reset_token', token=token, _external=True)}\n If you did not make this request then simply ignore this email and no changes will be made.\n Sincerely, \n StateFarm\n '''\n mail.send(msg)\n responseObject = {\n 'status': 'success',\n 'message': 'Reset link sent.' \n }\n return make_response(jsonify(responseObject)), 201\n else:\n responseObject = {\n 'status': 'fail',\n 'message': 'Some error occurred with database. 
Please try again.'\n }\n return make_response(jsonify(responseObject)), 500\n except Exception as e:\n print(e)\n responseObject = {\n 'status': 'fail',\n 'message': 'Try again'\n }\n return make_response(jsonify(responseObject)), 500", "title": "" }, { "docid": "43016408e0d2708ef3e4e1359434dacf", "score": "0.57216436", "text": "def verify_email(user, email):\n ts = URLSafeTimedSerializer(flask.current_app.config[\"SECRET_KEY\"])\n\n token = ts.dumps([user.id, email], 'verify-email')\n\n verify_link = flask.url_for('auth.verify_token', token=token,\n _external=True)\n\n email_body = flask.render_template('auth/email_verification.jinja2',\n link=verify_link)\n\n subject = \"Verifiera din e-postaddress på Strequelistan\"\n\n util.send_email(email, subject, email_body)", "title": "" }, { "docid": "0087911b07e7a6dbbb5afb9fff586be4", "score": "0.57188576", "text": "def get_reset_password_token():\n if \"email\" in request.form.keys():\n try:\n reset_token = AUTH.get_reset_password_token(request.form['email'])\n return jsonify({\n \"email\": \"{}\".format(request.form['email']),\n \"reset_token\": \"{}\".format(reset_token)})\n except ValueError:\n return Response(status=403)\n return Response(status=403)", "title": "" }, { "docid": "14b9a28541fae2d8a6fc2e748248e0ff", "score": "0.5709763", "text": "def test_verify_token(self):\n token = generate_confirmation_token('dummy@email.com')\n email = confirm_token(token)\n self.assertEqual(email, 'dummy@email.com')", "title": "" }, { "docid": "a0215137dafb1203afb95322ca6ad7eb", "score": "0.57029766", "text": "def change_email(self, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return False\n if data.get('change_email') != self.id:\n return False\n new_email = data.get('new_email')\n if new_email is None:\n return False\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = new_email\n db.session.add(self)\n db.session.commit()\n return True", "title": "" }, { "docid": "9ca753c795e8c8303fe0ab4ef365c88a", "score": "0.56968564", "text": "def reset(self, user, email):\n if not self.user_exists(user):\n return 'User {0!r} not registered'.format(user), False\n u = self.get_user(user)\n if email != u.email:\n return 'User email does not match registered email address', False\n self.remove_user(user)\n token, flag = self.register(user, email)\n return token, flag", "title": "" }, { "docid": "44ab0bce5fdef6c5fe264a6e97ef5409", "score": "0.5693674", "text": "def change_email(token):\n # TODO: !!!!\n if current_user.change_email(token):\n flash('Your email address has been updated.', 'success')\n else:\n flash('The confirmation link is invalid or has expired.', 'error')\n return redirect(url_for('main.index'))", "title": "" }, { "docid": "48e9f2e9b324f2a9f9915430ba05c76e", "score": "0.56669575", "text": "def get_reset_password_token() -> str:\n email = request.form.get('email')\n try:\n token = AUTH.get_reset_password_token(email)\n return jsonify({\"email\": email, \"reset_token\": token}), 200\n except Exception:\n abort(403)", "title": "" }, { "docid": "e6d6b7274506fe29a1fda5191797e0b0", "score": "0.5666426", "text": "def check_email(email):\n return user_datastore.find_user(email=email)", "title": "" }, { "docid": "9ef69cb6f2d8a1a683a318a86260c942", "score": "0.56646764", "text": "def test_used_token(self):\n data = {\n \"email\": \"kiki@gmail.com\",\n \"password\": \"Kiki123455\",\n \"confirm_password\": \"Kiki123455\"\n }\n data2 = {\n \"email\": 
\"kiki@gmail.com\",\n \"password\": \"Kiki1234\",\n \"confirm_password\": \"Kiki1234\"\n }\n response = self.client.put(self.reset_url, data=data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n response.content,\n b'{\"message\":\"Your password has been successfully reset. You can now log in.\"}')\n response2 = self.client.put(self.reset_url, data=data2, format=\"json\")\n self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n response2.content,\n b'{\"errors\":{\"error\":[\"Something went wrong. Try again.\"]}}'\n )", "title": "" }, { "docid": "312bfb3de12a06955c9a3b6fc7a705c0", "score": "0.56638974", "text": "def verify_email(self, email):\n signature = email.get(\"signature\")\n token = email.get(\"token\")\n timestamp = email.get(\"timestamp\")\n\n if timestamp is None or token is None or signature is None:\n raise MailGunException(\"Mailbox Error: credential verification failed.\", \"Not enough parameters\")\n\n message = '{}{}'.format(timestamp, token).encode('utf-8')\n signature_calc = hmac.new(key=self.api_key,\n msg=message,\n digestmod=hashlib.sha256).hexdigest()\n if signature != signature_calc:\n raise MailGunException(\"Mailbox Error: credential verification failed.\", \"Signature doesn't match\")", "title": "" }, { "docid": "aa3508c93e6b5278642dc2b4274febb9", "score": "0.56589955", "text": "def test_email_verification_resend(self):\n password = self.data_generator.password()\n user = add_user(password=password)\n with self.client:\n resp_login = self.client.post(\n f'/{self.version}/auth/login',\n data=json.dumps(dict(\n email=user.email,\n password=password\n )),\n content_type='application/json',\n headers=[('Accept', 'application/json')]\n )\n\n with self.client:\n response = self.client.get(\n f'{self.url}resend',\n content_type='application/json',\n headers=[('Accept', 'application/json'),\n (Constants.HttpHeaders.AUTHORIZATION,\n 'Bearer ' + json.loads(resp_login.data.decode())['auth_token'])]\n )\n self.assertEqual(response.status_code, 200)\n data = json.loads(response.data.decode())\n self.assertEqual(data['message'], 'verification email resent')", "title": "" }, { "docid": "c8eae9f33fa98977486100c6717f4492", "score": "0.5657432", "text": "def decode_email_token(token, duration=None):\n logger.info(\"Decoding email verification token\", token=token)\n\n timed_serializer = URLSafeTimedSerializer(current_app.config[\"SECRET_KEY\"])\n email_token_salt = current_app.config[\"EMAIL_TOKEN_SALT\"]\n\n result = timed_serializer.loads(token, salt=email_token_salt, max_age=duration)\n logger.info(\"Successfully decoded email verification token\", token=token)\n return result", "title": "" }, { "docid": "bda7fab87727409685556e50d0a4563d", "score": "0.5657004", "text": "def validate_token_age(callback_token):\n\n try:\n token = CallbackToken.objects.get(key=callback_token, is_active=True)\n seconds = (timezone.now() - token.created_at).total_seconds()\n token_expiry_time = api_settings.PASSWORDLESS_TOKEN_EXPIRE_TIME\n if token.user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():\n return True\n if seconds <= token_expiry_time:\n return True\n else:\n # Invalidate our token.\n token.is_active = False\n token.save()\n return False\n\n except CallbackToken.DoesNotExist:\n # No valid token.\n return False", "title": "" }, { "docid": "4bf0b2a4eb44b6536cefc5aa6db2d182", "score": "0.56510943", "text": "def test_email_verification(self):\n password = self.data_generator.password()\n user = 
add_user(password=password)\n with self.client:\n resp_login = self.client.post(\n f'/{self.version}/auth/login',\n data=json.dumps(dict(\n email=user.email,\n password=password\n )),\n content_type='application/json',\n headers=[('Accept', 'application/json')]\n )\n\n with self.client:\n response = self.client.get(\n f'{self.url}',\n content_type='application/json',\n headers=[('Accept', 'application/json'),\n (Constants.HttpHeaders.AUTHORIZATION,\n 'Bearer ' + json.loads(resp_login.data.decode())['auth_token'])]\n )\n self.assertEqual(response.status_code, 200)\n data = json.loads(response.data.decode())\n self.assertEqual(data['message'], 'sent email with verification token')", "title": "" }, { "docid": "abb7734ceb1fe570fb5bff1114921931", "score": "0.56474376", "text": "def get(self, request, *args, **kwargs):\n\n try:\n user = User.objects.get_by_base64_email(kwargs.get(\"b64_email\"))\n if user.is_active:\n messages.success(request, \"Your account has already been verified\")\n return redirect(\"accounts:login\")\n\n if user.verify_crypto_id(kwargs.get(\"crypto_id\")):\n user.activate()\n messages.success(\n request,\n \"You have successfully verified your email address. You can login now.\",\n )\n return redirect(\"accounts:login\")\n else:\n messages.success(\n request,\n \"The link has expired. We have sent a new link to your email address.\",\n )\n user.send_verification_email()\n return redirect(\"home\")\n except User.DoesNotExist:\n raise Http404\n\n return super().get(request, *args, **kwargs)", "title": "" }, { "docid": "0949b98cc296de54796ede40f4e58196", "score": "0.564542", "text": "def verify_auth_token(token):\n User._init_token_attr()\n if User._is_black_token(token):\n return None\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return None\n return User.query.get(data['id'])", "title": "" }, { "docid": "069f6e7b75610ac23e8717f967d9cc40", "score": "0.5634756", "text": "def verify_auth_token(token):\n\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except SignatureExpired:\n return None\n\n # valid token, but expired\n except BadSignature:\n return None\n\n # invalid token\n user = User.query.get(data['id'])\n return user", "title": "" }, { "docid": "ffb4de47753864134fc9a96f96a81e5d", "score": "0.56287473", "text": "def test_verify_email(self):\n response = self.register_user(data=self.user)\n token = response.data['user_info']['token']\n # hit the api endpoint\n verify_url = \"http://127.0.0.1:8000/api/users/verify/{}\".format(token)\n res = self.client.get(verify_url)\n message = 'Email verified successfully'\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn(message.encode(), res.content)", "title": "" }, { "docid": "3e59023aee2116ae2b4ad450a84a0c21", "score": "0.5614441", "text": "def reset():\n form = ResetPasswordForm()\n if form.validate_on_submit():\n email = form.email.data\n user = User.select_by_email(email=email)\n if user:\n timed_serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])\n token = timed_serializer.dumps(email, salt='recovery-token')\n url = url_for('user.reset_with_token', token=token, _external=True)\n body = render_template('email/recover.txt', url=url)\n html = render_template('email/recover.html', url=url)\n msg = Message(body=body, html=html, recipients=[email],\n subject=const.RESET_EMAIL_SUBJECT)\n mail.send(msg)\n flash(const.RESET_PASSWORD_REQUEST_FLASH, 'success')\n return redirect(url_for('user.login'))\n return 
render_template('user/reset.html', form=form)", "title": "" }, { "docid": "d7245e2eebdbd3ec420e6d6d330c729f", "score": "0.5611907", "text": "def test_valid_tokens_expired(password_reset_token_factory, app_settings):\n app_settings.PASSWORD_RESET_EXPIRATION = datetime.timedelta()\n\n password_reset_token_factory()\n\n assert not models.PasswordResetToken.valid_tokens.exists()", "title": "" }, { "docid": "01445f1029aae80235a6f8e30bb609aa", "score": "0.56112677", "text": "def test_user_password_reset_confirm_invalid_token(self):\n\n # GIVEN invalid token data\n data = {\n 'uidb64': urlsafe_base64_encode(force_bytes(self.user.pk)),\n 'token': 'aaa',\n 'password': 'new-password'\n }\n\n # WHEN confirming password reset\n response = self.auth_api.password_reset_confirm(data)\n\n # THEN it should fail\n self.assertTrue(response.error)\n self.assertEqual(response.data['errors'][0], u'Password reset unsuccessful')", "title": "" }, { "docid": "4d33d8f6a996572b21cf57f4689eee8a", "score": "0.5609719", "text": "def validate_password_reset(cls, code, new_password):\r\n password_reset_model = \\\r\n PasswordResetModel.where_code(code)\r\n if password_reset_model is None:\r\n return None\r\n jwt = JWT()\r\n if jwt.verify_token(password_reset_model.token):\r\n user = cls.where_id(jwt.data['data']['user_id'])\r\n if user is not None:\r\n user.set_password(new_password)\r\n PasswordResetModel.delete_where_user_id(user.id)\r\n return user\r\n password_reset_model.delete() # delete expired/invalid token\r\n return None", "title": "" }, { "docid": "16e3df7b22b0ce5814225d2717b9da48", "score": "0.56089616", "text": "def reset_password_request():\n\n if current_user.is_authenticated:\n\n # Prevent non-email users from requesting a new password\n if current_user.login != 'email':\n return redirect(url_for('main.home'))\n\n send_pw_reset_email(current_user)\n flash(\n f'Check your email! We sent a link to '\n f'\"{current_user.email}\".', 'good')\n return redirect(url_for('users.settings'))\n\n form = EmailVerifyForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user:\n send_pw_reset_email(user)\n flash(\n f'Check your email! 
We sent a link to '\n f'\"{form.email.data}\".', 'good')\n else:\n flash(\n 'That email address has no matches in our system.',\n 'bad')\n return render_template('reset_password_request.html', form=form)", "title": "" }, { "docid": "f36fd79fb89c1675dcb56d860c51fef7", "score": "0.5598844", "text": "def validate_token_hash(self):\r\n if self.token_request_at and self.token_hash and self.token_hash.get(\"expires_in\") is not None:\r\n delta = datetime.datetime.now() - self.token_request_at\r\n duration = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6\r\n if duration > self.token_hash.get(\"expires_in\"):\r\n self.token_hash = None", "title": "" }, { "docid": "64b8eea187ac6853afb36646bafae783", "score": "0.5594756", "text": "def verify_token(token):\n return User.check_token(token) if token else None", "title": "" }, { "docid": "0e128d022906509268cc3a62aa331b5c", "score": "0.55896115", "text": "def verify_auth_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n user = User.query.get(data['id'])\n return user", "title": "" }, { "docid": "602958c372a129659eda30aaf802b43a", "score": "0.55735314", "text": "def test_reset_good_token(self):\n\n jtw_payload = jwt_payload_handler(self.test_user)\n\n token = jwt_encode_handler(jtw_payload)\n\n payload = {\n 'token': token,\n }\n\n response = self.client.get(\n self.USER_VALID_URL,\n data=payload,\n )\n\n self.assertEquals(\n response.status_code,\n 200,\n 'It should return an error response')", "title": "" }, { "docid": "3a6682e0eabeb87f3e734aa9a161c499", "score": "0.55667204", "text": "def verify_token():\n if request.path == '/accounts/new':\n return\n\n try:\n token = request.environ['HTTP_X_API_TOKEN']\n except (KeyError, IndexError, TypeError):\n raise BadRequest('Missing or invalid token')\n\n g.cursor.execute(\n \"SELECT username FROM accounts WHERE token=? 
and username=?\",\n (token, g.username)\n )\n user = g.cursor.fetchone()\n if not user:\n raise Unauthorized(\"Invalid X-Api-Token\")", "title": "" }, { "docid": "f3a7eda4dc7cfbe4bc51681d852756be", "score": "0.5554374", "text": "def _change_email_and_activate(self, user_id, email):\n ctx = yield self.begin()\n try:\n user_hash = hashlib.md5(email).hexdigest()\n cur1 = yield ctx.execute(\n \"UPDATE sys_user \"\n \"SET user_hash=%s, email=%s where _id=%s \",\n (user_hash, email, user_id)\n )\n cur2 = yield ctx.execute(\n \"UPDATE sys_user_info \"\n \"SET email_token='1', user_hash=%s \"\n \"WHERE _id=%s\",\n (user_hash, user_id)\n )\n yield ctx.commit()\n except Exception as e:\n app_log.error(e)\n yield ctx.rollback()\n raise gen.Return(-1)\n else:\n raise gen.Return(1)", "title": "" }, { "docid": "f78cf0b248c4668c8ab2a6d0fa848528", "score": "0.554797", "text": "def resetForgotPassword(self, request):\n try:\n code = request.data.get('code')\n email = request.data.get('email')\n except:\n return Response(data={'status': False, 'msg': 'email and code are required!'},\n status=status.HTTP_400_BAD_REQUEST)\n try:\n user = User.objects.get(email = email)\n except:\n return Response(data={'status': False, 'msg': 'email not exist'},status=status.HTTP_404_NOT_FOUND)\n if not UserProfile.objects.filter(user = user, forgot_code = code).exists():\n return Response(data={'status': False, 'msg': 'code not exist'},status=status.HTTP_404_NOT_FOUND)\n try:\n new_password = request.data.get('new_password')\n confirm_password = request.data.get('confirm_password')\n except:\n return Response(data={'status': False, 'msg': 'new_password and confirm_password required both'},status=status.HTTP_400_BAD_REQUEST)\n if not new_password:\n return Response(data={'status': False, 'msg': 'new_password and confirm_password required both'},status=status.HTTP_400_BAD_REQUEST)\n if not new_password == confirm_password:\n return Response(data={'status': False, 'msg': 'new_password and confirm_password do not match'},\n status=status.HTTP_400_BAD_REQUEST)\n # reset password code here\n user.set_password(new_password)\n user.save()\n user.userprofile.forgot_code=''\n user.userprofile.save()\n return Response(data={'status': True, 'msg': 'password changed successfully'},status=status.HTTP_200_OK)", "title": "" }, { "docid": "45c473bac4b831e982e51fb172a0f3b3", "score": "0.55434746", "text": "def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('email', type=str, required=True)\n args = parser.parse_args()\n email = args.get('email')\n user = User.query.filter_by(email=email).first()\n if user:\n user.send_email('Flask simple chats reset password',\n render_template('authentication/emails/reset_password_rest.txt',\n user=user, token=user.get_reset_password_token()))\n return {'email': email, 'message': 'Check Your e-email to reset the password!'}, 200\n else:\n abort(400, message=f\"User with e-mail '{email}' does not exist\")", "title": "" }, { "docid": "259521ded5ba962948251857db6db86c", "score": "0.5528197", "text": "def get_reset_password_token() -> str:\n try:\n email = request.form['email']\n except KeyError:\n abort(403)\n\n try:\n reset_token = AUTH.get_reset_password_token(email)\n except ValueError:\n abort(403)\n\n msg = {\"email\": email, \"reset_token\": reset_token}\n\n return jsonify(msg), 200", "title": "" }, { "docid": "348400117d7380ffafca29d17522704d", "score": "0.55237347", "text": "def get(self, token):\n\n email = verify_token(token, salt='activate')\n if not email:\n return {'message': 
'Invalid token or token expired'}, HTTPStatus.BAD_REQUEST\n user = User.query.filter_by(email=email).first()\n if not user:\n return {'message': 'User not found'}, HTTPStatus.NOT_FOUND\n if user.is_activate:\n return {'message': 'The user account is already activated'}, HTTPStatus.BAD_REQUEST\n user.is_activate = True\n user.save()\n return {}, HTTPStatus.NO_CONTENT", "title": "" }, { "docid": "7d93a826c5883d3aec0f5fb0d83e881d", "score": "0.5521945", "text": "def test_email_change_confirm_invalid(self):\n utils.login(self)\n old_email = \"oldfoo@bar.com\"\n token = UserEmailChangeTokenGenerator().generate(self.user, old_email)\n new_email = \"newfoo@bar.com\"\n User.objects.filter(pk=self.user.pk).update(email=new_email)\n response = self.client.get(reverse('spirit:email-change-confirm', kwargs={'token': token}))\n expected_url = reverse(\"spirit:profile-update\")\n self.assertRedirects(response, expected_url, status_code=302)\n self.assertEqual(User.objects.get(pk=self.user.pk).email, new_email)", "title": "" }, { "docid": "9ae145f1bb32fce26da6a6c6414badf7", "score": "0.5507617", "text": "def change_email_request():\n form = ChangeEmailForm()\n if form.validate_on_submit():\n # TODO : call rest\n url = backend_url+'users/me'\n backend_authed_headers = {\n 'Content-Type':'application/x-www-form-urlencoded', \n 'Accept': 'application/json',\n 'Authorization': 'bearer ' + current_user.token\n }\n data = {\n 'change_email': form.change_email.data,\n 'changeEmailPassword': form.changeEmailPassword.data\n }\n\n try:\n start_time = time.time()\n\n h = httplib2.Http(\".cache\")\n (resp, content) = h.request(url, \"PUT\", body=urllib.parse.urlencode(data), headers=backend_authed_headers)\n r = loads(content)\n logger.info(r)\n\n end_time = time.time()\n logger.info('change email start time5 >> '+str(end_time-start_time))\n\n if resp.status in (404,405) or resp.status < 200:\n raise httplib2.ServerNotFoundError('restful api uri not found. {}'.format(r['message']))\n else:\n if r['status'] == 'fail':\n if r['field'] == 'changeEmailPassword':\n r['field'] = 'Password'\n flash(r['field']+' '+r['message'], 'form-error')\n else:\n new_email = form.change_email.data\n token = current_user.generate_email_change_token(new_email)\n change_email_link = url_for('account.change_email', token=token, _external=True)\n get_queue().enqueue(\n send_email,\n recipient=new_email,\n subject='Confirm Your New Email',\n template='account/email/change_email',\n # current_user is a LocalProxy, we want the underlying user\n # object\n user=current_user._get_current_object(),\n change_email_link=change_email_link)\n \n logout_user()\n flash('A confirmation link has been sent to {}.'.format(new_email), 'warning')\n flash('You have been logged out. 
relogin again with new email', 'info')\n\n return redirect(url_for('main.index'))\n\n except Exception as e:\n flash('oops...'+'{'+str(e)+'}', 'form-error')\n\n return render_template('account/manage.html', form=form)", "title": "" }, { "docid": "252c67b3e9dedfa41e5d222a729266fa", "score": "0.55002147", "text": "def check_email_match(self, email: str) -> int:\n query = f\"select * from Users where email='{email}';\"\n data = self._db.select_data(query)\n if data:\n data = data[0]\n else:\n return -1\n return int(data['id'])", "title": "" }, { "docid": "6c9366ab6314d6af51cb212f471327f4", "score": "0.54837996", "text": "def test_expire_by_email(self):\n email_address = 'testing@example{0}.com'.format(random_alphanum(5))\n reason = 'BECAUSE_I_AM_TESTING'\n num_tokens = 3\n created_tokens = []\n\n for num in range(num_tokens):\n created_tokens.append(TokenAuthorization.objects.create(\n email_address=email_address,\n reason=reason,\n created_user=self.user\n ))\n\n TokenAuthorization.objects.expire_by_email(email_address=email_address,\n reason=reason)\n\n token_auths = TokenAuthorization.objects.filter(\n email_address=email_address\n )\n\n self.assertEqual(len(token_auths), num_tokens)\n\n for auth in token_auths:\n self.assertTrue(auth.is_expired())", "title": "" }, { "docid": "79d9627cfff65c8bd8dd6460d0d9474b", "score": "0.54755545", "text": "def reset_password_request():\n form = RequestResetPasswordForm()\n\n if form.validate_on_submit():\n # TODOO: call to restful\n url = backend_url+'auth'\n data = {\n 'email': form.email.data\n }\n try:\n h = httplib2.Http(\".cache\")\n (resp, content) = h.request(url, \"PUT\", body=urllib.parse.urlencode(data), headers=backend_headers)\n r = loads(content)\n logger.info(r)\n\n if resp.status in (404,405) or resp.status < 200:\n raise httplib2.ServerNotFoundError('restful api uri not found')\n else:\n if r['status'] == 'fail':\n flash(r['message'], 'form-error')\n else:\n # for send mail\n user = User(\n id = r['data']['id'],\n username=r['data']['username']\n )\n\n token = user.generate_password_reset_token() # reset token\n reset_link = url_for('account.reset_password', token=token, _external=True)\n\n get_queue().enqueue(\n send_email,\n recipient=form.email.data,\n subject='Reset Your Password',\n template='account/email/reset_password',\n user=user,\n reset_link=reset_link,\n next=request.args.get('next'))\n\n flash('A password reset link has been sent to {}.'\n .format(form.email.data), 'warning')\n return redirect(url_for('account.login'))\n\n except Exception as e:\n logger.error(e)\n flash('oops...'+'{'+str(e)+'}', 'form-error')\n return render_template('account/reset_password.html', form=form)", "title": "" }, { "docid": "b091c3ddccd1b6f5b6d079e19b5b4978", "score": "0.54709387", "text": "def resend_email_verification(self, email):\n\n if(self._lr_object.is_null_or_whitespace(email)):\n raise Exception(self._lr_object.get_validation_message(\"email\"))\n\n query_parameters = {}\n query_parameters[\"apiKey\"] = self._lr_object.get_api_key()\n\n body_parameters = {}\n body_parameters[\"email\"] = email\n\n resource_path = \"/v1/email/resendverify\"\n return self._lr_object.execute(\"PUT\", resource_path, query_parameters, body_parameters)", "title": "" }, { "docid": "b89f1ab334f9bedaa6eba4532a6e025a", "score": "0.54678404", "text": "def verify_auth_token(token):\n s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n current_app.logger.debug('Could not load token, token verification 
failed - returning None')\n return None\n\n current_app.logger.debug('Loaded token data is {}'.format(data))\n return User.query.get(data['id'])", "title": "" }, { "docid": "340df11c363d69acef1de86d608e350b", "score": "0.5449572", "text": "def confirm_email(token):\n # Verify token\n user_manager = current_app.user_manager\n db_adapter = user_manager.db_adapter\n is_valid, has_expired, object_id = user_manager.verify_token(\n token,\n user_manager.confirm_email_expiration)\n\n if has_expired:\n flash(_('Your confirmation token has expired.'), 'error')\n return redirect(user_manager.login_url)\n\n if not is_valid:\n flash(_('Invalid confirmation token.'), 'error')\n return redirect(user_manager.login_url)\n\n # Confirm email by setting User.active=True and User.confirmed_at=utcnow()\n if not db_adapter.EmailClass:\n user = user_manager.find_user_by_id(object_id)\n if user:\n db_adapter.update_object(user,\n active=True,\n confirmed_at=datetime.utcnow(),\n )\n db_adapter.commit()\n else: # pragma: no cover\n flash(_('Invalid confirmation token.'), 'error')\n return redirect(user_manager.login_url)\n else:\n raise NotImplementedError # TODO:\n\n # Send email_confirmed signal\n signals.user_confirmed_email.send(current_app._get_current_object(), user=user)\n\n # Prepare one-time system message\n flash(_('Your email has been confirmed. Please sign in.'), 'success')\n\n # Retrieve 'next' query parameter\n next = request.args.get('next', '/')\n\n # Redirect to the login page with the specified 'next' query parameter\n return redirect(user_manager.login_url+'?next='+next)", "title": "" }, { "docid": "4b34fb4e83df57b55e723e393d7303cd", "score": "0.54457295", "text": "def test_reset_password_invalid_email(self):\n mail.outbox = []\n\n response = self.client.post(\n reverse('account:recover'), {\n 'email': \"nobody@example.invalid\",\n 'username': self.user1.username,\n 'captcha': \"captchaResult\"\n })\n\n join_all_threads()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(len(mail.outbox), 0)\n self.assertIsNotNone(response.data['transaction'])", "title": "" }, { "docid": "f0012ff546949242f726b720815c2be2", "score": "0.5439253", "text": "def reset():\n reset_flash = (\"Om {} är en registrerad adress så har vi skickat en \"\n \"återställningslänk till den.\")\n\n ts = URLSafeTimedSerializer(flask.current_app.config[\"SECRET_KEY\"])\n\n form = forms.ExistingEmailForm()\n\n if form.validate_on_submit():\n user = models.User.query.filter_by(email=form.email.data).first()\n token = ts.dumps(user.id, salt='recover-key')\n\n recover_url = flask.url_for('.reset_token', token=token,\n _external=True)\n\n email_body = flask.render_template(\n 'auth/password_reset_email.jinja2',\n name=user.first_name,\n link=recover_url)\n\n subject = \"Återställ ditt lösenord hos Strequelistan\"\n\n util.send_email(user.email, subject, email_body)\n\n flask.flash(reset_flash.format(form.email.data), 'info')\n return flask.redirect(flask.url_for('.login'))\n\n elif form.email.data:\n flask.flash(reset_flash.format(form.email.data), 'info')\n return flask.redirect(flask.url_for('.login'))\n\n elif form.errors:\n flask.flash(\"Please enter your email.\", 'error')\n\n return flask.render_template('auth/reset.html', form=form)", "title": "" }, { "docid": "65f8202e1491cd8e13d228d9f2ac71db", "score": "0.54383004", "text": "def expired_token():\n token = request.headers.get('token', None)\n token_found = get_token(token)\n if token_found:\n return token_found", "title": "" }, { "docid": 
"0bcc139688c39269101f32d7d8543569", "score": "0.54358464", "text": "def confirm_email(token):\n\n email = confirm_email_token(token)\n if not email:\n flash('The confirmation link in invalid or has expired.', 'danger')\n return redirect(url_for('auth.resend'))\n\n user = User.query.filter_by(email=email).first_or_404()\n\n if user.confirmed:\n flash('Account already confirmed. Please Login.', 'success')\n\n else:\n user.confirmed = True\n user.confirmed_on = dt.now()\n db.session.add(user)\n db.session.commit()\n\n login_user(user)\n\n return redirect(url_for('main.dashboard'))", "title": "" }, { "docid": "831291f8b8c321618d3cb61a02ba78e5", "score": "0.54328334", "text": "def test_post_valid_token(\n api_client,\n email_verification_factory,\n user_factory):\n user = user_factory(password=PASSWORD)\n verification = email_verification_factory(email__user=user)\n email = verification.email\n\n data = {\n 'password': PASSWORD,\n 'token': verification.token,\n }\n\n url = reverse('account:email-verification')\n response = api_client.post(url, data)\n\n email.refresh_from_db()\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data == {}\n assert email.is_verified", "title": "" }, { "docid": "3330cdaf3d6ce44979b1041776f861d4", "score": "0.5432271", "text": "def flask_user_profile_setemail():\n token = request.form.get('token')\n email = request.form.get('email')\n return dumps(user_profile_setemail(token, email))", "title": "" }, { "docid": "d6eee30711917eaaf3ec1e5e2fa3781d", "score": "0.5431277", "text": "def email_check_api(request):\n try:\n\n email = request.POST['email']\n costumer_id = int(request.POST['costumer_id'])\n\n if not email or not costumer_id or request.POST['ldsocial_api_key'] != settings.CRM_UPDATE_USER_API_KEY:\n return HttpResponseForbidden()\n\n subscriber_from_crm = Subscriber.objects.filter(costumer_id=costumer_id)\n\n if not subscriber_from_crm.exists():\n return HttpResponse('OK')\n\n user = User.objects.select_related('subscriber').get(email=email)\n\n user_costumer_id = getattr(user.subscriber, 'costumer_id', None)\n\n if (user_costumer_id and user_costumer_id != costumer_id) or (user_costumer_id is None):\n msg = u'Ya existe otro usuario en la web con ese email'\n else:\n msg = u'OK'\n\n except KeyError:\n msg = u'Parameter missing'\n except ValueError:\n msg = u'Wrong values'\n except User.DoesNotExist:\n msg = u'OK'\n except MultipleObjectsReturned:\n msg = u'Hay más de un usuario con ese email en la web'\n\n return HttpResponse(msg)", "title": "" }, { "docid": "b159b27d61a0057026fe10cba1d60290", "score": "0.54243475", "text": "def forget_password(request):\n params = request.POST\n email = params[\"email\"]\n\n res = requests.get(mcs.users_url + \"?email=\" + str(email))\n if res.status_code >= 300:\n return HttpResponse(\"unknown_error\")\n\n user_info = res.json()[\"results\"]\n\n if len(user_info) == 0:\n return HttpResponse(\"unknown_email\")\n\n userid = user_info[0][\"id\"]\n new_password = mcm.generate_random_string(6)\n\n # send email to inform new password\n content = \"Your new password is \\\"\" + new_password + \"\\\". 
\"\n content += \"This is the auto-generated password, please change it for the secure of your account after login.\"\n\n msg = MIMEMultipart('alternative')\n msg[\"from\"] = mcm.get_mail_account(request)[\"email\"]\n msg[\"to\"] = email\n msg[\"subject\"] = \"Your new password is set.\"\n\n token = mcm.generate_random_string(12)\n mcs.notification_email_token[\"token\"] = token\n mcs.notification_email_token[\"created_time\"] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n email_content = render_to_string(\"email_template.html\", {\"subject\": msg[\"subject\"],\n \"new_password\": new_password,\n \"content\": content,\n \"time\": datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"type\": \"forgetpassword\",\n \"token\": token})\n\n part1 = MIMEText(email_content, 'html')\n\n msg.attach(part1)\n\n res = mcm.send_email(request, msg)\n\n if res == -1:\n return HttpResponse(\"fail_send_email\")\n\n\n # update database with new password\n new_params = {}\n new_params[\"password\"] = hashlib.md5(new_password.encode()).hexdigest()\n\n res = requests.put(mcs.users_url + str(userid) + \"/\", json=new_params)\n\n if res.status_code >= 300:\n return HttpResponse(\"fail_update\")\n\n return HttpResponse(\"success\")", "title": "" }, { "docid": "4419b6cff22be581b069ff21f811354d", "score": "0.5420458", "text": "def verify_token(self, entered_token):\n is_valid = self.totp.verify(entered_token)\n if is_valid:\n self.verified_time = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)\n return is_valid", "title": "" }, { "docid": "040caba70d46924087a0c148bcf60b7b", "score": "0.5410104", "text": "def check_token(self, user, token):\n try:\n user_info_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n base36_to_int(user_info_b36)\n except ValueError:\n return False\n\n # check that the user_info/uid has not been tampered with\n # and that the user is still inactive\n if not constant_time_compare(self.make_token(user), token):\n return False\n return True", "title": "" }, { "docid": "3a3bf175c041836fdb13ee647b4b51bc", "score": "0.5393215", "text": "def test_reset_password_invalid_email(self):\n mail.outbox = []\n\n response = self.client.post(\n reverse('account:recover'), {\n 'email': self.user1.email,\n 'username': \"thisIsNotMyUserName\",\n 'captcha': \"captchaResult\"\n })\n\n join_all_threads()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(len(mail.outbox), 0)\n self.assertIsNotNone(response.data['transaction'])", "title": "" }, { "docid": "0e88a797ab07cab3c844d033077d8bc6", "score": "0.5388716", "text": "def reset_token(token):\n user=User.verify_reset_token(token)\n if user is None:\n flash('An invalid token','warning')\n return redirect(url_for('web.reset_request'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n pw_hash = form.password.data\n Database.update_one(collection=\"users\", query=[{'user_id':user.user_id}, {\"$set\": { \"password\": pw_hash }} ])\n flash('Your password has been updated! 
you are now able to login')\n return redirect(url_for('web.login'))\n return render_template('pages/reset_token.html', title='Reset password', form=form)", "title": "" }, { "docid": "0b5330d67c27eafd53a57d39b064eb8a", "score": "0.5387305", "text": "def check_email(self, request):\n email = request.data.get('email')\n if User.objects.filter(email=email).exists():\n return Response(data={'status': False, 'msg': 'email id already exists'},status=status.HTTP_400_BAD_REQUEST)\n return Response(data={'status': True, 'msg': 'email Available'},status=status.HTTP_200_OK)", "title": "" }, { "docid": "cb897f7f3ae9115a87aea7d37c8954c9", "score": "0.5384364", "text": "def reset_token(token):\n expired = \"Länken har gått ut, var vänlig försök igen.\"\n invalid = \"Länken verkar vara trasig eller felaktig,\\\n var vänlig försök igen.\"\n\n ts = URLSafeTimedSerializer(flask.current_app.config[\"SECRET_KEY\"])\n\n try:\n data, timestamp = ts.loads(token, salt='recover-key', max_age=3600,\n return_timestamp=True)\n user = models.User.query.get(data)\n except SignatureExpired:\n flask.flash(expired, 'error')\n return flask.redirect(flask.url_for('.login'))\n except: # noqa: E722 (ignore 'bare except' warning)\n flask.flash(invalid, 'error')\n return flask.redirect(flask.url_for('.login'))\n\n if timestamp < user._password_timestamp:\n flask.flash(expired, 'error')\n return flask.redirect(flask.url_for('.login'))\n\n form = forms.NewPasswordForm()\n\n if form.validate_on_submit():\n user.password = form.new_password.data\n models.db.session.commit()\n flask.flash(\"Ditt lösenord har återställts!\", 'success')\n return flask.redirect(flask.url_for('.login'))\n else:\n forms.flash_errors(form)\n\n return flask.render_template('auth/reset_token.html', form=form)", "title": "" }, { "docid": "c8b636d9f6d2e076646654a099d5c9ac", "score": "0.5372858", "text": "def test_expire_by_email_proxy_model(self):\n email_address = 'testing@example{0}.com'.format(random_alphanum(5))\n num_tokens = 3\n created_tokens = []\n\n for num in range(num_tokens):\n created_tokens.append(TestTokenAuthorization.objects.create(\n email_address=email_address,\n created_user=self.user\n ))\n\n TestTokenAuthorization.objects.expire_by_email(\n email_address=email_address\n )\n\n token_auths = TestTokenAuthorization.objects.filter(\n email_address=email_address\n )\n\n self.assertEqual(len(token_auths), num_tokens)\n\n for auth in token_auths:\n self.assertTrue(auth.is_expired())", "title": "" }, { "docid": "28da36d8755bc51a0421ed253e71eafc", "score": "0.5365226", "text": "def generate_token(email, password):\n user = User.objects(email=email).first()\n if user:\n if str(bcrypt.hashpw(password.encode('utf-8'),\n user.password.encode('utf-8'))) == user.password:\n return check_user_token(user)\n else:\n return None\n else:\n return None", "title": "" }, { "docid": "c5dd2475efa2a416bb10e5c72e4d1fd4", "score": "0.5362734", "text": "def post(self):\n email = request.form['email']\n try:\n validate_email(email)\n except ValidationError as error:\n flash(error.message)\n return render_template('authentication/forgot_password.html')\n user = User.query.filter_by(email=email).first()\n if user:\n flash('Check Your e-email to reset the password!')\n user.send_email('Flask simple chats reset password',\n render_template('authentication/emails/reset_password.txt',\n user=user, token=user.get_reset_password_token()))\n return redirect(url_for('authentication.login'))\n else:\n flash('User with such an e-mail does not exist')\n return 
render_template('authentication/forgot_password.html')", "title": "" }, { "docid": "43cfc20123bab7b114f969ec2a4be029", "score": "0.53581226", "text": "def verify_code(self, request):\n try:\n code = request.data.get('code')\n email = request.data.get('email')\n except:\n return Response(data={'status': False, 'msg': 'code and email both required'},\n status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n try:\n user = User.objects.get(email = email)\n except:\n return Response(data={'status': False, 'msg': 'email not exist'},\n status=status.HTTP_404_NOT_FOUND)\n if UserProfile.objects.filter(user = user, forgot_code = code).exists():\n return Response(data={'status': True, 'msg': 'code exists'},status=status.HTTP_200_OK)\n else:\n return Response(data={'status': False, 'msg': 'code not exists'},status=status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "c4740353270248ab3bcd63a2b9d5a3c5", "score": "0.53555876", "text": "def test_get_reset_password(self):\n response = self.client.post('/password_request/', {'email': 'kegan@example.com'})\n response = self.client.get('/password_reset/%s-%s/' % (response.context['uid'], response.context['token']))\n self.assertTemplateUsed(response, 'registration/password_reset.html')", "title": "" }, { "docid": "6cc80bcade06ef931b676e7425de0513", "score": "0.53483456", "text": "def test_user_password_reset_confirm(self):\n\n # GIVEN valid data\n data = {\n 'uidb64': urlsafe_base64_encode(force_bytes(self.user.pk)),\n 'token': default_token_generator.make_token(self.user),\n 'password': 'new-password'\n }\n\n # WHEN confirming password reset\n response = self.auth_api.password_reset_confirm(data)\n\n # THEN it should succeed\n self.assertTrue(response.success)", "title": "" }, { "docid": "317f3885440e8ac76d0dbef86d8ddd0d", "score": "0.53468156", "text": "def forgot_password():\n if request.method == \"GET\":\n return render_template(\"forgot_password_temp.html\", sent=request.args.get('sent'))\n\n if request.method == \"POST\":\n email = request.form.get('email')\n if not email:\n return render_template(\"forgot_password_temp.html\", email_error=True)\n\n user = firebase_api.find_user_by_email(email)\n if not user:\n return render_template(\"forgot_password_temp.html\", user_error=True)\n\n token = firebase_api.create_password_reset_token(email)\n print(\"is testing?\", app.testing)\n if not app.testing:\n response = email_utils.send_reset_password_email(email, token)\n print(response.content)\n return render_template(\"forgot_password_temp.html\", sent=True)", "title": "" }, { "docid": "a892b8eec5f84346b2a61a79b27e1f2f", "score": "0.5343945", "text": "def reset_password(token):\n # Verify token\n user_manager = current_app.user_manager\n db_adapter = user_manager.db_adapter\n\n is_valid, has_expired, user_id = user_manager.verify_token(\n token,\n user_manager.reset_password_expiration)\n\n if has_expired:\n flash(_('Your reset password token has expired.'), 'error')\n return redirect(user_manager.login_url)\n\n if not is_valid:\n flash(_('Your reset password token is invalid.'), 'error')\n return redirect(user_manager.login_url)\n\n user = user_manager.find_user_by_id(user_id)\n if user:\n # Avoid re-using old tokens\n if hasattr(user, 'reset_password_token'):\n verified = user.reset_password_token == token\n else:\n verified = True\n if not user or not verified:\n flash(_('Your reset password token is invalid.'), 'error')\n return redirect(user_manager.login_url)\n\n # Initialize form\n form = user_manager.reset_password_form(request.form)\n\n # Process valid 
POST\n if request.method=='POST' and form.validate():\n # Invalidate the token by clearing the stored token\n if hasattr(user, 'reset_password_token'):\n db_adapter.update_object(user, reset_password_token='')\n\n # Change password\n hashed_password = user_manager.hash_password(form.new_password.data)\n db_adapter.update_object(user, password=hashed_password)\n db_adapter.commit()\n\n # Send 'password_changed' email\n if user_manager.enable_email and user_manager.send_password_changed_email:\n emails.send_password_changed_email(user.email, user)\n\n # Prepare one-time system message\n flash(_(\"Your password has been reset successfully. Please sign in with your new password\"), 'success')\n\n # Redirect to the login page\n return redirect(url_for('user.login'))\n\n # Process GET or invalid POST\n return render_template(user_manager.reset_password_template, form=form)", "title": "" }, { "docid": "9a883f58f84a6c020c398b55cb9492b3", "score": "0.53363734", "text": "def validate_token(token):\n user_id = get_redis().get(\"auth.%s\" % token)\n user = get_hash(\"users.%s\" % user_id)\n if not len(user):\n return False\n return user['auth'] == token", "title": "" }, { "docid": "0c5bf81bdcc2fdde076be934e10baefa", "score": "0.53215957", "text": "def check_return_token_existence(self, token: str, *args, **kwargs) -> bool:\n return EmailToken.objects.get(token=token)", "title": "" } ]
63786851c7e7ae73d467536316bbf110
Renders the current game state to the screen.
[ { "docid": "b2fb00ada0d03815848d460f37515c6e", "score": "0.6745435", "text": "def draw_game_state(self, win):\n state = self.get_game_state()\n output = ''\n if state == 'UNFINISHED':\n turn = self.get_turn()\n if turn == 'b':\n player = 'Blue'\n else:\n player = 'Red'\n output = f\"{player}'s Turn\"\n elif state == 'RED_WON':\n output = 'Red Wins!'\n else:\n output = 'Blue Wins!'\n\n font = pygame.font.Font('freesansbold.ttf', 32)\n text = font.render(output, True, BLACK, WOOD)\n textRect = text.get_rect()\n textRect.center = (WIDTH//2, HEIGHT+45)\n win.blit(text, textRect)", "title": "" } ]
[ { "docid": "22595c50b0cb7ee71aca591ea16d2c4d", "score": "0.77084374", "text": "def render(self):\n self.screen.fill(WHITE) # Fill background before drawing scene\n #-- Add rendering code here --#\n\n #-----------------------------#\n pg.display.flip() # Draw the screen onto the display window", "title": "" }, { "docid": "aaac8f9d94f831f4a8ce05a2050f52aa", "score": "0.76338255", "text": "def render(self):\n dirty = self.all_sprites.draw(self.screen)\n pg.display.update(dirty)", "title": "" }, { "docid": "fc34c36a6be06a4666af46f3ee05bbb8", "score": "0.75871235", "text": "def render(self):\n self.screen.fill(pg.Color(\"white\"))\n self.player.draw(self.screen)\n pg.display.update()", "title": "" }, { "docid": "80166c9b58f26803a3545ebba44a7071", "score": "0.75779504", "text": "def render(self):\n self._draw_board()\n self._draw_stone()\n pygame.display.update()", "title": "" }, { "docid": "3c73f31737c9168f9ee6d5f36ecc664d", "score": "0.75630575", "text": "def _draw_screen(self):\n\n self._draw_game()\n\n if self.state == 'menu':\n self.menu.draw_menu()\n\n if self.vars.show_fps:\n self.fps_display.update()\n self.fps_display.blit_self()", "title": "" }, { "docid": "b1b61f543f198fc4ee71dfa492f315ec", "score": "0.75536555", "text": "def render(self):\n self.screen.fill(BACKGROUNDCOLOR)\n self.screen.blit(self.background, (0, 0))\n\n self.infobox.render(self.screen, self.info_label)\n\n for button in self.buttons:\n button.render(self.screen, LINECOLOR)\n\n for hero_box in self.hero_boxes:\n hero_box.render(self.screen, self.hc)\n\n self.stats_box.render(self.screen)\n self.skills_box.render(self.screen)\n self.inventory_box.render(self.screen)\n self.pouch_box.render(self.screen)\n self.spells_box.render(self.screen)\n if self.invclick_box:\n self.invclick_box.render(self.screen)", "title": "" }, { "docid": "0b28b3dc339f7bef028484b361301516", "score": "0.7492987", "text": "def renderall(self):\n\n if not self.isinitialized:\n return\n # # clear display\n # self.screen.fill((0, 0, 0))\n # # draw some words on the screen\n # somewords = self.smallfont.render(\n # 'The View is busy drawing on your screen', \n # True, \n # (0, 255, 0))\n # self.screen.blit(somewords, (0, 0))\n # flip the display to show whatever we drew\n #pygame.display.flip()\n\n pygame.display.update(self.dirty_rects)\n self.dirty_rects = []", "title": "" }, { "docid": "94790d129bb51b9c5dd29ff8f8d46618", "score": "0.74496365", "text": "def render(self):\n self.game.screen.blit(self.game.assets[\"bg_minigame\"], (0, 0))\n if self.counting_down:\n self.game.draw_text('Boss battle! 
Get ready to click!', 40, self.game.WIDTH / 2, self.game.HEIGHT / 2 - 80)\n self.game.draw_text(self.counter_str, 80, self.game.WIDTH / 2, self.game.HEIGHT / 2)\n\n difficulty_string = \"Difficulty: \"\n if self.difficulty == 0:\n difficulty_string += \"Easy\"\n elif self.difficulty == 1:\n difficulty_string += \"Medium\"\n else:\n difficulty_string += \"Hard\"\n\n self.game.draw_text(difficulty_string, 30, self.game.WIDTH / 2, self.game.HEIGHT / 2 + 80)\n else:\n for ring in self.active_rings:\n ring.render()", "title": "" }, { "docid": "28a5774aef772840b928667047b9f6a7", "score": "0.74323356", "text": "def display(self):\n\n self.window.blit(self.background, (0, 0))\n\n self.display_text()\n self.display_maze()\n self.display_status()\n\n pygame.display.update()", "title": "" }, { "docid": "c8af7b0c681a95aec8e34f6b88987333", "score": "0.74251986", "text": "def draw(self):\n if self._state == STATE_INACTIVE:\n self._message.draw(self.view)\n elif self._state == STATE_COUNTDOWN:\n self._game.draw(self.view)\n self._message.draw(self.view)\n self._score.draw(self.view)\n elif self._state == STATE_PAUSED:\n self._game.draw(self.view)\n self._message.draw(self.view)\n self._score.draw(self.view)\n elif self._state == STATE_COMPLETE:\n self._message.draw(self.view)\n else:\n self._game.draw(self.view)\n self._game.drawBall(self.view)\n self._score.draw(self.view)", "title": "" }, { "docid": "62d6e7b374586a47e2cc3621c9afafda", "score": "0.73974925", "text": "def draw(self):\n self.renderer.draw(self.game_data)", "title": "" }, { "docid": "b168b24f9cbf56b5614b805d7e5ee690", "score": "0.7371526", "text": "def _update_screen(self):\r\n\r\n # Set bg color\r\n self.game.screen.fill(self.game.settings.bg_color)\r\n\r\n # Draw entities to the screen\r\n self.game.player.draw_player()\r\n self.game.enemy.draw_enemy()\r\n self.game.ammo.draw_bullet()\r\n if self.game.stats.game_over:\r\n self.game.game_over_button.draw_button()\r\n self.game.play_again_button.draw_button()\r\n if not self.game.stats.game_active:\r\n self.game.play_button.draw_button()\r\n self.game.stats.draw_score()\r\n self.game.stats.draw_lives()\r\n\r\n # Make the most recently drawn screen visibile.\r\n pygame.display.flip()", "title": "" }, { "docid": "f379fb4beb5dc35d71e8e13ee1d57ba0", "score": "0.7370259", "text": "def render(self, game_window):\n self.whole_station.draw(game_window)", "title": "" }, { "docid": "e2da73e59ce2d8b93f7940cf370076d5", "score": "0.73039114", "text": "def _update_screen(self):\r\n # Redraw the screen during each pass through the loop.\r\n self.screen.fill(self.settings.bg_color)\r\n self.maze.blitme()\r\n for portal in self.portals:\r\n portal.blitme()\r\n self.puck.blitme()\r\n if not self.timer_gameover.count:\r\n for fruit in self.fruit:\r\n fruit.blitme()\r\n for ghost in self.ghosts:\r\n ghost.blitme()\r\n for score in self.score_text:\r\n score.blitme()\r\n self.write_text()\r\n # Make the most recently drawn screen visible.\r\n pygame.display.flip()", "title": "" }, { "docid": "a273118490a805e75582844b340dc267", "score": "0.7301276", "text": "def render(self):\n if self.iscene != -1:\n self.render_events = self.scenes[self.iscene].render(self.screen)\n get_plugin().doupdate()", "title": "" }, { "docid": "4500726cc900822e707faf5048e7c5ec", "score": "0.7294919", "text": "def draw(self):\n self.screen.fill(pygame.Color(63,63,63)) #makes a grey screen\n #iterates through faces in the model to draw them to the screen\n self.screen.blit(self.faceImages[self.model.face.face_index],\n 
(self.model.face.x, self.model.face.y))\n #iterates through a dictionary that tracks where heads are on each level\n for key, value in self.model.foundation.items():\n self.screen.blit(self.faceImages[value.face_index], (value.x,\n value.y))\n #displays instructions to use spacebar to play\n if self.model.won_game == 0:\n self.screen.blit(self.text_playing, (0,0))\n else: #shows a user the text after winning, to press enter to play again\n self.screen.blit(self.text_won, (0,0))\n self.model.won_game = 0\n self.model.new_game = 1\n pygame.display.update()", "title": "" }, { "docid": "d7d4078a68537dbc64f7c6ef6c2df158", "score": "0.7289812", "text": "def display(self):\n self.game_manager.display()", "title": "" }, { "docid": "cb79f1420054dfae9e7868d342cb9f90", "score": "0.7273013", "text": "def render(self):\r\n\r\n # Draw the background texture\r\n self.screen.blit(self.bg, [0,0])\r\n\r\n # Draw the orbit ellipses for the player and target objects\r\n if self.draw_orbits_toggle:\r\n self.draw_orbits()\r\n\r\n # Draw the game scene (all bodies)\r\n self.draw_scene()\r\n\r\n # Draw the HUD on top of the scene\r\n self.draw_hud()\r\n\r\n # Update window contents to draw changes\r\n pygame.display.update()", "title": "" }, { "docid": "71e481379e866dafc02debb8a70c49df", "score": "0.71944904", "text": "def _draw(self):\n GL.glClear(GL.GL_STENCIL_BUFFER_BIT |\n GL.GL_DEPTH_BUFFER_BIT |\n GL.GL_COLOR_BUFFER_BIT)\n # self._menu.draw()\n self._game.draw()\n sdl2.SDL_GL_SwapWindow(self._window.window)", "title": "" }, { "docid": "59c5ed948f93411c446a81160253e828", "score": "0.7179412", "text": "def _update_screen(self):\n # Redraw the screen during each pass through the loop.\n self.screen.fill(self.settings.bg_color)\n # Add the wizard to the game\n self.wizard.blitme()\n # Add balls to the game\n for ball in self.balls.sprites():\n ball.draw_ball()\n # Add ghosts to the game\n self.ghosts.draw(self.screen)\n # Draw the score information.\n self.sb.show_score()\n # Draw the play button if the game is inactive.\n if not self.stats.game_active:\n self.play_button.draw_button()\n # Make the most recently drawn screen visible.\n pygame.display.flip()", "title": "" }, { "docid": "bcc778413cb39889acab16f9058dbca8", "score": "0.7161751", "text": "def _update_screen(self):\n self.screen.fill(self.bg_color)\n # Draw Stars\n self.stars.draw(self.screen)\n\n # Draw ship\n self.ship.blitme()\n\n # Display score\n self.sb.show_score()\n \n # Draw bullet\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n \n # Draw Aliens\n self.aliens.draw(self.screen)\n\n # Draw Button\n if not self.game_stats.game_active:\n self.button.draw_button()\n\n pygame.display.flip()", "title": "" }, { "docid": "8daa8470673eb22a91a463eb640d6ad3", "score": "0.7149643", "text": "def render(self) -> None:\n # Stop accepting player inputs when game is lost\n if not self._lost:\n for x in range(self.game_board.width):\n for y in range(self.game_board.height):\n box = pygame.Rect(x * ICON_SIZE, y * ICON_SIZE, ICON_SIZE,\n ICON_SIZE)\n self.screen.blit(self.game_board.board[x][y].icon, box)\n pygame.display.update()", "title": "" }, { "docid": "7948e1f86ad3406b1098bdffbef855c3", "score": "0.7116076", "text": "def render(self):\n print(\"Currently at %s\" % self._current_state)", "title": "" }, { "docid": "bbcddb2762d9de4307b5531ed230e64e", "score": "0.70962435", "text": "def draw(self):\r\n self.screen.fill(BGCOLOR)\r\n self.draw_grid()\r\n self.all_sprites.draw(self.screen)\r\n pygame.display.flip()", "title": "" }, { "docid": 
"90bb62d39e6096000de9290a16262bbb", "score": "0.70841396", "text": "def draw(self):\n self.screen.fill(bgcolor=BLACK)\n self.model.mario.draw(self.screen)\n self.model.level.draw(self.screen)\n self.screen.update()", "title": "" }, { "docid": "5a41eb08bf8af6ec8972c02f0ccd2f00", "score": "0.7070715", "text": "def on_draw(self):\n # Clear the window\n self.clear()\n # Draw batch with the frame contents\n self.idsgame_config.render_config.batch.draw()\n # Make this window the current OpenGL rendering context\n self.switch_to()", "title": "" }, { "docid": "a7be9ea73135a78b88de1396ebde3ae8", "score": "0.7048239", "text": "def generate_display(self):\n self.screen.fill((0, 0, 0))\n pygame.display.update()", "title": "" }, { "docid": "84e594c97c70c90baaaaffb6e1a208c2", "score": "0.7044345", "text": "def render(self):\r\n pg.display.set_caption(f\"FPS: {floor(self.clock.get_fps())}\")\r\n self.active_scene.render()\r\n pg.display.flip()", "title": "" }, { "docid": "ac60fa099c9fe41a3f1e316963a90d18", "score": "0.7032672", "text": "def _update_screen(self):\r\n self.screen.fill(self.settings.bg_color)\r\n self.ship.blitme()\r\n for bullet in self.bullets.sprites():\r\n bullet.draw_bullet()\r\n self.target.draw_target()\r\n # Draw the play button if the is inactive.\r\n if not self.stats.game_active:\r\n self.play_button.draw_button()\r\n # Make the most recently drawn screen visible.\r\n pygame.display.flip()", "title": "" }, { "docid": "7a39e3bbb30ba9324758acebf7b43f54", "score": "0.7010744", "text": "def run(self):\n # Make the screen state active.\n self.active = True\n\n while self.active:\n # Check for events.\n self.catch_events()\n\n # Draw the screen.\n self.display()", "title": "" }, { "docid": "471add0ed5c9836add4d33662c1eee78", "score": "0.7000562", "text": "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.player.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.zombies.draw(self.screen)\n self.sb.show_score()\n\n if not self.stats.game_active: \n self.play_button.draw_button()\n\n pygame.display.flip()", "title": "" }, { "docid": "f3aae30d7102bff47652f34fabeb80ba", "score": "0.6990154", "text": "def update(self):\n self._board.draw(self._win)\n self.draw_valid_moves(self._valid_moves)\n self.draw_game_state(self._win)\n pygame.display.update()", "title": "" }, { "docid": "0b2dc344aa720e89081ec30966826c21", "score": "0.6986687", "text": "def draw(self):\n\n self.screen.blit(self.surface, (0,0))", "title": "" }, { "docid": "6e3b605d8d905d62f3d1ec056ccc90fc", "score": "0.6968394", "text": "def update_screen(self):\r\n #redraw screen during each pass through loop.\r\n self.screen.fill(self.settings.bg_color)\r\n\r\n #draw the play button if game is inactive.\r\n if not self.round_name or self.round_mc or self.round_img:\r\n self.play_button.draw_button()\r\n\r\n #make most recently drawn screen visible.\r\n #pygame.display.flip()\r", "title": "" }, { "docid": "ac9534e6525679b3bb89385cf9898b27", "score": "0.6965635", "text": "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n\n # Draw the score information.\n self.sb.show_score() # Call show_score() just before we draw the Play button.\n\n # Draw the play button if the game is inactive.\n if not self.stats.game_active:\n self.play_button.draw_button()\n self.easy_button.draw_button()\n self.medium_button.draw_button()\n 
self.difficult_button.draw_button()\n\n\n pygame.display.flip() # When we move game elements around, pygame.display.flip() continually updates display to show new positions of game elements and hides old ones -> smooth movement", "title": "" }, { "docid": "499dbeb0ad7ba4ab70a7366e2a704aca", "score": "0.696442", "text": "def _draw_game(self):\n\n self.screen.blit(self.bg, (0, 0))\n\n # FX\n self.asteroids.draw(self.screen)\n\n # Player ship\n for bullet in self.ship.bullets:\n bullet.draw_bullet()\n self.ship.blit_self()\n\n # Alien fleet\n self.alien_fleet.draw(self.screen)\n\n # Overlays\n self.scoreboard.blit_self()", "title": "" }, { "docid": "1576e65554fd4fbfb0e5b69c78dcf4f5", "score": "0.6959858", "text": "def draw(self) -> None:\n game_information = self._game.get_game_information()\n dungeon_size = self._game._dungeon_size\n display = Display(game_information, dungeon_size)\n\n player_pos = self._game.get_player().get_position()\n display.display_game(player_pos)\n\n moves = self._game.get_player().moves_remaining()\n display.display_moves(moves)", "title": "" }, { "docid": "4b574486e0f4cda52ab90e59a9271842", "score": "0.6945787", "text": "def renderplay(self):\r\n self.screen.fill((0,0,0))\r\n for tile in self.model.tileMap.tilesOnScreen:\r\n self.renderTile(tile)\r\n # render fps\r\n fpsText = self.smallfont.render(\r\n \"FPS: \" + str(self.clock.get_fps()),\r\n True, (255,0,255)\r\n )\r\n self.screen.blit(fpsText, (0, 0))\r\n pygame.display.flip()", "title": "" }, { "docid": "2885ad64c4eb15e2c308daa2c88ab766", "score": "0.6929911", "text": "def draw(self):\n pygame.draw.rect(self.screen, colors[self.color], [self.obs_x, self.obs_y, self.obs_len, self.obs_len])", "title": "" }, { "docid": "5c7561d0ef0e5d220ca3a8e902780c16", "score": "0.69155115", "text": "def draw(self):\n\n # Draw every frame\n self.gameDisplay.blit(self.img, self.rect)", "title": "" }, { "docid": "e9853f26856370ab3c23c2d2ca37d1e0", "score": "0.69103837", "text": "def draw(self, screen, clock=None):\n pass", "title": "" }, { "docid": "06f2b60c67c3a6cee81f2c234ba1ce68", "score": "0.68817246", "text": "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.player_ship.draw()\n self.aliens.draw(self.screen)\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n for laser in self.lasers.sprites():\n laser.draw()\n if not self.stats.game_active:\n self.play_button.draw()\n self.scoreboard.draw()\n pygame.display.flip()", "title": "" }, { "docid": "775ca052f57b75be82ab37a45bb26762", "score": "0.6877875", "text": "def draw(self):\n self.win.blit(self.background, (0, 0))\n for pipe in self.pipes:\n pipe.draw(self.win)\n self.base.draw(self.win)\n self.score.draw(self.win, self.width // 2, 40)\n for bird in self.birds:\n bird.draw(self.win)\n pygame.display.update()", "title": "" }, { "docid": "00e2a2e56b1676cb0cc8a704b4a624f1", "score": "0.68774796", "text": "def display(self):\n # Fill background color.\n self.bg.surface.fill(self.bg.color)\n\n # Draw objects.\n self.draw_objects()\n\n # Make the most recently drawn bg.surface visible.\n pygame.display.flip()", "title": "" }, { "docid": "3d7737668dc2244b5188a41fa4a177cf", "score": "0.6874249", "text": "def draw(self):\r\n if not pygame.display.get_init():\r\n return False\r\n self.screen.fill(self.bgcolor)\r\n for o in self.objects:\r\n o.move()\r\n pygame.display.flip()\r\n if self.fps != None:\r\n self.clock.tick(self.fps)\r\n else:\r\n self.clock.tick()", "title": "" }, { "docid": "cfe9a32cf09beb84e7c5358eb56de85f", "score": 
"0.6872715", "text": "def draw(self):\n\t\t#Fills the screen with white color \n\t\tself.screen.fill(constants.WHITE)\n\n\t\t# Draws all the platforms for the game \n\t\tself.platform_zero.draw(self.screen)\n\t\tself.platform_one.draw(self.screen)\n\t\tself.platform_two.draw(self.screen)\n\t\tself.platform_three.draw(self.screen)\n\t\tself.platform_four.draw(self.screen)\n\n\t\t# Draws all the ladders for the game\n\t\tladder.Ladder.draw(self.screen)\n\t\tladder.BrokenLadder.draw(self.screen)\n\n\t\t# Draws the cage for the game \n\t\tself.cage_one.draw(self.screen)\n\n\t\t# Draws all the fireballs\n\t\tfireball.Fireball.draw(self.screen)\n\n\t\t# Draws the player,donkey and princess \n\t\tself.active_sprite_list.draw(self.screen)\n\n\t\t# Draws the score board\n\t\tself.score_board.draw(self.screen)", "title": "" }, { "docid": "a5653489af39c0b5a8e60548d362d5a9", "score": "0.6867204", "text": "def draw(self):\n self.screen.fill(self.background)\n\n for game_object in self.object_pool:\n game_object.draw(self.screen)", "title": "" }, { "docid": "c9ff5bd2e0a61067c4533366ae867ba0", "score": "0.6865818", "text": "def draw(self):\n ##set up the sky color\n self._setBGColor(*self.game.world.skyColor())\n self._setup_glbasic()\n self._setup_fog(*self.game.world.skyColor(),20.0,60.0)\n self._setup_3d(self.game.player.rotation, self.game.player.position)\n glColor3d(1, 1, 1)\n self.game.world.batch.draw()\n self._drawFocusedBlock()\n self._setup_2d()\n self._draw_label()\n self._draw_reticle()", "title": "" }, { "docid": "0d85b416eeea40aec869d7ddc93391bd", "score": "0.68596125", "text": "def on_render(self):\n self._display_surf.fill((0, 0, 0))\n self.agent.draw(self._display_surf, self._image_surf,self._head_surf)\n self.agent.game.apple.draw(self._display_surf, self._apple_surf)\n pygame.display.flip()", "title": "" }, { "docid": "6d67313e4bef0cb21b48cd6e2a2cdbe8", "score": "0.68431365", "text": "def draw(self):\n\n # Clear the screen.\n self.screen.fill(BLACK)\n\n # Draw the sprites.\n self.bullets.draw(self.screen) # Want bullets below ship.\n self.ships.draw(self.screen)\n self.asteroids.draw(self.screen)\n\n # Show the score display.\n self.screen.blit(self.score_display, (5, 5))", "title": "" }, { "docid": "27b2d7038d52b652d1f73d34c5edb42c", "score": "0.68406636", "text": "def _update_screen(self):\n\t\tself.screen.fill(self.settings.bg_color)\n\t\tself.ship.blitme()\n\t\tself.rect._draw_rect()\n\t\tfor bullet in self.bullets.sprites():\n\t\t\tbullet.draw_bullet()\n\t\tif not self.stats.game_active:\n\t\t\tself.play_button.draw_button()\n\t\t\tpygame.mouse.set_visible(True)\n\t\t\t\n\t\t\n\t\t\t\t\n\t\tpygame.display.flip()", "title": "" }, { "docid": "e20de40be3291ba5c2eb083801f59b73", "score": "0.68334264", "text": "def _update_screen(self):\r\n self.screen.fill(self.settings.bg_color)\r\n self.ship.blitme()\r\n #to draw all fired bullets to the screen, we loop through the sprites in the bullets and call draw_bullet() on each one\r\n for bullet in self.bullets.sprites():\r\n bullet.draw_bullet()\r\n self.aliens.draw(self.screen)\r\n\r\n #here we draw the score info\r\n self.sb.show_score()\r\n\r\n #here, if the game is inactive, we draw the play button\r\n if not self.stats.game_active:\r\n self.play_button.draw_button()\r\n\r\n #now, display the most recently drawn/created screen visible\r\n #for this specific case, it just draws an empty screen on each pass through the loop\r\n #it erases the old screen so that only the new screen is visible\r\n #this function below is responsible for 
the smooth movement of the game elements because it continually updates the display to show new positions and erases the old positions\r\n pygame.display.flip()", "title": "" }, { "docid": "8d53d0da76e7b24a8139b39dddff8059", "score": "0.68322515", "text": "def draw(self):\n if self.color:\n self.screen.fill(self.color)\n if self.image:\n self.screen.blit(self.image, self.image.get_rect())\n for sp in self.sprites:\n sp.draw(self.screen)", "title": "" }, { "docid": "691a05cdaed0faf0c18d9515242198db", "score": "0.6831777", "text": "def run(self):\n while not self.state.is_game_over():\n player = self.state.get_current_player()\n action = player.get_action(self.state)\n self.state.take_action(action)\n\n if self.gui:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return\n self.render()\n\n ## handling game end\n if self.state.winner is not None:\n message = f\"Player {self.state.winner} wins!\"\n else:\n message = \"Draw!\"\n\n if self.gui:\n font = pygame.font.Font('freesansbold.ttf', 32)\n text = font.render(message, True, (255, 255, 255), (0, 0, 0))\n text_rect = text.get_rect()\n text_rect.center = (320,320)\n while True:\n self.screen.blit(text, text_rect)\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n pygame.quit()\n return\n else:\n print(message)", "title": "" }, { "docid": "3a9f298bdf69b0db5936b0aea4775a82", "score": "0.6831212", "text": "def render(self, screen):\n self.OH.draw_objects(screen)", "title": "" }, { "docid": "705019a2b23cd0118fbba1c8f0cb56f7", "score": "0.6830048", "text": "def draw(self, screen: pygame.display):\n self._game_board_sprite.draw(screen)\n self._chat_box.draw(screen)\n self._active_sprites.draw(screen)\n self._player_hud_sprites.draw(screen)\n\n self._command_notification.draw(screen)\n\n self._permission_prompt.draw(screen)\n self._knockdown_prompt.draw(screen)\n self._victim_lost_prompt.draw(screen)\n self._victim_saved_prompt.draw(screen)\n self._dodge_prompt.draw(screen)\n\n if self._menu and not self._menu.is_closed:\n self._menu.draw(screen)", "title": "" }, { "docid": "6bd068a3eab109903ac94a795b5ba087", "score": "0.68268657", "text": "def draw(self, screen):\n\t\tpass", "title": "" }, { "docid": "2f99c5461ce479e77f909ffcc15a5104", "score": "0.68212295", "text": "def render(self):\n # Refill the screen\n self.screen.fill((22, 29, 31))\n\n # Render the snake\n for i in range(0, self.snake.len):\n self.screen.blit(\n self.snake.img, (\n self.snake.x[i], self.snake.y[i]\n )\n )\n self.screen.blit(\n self.snake.bord1, (\n self.snake.x[i], self.snake.y[i]\n )\n )\n self.screen.blit(\n self.snake.bord2, (\n self.snake.x[i], self.snake.y[i]\n )\n )\n # Render the food\n self.screen.blit(self.food.img, self.food.pos)\n # Render the score\n txt = f'Score: {self.score}'\n t = self.f.render(txt, True, (255, 255, 255))\n self.screen.blit(t, (630, 10))\n\n txt = f'Episode: {self.episode} Survival: {self.step_ctr}'\n t = self.f.render(txt, True, (255, 255, 255))\n self.screen.blit(t, (630, 30))\n\n txt = (f'Epsilon: {round(self.eps, 3)} Explore: {self.explore_ctr}'\n f' Exploit: {self.exploit_ctr}')\n t = self.f.render(txt, True, (255, 255, 255))\n self.screen.blit(t, (630, 50))\n\n # Border\n # Left bar\n # pylint: disable=too-many-function-args\n bord = pygame.Surface((10, H))\n bord.fill((255, 255, 255))\n self.screen.blit(bord, (0, 0))\n # Right bar\n bord = pygame.Surface((10, H))\n bord.fill((255, 255, 255))\n self.screen.blit(bord, (W-10, 0))\n # Up bar\n bord = 
pygame.Surface((W, 10))\n bord.fill((255, 255, 255))\n self.screen.blit(bord, (0, 0))\n # Down bar\n bord = pygame.Surface((W, 10))\n bord.fill((255, 255, 255))\n self.screen.blit(bord, (0, H-10))\n\n # Plot metrics\n if self.train:\n loss, lsize = self.stat.plotLoss()\n surf = pygame.image.fromstring(loss, lsize, 'RGB')\n\n self.screen.blit(surf, (630, 80))\n\n acc, asize = self.stat.plotAccuracy()\n surf = pygame.image.fromstring(acc, asize, 'RGB')\n\n self.screen.blit(surf, (630, 350))\n \n # Plot sample network\n else:\n for i in range(len(self.state)):\n y = self.state[i]\n color = (72*(1-y)+255*y, 156*(1-y)+255*y, 81*(1-y)+255*y)\n pygame.draw.circle(self.screen, color, (670, 120+40*i), 14)\n\n for i in range(12):\n pygame.draw.circle(\n self.screen, (255, 255, 255), (820, 100+40*i), 14)\n\n for i in range(4):\n if i == self.action:\n y = 0\n else:\n y = 1\n color = (72*(1-y)+255*y, 156*(1-y)+255*y, 81*(1-y)+255*y)\n pygame.draw.circle(self.screen, color, (970, 260+40*i), 14)\n\n for i in range(len(self.state)):\n for j in range(12):\n pygame.draw.line(self.screen, (255, 255, 255),\n (670+15, 120+40*i), (820-15, 100+40*j), 1)\n for k in range(4):\n pygame.draw.line(\n self.screen, (255, 255, 255), \n (820+15, 100+40*j), (970-15, 260+40*k), 1\n )\n\n pygame.display.update()", "title": "" }, { "docid": "a7e5153c4451003003c9c46d0c112225", "score": "0.6813945", "text": "def draw(self):\n\n self.user_inteface.render(self.current_loop.get_sprites())", "title": "" }, { "docid": "b4e78bce594e7fa10f58949e61d94b69", "score": "0.6802593", "text": "def draw(self):\n self.screen.blit(self.game_window, self.rect)\n\n self.bg.draw()\n self.ship.draw()\n for pipe in self.pipes:\n pipe.draw()\n self.hourglass.draw()\n\n text_font = pygame.font.Font(font[\"bradbunr\"], 25)\n\n string1 = \"Score {0} : {1}\".format(self.username, self.ship.score)\n textSurface, textRect = createTextObj(string1, text_font)\n self.game_window.blit(textSurface, textRect)\n\n if not self.solo:\n x_rect_split = self.game_window_width if self.is_left else 0\n pygame.draw.rect(self.game_window, colors[\"black\"], (x_rect_split, 0, 3, self.game_window_height))", "title": "" }, { "docid": "eaf8ddfe64de1a5b3499c52e81e58ea3", "score": "0.6800561", "text": "def render(self):\n self._canvas.update()", "title": "" }, { "docid": "7ba0cd7e46103ddbc028cb9ca49f71d8", "score": "0.6795805", "text": "def render( self):\n\t\tself.board.fill( TEST_BG)\n#\t\tGame._window.draw()\n\t\tself._window.fill( BACKGROUND)\n\n\t\timage = pygame.Surface( (20, 20))\n\t\trect = image.get_rect()\n\n\t\tfor row in range( self.numRows):\n\t\t\tfor col in range( self.numCols):\n\t\t\t\tif self.grids[ self.activeGrid][row][col] == 1:\n\t\t\t\t\tstate = COLOR_ALIVE\n\t\t\t\t\timage.fill( (TEST_BG))\n#\t\t\t\t\tpygame.draw.circle( image, (COLOR_ALIVE), \n#\t\t\t\t\t\t\t\t\t(int( col * 16),\n#\t\t\t\t\t\t\t\t\tint( row * 16)),\n#\t\t\t\t\t\t\t\t\tint( 15)\n#\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tstate = COLOR_DEAD\n\t\t\t\t\timage.fill( (TEST_BG))\n#\t\t\t\tpygame.draw.circle( self.image, (COLOR_ALIVE), (10, 10), 11)\n\t\t\t\tpygame.draw.circle( self._window, state, \n\t\t\t\t\t\t\t\t\t(int( col * 16),\n\t\t\t\t\t\t\t\t\tint( row * 16)),\n\t\t\t\t\t\t\t\t\tint( 15)\n\t\t\t\t)\n#\t\tself._window.blit( self.board, (self.pos.x, self.pos.y))\n\t\tself.board.blit( image, (self.gridX*20, self.gridY*20))", "title": "" }, { "docid": "3168044341e12e910e57ad3433b19006", "score": "0.679264", "text": "def on_draw(self):\n\n # clear the screen to begin drawing\n 
arcade.start_render()\n\n # Draws different things depending on state of game\n\n # Draws instructions at start of game\n if self.current_state == INSTRUCTIONS:\n self.draw_instructions()\n\n # Draws the game\n if self.current_state == GAME_RUNNING:\n # draw each object\n self.rifle.draw()\n for bullet in self.bullets:\n bullet.draw()\n for target in self.targets:\n target.draw()\n for power_up in self.power_ups:\n power_up.draw()\n for one_up in self.one_ups:\n one_up.draw()\n self.draw_score()\n self.draw_lives()\n\n # Completes transitional actions before game over.\n elif self.current_state == TRANSITION:\n self.final_scores.append(self.score)\n self.get_final_value()\n\n # Displays game over screen\n elif self.current_state == GAME_OVER:\n self.draw_final()", "title": "" }, { "docid": "f264f11b57a9550eb08ea4a6e0282e39", "score": "0.67764986", "text": "def render(self):\n for w in self.subwins:\n self.buffer.bitblt(w.get_blit())\n\n self.screen.blit(self.buffer.surf, (0, 0))\n pygame.display.flip()", "title": "" }, { "docid": "687bda9b70784b8d525f11b08cb5a41a", "score": "0.676757", "text": "def draw(self):\n self.screen.fill(pygame.Color('white'))\n\n for j in self.model.chessboard:\n for r in j:\n pygame.draw.rect(self.screen, pygame.Color('black'), r, 1)\n self.color_even()\n\n pygame.display.update()", "title": "" }, { "docid": "78d6df7812d38a8c06354d7fff02873f", "score": "0.67610157", "text": "def render(self):\n time.sleep(0.1)\n self.window.update()", "title": "" }, { "docid": "1cff2f6240a0aab4f988df83bb1440e4", "score": "0.67500454", "text": "def draw(self):\n # If front_color defined, draw front bar as percentage of back bar\n if self._front_color and self.background:\n if self.stat > 0:\n stat_width = int(self._width * self.stat // self.start_stat)\n else:\n stat_width = 0\n pygame.draw.rect(\n gameDisplay, self._background_color,\n (self.x, self.y, self._width, self._height))\n pygame.draw.rect(\n gameDisplay, self._front_color,\n (self.x, self.y, stat_width, self._height))\n # If no background color\n elif self.background and not self._front_color:\n pygame.draw.rect(\n gameDisplay, self._background_color,\n (self.x, self.y, self._width, self._height))\n # Write text\n self.set_text()", "title": "" }, { "docid": "b7f184786a2859cece1eb943f5babc09", "score": "0.67475635", "text": "def render(self):\n self._display_surf.fill((32, 32, 32))\n self.draw_surface()\n self.draw_item()", "title": "" }, { "docid": "5ddc657100822cf072a3866fbf929867", "score": "0.6737831", "text": "def draw(self, interpolate):\n if not self.state_machine.state.done:\n self.state_machine.draw(self.screen, interpolate)\n pg.display.flip()", "title": "" }, { "docid": "3cebf1a6b301369f94301940c0603289", "score": "0.67306656", "text": "def Draw(self, screen):\n self.ui.Draw(screen)", "title": "" }, { "docid": "da797aa90703bf4d9ba1e281f6385727", "score": "0.672446", "text": "def render(self,screen):\r\n self.Tela.render(screen)\r\n pygame.display.flip()", "title": "" }, { "docid": "42facab9a8cec1d66332a893b961e467", "score": "0.67244357", "text": "def draw(self):\n pygame.draw.rect(self.screen, colors[self.color], [self.start, 20, self.bars, 10])", "title": "" }, { "docid": "4d48ee7ff6e9821f8ceac22fb215ee2b", "score": "0.6719924", "text": "def draw(self):\n self.screen.fill(pygame.Color(255,255,255))\n for wall in self.model.myWalls:\n self.drawWall(wall)\n self.drawRobot(self.model.robot)\n \n pygame.display.update()", "title": "" }, { "docid": "c3908faaa0ec381232ae9bfcc19b6c7a", "score": "0.6718701", "text": 
"def updateState(self):\n pygame.event.pump()", "title": "" }, { "docid": "60c9bbd77acabc61da64482375589107", "score": "0.6715662", "text": "def draw(self):\n if not self.screen:\n return\n\n pygame.draw.rect(self.screen, self.color,\n (self.pos[0], self.pos[1], self.size[0], self.size[1]), 0)\n\n for child in self.children:\n if child.is_active:\n child.draw()", "title": "" }, { "docid": "3ba3fcc3483726b6c4c1042ab10af1a5", "score": "0.66876274", "text": "def _update_display(self):\r\n\r\n # Fill the background with color.\r\n self.screen.fill(self.settings.bgcolor)\r\n \r\n # Display Bullets.\r\n for bullet in self.bullets.sprites():\r\n bullet.display_bullets()\r\n \r\n # Display Ship and target.\r\n self.ship.display_ship()\r\n self.target.display_target()\r\n \r\n self._display_menus_buttons()\r\n \r\n # if game not active display play button.\r\n if not self.stats.game_active:\r\n self.play_button.display_button()\r\n \r\n # Continiously update the screen.\r\n pygame.display.flip()", "title": "" }, { "docid": "65c55d17bd84700564665bb4ad17ed4f", "score": "0.6687364", "text": "def update(self):\n if self.limited_update:\n self.window.setActive(True)\n self.gameEngine.graphicsEngine.render_frame()\n self.window.setActive(False)\n else:\n pass", "title": "" }, { "docid": "075f985f604df009b8bd96822ee22be1", "score": "0.66791236", "text": "def render(self):\n if self.viewer:\n # check for window closed\n if self.gym.query_viewer_has_closed(self.viewer):\n sys.exit()\n\n # check for keyboard events\n for evt in self.gym.query_viewer_action_events(self.viewer):\n if evt.action == \"QUIT\" and evt.value > 0:\n sys.exit()\n elif evt.action == \"toggle_viewer_sync\" and evt.value > 0:\n self.enable_viewer_sync = not self.enable_viewer_sync\n\n # fetch results\n if self.device != 'cpu':\n self.gym.fetch_results(self.sim, True)\n\n # step graphics\n if self.enable_viewer_sync:\n self.gym.step_graphics(self.sim)\n self.gym.draw_viewer(self.viewer, self.sim, True)\n\n # Wait for dt to elapse in real time.\n # This synchronizes the physics simulation with the rendering rate.\n self.gym.sync_frame_time(self.sim)\n\n else:\n self.gym.poll_viewer_events(self.viewer)", "title": "" }, { "docid": "3c3b55ef55c914ff09b2a34594b9b000", "score": "0.6664911", "text": "def draw(self):\n self.screen.blit(self.image, self.rect)", "title": "" }, { "docid": "3c3b55ef55c914ff09b2a34594b9b000", "score": "0.6664911", "text": "def draw(self):\n self.screen.blit(self.image, self.rect)", "title": "" }, { "docid": "cbb4a8de4a200e755db1f25a049434a4", "score": "0.66531336", "text": "def draw(self):\n self.screen.fill(pygame.Color(0,0,0))\n\n pygame.draw.rect(self.screen,\n pygame.Color(255, 0, 0),\n pygame.Rect(self.model.paddle.x,\n self.model.paddle.y,\n self.model.paddle.width,\n self.model.paddle.height))\n pygame.draw.rect(self.screen,\n pygame.Color(255, 255, 255),\n pygame.Rect(self.model.paddle2.x,\n self.model.paddle2.y,\n self.model.paddle2.width,\n self.model.paddle2.height))\n pygame.draw.rect(self.screen,\n pygame.Color(255, 255, 255),\n pygame.Rect(self.model.puck.height,\n self.model.puck.w,\n self.model.puck.x,\n self.model.puck.y))\n\n pygame.display.update()", "title": "" }, { "docid": "27cef9dda69c94d9b991c13b06aa732b", "score": "0.6630577", "text": "def update(self):\n self.screen.update()", "title": "" }, { "docid": "3eb75758e7b3f5336821164453fe8061", "score": "0.6626294", "text": "def on_draw(self):\n self.game_view.on_draw()\n\n arcade.draw_lrtb_rectangle_filled(\n left=0,\n right=SCREEN_WIDTH,\n 
top=SCREEN_HEIGHT,\n bottom=0,\n color=self.fill_color,\n )\n\n current_score = f\"Current score: {self.score:9}\"\n arcade.draw_text(\n current_score,\n SCREEN_WIDTH / 2,\n SCREEN_HEIGHT / 2 + 250,\n arcade.color.BLACK,\n font_size=20,\n anchor_x=\"center\",\n )\n arcade.draw_text(\n \"Press P to resume\",\n SCREEN_WIDTH / 2,\n SCREEN_HEIGHT / 2 + 200,\n arcade.color.BLACK,\n font_size=20,\n anchor_x=\"center\",\n )\n arcade.draw_text(\n \"Press ESC to quit\",\n SCREEN_WIDTH / 2,\n SCREEN_HEIGHT / 2 + 150,\n arcade.color.BLACK,\n font_size=20,\n anchor_x=\"center\",\n )\n\n self.manager.draw()", "title": "" }, { "docid": "431fc9267882e358b61bca0ad925ee9b", "score": "0.6621476", "text": "def refresh(self):\n self.display.blit(self.background, (0,0))\n if self.foreground:\n self.display.blit(self.foreground, (0,0))\n if pygame.display.get_init(): pygame.display.flip()", "title": "" }, { "docid": "8428e93bfc2f58dcde217191b95c831a", "score": "0.662143", "text": "def draw_screen(self, game_state):\n # Get reference to the Actors\n ball = game_state.data['actors']['ball']\n paddle = game_state.data['actors']['player1']\n paddle2 = game_state.data['actors']['player2']\n\n # Convert PIL image strings to pygame images\n ball_image = pygame.image.fromstring(game_state.data['assets'][ball.image], (ball.rect.width, ball.rect.height), 'RGBA')\n paddle_image = pygame.image.fromstring(game_state.data['assets'][paddle.image], (paddle.rect.width, paddle.rect.height), 'RGBA')\n paddle2_image = pygame.image.fromstring(game_state.data['assets'][paddle2.image], (paddle.rect.width, paddle.rect.height), 'RGBA')\n\n # Draw the screen.\n self.screen.fill([0, 0, 0])\n self.screen.blit(ball_image, ball.rect)\n self.screen.blit(paddle_image, paddle.rect)\n self.screen.blit(paddle2_image, paddle2.rect)\n\n pygame.display.update()\n #print 'Drawing screen'", "title": "" }, { "docid": "4d8f949c66cd255e39f3c88c74208c65", "score": "0.6610244", "text": "def draw(self):\n pygame.draw.rect(self.screen, colors['WHITE'], [self.play_x, self.play_y, self.play_len, self.play_len])", "title": "" }, { "docid": "8b7eb8423e19f44a797cb531540cafbf", "score": "0.6608904", "text": "def render(self):\n self.screen.blit(self.img, (self.x, self.y))", "title": "" }, { "docid": "7eeee96c419a34ac571663e2f9d89721", "score": "0.66013324", "text": "def draw(self):\n # Clear the effects surface.\n self.fx_surface.fill(pygame.Color(0, 0, 0, 0))\n\n # Draw the map\n self.map.draw(self.game_surface)\n\n # Draw the enemies\n self.enemy_mgr.draw(self.game_surface)\n\n # Draw the towers\n self.tower_mgr.draw(self.game_surface, self.fx_surface)\n\n # Draw effects\n self.fx_mgr.draw(self.game_surface, self.fx_surface, self.ui_surface)\n\n # Clear the UI surface to transparent and then draw the UI\n self.ui_surface.fill(pygame.Color(0, 0, 0, 0))\n self.ui.draw(self.ui_surface, self.game_surface, self.fx_surface)\n\n # Blit the effects surface onto the game surface.\n self.game_surface.blit(self.fx_surface, (0, 0))\n\n # Now, we draw the game and UI surfaces onto the screen.\n self.screen_surface.blit(pygame.transform.scale(self.game_surface, self.camera.getSurfaceSize()), self.camera.getSurfacePos())\n\n self.screen_surface.blit(self.ui_surface, (0, 0))", "title": "" }, { "docid": "b72093dff34a8873ab04afb0be2a5588", "score": "0.6598423", "text": "def on_draw(self):\n # clear the screen to begin drawing\n arcade.start_render()\n\n if self.current_state == QUIT:\n raise EndGame()\n\n elif self.quit:\n self.draw_quit()\n self.game_over_player.pause()\n\n 
else:\n if self.current_state == INSTRUCTIONS:\n self.draw_instructions()\n\n if self.current_state == GAME_RUNNING:\n arcade.draw_texture_rectangle(self.screen_width // 2, self.screen_height // 2,\n self.screen_width, self.screen_height, self.background)\n\n if self.play_sound:\n self.game_over_player.pause()\n self.media_player.seek(0)\n self.media_player.play()\n self.play_sound = False\n\n if self.play_power_up:\n if not self.power_up[0] and not self.power_up[1]:\n self.power_up_player.pause()\n self.media_player.play()\n self.play_power_up = False\n\n if not self.play_power_up:\n if self.power_up[0] or self.power_up[1]:\n self.media_player.pause()\n self.power_up_player.seek(0)\n self.power_up_player.play()\n self.play_power_up = True\n\n if not self.hide_hud:\n self.draw_score()\n self.draw_health()\n self.draw_speed()\n\n for player in self.ship:\n if self.ship[player].alive:\n self.ship[player].draw()\n\n for bullet in self.bullets:\n bullet.draw()\n\n for asteroid in self.asteroids:\n asteroid.draw()\n\n for enemy in self.enemies:\n enemy.draw()\n\n for power in self.power_ups:\n power.draw()\n\n if self.pause:\n if not self.power_up[0] and not self.power_up[1]:\n self.media_player.pause()\n else:\n self.power_up_player.pause()\n self.draw_pause()\n\n elif self.current_state == TRANSITION:\n self.final_score[P1] = self.score[P1]\n self.final_score[P2] = self.score[P2]\n self.get_final_value()\n self.play_game_over = True\n\n elif self.current_state == GAME_OVER:\n if self.play_game_over:\n self.game_over_player.seek(0)\n self.media_player.pause()\n self.game_over_player.play()\n self.play_game_over = False\n self.draw_final()\n\n if self.mute:\n self.draw_mute()", "title": "" }, { "docid": "dc0989774ffb37aeeed717137cb5d66f", "score": "0.65866214", "text": "def update(self, world):\n self.draw(world)\n if self.render:\n pygame.display.update()", "title": "" }, { "docid": "ecf43b40d4ea4b14d918d279e5e0e1a6", "score": "0.65738285", "text": "def draw(self):\n self.screen.blit(Assets.background, (self.draw_offset[0], self.draw_offset[1]))\n\n self.screen.blit(Assets.border, (self.draw_offset[0], self.draw_offset[1]))\n self.screen.blit(Assets.border, (self.draw_offset[0] + LEVEL_WIDTH - PLAYFIELD_PADDING[0],\n self.draw_offset[1]))\n if self.blocks_surface_dirty:\n self.blocks_surface = Surface((LEVEL_WIDTH, LEVEL_HEIGHT), SRCALPHA, 32)\n self.blocks_surface = self.blocks_surface.convert_alpha()\n self.blocks_surface_dirty = False\n for row in self.blocks:\n for block in row:\n if block is not None and not block.dead:\n block.draw(self.blocks_surface)\n self.screen.blit(self.blocks_surface, self.draw_offset)\n self.paddle.draw(self.screen, self.draw_offset)\n\n if not self.ball.dead:\n self.ball.draw(self.screen, self.draw_offset)\n\n # draw entities\n for entity in self.entities:\n if not entity.dead:\n entity.draw(self.screen, self.draw_offset)\n\n # draw upper bar\n draw.rect(self.screen, (0, 0, 0), (self.draw_offset[0] + PLAYFIELD_PADDING[0], self.draw_offset[1],\n LEVEL_WIDTH - PLAYFIELD_PADDING[0] * 2, PLAYFIELD_PADDING[1]))\n\n self.screen.blit(self.score_label,\n (self.draw_offset[0] + PLAYFIELD_PADDING[0] + 10, self.draw_offset[1]))\n self.screen.blit(self.lives_label,\n (self.draw_offset[0] + PLAYFIELD_PADDING[0] + 150, self.draw_offset[1]))\n self.screen.blit(self.level_label,\n (self.draw_offset[0] + LEVEL_WIDTH - 100, self.draw_offset[1]))", "title": "" }, { "docid": "d8bc0ef71abccb09f71e4c282197b180", "score": "0.6562551", "text": "def draw_game(self):\n for y in 
range(self.settings.block_num):\n for x in range(self.settings.block_num):\n # Make a rectangle of given size at given position\n rect = pygame.Rect(x*(self.settings.block_side + 1),\n y*(self.settings.block_side + 1),\n self.settings.block_side,\n self.settings.block_side)\n # Draw it with the appropriate color\n if (self._get_status(x, y)):\n pygame.draw.rect(self.screen,\n self.settings.alive_color, rect)\n else:\n pygame.draw.rect(self.screen,\n self.settings.dead_color, rect)", "title": "" }, { "docid": "1914075809d22f12a580337c23137a79", "score": "0.65533817", "text": "def draw(self):\n self.sprite.draw()", "title": "" }, { "docid": "a08bd6ce14ffbeae4163a823a81b4257", "score": "0.6550216", "text": "def render(self, mode='human', close=False):\n if visualize:\n print(\"current state: {}\".format(self.state))\n print(\"Action: {}\".format(self.action_tuples[self.action]))\n print(\"{} = d_reward: {} + r_reward: {}\".format(self.reward, self.d_reward, self.r_reward))", "title": "" }, { "docid": "2da74b7f556c4c766b2b3e2703877d7a", "score": "0.65500206", "text": "def draw(self, screen):\n if self.visible:\n screen.blit(self.image, self.rect)", "title": "" }, { "docid": "d0b4090b426192b105bfcdb83cf7c21c", "score": "0.65375537", "text": "def draw(self):\n\n # draw the grid \n self.draw_grid()\n\n # draw the background grass\n self.screenimage = pygame.image.load(\"src/img/grass2.png\")\n self.rect = self.screenimage.get_rect()\n screen.blit(self.screenimage, self.rect)\n \n\n # draw sprites\n self.all_sprites.draw(self.screen)\n \n # dialogue\n self.Isabelle_talk()\n \n # fruit collection function\n self.collect_fruit()\n\n # flip everything once at the very end\n pygame.display.flip()", "title": "" }, { "docid": "02b36eb3139d66b9bf5d2c981c0d7aab", "score": "0.6524062", "text": "def draw(self):\r\n self.screen.blit(self.bckimg, self.rect)\r\n self.wall_list.draw(self.screen)\r\n self.door_list.draw(self.screen)\r\n self.coin_list.draw(self.screen)", "title": "" }, { "docid": "44b933c2f6e8f51e06d7c67b90f6e214", "score": "0.6523249", "text": "def render(self, screen):\n screen.fill((255, 255, 255))\n screen.blit(self.resources['background'], (0, 0))\n w, h = pygame.display.get_surface().get_size()\n self.render_centered(screen, w, h, self.resources['title'])\n if self.wait_tick <= 0:\n img_w, img_h = self.resources['anykey'].get_rect().size\n screen.blit(self.resources['anykey'],\n ((w - img_w) // 2, (h - img_h)))\n return None", "title": "" }, { "docid": "179504d0c66b964cc55e20f70bdf8150", "score": "0.65179896", "text": "def render(self):\n label = self.font.render(str(self.value), 1, (255, 255, 255))\n lose = self.font.render('YOU LOSE', 1, (255, 0, 0))\n self.screen.blit(label, (self.pos[0], self.pos[1]))\n if(self.dead):\n self.screen.blit(lose, (500, 500))", "title": "" }, { "docid": "2e35945b4c5aa67e9d0d668be2bf1cac", "score": "0.6512101", "text": "def draw(self):\n text = self.font.render(self.prompt+self._value, 1, self.fgcolor)\n self.screen.blit(text, (self.x, self.y))", "title": "" } ]
904764c2259b3ba951b292527cd2236d
Returns an sdf object with center in the world frame of reference
[ { "docid": "a3e19fbe645b06d7e0165558833d8e2f", "score": "0.0", "text": "def transform_to_world(self):\n return self.transform(self.pose_, scale=self.scale_)", "title": "" } ]
[ { "docid": "4eca910060e666cb12d863fec6f2d3cc", "score": "0.6257024", "text": "def center_world(self):\n return self.transform_pt_grid_to_obj(self.center_)", "title": "" }, { "docid": "d705a42fbaf32da90b68f8f2ddea5cc9", "score": "0.5883293", "text": "def getReferencePoint(self):\n return self._center", "title": "" }, { "docid": "61f31fa53a3eaa78837727fbb6682b37", "score": "0.5856669", "text": "def world(self):\n return self._snapshot.world", "title": "" }, { "docid": "1feb6dcc6e2cacdd2e10bb2b97824256", "score": "0.5803188", "text": "def center_geo(self) -> Vertex:\n v = Vertex()\n\n x = self.get_axis_extremity(x=False).x\n y = self.get_axis_extremity(y=False).y\n z = self.get_axis_extremity(z=False).z\n\n size = self.size\n size.divide(2)\n\n v.set(x, y, z)\n v.move(*size.export())\n\n return v", "title": "" }, { "docid": "716dab9f808d0982528e858a7092ef7e", "score": "0.5739398", "text": "def center_skydir(self):\n if self.region is None:\n return SkyCoord(np.nan * u.deg, np.nan * u.deg)\n\n return self._rectangle_bbox.center", "title": "" }, { "docid": "a02bc5b316f89af746ba9ebb1541e797", "score": "0.57126266", "text": "def getCenter(self):\n return self.getCenter()", "title": "" }, { "docid": "783fb805b70a8956dee3f663f8e78246", "score": "0.57057196", "text": "def center(self):\n return self.center_", "title": "" }, { "docid": "a41051e1e30c897359ec4e470137c440", "score": "0.56798625", "text": "def center(self):\n return {'x': wcs.get_center(self.shape[1], self.scale['x'],\n self.reference_pixel['x'],\n self.reference_coordinate['x']),\n 'y': wcs.get_center(self.shape[2], self.scale['y'],\n self.reference_pixel['y'],\n self.reference_coordinate['y']),}", "title": "" }, { "docid": "4d59fcb77d7e917fa60336930c6221be", "score": "0.5663897", "text": "def getCenter(self):\n return self.center", "title": "" }, { "docid": "ab4ba13bc2f00b7596b96b2e5cd17ebe", "score": "0.56637394", "text": "def PointToWorld(self):\r\n pass", "title": "" }, { "docid": "9355fe2c1e137243af533e7b5b21abac", "score": "0.5633549", "text": "def _getCenter(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "aafd4333df7982bad393b3d34f095c9f", "score": "0.56292707", "text": "def gs_object_to_frame(name, obj, is_root=False):\n # Camera has a different frame in GazeSense\n droll = 180\n dpitch = 0\n dyaw = 0\n if name.startswith(GS_CAMERA):\n droll = 0\n dpitch = 0\n dyaw = 180\n origin = np.array([0.0, 0.0, 0.0])\n else:\n # We need to know the origin (top left corner of screen) of\n # the frame\n w, h = obj[\"Size\"][:2]\n origin = np.array([-w / 2, h / 2, 0, 1]).reshape(4, 1)\n\n rotation = tf.transformations.euler_matrix(\n math.radians(obj[\"Rotation\"][0]),\n math.radians(obj[\"Rotation\"][1]),\n math.radians(obj[\"Rotation\"][2]),\n axes=\"sxyz\",\n )\n origin = np.matmul(rotation, origin)\n\n t = TransformStamped()\n\n t.header.stamp = rospy.Time.now()\n\n t.transform.translation.x = origin[0] + obj[\"Center\"][0]\n t.transform.translation.y = origin[1] + obj[\"Center\"][1]\n t.transform.translation.z = origin[2] + obj[\"Center\"][2]\n\n euler = [math.radians(a) for a in obj[\"Rotation\"]]\n quat = tf.transformations.quaternion_from_euler(*euler)\n\n rot = tf.transformations.quaternion_from_euler(math.radians(droll), math.radians(dpitch), math.radians(dyaw),)\n quat = tf.transformations.quaternion_multiply(quat, rot)\n\n if is_root:\n # We know the transformation world -> object, but we need\n # object -> world, because we want the object at the root of\n # the TF tree\n t.header.frame_id = name\n 
t.child_frame_id = GS_WORLD\n hom = tf.transformations.quaternion_matrix(quat)\n quat = tf.transformations.quaternion_from_matrix(hom.transpose())\n T = np.array([t.transform.translation.x, t.transform.translation.y, t.transform.translation.z,]).reshape(-1, 1)\n d = -np.matmul(hom[0:3, 0:3], T).squeeze()\n t.transform.translation.x = d[0]\n t.transform.translation.y = d[1]\n t.transform.translation.z = d[2]\n else:\n t.header.frame_id = GS_WORLD\n t.child_frame_id = name\n\n t.transform.rotation.x = quat[0]\n t.transform.rotation.y = quat[1]\n t.transform.rotation.z = quat[2]\n t.transform.rotation.w = quat[3]\n return t", "title": "" }, { "docid": "65edf11463cfa3690a9c5f87b4a5a765", "score": "0.56235886", "text": "def testCenter(self):\n circle = Workplane(\"XY\").circle(10.0)\n cylinder = circle.extrude(10.0)\n\n self.assertTupleAlmostEquals((0.0, 0.0, 0.0), circle.val().Center().toTuple(), 3)\n self.assertTupleAlmostEquals((0.0, 0.0, 5.0), cylinder.val().Center().toTuple(), 3)", "title": "" }, { "docid": "2978a873b7282c96be3fbf0ef5ab4011", "score": "0.5605248", "text": "def position(sphere):\n return sphere[:3].copy()", "title": "" }, { "docid": "e282a1d5f51d0f5e50d837fde162979c", "score": "0.55976045", "text": "def to_camera(self):\n if self.world_cam is None:\n return self.to_xyzwhlr()[[1, 2, 0, 4, 5, 3, 6]]\n\n bbox = np.zeros((7,))\n # In camera space, we define center as center of the bottom face of bounding box.\n bbox[0:3] = self.center - [0, 0, self.size[1] / 2]\n # Transform center to camera frame of reference.\n bbox[0:3] = (np.array([*bbox[0:3], 1.0]) @ self.world_cam)[:3]\n bbox[3:6] = [self.size[1], self.size[0], self.size[2]] # h, w, l\n bbox[6] = self.yaw\n return bbox", "title": "" }, { "docid": "d6da15f11e22022cc7fbc0d45d4584ff", "score": "0.55956894", "text": "def get_centered_molecule(self):\n center = self.center_of_mass\n new_coords = np.array(self.cart_coords) - center\n return self.__class__(self.species_and_occu, new_coords,\n charge=self._charge,\n spin_multiplicity=self._spin_multiplicity,\n site_properties=self.site_properties)", "title": "" }, { "docid": "35e24098a96745d09279d454397a6b1b", "score": "0.55615866", "text": "def center(self):\n cl = self.coordsLocal\n return (cl[0] + cl[2] * 0.5, cl[1] + cl[3] * 0.5)", "title": "" }, { "docid": "87da15d544dc88b9e62e6562de769235", "score": "0.55460185", "text": "def getQuadCenter(self):\n #return self._quadcenter[:]\n pass", "title": "" }, { "docid": "bbc6565263c18ad90eac45fc1b562a71", "score": "0.55421513", "text": "def getReferencePoint(self):\n return self._face.getCenter()", "title": "" }, { "docid": "828325c0183b27f4021412ea5b8d2473", "score": "0.5532688", "text": "def updateReference(self):\n \n # The node is the center of a local bench mark.\n relative_position = Function.relativePosition(self.position, globalvars.me.position)\n self.local_position = [ long(relative_position[0] - globalvars.me.position[0]), long(relative_position[1] - globalvars.me.position[1]) ]", "title": "" }, { "docid": "2aebca70dd297a7c4386e289affa460a", "score": "0.55307776", "text": "def center(self):\n return self.origin + self.size / 2", "title": "" }, { "docid": "3c33f945dfe8895a43218a74bb126526", "score": "0.5507521", "text": "def center(self):\r\n if self._center is None:\r\n # we can use a cache on our accessor objects, because accessors\r\n # themselves are cached on instances that access them.\r\n lat, lon = self.latlon\r\n self._center = (float(np.nanmean(lon)), float(np.nanmean(lat)))\r\n return self._center", "title": "" 
}, { "docid": "a4e247c16ef63ea9c4f0089816412fae", "score": "0.54962957", "text": "def getSphereCentre(self):\n if self.ref_atoms is None:\n self.raiseError(\"No reference atoms defined, cannot get sphere coordinates...\")\n\n # Calculate the mean coordinate\n self.sphere_centre = np.zeros(3) * unit.nanometers\n for i, atom in enumerate(self.ref_atoms):\n # Need to add on a correction in case the atoms get separated\n correction = np.zeros(3) * unit.nanometers\n if i != 0:\n # Vector from the first reference atom\n vec = self.positions[self.ref_atoms[0]] - self.positions[atom]\n # Correct for PBCs\n for j in range(3):\n if vec[j] > 0.5 * self.simulation_box[j]:\n correction[j] = self.simulation_box[j]\n elif vec[j] < -0.5 * self.simulation_box[j]:\n correction[j] = -self.simulation_box[j]\n\n # Add vector and correction onto the running sum\n self.sphere_centre += self.positions[atom] + correction\n\n # Calculate the average coordinate\n self.sphere_centre /= len(self.ref_atoms)\n\n return None", "title": "" }, { "docid": "1964b0428a7002c6d36dcc50623ad04d", "score": "0.5487019", "text": "def getWorldCoordinates(self):\r\n skel=dict()\r\n for key in self.joins.keys():\r\n skel[key]=self.joins[key][0]\r\n return skel", "title": "" }, { "docid": "7ae1454d4a4aef229c23180dcb6b2daa", "score": "0.546034", "text": "def frame(self):\n sc = self.table\n components = tuple(getattr(sc.data, comp) for comp in sc.data.components)\n ref_frame = sc.frame.replicate_without_data()\n units = list(c.unit for c in components)\n\n # TODO: Currently this limits you to 2D due to gwcs#120\n return cf.CelestialFrame(reference_frame=ref_frame,\n unit=units,\n axes_names=self.names,\n axis_physical_types=self.physical_types,\n name=\"CelestialFrame\")", "title": "" }, { "docid": "d619e525fe1e798d0383acb9d8057b7e", "score": "0.54536796", "text": "def get_coord(self, mode=\"center\", frame=None, sparse=False, axis_name=None):\n if mode == \"edges\" and axis_name is None:\n raise ValueError(\"Mode 'edges' requires axis name\")\n\n coords = self.axes.get_coord(mode=mode, axis_name=axis_name)\n coords[\"skycoord\"] = self.center_skydir.reshape((1, 1))\n\n if frame is None:\n frame = self.frame\n\n coords = MapCoord.create(coords, frame=self.frame).to_frame(frame)\n\n if not sparse:\n coords = coords.broadcasted\n\n return coords", "title": "" }, { "docid": "ac0f4e7036ab88b35fdd5fe4f9cabffd", "score": "0.5448592", "text": "def compute_center():\n return _center", "title": "" }, { "docid": "8c86d8b6d0119cb4ec9f8b11577d2b5d", "score": "0.5440145", "text": "def getWorldCoordinates(self):\n skel=dict()\n for key in self.joins.keys():\n skel[key]=self.joins[key][0]\n return skel", "title": "" }, { "docid": "676596495194f3249ba010efa1b72cb9", "score": "0.54218245", "text": "def spatialReference(self):\n if self._spatialReference is None:\n self.init()\n return self._spatialReference", "title": "" }, { "docid": "ed2fb51b3993a4c7b4338fc85ee75180", "score": "0.5404585", "text": "def _create_reference_geometry(self, region, center):\n frame = region.center.frame.name\n\n # width is the full width of an image (not the radius)\n width = 4 * region.center.separation(center) + Angle(\"0.3 deg\")\n\n return WcsGeom.create(\n skydir=center, binsz=self.binsz, width=width, frame=frame, proj=\"TAN\"\n )", "title": "" }, { "docid": "b7ab3e5362e8b6614847f9a611aa7e91", "score": "0.53850234", "text": "def default() -> \"WorldCRS\":\n return WorldCRS.EQUIRECTANGULAR", "title": "" }, { "docid": "4d604d7ab24fc8ba05af2d6440dad807", "score": 
"0.53696775", "text": "def center(self, srs=None):\n x,y = (self.xMax+self.xMin)/2, (self.yMax+self.yMin)/2\n if not srs is None:\n srs=SRS.loadSRS(srs)\n if not srs.IsSame(self.srs):\n xy = SRS.xyTransform(x, y, fromSRS=self.srs, toSRS=srs, outputFormat=\"xy\")\n\n x = xy.x\n y = xy.y\n return x,y", "title": "" }, { "docid": "ffe07647db3c65a4ce8a16cd14cc5e65", "score": "0.5358575", "text": "def create_astropy_skycoord(py_obj, h_group, name, **kwargs):\n\n lat = py_obj.data.lat.value\n lon = py_obj.data.lon.value\n dd = np.stack((lon, lat), axis=-1)\n\n d = h_group.create_dataset(name, data=dd, dtype='float64', **kwargs)\n lon_unit = str(py_obj.data.lon.unit).encode('ascii')\n lat_unit = str(py_obj.data.lat.unit).encode('ascii')\n d.attrs['lon_unit'] = lon_unit\n d.attrs['lat_unit'] = lat_unit\n return(d)", "title": "" }, { "docid": "0400f5127447f7c0c677db9e29ec2c7a", "score": "0.5353047", "text": "def _get_origin(self):\n\t\txyz = (0,0,0)\n\t\trpy = None\n\n\t\tif self.inertial:\n\t\t\txyz = self.inertial.origin.xyz\n\t\t\trpy = self.inertial.origin.rpy\n\t\telif self.collision:\n\t\t\txyz = self.collision.origin.xyz\n\t\t\trpy = self.collision.origin.rpy\n\t\telif self.visual:\n\t\t\txyz = self.visual.origin.xyz\n\t\t\trpy = self.visual.origin.rpy\n\n\t\tself.xyz = Vector(xyz)\n\t\tif rpy:\n\t\t\tself.rot = Euler(rpy, 'XYZ').to_quaternion()\n\t\telse:\n\t\t\tself.rot = Euler((0,0,0)).to_quaternion()", "title": "" }, { "docid": "9da453d7a1d5fc13bcb6373213cc1ee3", "score": "0.5352673", "text": "def make_sphere(self):\n self.render()", "title": "" }, { "docid": "9968742bc9bf7c750527230aad979784", "score": "0.53507465", "text": "def getCenter(self):\n return (self.bb_x+self.w/2,self.bb_y+self.h/2)", "title": "" }, { "docid": "2dbe0ac268a4ad983c931c0486211faa", "score": "0.5344638", "text": "def center(self):", "title": "" }, { "docid": "4236547d386bb57ec85f5fd09b522429", "score": "0.53443176", "text": "def __init__(self, centralBody):\r\n self.canvas = canvas(height=450, width=1000)\r\n self.objects = []\r\n self.centralBody = centralBody\r\n centralBody.momentum = vector(0,0,0)\r\n centralBody.pos = vector(0,0,0)", "title": "" }, { "docid": "f8095ccfb9a50a9ab674b3f29dd0d31a", "score": "0.5343932", "text": "def screen_pos_center(self):\n return camera.pos(self.pos_center())", "title": "" }, { "docid": "69ad535a8679d78e44f334c2f1c12f03", "score": "0.5334397", "text": "def get_orbital_ref_frame(self):\n self.orbital_ref_frame = self.vessel.reference_frame", "title": "" }, { "docid": "1a367d2ab05c35ae2277c07d80030faf", "score": "0.5329177", "text": "def getCenter(self):\n # Recursive call until Pattern type is\n # reached. 
Both types implement the\n #'getCenter()' method.\n return self.rule1.getCenter()", "title": "" }, { "docid": "6a70fc7945a2b69402aded993c468dd1", "score": "0.5311789", "text": "def __init__(self):\n self.origin = (0,0,0)\n self.x = (1,0,0)\n self.y = (0,1,0)\n self.z = (0,0,1)", "title": "" }, { "docid": "d9e233182729ce7e4dd37b6b6b2ebec3", "score": "0.530479", "text": "def getBoundingBoxCenter(self, *args, **kwargs):\r\n uvBB = pm.polyEvaluate(boundingBoxComponent2d=True)\r\n uvCenter=[((uvBB[0][1]+uvBB[0][0])/2),((uvBB[1][1]+uvBB[1][0])/2)]\r\n return uvCenter", "title": "" }, { "docid": "7c5dbb33aa3903ca501b13a6515a772f", "score": "0.53047884", "text": "def get_celestial_body_frame(self):\n self.celestial_body_frame = self.vessel.orbit.body.reference_frame", "title": "" }, { "docid": "273510578ad914df6aa14091842542d8", "score": "0.5301606", "text": "def updatecenter(self):\n self.centro=Vector(self.pos.x+53.0/2,self.pos.y+46.0/2)", "title": "" }, { "docid": "adc361c35ff7066943efd57e16a922f2", "score": "0.53009427", "text": "def getcenter(self):\n return self.centro.cartesianas()", "title": "" }, { "docid": "f326d8291a59ed74b2b5a39f242c5585", "score": "0.52943116", "text": "def worldXY(cls):\n return cls([0, 0, 0], [0, 0, 1])", "title": "" }, { "docid": "d7567c2c649bfe9f26268b2403cc831d", "score": "0.5283132", "text": "def get_object_xy(self, wcs):\n ras, decs = self.get_locations()\n\n # Get pixel of each location, rounding\n pixel_x, pixel_y = wcs.world_to_pixel(SkyCoord(ras, decs, unit='deg'))\n object_x, object_y = pixel_x.round().astype(int), pixel_y.round().astype(int)\n \n # add padding\n return object_x + self.pad, object_y + self.pad", "title": "" }, { "docid": "44c6398cfc154272b8050bb9a65e0561", "score": "0.527649", "text": "def iso_center(self):\n\n if len(self._iso_center) == 0:\n find_iso_center(self)\n\n return self._iso_center", "title": "" }, { "docid": "09bbb9dd066e44ea9ed65ad4b3857ff3", "score": "0.5258396", "text": "def THB_CenterFOS(self):\n box1 = thbspline.Box( ia(.0, 1.) , ia(.0, 1.) )\n box2 = thbspline.Box( ia(0., 1.) 
, ia(0., .1) ) \n bounds = thbspline.BoxList( box1,box2)\n \n self.THBhull.child.bounds = bounds\n return", "title": "" }, { "docid": "eedb67e47625053c35272f72ea9ed163", "score": "0.52459556", "text": "def worldToSurfMat(self):\n return self.__worldToSurfMat", "title": "" }, { "docid": "ec12cc04576a17c11b9fbb3b04e13e9f", "score": "0.52442676", "text": "def xyz(self):\n return self.molecule.xyz", "title": "" }, { "docid": "8622c42bd907c6880f0179c49329beec", "score": "0.52322286", "text": "def __realToLocal(self, x, y):\n return((self.w * ((x-self.lbp) / self.deltaf), (self.h - 30) + y))", "title": "" }, { "docid": "c6ceaaa46f89de790f067e1632b23021", "score": "0.5231063", "text": "def get_local_center_coordinate() -> \"board_position.BoardPosition\":\n board = app.App.running_board\n return board_position.Position(\"Position\", x=board.get_tile_width() / 2, y=board.get_tile_height() / 2)", "title": "" }, { "docid": "dfad7ed15d7cb78d8e0b586526bf85ed", "score": "0.5230439", "text": "def worldYZ(cls):\n return cls([0, 0, 0], [1, 0, 0])", "title": "" }, { "docid": "261cdaafdf57d4c188f4afb0d0ab559c", "score": "0.52301204", "text": "def anchor_a(self):\r\n return self._body_a.get_world_point(self._local_anchor_a)", "title": "" }, { "docid": "001b9c598b2e9736be069f73ec7a8ac0", "score": "0.5214686", "text": "def _physical_coordinates(self):\n\n # First, the RA & dec.\n self.ra, self.dec = [Uncertain(x) for x in self.imagedata.wcs.p2s(\n [self.x.value, self.y.value])]\n if numpy.isnan(self.dec.value) or abs(self.dec) > 90.0:\n raise ValueError(\"object falls outside the sky\")\n\n # First, determine local north.\n help1 = numpy.cos(numpy.radians(self.ra.value))\n help2 = numpy.sin(numpy.radians(self.ra.value))\n help3 = numpy.cos(numpy.radians(self.dec.value))\n help4 = numpy.sin(numpy.radians(self.dec.value))\n center_position = numpy.array([help3 * help1, help3 * help2, help4])\n\n # The length of this vector is chosen such that it touches\n # the tangent plane at center position.\n # The cross product of the local north vector and the local east\n # vector will always be aligned with the center_position vector.\n if center_position[2] != 0:\n local_north_position = numpy.array(\n [0., 0., 1. / center_position[2]])\n else:\n # If we are right on the equator (ie dec=0) the division above\n # will blow up: as a workaround, we use something Really Big\n # instead.\n local_north_position = numpy.array([0., 0., 99e99])\n # Next, determine the orientation of the y-axis wrt local north\n # by incrementing y by a small amount and converting that\n # to celestial coordinates. 
That small increment is conveniently\n # chosen to be an increment of 1 pixel.\n\n endy_ra, endy_dec = self.imagedata.wcs.p2s(\n [self.x.value, self.y.value + 1.])\n help5 = numpy.cos(numpy.radians(endy_ra))\n help6 = numpy.sin(numpy.radians(endy_ra))\n help7 = numpy.cos(numpy.radians(endy_dec))\n help8 = numpy.sin(numpy.radians(endy_dec))\n endy_position = numpy.array([help7 * help5, help7 * help6, help8])\n\n # Extend the length of endy_position to make it touch the plane\n # tangent at center_position.\n endy_position /= numpy.dot(center_position, endy_position)\n\n diff1 = endy_position - center_position\n diff2 = local_north_position - center_position\n\n cross_prod = numpy.cross(diff2, diff1)\n\n length_cross_sq = numpy.dot(cross_prod, cross_prod)\n\n normalization = numpy.dot(diff1, diff1) * numpy.dot(diff2, diff2)\n\n # The length of the cross product equals the product of the lengths of\n # the vectors times the sine of their angle.\n # This is the angle between the y-axis and local north,\n # measured eastwards.\n # yoffset_angle = numpy.degrees(\n # numpy.arcsin(numpy.sqrt(length_cross_sq/normalization)))\n # The formula above is commented out because the angle computed\n # in this way will always be 0<=yoffset_angle<=90.\n # We'll use the dotproduct instead.\n yoffs_rad = (numpy.arccos(numpy.dot(diff1, diff2) /\n numpy.sqrt(normalization)))\n\n # The multiplication with -sign_cor makes sure that the angle\n # is measured eastwards (increasing RA), not westwards.\n sign_cor = (numpy.dot(cross_prod, center_position) /\n numpy.sqrt(length_cross_sq))\n yoffs_rad *= -sign_cor\n yoffset_angle = numpy.degrees(yoffs_rad)\n\n # Now that we have the BPA, we can also compute the position errors\n # properly, by projecting the errors in pixel coordinates (x and y)\n # on local north and local east.\n errorx_proj = numpy.sqrt(\n (self.x.error * numpy.cos(yoffs_rad)) ** 2 +\n (self.y.error * numpy.sin(yoffs_rad)) ** 2)\n errory_proj = numpy.sqrt(\n (self.x.error * numpy.sin(yoffs_rad)) ** 2 +\n (self.y.error * numpy.cos(yoffs_rad)) ** 2)\n\n # Now we have to sort out which combination of errorx_proj and\n # errory_proj gives the largest errors in RA and Dec.\n try:\n end_ra1, end_dec1 = self.imagedata.wcs.p2s(\n [self.x.value + errorx_proj, self.y.value])\n end_ra2, end_dec2 = self.imagedata.wcs.p2s(\n [self.x.value, self.y.value + errory_proj])\n # Here we include the position calibration errors\n self.ra.error = self.eps_ra + max(\n numpy.fabs(self.ra.value - end_ra1),\n numpy.fabs(self.ra.value - end_ra2))\n self.dec.error = self.eps_dec + max(\n numpy.fabs(self.dec.value - end_dec1),\n numpy.fabs(self.dec.value - end_dec2))\n except RuntimeError:\n # We get a runtime error from wcs.p2s if the errors place the\n # limits outside of the image.\n # In which case we set the RA / DEC uncertainties to infinity\n self.ra.error = float('inf')\n self.dec.error = float('inf')\n\n # Estimate an absolute angular error on our central position.\n self.error_radius = utils.get_error_radius(\n self.imagedata.wcs, self.x.value, self.x.error, self.y.value,\n self.y.error\n )\n\n # Now we can compute the BPA, east from local north.\n # That these angles can simply be added is not completely trivial.\n # First, the Gaussian in gaussian.py must be such that theta is\n # measured from the positive y-axis in the direction of negative x.\n # Secondly, x and y are defined such that the direction\n # positive y-->negative x-->negative y-->positive x is the same\n # direction (counterclockwise) as (local) 
north-->east-->south-->west.\n # If these two conditions are matched, the formula below is valid.\n # Of course, the formula is also valid if theta is measured\n # from the positive y-axis towards positive x\n # and both of these directions are equal (clockwise).\n self.theta_celes = Uncertain(\n (numpy.degrees(self.theta.value) + yoffset_angle) % 180,\n numpy.degrees(self.theta.error))\n if not numpy.isnan(self.theta_dc.value):\n self.theta_dc_celes = Uncertain(\n (self.theta_dc.value + yoffset_angle) % 180,\n numpy.degrees(self.theta_dc.error))\n else:\n self.theta_dc_celes = Uncertain(numpy.nan, numpy.nan)\n\n # Next, the axes.\n # Note that the signs of numpy.sin and numpy.cos in the\n # four expressions below are arbitrary.\n self.end_smaj_x = (self.x.value - numpy.sin(self.theta.value) *\n self.smaj.value)\n self.start_smaj_x = (self.x.value + numpy.sin(self.theta.value) *\n self.smaj.value)\n self.end_smaj_y = (self.y.value + numpy.cos(self.theta.value) *\n self.smaj.value)\n self.start_smaj_y = (self.y.value - numpy.cos(self.theta.value) *\n self.smaj.value)\n self.end_smin_x = (self.x.value + numpy.cos(self.theta.value) *\n self.smin.value)\n self.start_smin_x = (self.x.value - numpy.cos(self.theta.value) *\n self.smin.value)\n self.end_smin_y = (self.y.value + numpy.sin(self.theta.value) *\n self.smin.value)\n self.start_smin_y = (self.y.value - numpy.sin(self.theta.value) *\n self.smin.value)\n\n def pixel_to_spatial(x, y):\n try:\n return self.imagedata.wcs.p2s([x, y])\n except RuntimeError:\n logger.debug(\"pixel_to_spatial failed at %f, %f\" % (x, y))\n return numpy.nan, numpy.nan\n\n end_smaj_ra, end_smaj_dec = pixel_to_spatial(self.end_smaj_x,\n self.end_smaj_y)\n end_smin_ra, end_smin_dec = pixel_to_spatial(self.end_smin_x,\n self.end_smin_y)\n\n smaj_asec = coordinates.angsep(self.ra.value, self.dec.value,\n end_smaj_ra, end_smaj_dec)\n scaling_smaj = smaj_asec / self.smaj.value\n errsmaj_asec = scaling_smaj * self.smaj.error\n self.smaj_asec = Uncertain(smaj_asec, errsmaj_asec)\n\n smin_asec = coordinates.angsep(self.ra.value, self.dec.value,\n end_smin_ra, end_smin_dec)\n scaling_smin = smin_asec / self.smin.value\n errsmin_asec = scaling_smin * self.smin.error\n self.smin_asec = Uncertain(smin_asec, errsmin_asec)", "title": "" }, { "docid": "d571e02d2f126841f8157df562aebfca", "score": "0.52128667", "text": "def world_to_local(self, pos):\n if self._rect.collidepoint(pos):\n pos = spyral.point.sub(pos, self._offset)\n pos = spyral.point.unscale(pos, self._scale)\n return pos\n return None", "title": "" }, { "docid": "7b7c0d1b3b0fc7b9ff73a2ba466e8b0d", "score": "0.52126163", "text": "def get_mass_center(self):\n x_mass = self.gather_coordinate('x').mean()\n y_mass = self.gather_coordinate('y').mean()\n z_mass = (self.gather_coordinate('z').mean()\n if self.dimensions == 3 else None)\n self.mass_center = Point(x_mass, y_mass, z_mass)\n return self.mass_center", "title": "" }, { "docid": "6429daf010a6b04bd5fe871ca809535e", "score": "0.52099633", "text": "def getCenterPosition(self):\n c1 = yield self.server.getcomp(1)\n c2 = yield self.server.getcomp(2)\n centerc1 = c1 - self.amplitude * math.cos(self.angle * math.pi / 180.)\n centerc2 = c2 - self.amplitude * math.sin(self.angle * math.pi / 180.)\n returnValue( (centerc1,centerc2) )", "title": "" }, { "docid": "258a26a82fe25f8a922ac8989e439c0b", "score": "0.52090335", "text": "def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( 
self.center , offset )", "title": "" }, { "docid": "342cdadbc67314ab8c649c47421abd27", "score": "0.51897776", "text": "def GetOrigin(self):\n ...", "title": "" }, { "docid": "c09dc043b25ab345d095923a6848a480", "score": "0.5189147", "text": "def center(self,indx_center,indx_x,indx_y,debug=False,**kwargs):\r\n \r\n New_coor,Phi,Psi,Chi,center=CenterMolecule(self.coor.value,indx_center,indx_x,indx_y,print_angles=True,debug=debug,**kwargs)\r\n self.move(-center[0],-center[1],-center[2])\r\n self.rotate(Phi,Psi,Chi)\r\n \r\n return Phi,Psi,Chi,center", "title": "" }, { "docid": "11297c60e8587e11489dabc0461bc722", "score": "0.5184545", "text": "def translate_to_origin(idf):\n # type: (IDF) -> None\n surfaces = idf.getsurfaces()\n subsurfaces = idf.getsubsurfaces()\n shading_surfaces = idf.getshadingsurfaces()\n\n min_x = min(min(Polygon3D(s.coords).xs) for s in surfaces)\n min_y = min(min(Polygon3D(s.coords).ys) for s in surfaces)\n\n translate(surfaces, (-min_x, -min_y))\n translate(subsurfaces, (-min_x, -min_y))\n translate(shading_surfaces, (-min_x, -min_y))", "title": "" }, { "docid": "81331899caee4f0a7b3a6efbc5ebc636", "score": "0.5181993", "text": "def center(self):\n return self.box.center", "title": "" }, { "docid": "ebf8b10fd59e625ca50d75473f96147e", "score": "0.5181358", "text": "def _get_sp(self):\r\n return self._coordinates[0]", "title": "" }, { "docid": "14a1e90004e168312d852cbbb5a6394e", "score": "0.51810527", "text": "def update_sky(self, context):\n \n#---Get the duplivert parent of the sun lamp \n dupli = context.active_object\n\n#---Get the lamp or the softbox link to the duplivert\n lamp = get_lamp(context, dupli.Lumiere.lightname)\n \n#---Credits : https://www.youtube.com/watch?v=YXso7kNzxIU\n xAng = bpy.data.objects[dupli.name].rotation_euler[0]\n yAng = bpy.data.objects[dupli.name].rotation_euler[1]\n zAng = bpy.data.objects[dupli.name].rotation_euler[2]\n \n vec = Vector((0.0,0.0,1.0))\n xMat = Matrix(((1.1,0.0,0.0), (0.0, math.cos(xAng), -math.sin(xAng)), (0.0, math.sin(xAng), math.cos(xAng))))\n yMat = Matrix(((math.cos(yAng), 0.0, math.sin(yAng)), (0.0, 1.0, 0.0), (-math.sin(yAng), 0.0, math.cos(yAng))))\n zMat = Matrix(((math.cos(zAng), -math.sin(zAng), 0.0), (math.sin(zAng), math.cos(zAng), 0.0), (0.0, 0.0, 1.0)))\n \n vec = xMat * vec\n vec = yMat * vec\n vec = zMat * vec\n\n bpy.data.worlds['Lumiere_world'].node_tree.nodes['Sky Texture'].sun_direction = vec \n blackbody = lamp.data.node_tree.nodes['Blackbody']\n #4000 -> HORIZON // 5780 -> Daylight\n blackbody.inputs[0].default_value = 4000 + (1780 * vec.z)", "title": "" }, { "docid": "5ec71fbd702dbb51fb202ee4c0a5141e", "score": "0.51723874", "text": "def ProjectPointToContainer(coords, entities):", "title": "" }, { "docid": "e122bd493ce4d16f3da414cb369a52cc", "score": "0.5167991", "text": "def apply_static_tf_to_gripper_center(tf_ee_link):\n tf_ee_link_to_gripper_center = vector_quaternion_array_to_ptransform([0.19, -0.020, -0.010, 0, 0, 0, 1])\n\n tf_gripper_center = tf_ee_link_to_gripper_center * tf_ee_link\n\n return tf_gripper_center", "title": "" }, { "docid": "e6273ba1b8889c32b24f432bdaf1c7af", "score": "0.51672256", "text": "def tcutil_domain_center(self,logger=None):\n\n if self._cenlo is not None and self._cenla is not None:\n return (self._cenlo,self._cenla)\n\n storm_lon=self.lon\n assert(storm_lon is not None)\n storm_lat=self.lat\n if self.havefcstloc:\n assert(self.flon is not None)\n avglon=self.flon\n else:\n avglon=storm_lon-20.0\n assert(avglon is not None)\n\n # Decide center latitude.\n 
cenla=storm_lat\n if storm_lat<0: cenla=-cenla\n ilat=math.floor(cenla)\n if ilat < 15: cenla=15.0\n if ilat > 25: cenla=25.0\n if ilat >= 35: cenla=30.0\n if ilat >= 40: cenla=35.0\n if ilat >= 44: cenla=40.0\n if ilat >= 50: cenla=45.0\n if storm_lat<0: cenla=-cenla\n\n # Decide the center longitude.\n if logger is not None:\n logger.info('Averaging storm_lon=%f and avglon=%f'%(storm_lon,avglon))\n diff=storm_lon-avglon\n if(diff> 360.): storm_lon -= 360.0\n if(diff<-360.): avglon -= 360.0\n result=int((10.0*storm_lon + 10.0*avglon)/2.0)/10.0\n if(result > 180.0): result-=360.0\n if(result < -180.0): result+=360.0\n cenlo=result\n if logger is not None:\n logger.info('Decided cenlo=%f cenla=%f'%(cenlo,cenla))\n logger.info('Storm is at lon=%f lat=%f'%(storm_lon,storm_lat))\n # Lastly, some sanity checks to avoid outer domain centers too\n # far from storm centers:\n moved=False\n if(int(cenlo)>int(storm_lon)+5):\n cenlo=storm_lon+5.0\n if logger is not None:\n logger.info(\n 'Center is too far east of storm. Moving it to %f'\n %(cenlo,))\n moved=True\n if(int(cenlo)<int(storm_lon)-5):\n cenlo=storm_lon-5.0\n if logger is not None:\n logger.info(\n 'Center is too far west of storm. Moving it to %f'\n %(cenlo,))\n moved=True\n if logger is not None and not moved:\n logger.info('Center is within +/- 5 degrees longitude of storm.')\n logger.info('Final outer domain center is lon=%f lat=%f'\n %(cenlo,cenla))\n # Return results as a tuple:\n ( self._cenlo, self._cenla ) = ( cenlo, cenla )\n return ( cenlo, cenla )", "title": "" }, { "docid": "4f7cd71a2d6619ab35c9feef57504778", "score": "0.5161004", "text": "def get_obj(self):\n obj_scene = self._create_scene()\n camera_pose = self.sample_camera_pose()\n obj_scene.camera_pose = camera_pose\n points = np.array([])\n while not points.any():\n obj_scene.reset()\n obj_mesh, obj_info = obj_scene.sample_obj()\n if obj_mesh is None:\n continue\n stps = obj_info[\"stps\"]\n probs = obj_info[\"probs\"]\n pose = stps[np.random.choice(len(stps), p=probs)].copy()\n z_rot = tra.rotation_matrix(\n 2 * np.pi * np.random.rand(), [0, 0, 1], point=pose[:3, 3]\n )\n pose = z_rot @ pose\n obj_scene.add_object(\"obj\", obj_mesh, pose)\n points = obj_scene.render_points()\n\n pt_inds = np.random.choice(\n points.shape[0], size=self.n_obj_points, replace=True\n )\n points_batch = np.repeat(\n points[None, pt_inds], self.batch_size, axis=0\n )\n points_center = np.mean(points_batch[0, :, :3], axis=0)\n del obj_scene\n\n return points_batch, points_center, obj_mesh, pose, camera_pose", "title": "" }, { "docid": "91ed2c413845c1571c49cd5e9b47f935", "score": "0.5159902", "text": "def representative_point(self):\n return pygeos.point_on_surface(self)", "title": "" }, { "docid": "8911d2466b3490476cf3b5882b16900a", "score": "0.5159887", "text": "def _center_on_origin(self):\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n x, y = self.w / 2, self.h / 2\n glOrtho(-x, x, -y, y, self.NEAR, self.FAR)\n glMatrixMode(GL_MODELVIEW)", "title": "" }, { "docid": "c10677af6d1b1ce8aa3f5c7c87d2ebe9", "score": "0.51593816", "text": "def center_to(center_surface, surface):\n pos = surface.get_rect()\n pos.center = center_surface.get_rect().center\n return pos", "title": "" }, { "docid": "1d3dfa6c938cd6305ce636a7a879353a", "score": "0.5155735", "text": "def getBoundingBoxCenterShell(self, *args, **kwargs):\r\n self.grabShell()\r\n uvBB = pm.polyEvaluate(boundingBoxComponent2d=True)\r\n uvCenter=[((uvBB[0][1]+uvBB[0][0])/2),((uvBB[1][1]+uvBB[1][0])/2)]\r\n return uvCenter", "title": "" }, { 
"docid": "aa7c905add5edbc78f2697ef7cc28568", "score": "0.5152362", "text": "def pos_center(self):\n return self.x + int(self.w / 2), self.y + int(self.h / 2)", "title": "" }, { "docid": "3766c7fed5005d93a0f7ce0194336817", "score": "0.51462716", "text": "def get_xyz(self):\n # xyz = SkyCoord(frame=\"galactic\", l=self.gl, b=self.gb,\n # distance=self.distance,\n # z_sun = z_sun*us.kpc,\n # unit=\"deg, deg, kpc\").galactocentric.\n # cartesian.xyz.value\n # return xyz\n return galactic_to_galactocentric(l=self.gl, b=self.gb,\n distance=self.distance,\n xyz_sun=XYZ_SUN)", "title": "" }, { "docid": "630b867fa48bbca29e98f5c97dbde678", "score": "0.5143933", "text": "def _get_center(self):\n\n return self.x + self.width / 2, self.y + self.height / 2", "title": "" }, { "docid": "fb600d2ef84b3c9f3cc5469448595ca5", "score": "0.5141232", "text": "def draw_center(self):\n x0, y0 = self.maze.world_to_screen(self.x_pos, self.y_pos)\n self.center_render_component = self.canvas.create_oval(x0 - self.maze.pixel_center, y0 - self.maze.pixel_center, x0 + self.maze.pixel_center, y0+ self.maze.pixel_center, fill=\"Black\")\n return", "title": "" }, { "docid": "cfb96888356408b8a5c91824056e76eb", "score": "0.5140661", "text": "def get_surface_line_center(self):\n obj_size = self.dims\n center = self.gravity_center.view(-1, 1, 3)\n batch_size = center.shape[0]\n rot_sin = torch.sin(-self.yaw)\n rot_cos = torch.cos(-self.yaw)\n rot_mat_T = self.yaw.new_zeros(tuple(list(self.yaw.shape) + [3, 3]))\n rot_mat_T[..., 0, 0] = rot_cos\n rot_mat_T[..., 0, 1] = -rot_sin\n rot_mat_T[..., 1, 0] = rot_sin\n rot_mat_T[..., 1, 1] = rot_cos\n rot_mat_T[..., 2, 2] = 1\n offset = obj_size.new_tensor([[0, 0, 1], [0, 0, -1], [0, 1, 0], [0, -1, 0], [1, 0, 0], [-1, 0, 0]])\n offset = offset.view(1, 6, 3) / 2\n surface_3d = (offset * obj_size.view(batch_size, 1, 3).repeat(1, 6, 1)).reshape(-1, 3)\n offset = obj_size.new_tensor([[1, 0, 1], [-1, 0, 1], [0, 1, 1], [0, -1, 1], [1, 0, -1], [-1, 0, -1], [0, 1, -1], [0, -1, -1], [1, 1, 0], [1, -1, 0], [-1, 1, 0], [-1, -1, 0]])\n offset = offset.view(1, 12, 3) / 2\n line_3d = (offset * obj_size.view(batch_size, 1, 3).repeat(1, 12, 1)).reshape(-1, 3)\n surface_rot = rot_mat_T.repeat(6, 1, 1)\n surface_3d = torch.matmul(surface_3d.unsqueeze(-2), surface_rot).squeeze(-2)\n surface_center = center.repeat(1, 6, 1).reshape(-1, 3) + surface_3d\n line_rot = rot_mat_T.repeat(12, 1, 1)\n line_3d = torch.matmul(line_3d.unsqueeze(-2), line_rot).squeeze(-2)\n line_center = center.repeat(1, 12, 1).reshape(-1, 3) + line_3d\n return surface_center, line_center", "title": "" }, { "docid": "87af4751282519bc229aa50ab1d27309", "score": "0.51361907", "text": "def spawn_objects(world):\n\n print \"Initializing world objects\"\n\n obj = world.makeTerrain('object1')\n bmin = [0,0,0]\n bmax = [0.08,0.08,0.3]\n\n\n\n\n load_item_geometry(bmin,bmax,obj.geometry())\n\n obj.geometry().transform(so3.identity(),[0,-0.2,0.155])\n return obj", "title": "" }, { "docid": "e44dcb1050645aab7dd16bbc04085412", "score": "0.51356333", "text": "def point(self, ra=None, dec=None):\n\n # if (ra,dec) given, then point the whole Camera there\n if ra is not None and dec is not None:\n self.ra, self.dec = ra, dec\n\n # make sure an (ra,dec) are defined\n try:\n self.ra\n self.dec\n logger.info('pointing the camera at (ra,dec) = {0:.6f},{1:.6f}'.format(self.ra, self.dec))\n except:\n self.report(\"Please point your telescope somewhere. 
No RA or DEC defined.\")\n\n # create a blank WCS object, for converting between (ra,dec) and (x,y)\n self.wcs = astropy.wcs.WCS(naxis=2)\n\n # the focalxy coordinates of the reference position [taken to be center of field, which is (x,y) = (0.0, 0.0) in focalxy coordinates]\n self.wcs.wcs.crpix = [0.0, 0.0]\n\n # the pixel scale, in degrees\n self.wcs.wcs.cdelt = [-self.pixelscale / 60.0 / 60.0, self.pixelscale / 60.0 / 60.0]\n\n\n\n # the celestial coordinates at the reference position (input by user)\n nudged_ra, nudged_dec = zachopy.spherical.rotate(self.ra, self.dec, self.nudge['x'] / 60.0 / 60.0,\n self.nudge['y'] / 60.0 / 60.0)\n self.wcs.wcs.crval = [nudged_ra, nudged_dec]\n\n # the rotation of the field (currently just a pure rotation, no shear)\n # rot = self.nudge['z']/60.0/60.0*np.pi/180.0\n # w.wcs.pc = [[np.cos(rot), -np.sin(rot)],[np.sin(rot), np.cos(rot)]]\n\n # the coordinate system type - what should I use?\n self.wcs.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\"]\n\n # set this to be the WCS\n # self.populateHeader()", "title": "" }, { "docid": "d3b974ec71565656143174620d540f3c", "score": "0.5129372", "text": "def origin2Center(self):\n if self.parent is None:\n return False\n self.x = self.parent.pl + self.parent.pw/2\n return True", "title": "" }, { "docid": "c3e9cdf492d082bf3235c0330b6fe1f9", "score": "0.51214474", "text": "def __init__(self, model_frame, sky_coord, observations):\n spectrum = get_pixel_spectrum(sky_coord, observations, correct_psf=True)\n spectrum = TabulatedSpectrum(model_frame, spectrum)\n\n center = model_frame.get_pixel(sky_coord)\n morphology = PointSourceMorphology(model_frame, center)\n self.center = morphology.center\n super().__init__(model_frame, spectrum, morphology)", "title": "" }, { "docid": "5717cabe0277e86b427eeaba21f22176", "score": "0.5121323", "text": "def sdf_from_xyz(df, x_col, y_col, z_col=None, sr=None):\n\n if not z_col:\n return pd.DataFrame.spatial.from_xy(df, x_col, y_col, sr)\n \n def point_for_row(x, y, z, sr):\n return Point({'x' : x, 'y' : y, 'z': z, \"spatialReference\" : sr})\n \n if sr is None:\n sr = SpatialReference({'wkid' : 4326})\n\n df_geom = df.apply(lambda row: point_for_row(row[x_col],\n row[y_col],\n row[z_col],\n sr), axis=1)\n sdf = df.spatial.set_geometry(df_geom, sr=sr)\n return sdf", "title": "" }, { "docid": "08fe70129b5e34f98e963a2bdccafc96", "score": "0.5120539", "text": "def center(objs=None,camera=None):\n scene=Scene.GetCurrent()\n if objs==None:\n objs=scene.objects\n if objs==None:\n return\n if type(objs).__name__!='list':\n objs = [objs]\n elif len(objs)<1:\n return\n if camera==None:\n camera = scene.objects.camera\n if camera==None:\n camera = scene.objects.new (Blender.Camera.New())\n camera.setLocation (10.0, 10.0, 10.0)\n camera.setEuler (Euler(-45.0,0.0,45.0))\n # figure out the space occupied by everything togehter\n xmax=-65535;xmin=65535;ymax=-65535;ymin=65535;zmax=-65535;zmin=65535;\n for obj in objs:\n bb=obj.getBoundBox()\n if bb == None:\n print dir(obj)\n pass\n else:\n for pt in bb:\n xmin=min(xmin,pt[0])\n xmax=max(xmax,pt[0])\n ymin=min(ymin,pt[1])\n ymax=max(ymax,pt[1])\n zmin=min(zmin,pt[2])\n zmax=max(zmax,pt[2])\n if xmax == None:\n print \"ERROR: No objects found to center to.\"\n return -1\n xspan=xmax-xmin\n yspan=ymax-ymin\n zspan=zmax-zmin\n xcenter=xmin+(xspan/2)\n ycenter=xmin+(yspan/2)\n zcenter=xmin+(zspan/2)\n camera.clearTrack()\n camx,camy,camz=camera.getLocation()\n # Rotate camera to face center point\n rx=cos((camy-ycenter)/(camz-zcenter))\n 
ry=cos((camz-zcenter)/(camx-xcenter))\n rz=cos((camx-xcenter)/(camy-ycenter))\n camera.RotX=rx\n camera.RotY=ry\n camera.RotZ=rz\n # TODO: Zoom in/out until we fill the frame", "title": "" }, { "docid": "c338f633c936588c692998ecec78f125", "score": "0.51171494", "text": "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "title": "" }, { "docid": "5fe86824ac309d39415168defc366d1e", "score": "0.5112167", "text": "def wcsTest(dr):\n import astropy.units as u\n from astropy import wcs\n from astropy.coordinates import SkyCoord, FK5\n\n sciences = dr._scienceIma\n ccd0 = sciences[0]\n hdr0 = ccd0.header\n ccd0wcs = wcs.WCS(hdr0)\n pxs = np.array([[0, 0], [1024, 1024], [512, 1024]], np.float)\n ccd0wcs.all_pix2world(pxs, 1)\n\n px = np.arange(ccd0.shape[1])\n py = np.arange(ccd0.shape[0])\n wx, wy = ccd0wcs.all_pix2world(px, py, 1)\n\n if hdr0['OBJRADEC'] == 'FK5':\n frameType = FK5()\n c = SkyCoord(ccd0.header['OBJRA'], ccd0.header['OBJDEC'],\n frame=frameType, unit=(u.hourangle, u.deg))\n\n # AO guide star. Find it in image\n aoStarCoordW = SkyCoord(ccd0.header['AORA'], ccd0.header['AODEC'],\n frame=frameType, unit=(u.hourangle, u.deg))\n aoStarCoordPx = ccd0wcs.world_to_pixel(aoStarCoordW)", "title": "" }, { "docid": "798e20bbbace55f33c88df39e048f41d", "score": "0.51091045", "text": "def world_to_local(self, v):\r\n return Vec2d._fromcffi(cp.cpBodyWorldToLocal(self._body, tuple(v)))", "title": "" }, { "docid": "5c969940eaf0d687309b342debaff067", "score": "0.5105461", "text": "def __init__(self, *args, **kwargs):\n try:\n self._near = kwargs['near']\n except KeyError:\n self._near = 1\n\n try:\n self._far = kwargs['far']\n except KeyError:\n self._far = 1000\n\n\n #Now we use same approach as in VisualizationFrame\n #for setting reference_frame and origin\n i = 0\n #If first arg is not str, name the visualization frame 'unnamed'\n if isinstance(args[i], str):\n self._name = args[i]\n i += 1\n else:\n self._name = 'unnamed'\n\n try:\n self._reference_frame = args[i].get_frame()\n self._origin = args[i].get_masscenter()\n\n except AttributeError:\n #It is not a rigidbody, hence this arg should be a\n #reference frame\n self._reference_frame = args[i]\n i += 1\n\n #Now next arg can either be a Particle or point\n try:\n self._origin = args[i].get_point()\n except AttributeError:\n self._origin = args[i]\n\n #basic thing required, transform matrix\n self._transform = Identity(4).as_mutable()", "title": "" }, { "docid": "ddad9d9ac891955f3fbcecb523796705", "score": "0.51028305", "text": "def __init__(self, *args, **kwargs):\n try:\n self._fov = kwargs['fov']\n except KeyError:\n self._fov = 45\n\n try:\n self._near = kwargs['near']\n except KeyError:\n self._near = 1\n\n try:\n self._far = kwargs['far']\n except KeyError:\n self._far = 1000\n\n\n #Now we use same approach as in VisualizationFrame\n #for setting reference_frame and origin\n i = 0\n #If first arg is not str, name the visualization frame 'unnamed'\n if isinstance(args[i], str):\n self._name = args[i]\n i += 1\n else:\n self._name = 'unnamed'\n\n try:\n self._reference_frame = args[i].get_frame()\n self._origin = args[i].get_masscenter()\n\n except AttributeError:\n #It is not a rigidbody, hence this arg should be a\n #reference frame\n try:\n dcm = args[i]._dcm_dict\n self._reference_frame = args[i]\n i += 1\n except AttributeError:\n raise TypeError(''' A ReferenceFrame is 
to be supplied\n before a Particle/Point. ''')\n\n #Now next arg can either be a Particle or point\n try:\n self._origin = args[i].get_point()\n except AttributeError:\n self._origin = args[i]\n\n #basic thing required, transform matrix\n self._transform = Identity(4).as_mutable()", "title": "" }, { "docid": "bb2e687f69ac5bfcdf479b54fde9f908", "score": "0.5096715", "text": "def get_mol_object(self, id=0):\n coord0 = self.mol.cart_coords.dot(self.orientation.matrix.T) #\n # Obtain the center in absolute coords\n if id <= len(self.wp.generators):\n op = self.wp.generators[id]\n center_relative = op.operate(self.position)\n center_relative -= np.floor(center_relative)\n #print(center_relative)\n center_absolute = np.dot(center_relative, self.lattice.matrix)\n # Rotate the molecule (Euclidean metric)\n op_m = self.wp.generators_m[id]\n rot = op_m.affine_matrix[0:3][:, 0:3].T\n tau = op_m.affine_matrix[0:3][:, 3]\n tmp = np.dot(coord0, rot) + tau\n # Add absolute center to molecule\n tmp += center_absolute\n return Molecule(self.symbols, tmp) \n else:\n raise ValueError(\"id is greater than the number of molecules\")", "title": "" }, { "docid": "879a41ffea4169968853655c2cbd5d1d", "score": "0.5090065", "text": "def center(self):\n return self.model(0.5 * np.sum(self.model.domain))", "title": "" }, { "docid": "894aad3d4984d6aa64bedea10222c150", "score": "0.50862116", "text": "def __init__(self, pt_or_z = 0.0, cl = 0.0225 , tf= 5.0 ,cm = -0.0225, ts = 2.0, cr = 0.0 ,rad = 10.0,crownindex=\"BK7\", \\\n flintindex = \"F4\"):\n Lens.__init__(self,pt_or_z)\n\n if isinstance(crownindex,str): # Lookup crownindex if given string key\n crownindex = MaterialIndex(crownindex)\n if isinstance(flintindex,str): # Lookup flintindex if given string key\n flintindex = MaterialIndex(flintindex)\n\n # Add the three surfaces\n sl = SphericalSurface(0.0,cl,rad,crownindex) # Front surface at 0.0\n self.add(sl)\n sm = SphericalSurface(tf,cm,rad,flintindex) # Common middle surface\n self.add(sm)\n sr = SphericalSurface(tf + ts,cr,rad,AirIndex()) # Back surface\n self.add(sr)\n\n self.minThickness = 2.0 # Sanity thickness\n\n #", "title": "" }, { "docid": "80e1a374c6ca28533ca490b0ecc11187", "score": "0.50838953", "text": "def sensor_in_world_frame(self, world_frame_pos, world_frame_orien):\n world_T_base = tfs.quaternion_matrix([world_frame_orien.x , world_frame_orien.y, world_frame_orien.z, world_frame_orien.w])\n world_T_base[:3,3] = [world_frame_pos.x, world_frame_pos.y, world_frame_pos.z]\n sensor_in_world = np.dot(world_T_base, self.tf)\n sensor_in_world_orien = tfs.quaternion_from_matrix(sensor_in_world)\n sensor_in_world_position = tfs.translation_from_matrix(sensor_in_world)\n sensor_in_world_pose = Pose()\n sensor_in_world_pose.orientation.x = sensor_in_world_orien[0]\n sensor_in_world_pose.orientation.y = sensor_in_world_orien[1]\n sensor_in_world_pose.orientation.z = sensor_in_world_orien[2]\n sensor_in_world_pose.orientation.w = sensor_in_world_orien[3]\n sensor_in_world_pose.position.x = sensor_in_world_position[0]\n sensor_in_world_pose.position.y = sensor_in_world_position[1]\n sensor_in_world_pose.position.z = sensor_in_world_position[2]\n return sensor_in_world_pose", "title": "" }, { "docid": "d969b9ae952d2b415a9c743854afa793", "score": "0.5080039", "text": "def __init__(self, world):\n self.world = world", "title": "" }, { "docid": "b4b41740682519c39d2a8abfa018e43e", "score": "0.5076341", "text": "def anchor_b(self):\r\n return self._body_b.get_world_point(self._local_anchor_b)", "title": "" }, { 
"docid": "4743229dc64694cc9a621e9b5c2befa5", "score": "0.5073929", "text": "def centering(self):\n center = np.mean(self.vertices,axis=0) # center of geometry\n self.vertices = self.vertices - center\n return", "title": "" }, { "docid": "c27359d4b07b717a813df139eb20d69a", "score": "0.50732523", "text": "def find_furthest_point(server, uuid, instance, body, starting_coord_zyx, *, session=None):\n # Determine furthest block via sparsevol-coarse\n svc = (2**6) * fetch_sparsevol_coarse(server, uuid, instance, body, format='coords', session=session)\n svc_centers = svc + (2**5)\n i = np.argmax(np.linalg.norm(starting_coord_zyx - svc_centers, axis=1))\n\n # Pick the center of the segment within that block\n # (Proofreaders don't like it if the point is on the segment edge)\n box = (svc[i], svc[i] + (2**6))\n vol = fetch_labelmap_voxels(server, uuid, instance, box, session=session)\n mask = (vol == body)\n dt = distance_transform(mask.astype(np.uint32), background=False, pad=True)\n c = np.unravel_index(np.argmax(dt), dt.shape)\n c += box[0]\n dist = np.linalg.norm(starting_coord_zyx - c)\n return (c, dist)", "title": "" }, { "docid": "5faa506b4962aba621baa2abfaa73f2e", "score": "0.5066402", "text": "def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, self.z)", "title": "" }, { "docid": "36cb8d3344c646242761b915a1160777", "score": "0.5050575", "text": "def get_geom_center(self):\n from itertools import chain\n\n #if not computed\n if not hasattr(self, \"geom_center\"):\n #save the result\n self.geom_center = Point(np.average(np.array([a.xyz for r in chain(self.find_paratope(), self.find_epitope()) for a in r.atom]), axis = 0))\n\n #return it\n return self.geom_center", "title": "" } ]
bdc24c088c12848d5b231d245b1f0744
finds all strings in files in folder needing translations
[ { "docid": "13a6c79eac27cc87bbd719f558c434b7", "score": "0.0", "text": "def find_matches(folder, name=\"T\", extensions=[\"py\", \"js\", \"html\"]):\n matches_found = set()\n re_string_t = (\n r\"(?<=[^\\w]%s\\()(?P<name>\"\n r\"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')\"\n r\"|(?:'(?:[^'\\\\]|\\\\.)*')\"\n r'|(?:\"\"\"(?:[^\"]|\"{1,2}(?!\"))*\"\"\")'\n r'|(?:\"(?:[^\"\\\\]|\\\\.)*\"))'\n ) % name\n regex_t = re.compile(re_string_t)\n for root, dirs, files in os.walk(folder):\n for name in files:\n if name.split(\".\")[-1] in extensions:\n path = os.path.join(root, name)\n with open(path) as fp:\n data = fp.read()\n items = regex_t.findall(data)\n matches_found |= set(map(ast.literal_eval, items))\n return list(matches_found)", "title": "" } ]
[ { "docid": "b74b547fe120d671ecae5ebbd8d893da", "score": "0.7044006", "text": "def read_languages_from_filenames(dir_to_search):\n pattern = re.compile(\"([a-z]{2}-[A-Z]{2}).txt\")\n for file in os.listdir(dir_to_search):\n find = pattern.findall(file)\n if len(find) == 1 and find[0] not in languages:\n languages.append(find[0])\n languages.sort()", "title": "" }, { "docid": "6e9ad014f3942aef45c58a4e8258052e", "score": "0.6989482", "text": "def get_translatable_strings():\n\n def _scan_file(path):\n \"\"\"\n Scan the file at the given path for translatable strings\n and append it to translatable_strings\n \"\"\"\n data = \"\"\n with open(path, \"r\") as f:\n for line in f.readlines():\n strings = re.findall(r'\"%[^\"]+\"', line) + re.findall(r'[[%[^\"]+]]', line) + re.findall(r'\\'%[^\"]+\\'', line)\n for string in strings:\n raw_string = string[2:-2] if string.startswith(\"[[\") else string[1:-1]\n trans_string = TranslatableString(raw_string)\n line = line.replace(string, f\"EAL.translations[\\\"{ADDON_NAME}\\\"][EAL.language][\\\"{trans_string.string}\\\"]\")\n translatable_string_manager.add_string(trans_string)\n data += line\n\n with open(path, \"w\") as f:\n f.write(data)\n\n # We are using a dict so we have added benefit of auto-merging any same strings together\n translatable_string_manager = TranslatableStringManager()\n \n for lua_file in ADDON_PATH.glob('**/*.lua'):\n _scan_file(lua_file)\n \n return translatable_string_manager", "title": "" }, { "docid": "efd996f1f7f37decfd83257dfb1cba9d", "score": "0.69865173", "text": "def getLanguagesInformations(path):\n list_po_name = []\n list_total_msgids = []\n po_path = path\n for product in products:\n po_path = os.path.join(path, '%s/i18n/' % product)\n if os.path.exists(po_path):\n for f in os.listdir(po_path):\n if f.endswith('.po'):\n file_name = os.path.splitext(f)[0]\n if file_name not in list_po_name:\n list_po_name.append(file_name)\n\n if f.endswith('.pot') and not f.startswith('.'):\n file = open(os.path.join(po_path, f), 'r')\n file_content = file.read()\n file.close()\n match_translation = TRANSLATION_REGEXP.findall(file_content)\n for m in match_translation:\n msgid = m[0].split(\"\\nmsgstr\")[0]\n if msgid not in list_total_msgids:\n list_total_msgids.append(msgid)\n\n else:\n continue\n\n if len(list_po_name) == 0:\n log(\"Directory not found: %s\" % po_path)\n sys.exit(1)\n else:\n return [list_po_name, len(list_total_msgids)]", "title": "" }, { "docid": "ff7062071ef50c237cd22875c53a4320", "score": "0.6856998", "text": "def _scan_xgettext_from_files(target, source, env, files = None, path = None):\r\n import re\r\n import SCons.Util\r\n import SCons.Node.FS\r\n\r\n if files is None:\r\n return 0\r\n if not SCons.Util.is_List(files):\r\n files = [ files ]\r\n\r\n if path is None:\r\n if env.has_key('XGETTEXTPATH'):\r\n path = env['XGETTEXTPATH']\r\n else:\r\n path = []\r\n if not SCons.Util.is_List(path):\r\n path = [ path ]\r\n\r\n path = SCons.Util.flatten(path)\r\n\r\n dirs = ()\r\n for p in path:\r\n if not isinstance(p, SCons.Node.FS.Base):\r\n if SCons.Util.is_String(p):\r\n p = env.subst(p, source = source, target = target)\r\n p = env.arg2nodes(p, env.fs.Dir)\r\n dirs += tuple(p)\r\n # cwd is the default search path (when no path is defined by user)\r\n if not dirs:\r\n dirs = (env.fs.getcwd(),)\r\n\r\n # Parse 'POTFILE.in' files.\r\n re_comment = re.compile(r'^#[^\\n\\r]*$\\r?\\n?', re.M)\r\n re_emptyln = re.compile(r'^[ \\t\\r]*$\\r?\\n?', re.M)\r\n re_trailws = re.compile(r'[ \\t\\r]+$')\r\n for f in 
files:\r\n # Find files in search path $XGETTEXTPATH\r\n if isinstance(f, SCons.Node.FS.Base) and f.rexists():\r\n contents = f.get_text_contents()\r\n contents = re_comment.sub(\"\", contents)\r\n contents = re_emptyln.sub(\"\", contents)\r\n contents = re_trailws.sub(\"\", contents)\r\n depnames = contents.splitlines()\r\n for depname in depnames: \r\n depfile = SCons.Node.FS.find_file(depname, dirs)\r\n if not depfile:\r\n depfile = env.arg2nodes(depname, dirs[0].File)\r\n env.Depends(target, depfile)\r\n return 0", "title": "" }, { "docid": "8de0803df41a6473536e55253ae92b1e", "score": "0.679107", "text": "def _scan_file(path):\n data = \"\"\n with open(path, \"r\") as f:\n for line in f.readlines():\n strings = re.findall(r'\"%[^\"]+\"', line) + re.findall(r'[[%[^\"]+]]', line) + re.findall(r'\\'%[^\"]+\\'', line)\n for string in strings:\n raw_string = string[2:-2] if string.startswith(\"[[\") else string[1:-1]\n trans_string = TranslatableString(raw_string)\n line = line.replace(string, f\"EAL.translations[\\\"{ADDON_NAME}\\\"][EAL.language][\\\"{trans_string.string}\\\"]\")\n translatable_string_manager.add_string(trans_string)\n data += line\n\n with open(path, \"w\") as f:\n f.write(data)", "title": "" }, { "docid": "e30167de3c2f580ae7a0f0aeeb1c9dee", "score": "0.6776747", "text": "def i18n_available():\n base = os.path.join(BASE, \"i18n\", \"langs\")\n for path in glob.glob(\"%s/*.po\" % base):\n yield os.path.basename(path).rsplit(\".\", 1)[0]", "title": "" }, { "docid": "06730eda24c7d25e9e29db5888c3627d", "score": "0.67577386", "text": "def list_translations(self):\n dirname = os.path.join(self.app.root_path, TRANSLATIONS_PATH)\n if not os.path.isdir(dirname):\n return []\n return [name for name in os.listdir(dirname)\n if os.path.isdir(os.path.join(dirname, name))]", "title": "" }, { "docid": "0675e432bce0aaa9a7bd16cab48d081d", "score": "0.6703839", "text": "def python_po_files(self):\n\t\ttry:\n\t\t\tpath = '%(Python)s/%(Module)s/' % self\n\t\texcept KeyError:\n\t\t\treturn\n\t\tfor lang in LANGUAGES:\n\t\t\tyield os.path.join(path, '%s.po' % lang)", "title": "" }, { "docid": "e395168705a93888fed342c41a4adf62", "score": "0.66903836", "text": "def get_available_translations():\n languages = []\n\n if LOCALEDIR is None:\n return languages\n\n for langdir in os.listdir(LOCALEDIR):\n mofilename = os.path.join( LOCALEDIR, langdir,\n \"LC_MESSAGES\", \"addon.mo\")\n _LOG = logging.getLogger()\n msg = \"MOD file \" + mofilename\n _LOG.debug(\"message %s\\n\", msg)\n if os.path.exists(mofilename):\n _LOG = logging.getLogger()\n msg = \"LANG file \" + langdir\n _LOG.debug(\"message %s\\n\", msg)\n languages.append(langdir)\n\n languages.sort()\n\n return languages", "title": "" }, { "docid": "4b16bd10fd1c119ed436427833266afa", "score": "0.66317827", "text": "def fileLocales(self) -> Iterable[str]:\n for path in self.root.joinpath('common/main').glob('*.xml'):\n if path.stem != 'root':\n yield path.stem", "title": "" }, { "docid": "f5e210f2c5608e7a311129300cafe2be", "score": "0.6629439", "text": "def translate_files(self, files):\n\n if self.dataPrefix.split('/')[-1] not in files[0]:\n raise Exception('given files do not seem in current dataset')\n oldRoot = files[0][:re.search(self.dataPrefix.split('/')[-1], files[0]).regs[0][1]]\n translated_files = list({re.sub(oldRoot, self.dataPrefix, k) for k in files})\n translated_files = list(filter(lambda x: x in self.hash.keys(), translated_files))\n return translated_files", "title": "" }, { "docid": "aa30a485db6c0c605fb21829eb9b064e", 
"score": "0.6504221", "text": "def get_resource_texts_files(directory=\"meta-share/resource-texts\"):\n resource_text_files = set()\n for filename in sorted(os.listdir(directory)):\n resource_text_files.add(filename)\n return resource_text_files", "title": "" }, { "docid": "ae7f2f4c5381dfffc086a22884513773", "score": "0.6462383", "text": "def js_po_files(self):\n\t\ttry:\n\t\t\tpath = self[JAVASCRIPT]\n\t\texcept KeyError:\n\t\t\treturn\n\t\tfor lang in LANGUAGES:\n\t\t\tyield os.path.join(path, '%s.po' % lang)", "title": "" }, { "docid": "c59c2a429108de10d8d81b4b73dd01ec", "score": "0.6422797", "text": "def translations():\n pass", "title": "" }, { "docid": "842d3e90f8457a0ec0e9087445dbcccc", "score": "0.6332633", "text": "def compile_po_files(locale_dir, langs):\n for lang in langs:\n curdir = os.path.join(locale_dir, lang, 'LC_MESSAGES')\n if not os.path.exists(curdir):\n continue\n files = os.listdir(curdir)\n for fname in files:\n if fname.endswith('.po'):\n fname_mo = fname.replace('.po', '.mo')\n args = [\n 'msgfmt',\n os.path.join(curdir, fname),\n '-o', os.path.join(curdir, fname_mo)\n ]\n _run_or_die(args)", "title": "" }, { "docid": "2a51af1211e7484150f0da40b588e691", "score": "0.631357", "text": "def merge_translations(localization_bundle_path):\n logging.info(\"Merging translations\")\n for lang_dir in os.listdir(localization_bundle_path):\n if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME:\n continue\n for translated_path in glob.glob(os.path.join(localization_bundle_path, lang_dir, \"*\" + TRANSLATED_SUFFIX)):\n strings_path = translated_path[:-1 * len(TRANSLATED_SUFFIX)]\n localizable_path = os.path.join(localization_bundle_path,\n DEFAULT_LANGUAGE_DIRECTORY_NAME,\n os.path.basename(strings_path))\n\n localization_merge_back(localizable_path, strings_path, translated_path, strings_path)", "title": "" }, { "docid": "ad0275ba759f4821f5aac45f9b1f61c1", "score": "0.627959", "text": "async def package_lang(pak_id: str, fsys: FileSystem) -> None:\n for code in expanded:\n try:\n file = fsys[f'resources/i18n/{code}.mo']\n except FileNotFoundError:\n continue\n LOGGER.debug('Found localisation file {}:{}', pak_id, file.path)\n try:\n with file.open_bin() as f:\n lang_map[pak_id] = await trio.to_thread.run_sync(gettext_mod.GNUTranslations, f)\n return\n except OSError:\n LOGGER.warning('Invalid localisation file {}:{}', pak_id, file.path, exc_info=True)", "title": "" }, { "docid": "2e3739033f5f67ab2f50c8da95b36359", "score": "0.62573385", "text": "def loadLocFiles(self):\n\t\tlogger.debug('loading lang folder path..')\n\t\t# create Path obj with the lang file path\n\t\tfolder = Path( config.load('l18nFolderPath') )\n\t\t# if it doesn't exist, download the default lang file: en_US.jlang\n\t\tif not folder.exists():\n\t\t\tlogger.error(f'NO LANG FOLDER FOUND!')\n\t\t\tfolder.mkdir()\n\t\t\tlogger.info('downloading english translation from github!')\n\t\t\tself.downloadLanguage( 'en_US' )\n\t\tlogger.info(f'langs folder path is \"{folder.absolute()}\"')\n\t\tlangFile: Path\n\t\t# iterate in all jlang files in the lang folder\n\t\tfor langFile in folder.glob('*.jlang'):\n\t\t\tlogger.debug(f'loading lang file \"{langFile.name}\"')\n\t\t\t# open the file\n\t\t\twith langFile.open('r') as file:\n\t\t\t\t# save the file into the dictionary (l18ns[langcode][textid])\n\t\t\t\tself.localizations[langFile.name.replace('.jlang', '')] = json.load(file)\n\t\t\tlogger.info(f'loaded lang file {langFile.name}!')\n\t\t# repeat for all files", "title": "" }, { "docid": 
"9370062007781475c75da86fdeea33e1", "score": "0.6232859", "text": "def xml_po_files(self):\n\t\tif self.xml_definition is None:\n\t\t\treturn\n\t\tdirpath = os.path.dirname(self.xml_definition)\n\t\tfor lang in LANGUAGES:\n\t\t\tpath = os.path.join(dirpath, '%s.po' % lang)\n\t\t\tyield (lang, path)", "title": "" }, { "docid": "29cf4c5901579e743d8b3af07e73cb45", "score": "0.62219554", "text": "def lang_extract(): # Run as lang-extract\n\n # TODO use python path environment instead of venv\n runshell(\n \"venv/bin/pybabel extract --no-wrap --sort-by-file -F lore/translations/babel.cfg -o temp.pot lore/ plugins/\"\n )\n runshell(\"venv/bin/pybabel update -i temp.pot -d lore/translations -l sv --no-fuzzy-matching\")\n runshell(\"rm temp.pot\")\n print()\n print(\"New strings needing translation:\")\n print(\"------------------------\")\n with open(\"lore/translations/sv/LC_MESSAGES/messages.po\") as f:\n s = f.read()\n for m in re.findall(r'msgid ((\".*\"\\s+)+)msgstr \"\"\\s\\s', s):\n print(m[0].split(\"/n\")[0]) # avoid too long ones", "title": "" }, { "docid": "7bbe479b178d1a110474516239d81621", "score": "0.61800236", "text": "def get_strings(self):\n\n lines = []\n\n filename = renpy.parser.elide_filename(self.filename)\n\n for line, s in scan_strings(self.filename):\n\n stl = renpy.game.script.translator.strings[None] # @UndefinedVariable\n\n if s in stl.translations:\n continue\n\n stl.translations[s] = s\n\n if self.notags:\n s = notags_filter(s)\n\n if self.escape:\n s = quote_unicode(s)\n\n elif self.tdf:\n s = s.replace(\"\\\\\", \"\\\\\\\\\")\n s = s.replace(\"\\t\", \"\\\\t\")\n s = s.replace(\"\\n\", \"\\\\n\")\n\n if self.tdf:\n lines.append([\"\", \"\", s, filename, str(line)])\n\n else:\n lines.append([s])\n\n return lines", "title": "" }, { "docid": "24a0d054e5805046f98dc62f30e7933f", "score": "0.61522657", "text": "def load_text_files(dirname, encoding='utf-8'):\n all_contents = []\n filenames = os.listdir(dirname)\n for filename in filenames:\n file = open(os.path.join(dirname,filename), mode='r', encoding=encoding)\n content = file.read()\n file.close()\n all_contents.append(content)\n return all_contents", "title": "" }, { "docid": "f9571a9e7952e0b1e2bdf18b2cd607ee", "score": "0.61422175", "text": "def getTranslations(localeConfig, projectName, key):\n crowdin_request(projectName, 'export', key)\n\n result = crowdin_request(projectName, 'download/all.zip', key, raw=True)\n zip = ZipFile(StringIO(result))\n dirs = {}\n\n normalizedDefaultLocale = localeConfig['default_locale'].replace('_', '-')\n normalizedDefaultLocale = CROWDIN_LANG_MAPPING.get(normalizedDefaultLocale,\n normalizedDefaultLocale)\n\n for info in zip.infolist():\n if not info.filename.endswith('.json'):\n continue\n\n dir, file = os.path.split(info.filename)\n if not re.match(r'^[\\w\\-]+$', dir) or dir == normalizedDefaultLocale:\n continue\n if file.count('.') == 1:\n origFile = file\n else:\n origFile = os.path.splitext(file)[0]\n\n for key, value in CROWDIN_LANG_MAPPING.iteritems():\n if value == dir:\n dir = key\n dir = dir.replace('-', '_')\n\n data = zip.open(info.filename).read()\n if data == '[]':\n continue\n\n if not dir in dirs:\n dirs[dir] = set()\n dirs[dir].add(origFile)\n\n path = os.path.join(localeConfig['base_path'], dir, origFile)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n if file.endswith('.json'):\n postprocessChromeLocale(path, data)\n else:\n data = json.loads(data)\n if origFile in data:\n fileHandle = codecs.open(path, 'wb', 
encoding='utf-8')\n fileHandle.write(data[origFile]['message'])\n fileHandle.close()\n\n # Remove any extra files\n for dir, files in dirs.iteritems():\n baseDir = os.path.join(localeConfig['base_path'], dir)\n if not os.path.exists(baseDir):\n continue\n for file in os.listdir(baseDir):\n path = os.path.join(baseDir, file)\n valid_extension = file.endswith('.json')\n if os.path.isfile(path) and valid_extension and not file in files:\n os.remove(path)", "title": "" }, { "docid": "42a66582c84ebe590ebfbcc09563a570", "score": "0.61313885", "text": "def update_po_files(potfile, locale_dir, langs):\n if not potfile.endswith('.pot'):\n raise I18nException('Your .pot file must end with .pot %r' % potfile)\n po_name = os.path.split(potfile)[1].replace('.pot', '.po')\n missing = []\n for lang in langs:\n curdir = os.path.join(locale_dir, lang, 'LC_MESSAGES')\n if not os.path.exists(curdir):\n os.makedirs(curdir)\n outfile = os.path.join(curdir, po_name)\n if not os.path.exists(outfile):\n missing.append((lang, outfile))\n continue\n args = [\n 'msgmerge',\n outfile,\n potfile,\n '-o', outfile\n ]\n _run_or_die(args)\n return missing", "title": "" }, { "docid": "92b67f04bfce7b804ceec4ccf8b49eda", "score": "0.6117443", "text": "def read_resource_texts(directory=\"meta-share/resource-texts\"):\n resource_texts = {}\n for res_id, res in resource_mappings.items():\n new_dict = {}\n # Collect Swedish texts\n sv_list = []\n for i in res.get(\"sv\", []):\n path = os.path.join(directory, i)\n with open(path, \"r\") as f:\n sv_list.append(f.read())\n if sv_list:\n new_dict[\"sv\"] = \"\\n\".join(sv_list)\n # Collect English texts\n en_list = []\n for i in res.get(\"en\", []):\n path = os.path.join(directory, i)\n with open(path, \"r\") as f:\n en_list.append(f.read())\n if en_list:\n new_dict[\"en\"] = \"\\n\".join(en_list)\n resource_texts[res_id] = new_dict\n\n return resource_texts", "title": "" }, { "docid": "b34e12249007fa1a88b19c1e0c6685ed", "score": "0.61137295", "text": "def search_by_text(text : str, files : []) -> []:\n outfiles = []\n for file in files:\n try:\n infile = open(file, 'r')\n if text in infile.read():\n outfiles.append(file)\n except UnicodeDecodeError:\n continue\n return outfiles", "title": "" }, { "docid": "02524f2f2e8e010894193bf24e14c01a", "score": "0.60194296", "text": "def available_languages(cls):\n available_languages = []\n for root, dirs, files in os.walk(pkg_resources.resource_filename(\n cls.package,\n cls.dir\n )):\n components = root.split(os.sep)\n if components[-1] == 'LC_MESSAGES':\n if cls.package + '.po' in files:\n available_languages.append(components[-2])\n return available_languages", "title": "" }, { "docid": "a63942fc0ffd2ed3ac055e26a9ea486a", "score": "0.5984852", "text": "def load_translations(language_codes: List[AnyStr]) -> List[Translation]:\n translation_list: List[Translation] = list()\n with open(\"data/translations.txt\") as translations:\n translations = DictReader(translations)\n for translation in translations:\n if translation['language_code'] in language_codes:\n translation_list.append(Translation(\n translation['language'],\n translation['language_code'],\n translation['family'],\n translation['branch'],\n translation['man'],\n translation['woman']))\n return translation_list", "title": "" }, { "docid": "08b1335171d363005beebc19395da9d3", "score": "0.5979562", "text": "def generate(self, files: list, profile: Profile): \n if len(files) > 0:\n entries = list()\n for filename in files:\n f = open(filename)\n 
entries.append(self.extract_entries(f, profile.pattern))\n f.close()\n \n if len(entries) > 0:\n self.po_helper.create_file(entries, profile)\n print('Translation file generated')", "title": "" }, { "docid": "d3a2595799354df30ec3aafd8bf3d873", "score": "0.59766287", "text": "def translation(messages_dir, languages = None):\n\tmofile = find(messages_dir, languages)\n\tif not mofile:\n\t\treturn lambda x: x\n\timport gettext\n\treturn gettext.GNUTranslations(file(mofile)).ugettext", "title": "" }, { "docid": "3b38fc09ea6cd931ca98dd41d06b8929", "score": "0.5960832", "text": "def available(cls):\n available = []\n for langfile in os.listdir(Lang.LANG_PATH):\n if os.path.isfile(Lang.LANG_PATH + langfile):\n available.append(langfile.replace('.json', ''))\n\n return available", "title": "" }, { "docid": "335ed66617b503b78a05b60b0c3c0628", "score": "0.59416264", "text": "def load(self, folder):\n self.languages = {}\n for filename in os.listdir(folder):\n if re_language.match(filename):\n with open(os.path.join(folder, filename), \"r\", encoding=\"utf-8\") as fp:\n self.languages[filename[:-5].lower()] = json.load(fp)", "title": "" }, { "docid": "459152ec0cbf2e424f75e138dfc6492a", "score": "0.59158975", "text": "def _retrieve_texts_directory(self):\n filenames = self._files_from_directory()\n for filename in filenames:\n self._retrieve_single_file(filename)", "title": "" }, { "docid": "e291b18d51b223459b989c2d8b66a5f3", "score": "0.59079456", "text": "def translations_for(self, src_phrase):\n ...", "title": "" }, { "docid": "88e0e087eb3c4c033fd9f6d1f63a762b", "score": "0.58409196", "text": "def get_translation_file_code(self):\n\n code = \"\"\n\n for trans_string in self.translatable_strings:\n code += f\"\\t[\\\"{trans_string.string}\\\"] = \\\"{trans_string.translation}\\\",\"\n if (trans_string.comment != None):\n code += f\" // {trans_string.comment}\"\n code += \"\\n\"\n \n return code", "title": "" }, { "docid": "245027933b8dc6e24d6bd944be78e259", "score": "0.5820164", "text": "def getlang(folder):\n\n\tlgnames = [] # list of language names\n\tlgfreqs = [] # list of frequency lists (for letters a-z)\n\n\t# Get filenames\n\tlst = os.listdir(folder) # list of filenames\n\n\tfor fname in lst:\n\t\t\n\t\t#Get language name from file name, add to list\n\t\tlgnames.append(str.capitalize(fname.split(\".\")[0]))\n\n\t\t# Open file in folder to read\n\t\tf = open(folder + \"/\" + fname)\n\n\t\t# Read Reference texts and close file\n\t\tlines = f.readlines()\n\t\tf.close()\n\n\t\t# Get reference frequencies add to list of lists\n\t\tlgfreqs.append(getfreq(lines))\n\n\treturn lgnames, lgfreqs", "title": "" }, { "docid": "7b22ac440775ae34a524875d3af237a5", "score": "0.57996035", "text": "def _condense_translations(self, file_type: FileType):\n condensed = []\n for translations in self.translations.values():\n condensed.extend(translations[file_type])\n return condensed", "title": "" }, { "docid": "2b56f16db32a3f22766ffb263127acb5", "score": "0.5773605", "text": "def read_paragraphs(path):\n corpus = {}\n for file_name in glob.glob(\"%s/*\" % path):\n name = os.path.splitext(os.path.basename(file_name))[0]\n text = unidecode(open(file_name, \"rt\", encoding=\"utf8\").read().lower())\n corpus[name] = text\n return corpus", "title": "" }, { "docid": "50a7c6aa120af63b8e7ff24db2970f5d", "score": "0.5768661", "text": "def listJsFilesWithChinese(rootpath):\n matches = []\n print(\"Enumerating code files with Chinese strings ...\")\n for filepath in glob.glob(os.path.join(rootpath, '*.js')):\n with 
open(filepath, \"r\", encoding=\"utf-8\") as f:\n if containsChineseCharacters(f.read()):\n matches.append(filepath)\n print(\"Found \"+str(len(matches)))\n return matches", "title": "" }, { "docid": "ccccd3fe44112790444a992bfa1f9452", "score": "0.57608783", "text": "def _messages_found_in_admin(file_name):\n\n main_messages_in_admin = rf.read_csv_file(file_name, MainMessageInAdmin)\n main_messages_no = []\n\n for m in main_messages_in_admin:\n if m.LangKey == 1:\n main_messages_no.append(fe.text_cleanup(m.Text))\n\n return main_messages_no", "title": "" }, { "docid": "5ba97ac377833287e95ebb34ddfbc06b", "score": "0.5720677", "text": "def list(self, filenames, languages):\n filepath = filenames[0]\n fname = self.getFileName(filepath)\n subs = self.query(fname, languages)\n if not subs and fname.rfind(\".[\") > 0:\n # Try to remove the [VTV] or [EZTV] at the end of the file\n teamless_filename = fname[0:fname.rfind(\".[\")]\n subs = self.query(teamless_filename, languages)\n return subs\n else:\n return subs", "title": "" }, { "docid": "74a17796855d37e8d4aa9771e27d0e15", "score": "0.57170606", "text": "def build_get_text(out_pot, dirs):\n args = [\n 'pygettext',\n '--output', out_pot,\n ] + dirs\n _run_or_die(args)", "title": "" }, { "docid": "6d347a6618acf02132596a036512512e", "score": "0.5711362", "text": "def get_translation_for(package_name: str) -> gettext.NullTranslations:\n localedir = None\n for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:\n localefile = gettext.find(package_name, localedir) # type: ignore\n if localefile:\n break\n else:\n pass\n return gettext.translation(package_name, localedir=localedir, fallback=True) # type: ignore", "title": "" }, { "docid": "421236f531fc080c3f96ba0428ffb38d", "score": "0.57046616", "text": "def test_load_str_po(self):\n MasterTranslation(text=u\"This — that\", language_code=\"en\").save()\n MasterTranslation(text=u\"something something something something translate\", language_code=\"en\").save()\n\n pofile = \"\"\"%s\n\nmsgid \"This — that\"\nmsgstr \"Deise — dass\"\n \"\"\" % POFILE\n\n errors = import_translations_from_po(pofile, \"de\", \"en\")\n self.assertEqual(errors, [])", "title": "" }, { "docid": "421236f531fc080c3f96ba0428ffb38d", "score": "0.57046616", "text": "def test_load_str_po(self):\n MasterTranslation(text=u\"This — that\", language_code=\"en\").save()\n MasterTranslation(text=u\"something something something something translate\", language_code=\"en\").save()\n\n pofile = \"\"\"%s\n\nmsgid \"This — that\"\nmsgstr \"Deise — dass\"\n \"\"\" % POFILE\n\n errors = import_translations_from_po(pofile, \"de\", \"en\")\n self.assertEqual(errors, [])", "title": "" }, { "docid": "4262f0383e8d616dbd12b61e8dd9c020", "score": "0.56759924", "text": "def find(messages_dir, languages = None):\n\t# select a language\n\tfor lang in expand_languages(languages):\n\t\tif lang == 'C':\n\t\t\tbreak\n\t\tmofile = os.path.join(messages_dir, '%s.gmo' % lang)\n\t\tif os.path.exists(mofile):\n\t\t\treturn mofile\n\treturn None", "title": "" }, { "docid": "a123d94e9c278de9ad9b941a650166ea", "score": "0.56716937", "text": "def _get_nereid_template_messages(cls):\r\n extract_options = cls._get_nereid_template_extract_options()\r\n\r\n for module, directory in cls._get_installed_module_directories():\r\n template_dir = os.path.join(directory, 'templates')\r\n if not os.path.isdir(template_dir):\r\n # The template directory does not exist. 
Just continue\r\n continue\r\n\r\n # now that there is a template directory, load the templates\r\n # using a simple filesystem loader and load all the\r\n # translations from it.\r\n loader = FileSystemLoader(template_dir)\r\n for template in loader.list_templates():\r\n file_obj = open(loader.get_source({}, template)[1])\r\n for message_tuple in babel_extract(\r\n file_obj, GETTEXT_FUNCTIONS,\r\n ['trans:'], extract_options):\r\n yield (module, template) + message_tuple", "title": "" }, { "docid": "1f942cf53845718351d51e170080f8c2", "score": "0.56642634", "text": "def load_messages(locale):\n ctx = _request_ctx_stack.top\n if ctx is None:\n return None\n dirname = os.path.join(ctx.app.root_path, TRANSLATIONS_PATH)\n if not os.path.isdir(dirname):\n raise Exception('Unable to find the translations path.')\n locales_list = [name for name in os.listdir(dirname)\n if os.path.isdir(os.path.join(dirname, name))]\n messages = {}\n if locale not in locales_list:\n raise Exception('No locale ICU message files found for the locale: {}'.format(locale))\n else:\n for subdir, dirs, files in os.walk(dirname + '/' + locale):\n for file in files:\n with open(subdir + '/' + file) as data_file:\n data = json.load(data_file)\n messages.update(data)\n return messages", "title": "" }, { "docid": "a53a4cbc84543f126f41a8e7889337fc", "score": "0.56503034", "text": "def _read_linguas_from_files(env, linguas_files = None):\r\n import SCons.Util\r\n import SCons.Environment\r\n global _re_comment\r\n global _re_lang\r\n if not SCons.Util.is_List(linguas_files) \\\r\n and not SCons.Util.is_String(linguas_files) \\\r\n and not isinstance(linguas_files, SCons.Node.FS.Base) \\\r\n and linguas_files:\r\n # If, linguas_files==True or such, then read 'LINGUAS' file.\r\n linguas_files = [ 'LINGUAS' ]\r\n if linguas_files is None:\r\n return [] \r\n fnodes = env.arg2nodes(linguas_files)\r\n linguas = []\r\n for fnode in fnodes:\r\n contents = _re_comment.sub(\"\", fnode.get_text_contents())\r\n ls = [ l for l in _re_lang.findall(contents) if l ]\r\n linguas.extend(ls)\r\n return linguas", "title": "" }, { "docid": "08380d1dc08507debd17936c540dac71", "score": "0.56270486", "text": "def find_directory_subtitles(path, args):\n if not os.path.isdir(path):\n raise IOError('find_directory_subtitles was called with an invalid path!')\n logger.info('py-subs started! 
Searching subtitles for directory: {}'.format(path))\n results_list = list()\n for directory_name, _, files in os.walk(path):\n for file_name in files:\n results_list.extend(find_file_subtitles(os.path.join(directory_name, file_name), args))\n return results_list", "title": "" }, { "docid": "2d94af76535201000e48822eb86f0773", "score": "0.56249607", "text": "def search_logs(string, test_data):\n logs_path = test_data.logs_dir\n results = []\n for file in os.listdir(logs_path):\n log = codecs.open(make_path(logs_path, file), \"r\", \"utf_8\")\n try:\n for line in log:\n if string in line:\n results.append(line)\n log.close()\n except UnicodeDecodeError:\n pass\n return results", "title": "" }, { "docid": "39fde2dc3449939af178b17b2163cb73", "score": "0.562244", "text": "def count_translations(dir_with_translations, print_info, reference_file):\n en_gb = file_to_dict(reference_file)\n\n missing_counters = dict.fromkeys(languages, 0)\n same_counters = dict.fromkeys(languages, 0)\n not_needed = dict.fromkeys(languages, 0)\n\n for lang in languages:\n filename = lang + '.txt'\n translations = file_to_dict(os.path.join(dir_with_translations, filename))\n\n for base_string in en_gb:\n if base_string in KEYS_TO_IGNORE:\n if base_string in translations and en_gb[base_string] != translations[base_string] and print_info:\n print(f'[{lang}] Unexpected translation: {base_string}')\n elif base_string not in translations:\n missing_counters[lang] += 1\n if print_info:\n print(f'[{lang}] Missing translation: {base_string}')\n elif en_gb[base_string] == translations[base_string]:\n same_counters[lang] += 1\n if print_info:\n print(f'[{lang}] Same translation: {base_string}')\n\n for translation in translations:\n if translation not in en_gb:\n not_needed[lang] += 1\n if print_info:\n print(f'[{lang}] Unnecessary translation: {translation}')\n\n result = {'missing': missing_counters, 'same': same_counters, 'not_needed': not_needed}\n return result", "title": "" }, { "docid": "bfd96b2294b56cd84f78bef87b3dcbec", "score": "0.5604359", "text": "def read_texts(path):\n texts = {}\n files = [p for p in path.iterdir() if '.txt' in p.name]\n for f in files:\n bs = bytes([b for b in f.read_bytes() if b != 0xa0 and b != 0x0d])\n texts[f.name] = bs.decode(encoding='utf8')\n return texts", "title": "" }, { "docid": "bdc06803a94b53b8af39b2942834eba1", "score": "0.5571911", "text": "def vectorize_files(self):\n\n for locale, language in (('de_CH', 'german'), ('fr_CH', 'french')):\n files = [\n SwissVote.__dict__[file].__get_by_locale__(self, locale)\n for file in self.indexed_files\n ]\n text = ' '.join([\n extract_pdf_info(file.reference.file)[1] or ''\n for file in files if file\n ]).strip()\n setattr(\n self,\n f'searchable_text_{locale}',\n func.to_tsvector(language, text)\n )", "title": "" }, { "docid": "12f0de4d4c6496cf1db1acdbd185a125", "score": "0.55679727", "text": "def pull_translations(self):\n raise NotImplementedError()", "title": "" }, { "docid": "f53ac95a8a57c5c878bed78e5af0c807", "score": "0.5547429", "text": "def load_test_en(self):\n with open('paths.json', 'r') as f:\n paths = json.load(f)\n return load_data(paths['test_en'])", "title": "" }, { "docid": "73376ed0551d519636c176afdc93f624", "score": "0.5539003", "text": "def translate(path):\n has_sys_init, filepaths, outpath = retrive_files(path)\n writer = CodeWriter(outpath)\n if has_sys_init:\n writer.write_sys_init()\n for filepath in filepaths:\n filename = os.path.basename(filepath)\n writer.set_file_name(filename)\n parser = Parser(filepath)\n 
translate_file(parser, writer)\n writer.close()", "title": "" }, { "docid": "89a78bbe51e8a0b6089c8e62c663cb36", "score": "0.551407", "text": "def _get_translations(self, messages: List[Message], session: Session) -> List[TranslatedMessage]:\n locales = dict(session.query(model.SelectedLocale.chat_id, model.SelectedLocale.locale)\n .filter(model.SelectedLocale.chat_id.in_(set(m.chat_id for m in messages)))\n .all())\n return [TranslatedMessage(m.chat_id,\n m.text.get_translation(gettext.translation('qaqa_bot',\n LOCALE_DIR,\n (locales.get(m.chat_id, 'en'),),\n fallback=True)))\n for m in messages]", "title": "" }, { "docid": "e0bad116e2c755f5815f7575275fcc77", "score": "0.54827017", "text": "def localize():\n from sys import path # I know it is bad style to load modules in function\n import os.path as os_path\n containerDir = os_path.dirname(__file__)\n if containerDir not in path:\n path.append( containerDir )", "title": "" }, { "docid": "173ccd5fac4a2edfa9101cc9c34b4c30", "score": "0.5479198", "text": "def dir_sents(directory,encoding = \"utf8\"):\n #get a list of the files in a directory\n corpus = os.listdir(directory)\n #join the file path to the directory to the file names\n for i in range(len(corpus)):\n corpus[i] = os.path.join(directory,corpus[i])\n #get the sentences and return\n sents = corpus_sents(corpus)\n return(sents)", "title": "" }, { "docid": "ffcd0e7be6f4096fb10078cd373333fa", "score": "0.5473276", "text": "def get_text_filenames():\n filenames = []\n file_folder = os.getcwd() + \"\\\\txts\"\n for file in os.listdir(file_folder):\n if file.endswith(\".txt\"):\n filenames.append('txts\\\\' + file)\n return filenames", "title": "" }, { "docid": "9336dd9ca387d9a655dfe9695a0d9167", "score": "0.5470017", "text": "def iterate_folder_get_text(folder_path: str) -> str:\n files = dict()\n directory = os.fsencode(folder_path)\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(\".txt\"):\n file_path = folder_path + \"/\" + filename\n text = get_text(file_path)\n files[filename.title().removesuffix(\".Txt\")] = text\n return files", "title": "" }, { "docid": "3b861f710d452ff303a3089154c4fb66", "score": "0.5468362", "text": "def loadAllTextOLD(self):\n \n \n if not self.exists():\n raise Exception(\"Problem on open; this file does not exist! \")\n elif not self.isFile():\n raise Exception(\"Problem on open; this is a directory! \")\n \n fIn = open(self.filePath,'r')\n \n # Don't read unicode... 
inputFileTemplate=unicode(fIn.read(),'utf-8')\n self.fileData=fIn.read().decode('ISO-8859-1')\n #self.fileData.decode()\n #self.fileData = self.fileData.encode('utf-8')\n fIn.close()\n logString = \"{} lines of text loaded for {}\".format(len(self.fileData),self)\n logging.debug(logString)", "title": "" }, { "docid": "f2c97750c8915f0e8a48e7aaabd05b4a", "score": "0.5459218", "text": "def WalkLocales(self, locales_path):\n generator = os.walk(locales_path)\n try:\n generator.next()\n except StopIteration:\n return\n for root,dirs,names in generator:\n dirs[:] = []\n if 'search.ini' in names:\n lang = os.path.split(root)[1]\n self.ProcessSearchIni(None, lang, os.path.join(root, 'search.ini'))", "title": "" }, { "docid": "e3402806c46382dc7719e2e7c18a0c80", "score": "0.545632", "text": "def get_templates(root):\n\n template_file = os.path.join(\n root, \"contributing-guides/translation-templates/alias-pages.md\"\n )\n with open(template_file) as f:\n lines = f.readlines()\n\n # Parse alias-pages.md\n templates = {}\n i = 0\n while i < len(lines):\n if lines[i].startswith(\"###\"):\n lang = lines[i][4:].strip(\"\\n\").strip(\" \")\n while True:\n i = i + 1\n if lines[i].startswith(\"Not translated yet.\"):\n is_translated = False\n break\n elif lines[i].startswith(\"```markdown\"):\n i = i + 1\n is_translated = True\n break\n\n if is_translated:\n text = \"\"\n while not lines[i].startswith(\"```\"):\n text += lines[i]\n i = i + 1\n templates[lang] = text\n\n i = i + 1\n\n return templates", "title": "" }, { "docid": "114cba1d4a76895af62edb0e5c0dc346", "score": "0.5455372", "text": "def load_files(rapper):\n\tpath = os.path.abspath(\"lyrics/%s.txt\" %rapper)\n\tlyrics = open(path, \"r\")\n\ttext = lyrics.read()\n\treturn text", "title": "" }, { "docid": "acc917442ae26662d307f0a4d53bb0ab", "score": "0.54551667", "text": "def test_language(self):\n self.write_fakelang_pofile()\n build_prod_main.main(['js'], ['en'], readable=True)\n symlinks = (self._filename('genfiles', 'javascript', 'en',\n 'corelibs-package-*.js'),\n self._filename('genfiles', 'javascript', 'en',\n 'shared-package-*.js'),\n self._filename('genfiles', 'javascript', 'en',\n 'third-party-package-*.js'),\n self._filename('genfiles', 'javascript', 'en',\n 'video-package-*.js'),\n )\n for symlink in symlinks:\n self.assertIn('readable_js_packages_prod',\n os.readlink(ka_root.join(symlink)))\n\n build_prod_main.main(['js'], ['fakelang'], readable=True)\n # Now check the 'en' files again -- they shouldn't have been\n # deleted just because we built fakelang.\n for symlink in symlinks:\n self.assertIn('readable_js_packages_prod',\n os.readlink(ka_root.join(symlink)))", "title": "" }, { "docid": "f0cf9c47f4d4482239dd0982a09f10dd", "score": "0.54549325", "text": "def test_get_language_names_with_native_names_english(self, mock_listdir: MagicMock):\n\n mock_listdir.return_value = [\n 'es',\n 'fr',\n 'de',\n ]\n expected_names = [\n ('en', 'English'),\n ('fr', 'French (français)'),\n ('de', 'German (Deutsch)'),\n ('es', 'Spanish (español)'),\n ]\n\n names = get_language_names(TestConfiguration.TRANSLATION_DIR)\n mock_listdir.assert_called()\n self.assertListEqual(expected_names, list(names))", "title": "" }, { "docid": "108519de64a40951453214bd79115470", "score": "0.54527044", "text": "def parse_files_sequential(file_folder, language, sentences):\n for file in os.listdir(file_folder):\n if os.path.isfile(file_folder + '/' + file):\n with open(file_folder + '/' + file) as f:\n result_sents = find_and_clean_sentences(f.read(), language)\n 
sentences.extend(result_sents)", "title": "" }, { "docid": "5c4affef1ce5bedef77a06e6379bb12e", "score": "0.5444692", "text": "def source_to_locale_path(path):\n if path.endswith(\"pot\"):\n path = path[:-1]\n return path", "title": "" }, { "docid": "2d5900964c91179b588ac809fbc2004a", "score": "0.543987", "text": "def get_librispeech_texts(folder_paths):\n transcriptions = []\n for fpath in [os.path.join(LIBRISPEECH_DIR, f) for f in folder_paths]:\n if not os.path.exists(fpath):\n raise ValueError(\"{0} doesn't exist\".format(fpath))\n\n master_path = os.path.join(fpath, 'master.pkl')\n try:\n master = pickle.load(open(master_path, 'rb'))\n except:\n raise RuntimeError(\"\"\"\n There was a problem with loading the master file, {0}.\\n\n Make sure librispeech_initialize.py is run in /scripts\n \"\"\".format(master_path)) \n\n transcriptions = master['transcriptions']\n\n return transcriptions", "title": "" }, { "docid": "9c7cb2209a035a734ba688f982b47ca5", "score": "0.5424458", "text": "def getAllSourceFiles(dirName, registry):\n assert(registry.lower() in libLF.registryToPrimaryLanguages)\n\n # Get source files for all languages present in dir\n _language2files = getFileSummaryFromCLOC(dirName)\n\n # Copy out the records of interest\n lang2sourceFiles = {}\n for lang in libLF.registryToPrimaryLanguages[registry]:\n lang2sourceFiles[lang] = _language2files[lang]\n return lang2sourceFiles", "title": "" }, { "docid": "92d677634e420b5d47c77a707cab7d6e", "score": "0.5419804", "text": "async def rebuild_app_langs() -> None:\n def build_file(filename: Path) -> None:\n \"\"\"Synchronous I/O code run as a backround thread.\"\"\"\n with filename.open('rb') as src:\n catalog = read_po(src, locale=filename.stem)\n with filename.with_suffix('.mo').open('wb') as dest:\n write_mo(dest, catalog)\n\n async def build_lang(filename: Path) -> None:\n try:\n await trio.to_thread.run_sync(build_file, fname)\n except (IOError, OSError):\n LOGGER.warning('Could not convert \"{}\"', filename, exc_info=True)\n else:\n LOGGER.info('Converted \"{}\"', filename)\n\n async with trio.open_nursery() as nursery:\n for fname in FOLDER.iterdir():\n if fname.suffix == '.po':\n nursery.start_soon(build_lang, fname)\n tk_tools.showinfo(TransToken.ui('BEEMod'), TransToken.ui('UI Translations rebuilt.'))", "title": "" }, { "docid": "255e35067c237d6851efa6d52fca1b4f", "score": "0.5417038", "text": "def localize(): # For some reason this is needed in Windows 10 Spyder (Py 2.7)\n from sys import path # I know it is bad style to load modules in function\n import os.path as os_path\n containerDir = os_path.dirname(__file__)\n if containerDir not in path:\n path.append( containerDir )", "title": "" }, { "docid": "432965cd5ab56bbe2a472475bfa4d17f", "score": "0.5415022", "text": "def run():\n for filename in os.listdir('texts'):\n read_file(filename)\n with open('res.json', 'w') as fp:\n st = json.dumps(INFO, indent=4)\n fp.write(st)", "title": "" }, { "docid": "bbc2549ed829ccbbb50354838a0ccb39", "score": "0.54142493", "text": "def test_search_incorrect_extensions():\n assert not get_files([cases_path], ['java'], [])", "title": "" }, { "docid": "cf8151f5bc535948246586722f848868", "score": "0.5413186", "text": "def findTools(self, query, langs):\n def markup(word):\n escaped = word.replace('\\\\', '\\\\\\\\').replace('%', '\\\\%').replace('_', '\\\\_')\n return '%' + escaped + '%'\n\n hits = []\n with self.db.connect() as cu:\n infoFromId = self.db.getInfoFromIdMap(cu=cu) # id -> (name, subDir)\n sql = [\n \"SELECT path_id, type, name FROM 
common_details\",\n \"WHERE type != 'folder'\",\n ]\n args = []\n for word in query.split():\n sql.append(\"AND name LIKE ? ESCAPE '\\\\'\")\n args.append(markup(word))\n cu.execute(' '.join(sql), tuple(args))\n # (id, type, name, subdir, matched-in-name)\n hits = [(x[0], x[1], x[2], infoFromId.get(x[0], ['', ''])[1], True)\n for x in cu.fetchall()]\n \n # Add any hits in the subdir.\n # - TODO:PERF This could be made faster by reversing `infoFromId`\n # and finding all subDirs that have matches. Then get the\n # corresponding ids. Otherwise we are duplicating \"word in subDir\"\n # for the same values of \"subDir\".\n nameHitIds = set(h[0] for h in hits)\n subDirHitIds = []\n words = [w.lower() for w in query.split()]\n for id, info in infoFromId.items():\n if id in nameHitIds:\n continue\n name, subDir = info\n nameLower = name.lower()\n subDirLower = subDir.lower()\n for word in words:\n if word not in subDirLower and word not in nameLower:\n break\n else:\n subDirHitIds.append(id)\n if subDirHitIds:\n sql = \"SELECT path_id, type, name FROM common_details WHERE type != 'folder'\"\n if len(subDirHitIds) == 1:\n sql += \" AND path_id = %s\" % subDirHitIds[0]\n else:\n sql += \" AND path_id IN %s\" % repr(tuple(subDirHitIds))\n cu.execute(sql)\n hits += [(x[0], x[1], x[2], infoFromId.get(x[0], ['', ''])[1], False)\n for x in cu.fetchall()]\n\n # Sorting results:\n # - Prefer matches in the name (vs. a match in the subDir)\n # - Sort tools to the top that are in a toolbox dir that\n # matches the name of the current cursor sublang.\n def indexof(lst, item):\n try:\n return lst.index(item)\n except ValueError:\n return 999 # higher than anything\n def sortkey(hit):\n subDirParts = hit[3].split('/')\n langIndex = min(indexof(langs, p) for p in subDirParts)\n return (not hit[4], langIndex,)\n hits.sort(key=sortkey)\n\n return [KoToolInfo(self._toolsMgrSvc, *hit[:-1]) for hit in hits]", "title": "" }, { "docid": "fbd1b1583f697391a9c46cdac6407ee4", "score": "0.54076093", "text": "def translate_path(self, path):\n # Remove all parameters from filenames.\n path = re.sub(r\"\\?.*$\", \"\", path)\n for prefix, dest_dir in Translator.DIRECTORY_MAP.items():\n translatedPath = dest_dir + path[len(prefix):]\n if path.startswith(prefix) and os.path.isfile(translatedPath):\n logging.debug(\"Translator.translate_path:%s\", translatedPath)\n return translatedPath\n logging.debug(\"Translator.translate_path:NOT_FOUND:%s. 
Returned: %s\", path, \"build/index.html\")\n return \"build/index.html\"", "title": "" }, { "docid": "34d6ba2d74e463f8429ab8ebf66d238a", "score": "0.54028964", "text": "def generateLocaleStrings(translationCache, translator, rootpath, newLanguage='en'):\n # ensure directories exist\n if not os.path.exists(os.path.join(rootpath, 'js/common/locales/'+newLanguage)):\n os.makedirs(os.path.join(rootpath, 'js/common/locales/'+newLanguage))\n # do the translation work for both dict objects\n zhDict = parseAsDict(os.path.join(rootpath, 'js/common/locales/zh/index.js'))\n translatedDict = translateDictionary(translationCache, translator, zhDict, newLanguage, 0)\n # save the new dictionaries as JS objects\n saveDictAsJsObject(os.path.join(rootpath, 'js/common/locales/'+newLanguage+'/index.js'), translatedDict)", "title": "" }, { "docid": "2b291906b3e76c1de54cc443f10be893", "score": "0.53940743", "text": "def collect_filenames(self, s):\n filenames = []\n if os.path.isdir(s):\n for root, dirs, files in os.walk(s):\n for fname in files:\n if not fname.lower().endswith(('.txt', '.yaml')):\n continue\n filenames.append(os.path.join(root, fname))\n else:\n if os.path.exists(s):\n filenames = [s]\n return filenames", "title": "" }, { "docid": "13a7b04da77c613b87e4cad6b4df6996", "score": "0.53915054", "text": "def findyaml(basedir):\n return [str(path.resolve()) for path in pathlib.Path(basedir).rglob('*.yml')]", "title": "" }, { "docid": "a757d1fe1b4a85aa4cd090c4b22f0ce9", "score": "0.5387295", "text": "def extract_from_json():\n for root, _, files in os.walk(\"./json\"):\n for name in files:\n try:\n with open(os.path.join(root, name), 'r', encoding='utf-8') as f:\n line = f.readline()\n\n while line:\n loaded = json.loads(line)\n\n if 'user' in loaded.keys():\n code = str(loaded['user']['lang'])\n\n if code in language_codes:\n text = str(loaded['text']).encode('utf-8').decode('utf-8') + \" \" + \\\n str(loaded['user']['description']).encode('utf-8').decode('utf-8')\n clean_text(text, code + \"_twitter\")\n\n line = f.readline()\n\n except UnicodeDecodeError:\n continue", "title": "" }, { "docid": "7a1e737d33b2cdc727ebb293b62c74dc", "score": "0.5386963", "text": "def _get_file_names(self, level):\n\n directory = os.path.join(self.dialogflow_project_directory, level)\n files = os.listdir(directory)\n\n w = {\"entities\": \"entries\", \"intents\": \"usersays\"}\n p = r\".+(?<=(_\" + w[level] + \"_))(.*)(?=(.json))\"\n language = \"en\"\n\n info = {}\n for name in files:\n match = re.match(p, name)\n\n if match:\n isbase = False\n base = name[: match.start(1)]\n language = str(match.group(2))\n else:\n isbase = True\n base = name[:-5]\n\n if base not in info:\n info[base] = {}\n\n if not isbase:\n info[base][language] = name[:-5]\n\n return info", "title": "" }, { "docid": "44b3e38d1cb9a762e939013844de781b", "score": "0.53869426", "text": "async def rebuild_package_langs(packset: packages.PackagesSet) -> None:\n tok2pack: dict[str | tuple[str, str], set[str]] = defaultdict(set)\n pack_paths: dict[str, tuple[trio.Path, messages.Catalog]] = {}\n\n for pak_id, pack in packset.packages.items():\n if isinstance(pack.fsys, RawFileSystem):\n pack_paths[pak_id.casefold()] = trio.Path(pack.path, 'resources', 'i18n'), messages.Catalog(\n project=pack.disp_name.token,\n version=utils.BEE_VERSION,\n )\n\n LOGGER.info('Collecting translations...')\n async for orig_tok, source in get_package_tokens(packset):\n for tok in _get_children(orig_tok):\n if not tok:\n continue # Ignore blank tokens, not important to 
translate.\n try:\n pack_path, catalog = pack_paths[tok.namespace.casefold()]\n except KeyError:\n continue\n # Line number is just zero - we don't know which lines these originated from.\n if tok.namespace.casefold() != tok.orig_pack.casefold():\n # Originated from a different package, include that.\n loc = [(f'{tok.orig_pack}:{source}', 0)]\n else: # Omit, most of the time.\n loc = [(source, 0)]\n\n if isinstance(tok, PluralTransToken):\n catalog.add((tok.token, tok.token_plural), locations=loc)\n tok2pack[tok.token, tok.token_plural].add(tok.namespace)\n else:\n catalog.add(tok.token, locations=loc)\n tok2pack[tok.token].add(tok.namespace)\n\n for pak_id, (pack_path, catalog) in pack_paths.items():\n LOGGER.info('Exporting translations for {}...', pak_id.upper())\n await pack_path.mkdir(parents=True, exist_ok=True)\n catalog.header_comment = PACKAGE_HEADER\n with open(pack_path / 'en.pot', 'wb') as f:\n write_po(f, catalog, include_previous=True, sort_output=True, width=120)\n for lang_file in await pack_path.iterdir():\n if lang_file.suffix != '.po':\n continue\n data = await lang_file.read_text()\n existing: messages.Catalog = read_po(io.StringIO(data))\n existing.update(catalog)\n catalog.header_comment = PACKAGE_HEADER\n existing.version = utils.BEE_VERSION\n LOGGER.info('- Rewriting {}', lang_file)\n with open(lang_file, 'wb') as f:\n write_po(f, existing, sort_output=True, width=120)\n with open(lang_file.with_suffix('.mo'), 'wb') as f:\n write_mo(f, existing)\n\n LOGGER.info('Repeated tokens:\\n{}', '\\n'.join([\n f'{\", \".join(sorted(tok_pack))} -> {token!r} '\n for (token, tok_pack) in\n sorted(tok2pack.items(), key=lambda t: len(t[1]), reverse=True)\n if len(tok_pack) > 1\n ]))", "title": "" }, { "docid": "57e3952039365f01b093a0891576bfc6", "score": "0.53842765", "text": "def test_model_to_po(self):\n locale_dir = join(settings.LOCALE_DIR, 'locale')\n if isdir(locale_dir):\n shutil.rmtree(locale_dir)\n \n assert(not isdir(locale_dir))\n i = models.TestModel(charfield='ik word serialized to po')\n i.save()\n assert(isdir(locale_dir))\n \n result = ffindgrep(locale_dir, ['^msgid \"ik word serialized to po\"$'])\n assert(result != {})", "title": "" }, { "docid": "15f7d9be949782e1a94c9eb803b43a4d", "score": "0.53819513", "text": "def includeme(config):\n translators.append(TranslationStringFactory('ringo_trashbin'))\n config.add_translation_dirs('ringo_trashbin:locale/')", "title": "" }, { "docid": "67ad39f2a4f78be2e38a52f04747d6c7", "score": "0.5370209", "text": "def getFileNames():\n files = os.listdir(\"evaluated\")\n # files = filter(lambda ob: \".txt\" in ob, files)\n return files", "title": "" }, { "docid": "957b49e1f436b1499fdb0ea98912db62", "score": "0.53697", "text": "def test_translation_text1_ru_en(self):\n response = self.yapi.get(self.d['text1_ru'], 'ru-en')\n assert self.yapi.get_returned_text(response) == self.d['text1_en']", "title": "" }, { "docid": "2dec4b09765ac9a6d41fddc1b15e3352", "score": "0.5369371", "text": "def test_translation_text1_en_ru(self):\n response = self.yapi.get(self.d['text1_en'], 'en-ru')\n assert self.yapi.get_returned_text(response) == self.d['text1_ru']", "title": "" }, { "docid": "79f1cc7cb08e7f08056abcaee437e9c9", "score": "0.5367025", "text": "def compile_messages():\n\n def comp():\n system(\"django-admin.py\", \"compilemessages\")\n def test(dirname):\n for path in os.listdir('.'):\n if os.path.isdir(path) and os.path.exists(path+'/models.py'):\n with_dir(path, comp)\n\n with_all_dirs(test)", "title": "" }, { "docid": 
"bfbb8eca97c13b64967fbef94c731e46", "score": "0.53619325", "text": "def determine_paragraph_lang(self):\n for file_name in glob.glob(\"paragraphs/*\"):\n name = os.path.splitext(os.path.basename(file_name))[0]\n text = unidecode(open(file_name,\"rt\",encoding=\"utf8\").read().lower())\n print(\"Text in language: \",name)\n print(\"Detected languages with probability:\")\n for lang,prob in self.determine_language(text):\n print(lang,\":\",prob)\n print()", "title": "" }, { "docid": "bc2261dc1dfd028a4efa6da7d631d5f8", "score": "0.5361353", "text": "def get_data():\n\t\n\tdata = []\n\t\n\tfor path_dir, list_dir, list_file in os.walk('po/'):\n\t\tif list_file != []:\n\t\t\tname_f = list_file[0]\n\t\tif path_dir != 'po/':\n\t\t\tpo_file = os.path.join(path_dir, name_f)\n\t\t\tmo_file = po_file.replace(\".po\", '.mo')\n\t\t\tmsgfmt = 'msgfmt -c %s -o %s' %(po_file, mo_file)\n\t\t\tsubprocess.call(msgfmt, shell = True)\n\t\t\t\n\t\t\tdata.append(('share/locale/' + mo_file.split('/')[1] + '/LC_MESSAGES/', [mo_file]))\n\t\n\tdata += (('share/icons/hicolor/48x48/apps/', ['icon/rosa-crypto-tool.svg']),\n\t\t ('share/doc/rosa-crypto-tool', ['doc/help.pdf']))\n\t\n\treturn data", "title": "" }, { "docid": "5186f3e39463f6f35e67a8a464735a89", "score": "0.5355997", "text": "def get_locale_file() -> \"str | None\":\n SEARCH_DIRS = get_search_dirs(\"locale\")\n\n for directory in SEARCH_DIRS:\n if directory.is_dir():\n return str(directory)\n\n return None", "title": "" }, { "docid": "85985c7303b9351629d72f4ace1381cb", "score": "0.53453445", "text": "def transliterate_indictrans(files, text_type, src_lang=\"eng\", tgt_lang=\"hin\", langids_type=None,\n src_folder=None, dest_folder=None):\n\n if not is_module_available(\"indictrans\"):\n raise ImportError(\"Install `indictrans` by following install-extras in the docs\")\n\n text_type_tail = \"_D\" if tgt_lang == \"hin\" else \"_E\"\n\n if not dest_folder and not src_folder:\n raise Exception(\"one of `dest_folder` or `src_folder` need to be a valid path\")\n\n if src_folder:\n assert os.path.exists(src_folder)\n files = [os.path.join(src_folder, file) for file in files]\n\n if dest_folder:\n if not os.path.exists(dest_folder):\n os.makedirs(dest_folder)\n else:\n dest_folder = os.path.split(src_folder)[0]\n\n for path in files:\n print(f\"reading from {path}\")\n samples = [line for line in jsonlines.open(path, \"r\")]\n new_samples = []\n for sample in tqdm(samples):\n new_sample = copy.deepcopy(sample)\n tokens = sample[text_type].split(\" \")\n src2tgt = _INDICTRANS_TRANSLITERATORS[f\"{src_lang}_to_{tgt_lang}\"]\n new_tokens = [src2tgt.transform(token) for token in tokens]\n new_sample[text_type + text_type_tail] = \" \".join(new_tokens)\n if langids_type and langids_type in sample and sample[langids_type]:\n langids = sample[langids_type].split(\" \")\n assert len(langids) == len(tokens) == len(new_tokens)\n non_english = [token for token, langid in zip(tokens, langids) if langid != \"en\"]\n non_hindi = [token for token, langid in zip(tokens, langids) if langid != \"hi\"]\n non_english_devanagari = [token for token, langid in zip(new_tokens, langids) if langid != \"en\"]\n new_sample[text_type + \"_non_english\"] = \" \".join(non_english)\n new_sample[text_type + \"_non_hindi\"] = \" \".join(non_hindi)\n new_sample[text_type + \"_non_english_D\"] = \" \".join(non_english_devanagari)\n new_samples.append(new_sample)\n with jsonlines.open(os.path.join(dest_folder, os.path.split(path)[-1]), 'w') as writer:\n for new_sample in new_samples:\n 
writer.write(new_sample)\n\n return", "title": "" }, { "docid": "033ba891f68528143dc81f788aefce71", "score": "0.5335292", "text": "def test_unicode_files_where(self):\n self.base_where_clause(SAMPLE_UNICODE_FILE_HASH)", "title": "" }, { "docid": "2d996afa2435aca40833017feb160856", "score": "0.53331757", "text": "def translate_file_to_en(f):\n translated = []\n \n # the first line indicates the number of cases\n cases = int(f.readline())\n \n # translate every caes\n for c in range(1, cases + 1):\n googlerese = f.readline()\n translated.append(to_en(c, googlerese))\n \n return translated", "title": "" }, { "docid": "5bd25de888e18f9df25eca4ea0fa0b5b", "score": "0.5332921", "text": "def search_log_set(type, string, test_data):\n logs_path = test_data.logs_dir\n results = []\n for file in os.listdir(logs_path):\n if type in file:\n log = codecs.open(make_path(logs_path, file), \"r\", \"utf_8\")\n try:\n for line in log:\n if string in line:\n results.append(line)\n log.close()\n except UnicodeDecodeError:\n pass\n return results", "title": "" }, { "docid": "e693cbf0c35d5e9884642b7b9b09114f", "score": "0.5332322", "text": "def resource_ls(path):\n return resource_listdir(MODULE, path)", "title": "" }, { "docid": "5a003839f12dade5bc0cc365248293f4", "score": "0.53301686", "text": "def resource_files(pat):\n from fnmatch import fnmatch\n return resource_dir(lambda f: fnmatch(f, pat))", "title": "" }, { "docid": "0d8bdce20944e226335ebe6c87ebf855", "score": "0.53293717", "text": "def generate_resource_files(self):\n strings_path = \"%s/%s\" % (self.resource_dir(),\n self.resource_strings_filename())\n if(os.path.exists(strings_path) and not self.force):\n print \"Warning: driver resource file exists (\" + strings_path + \") not overwriting\"\n else:\n try:\n shutil.copyfile(self.strings_template(), strings_path)\n except IOError as e:\n print \"Encountered problem writing strings template, complete by hand\"", "title": "" }, { "docid": "a56ff1c37e9becdcca53b189527c539f", "score": "0.5327026", "text": "def _scan_recipe_directory(path):\n for root, dirs, files in os.walk(path):\n dirs[:] = [x for x in dirs\n if not x.endswith(('.expected', '.resources'))]\n for file_name in files:\n if not file_name.endswith('.py'):\n continue\n file_path = os.path.join(root, file_name)\n # raw_recipe_name has native path separators (e.g. '\\\\' on windows)\n raw_recipe_name = file_path[len(path)+1:-len('.py')]\n yield raw_recipe_name.replace(os.path.sep, '/')", "title": "" } ]
88343d661da9053d368c91949b04f973
Encode the resolved_object_ids and post_publics fields from updates. ...and cap them at MAX_RESOLVED_OBJECT_IDS and MAX_POST_PUBLICS. Tries to keep the latest ones by assuming that ids are roughly monotonically increasing.
[ { "docid": "234fd8edec74def44765105566c158e5", "score": "0.55814445", "text": "def _pre_put_hook(self):\n self._save_cache('resolved_object_ids')\n self._save_cache('post_publics')", "title": "" } ]
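The query above describes capping the cached resolved_object_ids / post_publics dicts before they are persisted, and the positive passage shows _pre_put_hook delegating that work to _save_cache. Below is a minimal, self-contained Python sketch of that capping behaviour. The SourceStub class, the concrete cap values, and the sort_key helper are illustrative assumptions rather than the project's actual model; the project's own _save_cache (built on module-level MAX_* globals and the datastore entity) appears among the retrieved passages below.

```python
import heapq
import json

# Cap sizes are illustrative assumptions; the real module defines its own MAX_* constants.
MAX_RESOLVED_OBJECT_IDS = 200
MAX_POST_PUBLICS = 200


class SourceStub:
    """Stand-in for the datastore entity: `updates` holds the in-memory dicts,
    the *_json attributes hold what would actually be persisted."""

    def __init__(self):
        self.updates = {}  # e.g. {'resolved_object_ids': {'123': 'tag:...', ...}}
        self.resolved_object_ids_json = None
        self.post_publics_json = None

    def _save_cache(self, name):
        """Serialize one cached dict to JSON, keeping only the newest entries.

        'Newest' is approximated by keeping the numerically largest ids, which
        matches the assumption that ids grow roughly monotonically.
        """
        caps = {'resolved_object_ids': MAX_RESOLVED_OBJECT_IDS,
                'post_publics': MAX_POST_PUBLICS}
        val = self.updates.get(name)
        if not val:
            return

        def sort_key(key):
            # Numeric ids compare as numbers; anything else falls back to string order.
            text = str(key)
            return (1, int(text)) if text.isdigit() else (0, text)

        keep = heapq.nlargest(caps[name], val.keys(), key=sort_key)
        setattr(self, name + '_json', json.dumps({str(k): val[k] for k in keep}))

    def _pre_put_hook(self):
        # Mirrors the positive passage: cap both caches right before the entity is stored.
        self._save_cache('resolved_object_ids')
        self._save_cache('post_publics')


if __name__ == '__main__':
    source = SourceStub()
    source.updates['resolved_object_ids'] = {str(i): 'object-%d' % i for i in range(500)}
    source._pre_put_hook()
    print(len(json.loads(source.resolved_object_ids_json)))  # 200: only the largest ids survive
```

Using heapq.nlargest keeps the capping at O(n log k) over the cached keys, so only the cap-sized tail is ever materialized instead of sorting the whole dict.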
[ { "docid": "4bf963b0be4c4c0c53a7b45bd19c7bd6", "score": "0.5439419", "text": "def fix_ids(self, objs, many, **kwargs):\n\n def _replace_id(obj):\n obj.unfreeze()\n obj.id = obj.id.replace(\"//plans/\", \"/\")\n obj.freeze()\n\n if many:\n for obj in objs:\n _replace_id(obj)\n return objs\n\n _replace_id(objs)\n return objs", "title": "" }, { "docid": "0c55082bc79e178aa1c9331ae645246f", "score": "0.5091061", "text": "def fix_ids(self, objs: Union[WorkflowFilePlan, List[WorkflowFilePlan]], many, **kwargs):\n\n def _replace_id(obj):\n obj.unfreeze()\n obj.id = obj.id.replace(\"//plans/\", \"/\")\n\n for child in chain(obj.inputs, obj.outputs, obj.parameters):\n child.id = child.id.replace(\"//plans/\", \"/\")\n obj.freeze()\n\n if many:\n for obj in objs:\n _replace_id(obj)\n return objs\n\n _replace_id(objs)\n return objs", "title": "" }, { "docid": "afc8cf6495f88942e1ad8a27ede19e9f", "score": "0.48573208", "text": "def update_pubmed_content(ids: List[int]):\n Identifiers = apps.get_model(\"lit\", \"identifiers\")\n fetcher = pubmed.PubMedFetch(ids)\n contents = fetcher.get_content()\n for d in contents:\n content = json.dumps(d)\n Identifiers.objects.filter(unique_id=str(d[\"PMID\"]), database=constants.PUBMED).update(\n content=content\n )\n ids_str = [str(id) for id in ids]\n Identifiers.objects.filter(unique_id__in=ids_str, database=constants.PUBMED, content=\"\").update(\n content='{\"status\": \"failed\"}'\n )", "title": "" }, { "docid": "cf92e9307be953407c86fa8ee2110f44", "score": "0.48074692", "text": "def _save_cache(self, name):\n if self.updates is None:\n return\n\n assert name in ('resolved_object_ids', 'post_publics')\n max = globals()['MAX_' + name.upper()]\n val = self.updates.get(name)\n if val:\n keep = heapq.nlargest(max,\n (int(id) if util.is_int(id) else str(id) for id in val.keys()))\n setattr(self, name + '_json',\n json.dumps({str(id): val[str(id)] for id in keep}))", "title": "" }, { "docid": "07c989bc4ac008f7d7a71034b31d9b38", "score": "0.47781965", "text": "def commit_objects( self, real_guid, app, args ):\n\t\t# update guids by map\n\t\tself.guid_map[ real_guid ] = app.guid\n\n\t\tfor obj in args:\n\t\t\tobj[1]['guid'] \t\t= self.guid( obj[1]['guid'] )\n\t\t\tobj[1]['parent']\t= self.guid( obj[1]['parent'] )\n\n\t\tRemoteSync.apply_objects_structures( args )", "title": "" }, { "docid": "b820555fc3204e0bbdede0dbbb737d17", "score": "0.47006547", "text": "def _fix_all(self):\n for vardata in self._fixed_vardata:\n if not vardata.fixed:\n self._fixed_ids.add(id(vardata))\n vardata.fixed = True", "title": "" }, { "docid": "b6c177ef4705f08fe808bcc9b27fcc62", "score": "0.4665377", "text": "def _publisher_save_public(self, obj):\r\n public_parent = self.parent.publisher_public if self.parent_id else None\r\n filters = dict(publisher_public__isnull=False)\r\n if public_parent:\r\n filters['publisher_public__parent__in'] = [public_parent]\r\n else:\r\n filters['publisher_public__parent__isnull'] = True\r\n prev_sibling = self.get_previous_filtered_sibling(**filters)\r\n public_prev_sib = prev_sibling.publisher_public if prev_sibling else None\r\n\r\n if not self.publisher_public_id: # first time published\r\n # is there anybody on left side?\r\n if not self.parent_id:\r\n obj.insert_at(self, position='right', save=False)\r\n else:\r\n if public_prev_sib:\r\n obj.insert_at(public_prev_sib, position='right', save=False)\r\n else:\r\n if public_parent:\r\n obj.insert_at(public_parent, position='first-child', save=False)\r\n else:\r\n # check if object was moved / structural 
tree change\r\n prev_public_sibling = obj.get_previous_filtered_sibling()\r\n if self.level != obj.level or \\\r\n public_parent != obj.parent or \\\r\n public_prev_sib != prev_public_sibling:\r\n if public_prev_sib:\r\n obj.move_to(public_prev_sib, position=\"right\")\r\n elif public_parent:\r\n # move as a first child to parent\r\n obj.move_to(public_parent, position='first-child')\r\n else:\r\n # it is a move from the right side or just save\r\n next_sibling = self.get_next_filtered_sibling(**filters)\r\n if next_sibling and next_sibling.publisher_public_id:\r\n obj.move_to(next_sibling.publisher_public, position=\"left\")\r\n\r\n return obj", "title": "" }, { "docid": "a0acd5131f236927e4b0d3a39f9fde20", "score": "0.46328107", "text": "def _format_primary_key_data_for_put(self, request):\n fields = ['fabric', 'items']\n for f in [('customer', Customer), ('project', Project), ('employee', User)]:\n try:\n pass#request.data[f[0]] = f[1].objects.get(pk=request.data[f[0]]['id'])\n except (AttributeError, KeyError, IndexError) as e:\n pass\n\n for field in fields:\n if field in request.data:\n try:\n if 'id' in request.data[field]:\n request.data[field] = request.data[field]['id']\n except TypeError:\n if field == 'acknowledgement':\n request.data[field] = None\n \n if field == 'items':\n for index, item in enumerate(request.data['items']):\n try:\n request.data['items'][index]['fabric'] = item['fabric']['id']\n except (KeyError, TypeError):\n pass\n \n if 'product' not in request.data['items'][index]:\n try:\n request.data['items'][index]['product'] = {'id': item['id']}\n del request.data['items'][index]['id']\n except KeyError as e:\n request.data['items'][index]['product'] = {'id': 10436}\n\n\n \"\"\" \n try:\n request.data['items'][index]['image'] = item['image']['id']\n except (KeyError, TypeError) as e:\n request.data['items'][index]['image'] = None\n \"\"\"\n\n elif field == 'project':\n try:\n if \"codename\" in request.data['project'] and \"id\" not in request.data['project']:\n project = Project(codename=request.data['project']['codename'])\n project.save()\n request.data['project'] = project.id\n \n except TypeError:\n pass\n \n return request", "title": "" }, { "docid": "e06048fc4d19aa9b7125e2e9e7307387", "score": "0.46305516", "text": "def update_pubmed_content(ids):\n Identifiers = apps.get_model('lit', 'identifiers')\n fetcher = pubmed.PubMedFetch(ids)\n contents = fetcher.get_content()\n for d in contents:\n content = json.dumps(d)\n Identifiers.objects\\\n .filter(\n unique_id=d['PMID'],\n database=constants.PUBMED\n )\\\n .update(content=content)", "title": "" }, { "docid": "a41cb57e55f246881e7eed8c3ad20e16", "score": "0.4599415", "text": "def cleanup_objects(self):\n new_object_dict = {}\n new_object_id_dict = {}\n all_objects = self.object_names\n all_unclassified = self.unclassified_objects\n for old_id, obj in self.objects.items():\n if obj.name in all_objects or obj.name in all_unclassified:\n updated_id = obj.id # By calling the object property we get the new id\n new_object_id_dict[obj.name] = updated_id\n new_object_dict[updated_id] = obj\n\n self.objects = new_object_dict\n self.object_id_dict = new_object_id_dict", "title": "" }, { "docid": "56c856ef978b7deb604e043c8f8c623d", "score": "0.4554205", "text": "def _update_link_resolved(jira, gh_issue, jira_issue):\n resolved = gh_issue[\"state\"] != \"open\"\n for link in jira.remote_links(jira_issue):\n if hasattr(link, \"globalId\") and link.globalId == gh_issue[\"html_url\"]:\n new_link = dict(link.raw[\"object\"]) 
# RemoteLink update() requires all fields as a JSON object, it seems\n new_link[\"title\"] = gh_issue[\"title\"]\n new_link[\"status\"][\"resolved\"] = resolved\n link.update(new_link, globalId=link.globalId, relationship=link.relationship)", "title": "" }, { "docid": "4094cb555e7402914b8fe544e677d3f3", "score": "0.4512237", "text": "def updated(self, **kwargs):\n\t\treturn self.published(**kwargs).extra(where=['date_modified > published']).order_by('-date_modified')", "title": "" }, { "docid": "f779fea36330e20acd6fcd687857c9a7", "score": "0.44996592", "text": "def update_metadata(metadata, public_ids, **options):\n params = {\n \"timestamp\": utils.now(),\n \"metadata\": utils.encode_context(metadata),\n \"public_ids\": utils.build_array(public_ids),\n \"type\": options.get(\"type\"),\n \"clear_invalid\": options.get(\"clear_invalid\")\n }\n\n return call_api(\"metadata\", params, **options)", "title": "" }, { "docid": "f7f033ae7ce6f609e490802b51f2f841", "score": "0.44897404", "text": "def update(self, new_ids):\n # for i in range(len(self.pieces)):\n # self.pieces[i] = new_ids[i]\n new = {}\n for i, id in enumerate(new_ids):\n new[id] = self.pieces[i]\n new[id].id = id\n self.pieces = new", "title": "" }, { "docid": "6a4eedfcf84908334882f76bd166693a", "score": "0.4478592", "text": "def make_affected_commits_public(bug):\n to_update = []\n\n query = osv.AffectedCommit.query(osv.AffectedCommit.bug_id == bug.key.id())\n for affected_commit in query:\n affected_commit.public = True\n to_update.append(affected_commit)\n\n if to_update:\n ndb.put_multi(to_update)", "title": "" }, { "docid": "1d4cf81640c9a1235f15504722efd6ad", "score": "0.44705158", "text": "async def _update_data(self, data: dict, update_data: dict) -> dict:\n data = {**data, **self.UPDATE_SCHEMA(update_data)}\n # make last_scanned JSON serializeable\n if LAST_SCANNED in update_data:\n data[LAST_SCANNED] = data[LAST_SCANNED].isoformat()\n return data", "title": "" }, { "docid": "723a84a04eec69506060d04bcb6e0a1f", "score": "0.44634816", "text": "def override(mongo):\n # fix posts\n broken_posts = mongo.Posts.find({'total_payout_value': {}}, {'identifier': 1}).limit(1000)\n for identifier in pluck('identifier', broken_posts):\n upsert_post(mongo, identifier)\n\n # fix comments\n broken_comments = mongo.Comments.find({'total_payout_value': {}}, {'identifier': 1}).limit(1000)\n for identifier in pluck('identifier', broken_comments):\n upsert_comment(mongo, identifier)\n\n # fix custom_json\n for op in mongo.Operations.find({'type': 'custom_json', 'json': {'$type': 'string'}}):\n if type(op['json']) != str:\n continue\n with suppress(TypeError):\n mongo.Operations.update(op, {'$set': {'json': json.loads(op['json'])}})\n for op in mongo.AccountOperations.find({'type': 'custom_json', 'json': {'$type': 'string'}}):\n if type(op['json']) != str:\n continue\n with suppress(TypeError):\n mongo.AccountOperations.update(op, {'$set': {'json': json.loads(op['json'])}})\n\n # dont hog the loop\n time.sleep(600)", "title": "" }, { "docid": "de250e80d4be40a0e460cadb104035c7", "score": "0.44350028", "text": "def fill_in_update_points(self, user_id, user, entry_meta, issue_meta, all_days, points):\n\n # Look at updates\n for key, day in user['days'].items():\n update_points = [self.config[\"update\"][\"default\"]]\n reasons = []\n\n # Any comments to any of the updated issues?\n comment_sum = sum([issue['comment_length'] for id, issue in day.items() if isinstance(id, int)])\n\n if comment_sum > 0:\n update_points += 
[self.config[\"update\"][\"for_any_comment\"]]\n elif self.config[\"update\"][\"for_any_comment\"] > 0:\n reasons += [\"Penalty for not commenting on issues\"]\n\n # All issues excluding Meeting and above ids\n issues = [id for id, issue in day.items() if isinstance(id, int) and id in issue_meta and issue_meta[id]['tracker'] in self.config[\"trackers\"]]\n\n # How many issues have logged time but no done ratio?\n without_done_ratio = len([issue_id for issue_id in issues if not issue_meta[issue_id]['done_ratio'] > 0 and day[issue_id]['hours'] > 0])\n\n # It's OK to have one issue without done ratio\n without_done_ratio = max(0, without_done_ratio - 1)\n\n update_points += [self.config[\"update\"][\"for_done_ratio\"] * (1/(1+without_done_ratio))]\n if without_done_ratio > 0:\n reasons += [\"Penalty for not updating done ratio on issue with logged time\"]\n\n # Were there any attachments?\n number_of_attachments = sum([issue['attachments'] for id, issue in day.items() if id in issue_meta])\n\n update_points += [self.config[\"update\"][\"for_attachment\"] * (1 - (1/(1+number_of_attachments)))]\n\n if number_of_attachments > 0 and self.config[\"update\"][\"for_attachment\"] > 0.0:\n reasons += [\"Reward for attaching files\"]\n\n # Were there any nicely formatted comments ?\n formatted_comments = sum([issue['comment_extra'] for id, issue in day.items() if id in issue_meta])\n\n update_points += [self.config[\"update\"][\"for_nice_comments\"] * (1 - (1/(1+formatted_comments/2)))]\n\n if formatted_comments > 0 and self.config[\"update\"][\"for_nice_comments\"] > 0:\n reasons += [\"Reward for nicely formatted comments\"]\n\n # Rewards loads of comments\n comment_extra = max(0, comment_sum - 150)\n\n update_points += [self.config[\"update\"][\"for_story_teller\"] * (1 - (1/(1+number_of_attachments/50)))]\n\n if comment_extra > 0 and self.config[\"update\"][\"for_story_teller\"] > 0.0:\n reasons += [\"Reward for being a story teller\"]\n\n if key in points[user_id]:\n points[user_id][key]['update_points'] = {\n 'sum': sum(update_points),\n 'reasons': reasons\n }", "title": "" }, { "docid": "91745b266b083b6b03cc8c4e04d36de6", "score": "0.4434073", "text": "def resolve_fields_to_resolve(self) -> None:\n for field_to_resolve, values_of_field_to_resolve in self.fields_to_resolve.items():\n # Get the extra parameters to pass to the resolver\n extra_parameters = {}\n for extra_parameter in values_of_field_to_resolve[\"extra_parameters\"]:\n extra_parameters[extra_parameter] = self.data.get(extra_parameter, None)\n\n # Must pop from data to avoid problems with database\n self.data.pop(extra_parameter, None)\n\n self.data[field_to_resolve] = values_of_field_to_resolve[\"resolver\"](self.data[field_to_resolve], **extra_parameters)", "title": "" }, { "docid": "04b405c29caf306cb9036b733f6360a6", "score": "0.44261813", "text": "def post_save_bulk(self, queryset, update_bulk_dict):\n pass", "title": "" }, { "docid": "905de01f0e78155ef62a25a4375c9a0e", "score": "0.44217643", "text": "def all_update(cls, submissions):\n\t\tnew = 0\n\t\tsession = get_config('db_session')\n\t\tsession.query(Submission).update({'current_rank': None})\n\t\tfor sub in submissions:\n\t\t\ttry:\n\t\t\t\tsubmission = session.query(Submission).get(sub['hn_id'])\n\t\t\t\tsubmission.update_from_crawl(sub)\n\t\t\texcept AttributeError:\n\t\t\t\tsubmission = Submission(date_created=datetime.datetime.now(), **sub)\n\t\t\t\tsession.add(submission)\n\t\t\t\tprint \"New submission found... 
getting %s's karma percentile\" % submission.submitter\n\t\t\t\tsubmission.submitter_color # to warm up the cache\n\t\t\t\tnew += 1\n\n\t\tsession.commit()\n\t\treturn new", "title": "" }, { "docid": "30d8a655d4386581d540ff30b573be0a", "score": "0.44202757", "text": "def _recompute_object_maps(self):\n self._primary_map = {}\n self._tag_set = set()\n\n for item in self._items:\n self._update_object_maps(item)", "title": "" }, { "docid": "77cc7de366c9304658c389acbab2990c", "score": "0.44191024", "text": "def update_key_value_metadata_public(request, id):\n if request.method == \"GET\":\n res, _, _ = authorize(\n request, id, needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE\n )\n return HttpResponse(status=200, content=json.dumps(res.extra_metadata))\n\n res, _, _ = authorize(\n request, id, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE\n )\n\n post_data = request.data.copy()\n res.extra_metadata = post_data\n\n is_update_success = True\n\n try:\n res.save()\n except Error:\n is_update_success = False\n\n if is_update_success:\n resource_modified(res, request.user, overwrite_bag=False)\n\n if is_update_success:\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=400)", "title": "" }, { "docid": "72b3397b0f5d1c244a67c45b5a6595d7", "score": "0.44165114", "text": "def get_updated_objects(self, last_export):\n self.log_fetch_start(\"objects\", last_export)\n for archival_object in self.repo.archival_objects(with_params={'all_ids': True, 'modified_since': last_export}):\n archival_object_id = archival_object.uri.split(\"/\")[-1]\n if archival_object.publish:\n self.save_data_file(archival_object_id, archival_object.json(), config.destinations['objects'])\n breadcrumbs = self.aspace.client.get(\n \"/repositories/{}/resources/{}/tree/node_from_root?node_ids[]={}&published_only=true\".format(\n config.archivesSpace['repository'],\n archival_object.resource.ref.split(\"/\")[-1],\n archival_object_id))\n if breadcrumbs.status_code == 200:\n self.save_data_file(archival_object_id, breadcrumbs.json(), config.destinations['breadcrumbs'])\n else:\n self.remove_data_file(archival_object_id, config.destinations['objects'])\n self.remove_data_file(archival_object_id, config.destinations['breadcrumbs'])", "title": "" }, { "docid": "95ef19aee8dfd8e978d954ee5f0e078f", "score": "0.4415822", "text": "def _encode_for_cloud_api(self, obj: Any) -> Any:\n value = self._encode_cloud_object(obj)\n if self._is_compound:\n # Wrap the scopes and final value into an Expression.\n value = _ExpressionOptimizer(value, self._scope).optimize()\n # Clear state in case of future encoding.\n self._scope = []\n self._encoded = {}\n self._hashcache = {}\n else:\n value = _ExpressionOptimizer(value).optimize()\n return value", "title": "" }, { "docid": "95a1d439e2fc346f9e296971f75d4b2c", "score": "0.44088933", "text": "def objects_updated(self, objects):\n self._objects = objects.objects", "title": "" }, { "docid": "26afe48e2a0511e313bccdbc77daa2ac", "score": "0.4368259", "text": "def enhance_with_referenced_foreign_ids(data):\n result = copy.deepcopy(data)\n for x in result:\n group_data = x['data']\n for y in group_data:\n y['referenced'] = {}\n raw = y['raw']\n refs = raw.get('refs')\n if refs:\n for k, v in refs.items():\n if v[0] == '.':\n v = point_to_alias(\n v, x['group'], group_data\n )\n y['referenced'][k] = v\n\n return result", "title": "" }, { "docid": "651a555f4aed0c446c2f95a37bacb02a", "score": "0.43642086", "text": "def _publish_managed(self, scheme, provider, is_required, draft_pid,\n 
record_pids, draft):\n identifier_value = draft_pid.get(\"identifier\")\n pid = None\n if is_required:\n if not identifier_value:\n pid = provider.create(draft)\n provider.reserve(pid, draft)\n else:\n pid = provider.get(identifier_value)\n\n url = self.service.links_item_tpl.expand(draft)[\"record_html\"]\n provider.register(pid, draft, url=url)\n else:\n if identifier_value:\n # must be already created and reserved\n pid = provider.get(identifier_value)\n assert pid.is_reserved() or pid.is_registered()\n # PIDS-FIXME: this should update meta to datacite???\n provider.register(pid, draft)\n\n if pid: # ignore not required & no given id value\n record_pids[scheme] = {\n \"identifier\": pid.pid_value,\n \"provider\": provider.name,\n \"client\": provider.client.name\n }", "title": "" }, { "docid": "a809d50be7ec99575446219745ce3568", "score": "0.43448263", "text": "def update_records(self, qs, batch_size=1000, **kwargs):\n tmp = {}\n for key, value in kwargs.items():\n name = self.__translate_fields.get(key, None)\n if name:\n tmp[name] = value\n\n batch = []\n objectsIDs = qs.only(self.custom_objectID).values_list(\n self.custom_objectID, flat=True)\n for elt in objectsIDs:\n tmp['objectID'] = elt\n batch.append(dict(tmp))\n\n if len(batch) >= batch_size:\n self.__index.partial_update_objects(batch)\n batch = []\n\n if len(batch) > 0:\n self.__index.partial_update_objects(batch)", "title": "" }, { "docid": "35ced9e3fef9d428c5bbdd80cc67ba7c", "score": "0.4329177", "text": "def perform_updates(objs):\n collector = get_collector(objs)\n for model, updates in collector.field_updates.items():\n for (field, value), objects in updates.items():\n if is_safedelete_cls(model):\n pks = [o.pk for o in objects if not is_deleted(o)]\n else:\n pks = [o.pk for o in objects]\n if len(pks) != 0:\n logger.info(\" > cascade update {} {} ({}={})\".format(len(objects), model.__name__, field.name, value))\n logger.debug(\" {}\".format([o.pk for o in objects]))\n # bulk update the field (this means that we dont)\n model.objects.filter(pk__in=[o.pk for o in objects]).update(**{field.name: value})", "title": "" }, { "docid": "c34c8c36019afc7c780361e384cd7d20", "score": "0.4321699", "text": "def _get_pubs(self, entry, graph):\n\n ref_to_pmid = {}\n entry_num = entry['mimNumber']\n\n if 'referenceList' in entry:\n reflist = entry['referenceList']\n for rlst in reflist:\n if 'pubmedID' in rlst['reference']:\n pub_id = 'PMID:' + str(rlst['reference']['pubmedID'])\n ref = Reference(\n graph, pub_id, self.globaltt['journal article'])\n else:\n # make blank node for internal reference\n pub_id = '_:OMIM' + str(entry_num) + 'ref' + str(\n rlst['reference']['referenceNumber'])\n\n ref = Reference(graph, pub_id)\n title = author_list = source = citation = None\n if 'title' in rlst['reference']:\n title = rlst['reference']['title']\n ref.setTitle(title)\n if 'authors' in rlst['reference']:\n author_list = rlst['reference']['authors']\n ref.setAuthorList(author_list)\n citation = re.split(r'\\.\\,', author_list)[0] + ' et al'\n if 'source' in rlst['reference']:\n source = rlst['reference']['source']\n citation = '; '.join(\n [tok for tok in [citation, title, source] if tok is not None])\n ref.setShortCitation(citation)\n ref.addRefToGraph()\n ref_to_pmid[rlst['reference']['referenceNumber']] = pub_id\n\n # add is_about for the pub\n omim_id = 'OMIM:' + str(entry_num)\n graph.addTriple(omim_id, self.globaltt['mentions'], pub_id)\n\n return ref_to_pmid", "title": "" }, { "docid": "b28bc89377220214fda821d74a10c838", "score": 
"0.4311714", "text": "def list_submissions(page, limit, published, sort='modified', order='desc', is_fabs=False, filters=None):\n sess = GlobalDB.db().session\n submission_updated_view = SubmissionUpdatedView()\n offset = limit * (page - 1)\n publishing_user = aliased(User)\n\n # List of all the columns to gather\n submission_columns = [Submission.submission_id, Submission.cgac_code, Submission.frec_code, Submission.user_id,\n Submission.publish_status_id, Submission.is_fabs, Submission.number_of_warnings,\n Submission.number_of_errors, Submission.updated_at, Submission.reporting_start_date,\n Submission.reporting_end_date, Submission.publishing_user_id,\n Submission.reporting_fiscal_year, Submission.reporting_fiscal_period,\n Submission.is_quarter_format, Submission.published_submission_ids, Submission.certified,\n Submission.test_submission]\n cgac_columns = [CGAC.cgac_code, CGAC.agency_name.label('cgac_agency_name')]\n frec_columns = [FREC.frec_code, FREC.agency_name.label('frec_agency_name')]\n user_columns = [User.user_id, User.name, publishing_user.user_id.label('publishing_user_id'),\n publishing_user.name.label('publishing_user_name')]\n view_columns = [submission_updated_view.submission_id, submission_updated_view.updated_at.label('updated_at')]\n max_pub = sess.query(PublishHistory.submission_id, func.max(PublishHistory.created_at).label('max_date')).\\\n group_by(PublishHistory.submission_id).cte('max_pub')\n max_cert = sess.query(CertifyHistory.submission_id, func.max(CertifyHistory.created_at).label('max_date')). \\\n group_by(CertifyHistory.submission_id).cte('max_cert')\n pub_query = sess.query(max_pub.c.submission_id,\n case([(func.coalesce(max_cert.c.max_date, '1/1/1973') > max_pub.c.max_date,\n max_cert.c.max_date)], else_=max_pub.c.max_date).label('last_pub_or_cert')).\\\n outerjoin(max_cert, max_pub.c.submission_id == max_cert.c.submission_id).cte('pub_query')\n\n columns_to_query = (submission_columns + cgac_columns + frec_columns + user_columns + view_columns\n + [pub_query.c.last_pub_or_cert])\n\n # Base query that is shared among all submission lists\n query = sess.query(*columns_to_query).\\\n outerjoin(User, Submission.user_id == User.user_id).\\\n outerjoin(publishing_user, Submission.publishing_user_id == publishing_user.user_id).\\\n outerjoin(CGAC, Submission.cgac_code == CGAC.cgac_code).\\\n outerjoin(FREC, Submission.frec_code == FREC.frec_code).\\\n outerjoin(submission_updated_view.table, submission_updated_view.submission_id == Submission.submission_id).\\\n outerjoin(pub_query, Submission.submission_id == pub_query.c.submission_id).\\\n filter(Submission.is_fabs.is_(is_fabs))\n min_mod_query = sess.query(func.min(submission_updated_view.updated_at).label('min_last_mod_date')). 
\\\n join(Submission, submission_updated_view.submission_id == Submission.submission_id).\\\n filter(Submission.is_fabs.is_(is_fabs))\n\n # Limit the data coming back to only what the given user is allowed to see\n query = permissions_filter(query)\n min_mod_query = permissions_filter(min_mod_query)\n\n # Determine what types of submissions (published/unpublished/both) to display\n if published != 'mixed':\n if published == 'true':\n query = query.filter(Submission.publish_status_id != PUBLISH_STATUS_DICT['unpublished'])\n min_mod_query = min_mod_query.filter(Submission.publish_status_id != PUBLISH_STATUS_DICT['unpublished'])\n else:\n query = query.filter(Submission.publish_status_id == PUBLISH_STATUS_DICT['unpublished'])\n min_mod_query = min_mod_query.filter(Submission.publish_status_id == PUBLISH_STATUS_DICT['unpublished'])\n\n # Add additional filters where applicable\n if filters:\n try:\n query = add_list_submission_filters(query, filters, submission_updated_view)\n except (ResponseException, ValueError) as e:\n return JsonResponse.error(e, StatusCode.CLIENT_ERROR)\n\n # Determine what to order by, default to \"modified\"\n options = {\n 'submission_id': {'model': Submission, 'col': 'submission_id'},\n 'modified': {'model': submission_updated_view, 'col': 'updated_at'},\n 'reporting_start': {'model': Submission, 'col': 'reporting_start_date'},\n 'reporting_end': {'model': Submission, 'col': 'reporting_end_date'},\n 'agency': {'model': CGAC, 'col': 'agency_name'},\n 'submitted_by': {'model': User, 'col': 'name'},\n 'last_pub_or_cert': {'model': pub_query.c, 'col': 'last_pub_or_cert'},\n 'quarterly_submission': {'model': Submission, 'col': 'is_quarter_format'}\n }\n\n if not options.get(sort):\n sort = 'modified'\n\n sort_order = getattr(options[sort]['model'], options[sort]['col'])\n\n # Determine how to sort agencies using FREC or CGAC name\n if sort == 'agency':\n sort_order = case([\n (FREC.agency_name.isnot(None), FREC.agency_name),\n (CGAC.agency_name.isnot(None), CGAC.agency_name)\n ])\n\n # Set the sort order\n if order == 'desc':\n sort_order = sort_order.desc()\n\n query = query.order_by(sort_order)\n\n total_submissions = query.count()\n min_last_mod = min_mod_query.one()[0]\n\n query = query.slice(offset, offset + limit)\n\n return JsonResponse.create(StatusCode.OK, {\n 'submissions': [serialize_submission(submission) for submission in query],\n 'total': total_submissions,\n 'min_last_modified': str(min_last_mod) if min_last_mod else None\n })", "title": "" }, { "docid": "9f2c99a8a6c71061b88b20c4dc2e7912", "score": "0.4311537", "text": "def _condenser_post_object(row, truncate_body=0):\n paid = row['is_paidout']\n\n # condenser#3424 mitigation\n if not row['category']:\n row['category'] = 'undefined'\n\n post = {}\n post['post_id'] = row['post_id']\n post['author'] = row['author']\n post['permlink'] = row['permlink']\n post['category'] = row['category']\n\n post['title'] = row['title']\n post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']\n post['json_metadata'] = json.loads(row['json'])\n\n post['created'] = json_date(row['created_at'])\n post['updated'] = json_date(row['updated_at'])\n post['depth'] = row['depth']\n post['children'] = row['children']\n post['net_rshares'] = row['rshares']\n\n post['is_paidout'] = row['is_paidout']\n post['payout_at'] = json_date(row['payout_at'])\n post['payout'] = float(row['payout'])\n post['pending_payout_value'] = _amount(0 if paid else row['payout'])\n post['author_payout_value'] = _amount(row['payout'] if 
paid else 0)\n post['curator_payout_value'] = _amount(0)\n post['promoted'] = _amount(row['promoted'])\n\n post['replies'] = []\n post['active_votes'] = _hydrate_active_votes(row['votes'])\n post['author_reputation'] = row['author_rep']\n\n post['stats'] = {\n 'hide': row['is_hidden'],\n 'gray': row['is_grayed'],\n 'total_votes': row['total_votes'],\n 'flag_weight': row['flag_weight']} # TODO: down_weight\n\n # import fields from legacy object\n assert row['raw_json']\n assert len(row['raw_json']) > 32\n raw_json = json.loads(row['raw_json'])\n\n # TODO: move to core, or payout_details\n post['beneficiaries'] = raw_json['beneficiaries']\n post['max_accepted_payout'] = raw_json['max_accepted_payout']\n post['percent_steem_dollars'] = raw_json['percent_steem_dollars'] # TODO: systag?\n if paid:\n curator_payout = sbd_amount(raw_json['curator_payout_value'])\n post['author_payout_value'] = _amount(row['payout'] - curator_payout)\n post['curator_payout_value'] = _amount(curator_payout)\n\n # TODO: re-evaluate\n if row['depth'] > 0:\n post['parent_author'] = raw_json['parent_author']\n post['parent_permlink'] = raw_json['parent_permlink']\n post['title'] = 'RE: ' + raw_json['root_title'] # PostSummary & comment context\n #else:\n # post['parent_author'] = ''\n # post['parent_permlink'] = ''\n post['url'] = raw_json['url']\n\n return post", "title": "" }, { "docid": "384df8f697bbdf4ce56ba2e34c270023", "score": "0.4310643", "text": "def save(self) :\r\n l = self.map_list.get_raw()\r\n\r\n result = list(self._iterFlatten( l ))\r\n result = sorted(result, key=lambda x: x.offset)\r\n\r\n idx = 0\r\n buff = \"\"\r\n for i in result :\r\n# print idx, i.offset, \"--->\", i.offset + i.size\r\n if idx == i.offset :\r\n buff += i.buff\r\n else :\r\n# print \"PATCH @ 0x%x %d\" % (idx, (i.offset - idx))\r\n buff += '\\x00' * (i.offset - idx)\r\n buff += i.buff\r\n idx += (i.offset - idx)\r\n\r\n idx += i.size\r\n\r\n return self.fix_checksums(buff)", "title": "" }, { "docid": "3882a96e8013701c24acf14ec140f10b", "score": "0.43009883", "text": "def get_company_updates(\n self, public_id=None, urn_id=None, max_results=None, results=[]\n ):\n params = {\n \"companyUniversalName\": {public_id or urn_id},\n \"q\": \"companyFeedByUniversalName\",\n \"moduleKey\": \"member-share\",\n \"count\": Linkedin._MAX_UPDATE_COUNT,\n \"start\": len(results),\n }\n\n res = self._fetch(f\"/feed/updates\", params=params)\n\n data = res.json()\n\n if (\n len(data[\"elements\"]) == 0\n or (max_results is not None and len(results) >= max_results)\n or (\n max_results is not None\n and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS\n )\n ):\n return results\n\n results.extend(data[\"elements\"])\n self.logger.debug(f\"results grew: {len(results)}\")\n\n return self.get_company_updates(\n public_id=public_id, urn_id=urn_id, results=results, max_results=max_results\n )", "title": "" }, { "docid": "9e4b3c544475296994b99a7f5b25c51c", "score": "0.42917898", "text": "def sync_exported_bugs(self):\n # TODO(rsalin): update the release date of milestone in LP after\n # the release in JIRA\n\n milestones = [self.milestone] + self.mu_milestones\n jira_bugs = self.jira.get_bugs(milestones)\n lp_bugs = self.lp.get_bugs(\n self.project, milestones,\n status=json.loads(config.get('LP', 'sync_statuses')))\n\n synced = []\n for jbug in jira_bugs:\n for lbug in lp_bugs:\n if not self._is_bugs_match(lbug, jbug):\n continue\n\n # TODO(rsalin): what if the both was changed since the last\n # sync? 
Investigate more precise way to resolve it\n if jbug['updated'] < lbug['updated']:\n log.info('Update bug \"%s\" in JIRA', lbug['title'])\n\n fields = self._get_jira_fields_to_update(jbug, lbug)\n if fields:\n synced.append(fields)\n bug = self.jira.bug(jbug['key'])\n\n if not DRY_RUN:\n self.jira.update_bug(bug, fields)\n else:\n log.info('Update bug \"%s\" in Launchpad', jbug['title'])\n\n fields = self._get_lp_fields_to_update(jbug, lbug)\n if fields:\n synced.append(fields)\n bug = self.lp.bug(lbug['key'])\n\n if not DRY_RUN:\n self.lp.update_bug(bug, fields)\n\n self.check_mu_milestone(jbug, lbug)\n break\n return synced", "title": "" }, { "docid": "14478237b70cb316cc8bf347ccea74e3", "score": "0.42801705", "text": "def api_fixes():\n models = get_api_all_model()\n\n fix = {\"deleted_flag\": \"integer_types\"}\n models[\"SavedQuestion\"].API_SIMPLE.update(fix)\n\n fix = {\"external_flag\": \"integer_types\"}\n models[\"User\"].API_SIMPLE.update(fix)\n\n fix = {\n \"details\": \"string_types\",\n \"last_modified_by\": \"string_types\",\n \"modification_time\": \"string_types\",\n \"modifier_user_id\": \"integer_types\",\n \"object_id\": \"integer_types\",\n \"type\": \"integer_types\",\n \"creation_time\": \"string_types\",\n }\n models[\"AuditLog\"].API_SIMPLE.update(fix)\n\n fix = {\"cache_row_id\": \"integer_types\"}\n models[\"AuditData\"].API_SIMPLE.update(fix)\n\n fix = {\"mod_user\": \"User\", \"cache_info\": \"CacheInfo\"}\n models[\"AuditLog\"].API_COMPLEX.update(fix)\n\n models[\"AuditLogList\"].API_ITEM_ATTR = \"entries\"\n\n fix = {\"id\": \"integer_types\", \"type\": \"string_types\"}\n models[\"AuditLogList\"].API_SIMPLE.update(fix)\n\n fix = {\"selects\": \"SelectList\", \"sensor_references\": \"SensorReferenceList\"}\n models[\"ParseQuestionResult\"].API_COMPLEX = fix", "title": "" }, { "docid": "6c2e8ce85fe7154fdb88e76482c34e16", "score": "0.4279038", "text": "def _ensure_resources(jwp_model, resource_queryset):\n\n jwp_queryset = jwp_model.objects.all()\n\n # A query which returns all the cached video resources which do not correspond to an existing\n # JWP video.\n new_resources = (\n resource_queryset\n .exclude(key__in=jwp_queryset.values_list('key', flat=True))\n )\n\n # Start creating a list of all JWP video object which were touched in this update\n updated_jwp_keys = [v.key for v in new_resources.only('key')]\n\n # Bulk insert objects for all new resources.\n jwp_queryset.bulk_create([\n jwp_model(key=resource.key, updated=resource.data.get('updated', 0), resource=resource)\n for resource in new_resources\n ])\n\n # A subquery which returns the corresponding CachedResource's updated timestamp for a JWP\n # Video. We cannot simply use \"data__updated\" here because Django by design\n # (https://code.djangoproject.com/ticket/14104) does not support joined fields with update()\n # but the checks incorrectly interpret \"data__updated\" as a join and not a transform. Until\n # Django is fixed, we use a horrible workaround using RawSQL. 
See\n # https://www.postgresql.org/docs/current/static/functions-json.html for the Postgres JSON\n # operators.\n matching_resource_updated = models.Subquery(\n resource_queryset\n .filter(key=models.OuterRef('key'))\n .values_list(\n functions.Cast(expressions.RawSQL(\"data ->> 'updated'\", []), models.BigIntegerField())\n )[:1]\n )\n\n # Add to our list of updated JWP videos\n updated_jwp_keys.extend([\n v.key\n for v in jwp_queryset\n .filter(updated__lt=matching_resource_updated)\n .only('key')\n ])\n\n # For all objects whose corresponding CachedResource's updated field is later than the object's\n # updated field, update the object.\n (\n jwp_queryset\n .annotate(resource_updated=matching_resource_updated)\n .filter(updated__lt=models.F('resource_updated'))\n .update(updated=models.F('resource_updated'))\n )\n\n return updated_jwp_keys", "title": "" }, { "docid": "ef7f78bf0d77cc6f37c83af0c472507b", "score": "0.42773038", "text": "def _update_entities(entities: List[List[Any]], fields: List[str]):\n optimade_fields = [\n self.resource_mapper.get_optimade_field(_) for _ in fields\n ]\n for entity in entities:\n field_to_entity_value = dict(zip(optimade_fields, entity))\n retrieved_attributes = field_to_entity_value.copy()\n for missing_attribute in self._extras_fields:\n retrieved_attributes.pop(missing_attribute)\n self.resource_mapper.build_attributes(\n retrieved_attributes=retrieved_attributes,\n entry_pk=field_to_entity_value[\"id\"],\n node_type=field_to_entity_value[\"type\"],\n missing_attributes=self._extras_fields,\n )", "title": "" }, { "docid": "6307efc5d1d2bf4121a0009baf93d2ce", "score": "0.42732438", "text": "def bugfix327_apply(request):\n organization_id = 158001 #channel 2\n org = Organization.objects.get(id = organization_id)\n photos = Photo.objects.filter(organization = org)\n\n start = int(request.GET.get('start', 0))\n limit = int(request.GET.get('limit', 10))\n\n count = 0\n for p in photos[start:limit]:\n p.owner_username = org.name\n p.save(no_cache_handling=True)\n count += 1\n\n return HttpResponse('done. 
updated %d photos' % count)", "title": "" }, { "docid": "70147ef39157fb12cdb6a3287a7a77a4", "score": "0.42689684", "text": "def update_replenishmentcollection(admin, id):\n # Check ReplenishmentCollection\n replcoll = (ReplenishmentCollection.query.filter_by(id=id).first())\n if not replcoll:\n raise exc.EntryNotFound()\n # Which replenishments are not revoked?\n repls = replcoll.replenishments.filter_by(revoked=False).all()\n\n data = json_body()\n\n if data == {}:\n raise exc.NothingHasChanged()\n\n updateable = {'revoked': bool, 'comment': str, 'timestamp': int}\n check_forbidden(data, updateable, replcoll)\n check_fields_and_types(data, None, updateable)\n\n updated_fields = []\n # Handle replenishmentcollection revoke\n if 'revoked' in data:\n if replcoll.revoked == data['revoked']:\n raise exc.NothingHasChanged()\n # Check if the revoke was caused through the replenishment_update and\n # therefor cant be changed\n if not data['revoked'] and not repls:\n raise exc.EntryNotRevocable()\n replcoll.toggle_revoke(revoked=data['revoked'], admin_id=admin.id)\n del data['revoked']\n updated_fields.append('revoked')\n\n # Handle new timestamp\n if 'timestamp' in data:\n try:\n timestamp = datetime.datetime.fromtimestamp(data['timestamp'])\n assert timestamp <= datetime.datetime.now()\n replcoll.timestamp = timestamp\n updated_fields.append('revoked')\n except (AssertionError, TypeError, ValueError, OSError, OverflowError):\n \"\"\"\n AssertionError: The timestamp lies in the future.\n TypeError: Invalid type for conversion.\n ValueError: Timestamp is out of valid range.\n OSError: Value exceeds the data type.\n OverflowError: Timestamp out of range for platform time_t.\n \"\"\"\n raise exc.InvalidData()\n del data['timestamp']\n\n # Handle all other fields\n updated_fields = update_fields(data, replcoll, updated_fields)\n\n # Apply changes\n try:\n db.session.commit()\n except IntegrityError:\n raise exc.CouldNotUpdateEntry()\n\n return jsonify({\n 'message': 'Updated replenishmentcollection.',\n 'updated_fields': updated_fields\n }), 201", "title": "" }, { "docid": "ee6794586433633053d69c56edbb3059", "score": "0.4264839", "text": "def _apply_updated(self, root_obj, buffer):\n # TODO: What should happen in the engine\n # when the user updates a certain cuds?\n # The given buffer contains all the updated CUDS object in a dictionary", "title": "" }, { "docid": "7c30d3a9333819777294a15788aa14cb", "score": "0.42628098", "text": "def publishable_post_save(sender, instance, **kwargs):\n\n # print(\"publishable_post_save::: %s - %s\" % (str(sender), str(instance)))\n\n if implements(instance, PublishableContent):\n\n # MJB 20190430 Pagina plaatsen in gesloten groep leverde helemaal geen\n # timeline-entries meer op. Zelfs niet voor de plaatser zelf.\n # Probleem zat in de is_public method die in de Base class kijkt of\n # het een gesloten groep is. Conclusie is_public is in zo'n geval wel\n # False, maar hier moet gekeken worden naar de published status\n # (i.c.m. is_tmp)\n # if instance.is_public:\n if instance.is_published and not instance.is_tmp and \\\n get_state(instance).name != \"private\" and not instance.is_deleted:\n\n changed = False\n\n # Have we not been published before?\n #\n\n if not History.objects.has_been(instance, PUBLISHED, UNPUBLISHED):\n\n publish.send(sender, instance=instance, first_edition=True)\n changed = True\n\n # Yes we have. 
But maybe currently we're unpublished?\n #\n elif History.objects.get_last(\n instance, PUBLISHED, UNPUBLISHED,\n as_flag=True) == UNPUBLISHED:\n\n publish.send(sender, instance=instance)\n changed = True\n\n if changed:\n History.objects.log(instance, PUBLISHED,\n user=instance.changed_by)\n instance.__class__.objects.filter(pk=instance.pk).update(\n publish_notified=True, unpublish_notified=False)\n\n else:\n # We're are not public. So if the last state was\n # 'published', actively unpublish.\n #\n last_state = History.objects.get_last(\n instance, PUBLISHED, UNPUBLISHED,\n as_flag=True)\n\n if last_state == PUBLISHED:\n\n unpublish.send(sender, instance=instance)\n instance.__class__.objects.filter(pk=instance.pk).update(\n unpublish_notified=True)\n\n # The instance may not be there anymore...\n #\n if not instance.is_deleted:\n History.objects.log(instance, UNPUBLISHED,\n user=instance.changed_by)", "title": "" }, { "docid": "42ccc0e9000d369683271155a27b4646", "score": "0.4262148", "text": "def __update_referrals(self):\n for doc_id in self.document_dict.keys():\n if doc_id in self.referrals_counter.keys():\n self.document_dict[doc_id][1] = self.referrals_counter[doc_id]", "title": "" }, { "docid": "3228f317b912e6eab25d716fa7545b58", "score": "0.42500812", "text": "def pre_save_bulk(self, queryset, update_bulk_dict):\n pass", "title": "" }, { "docid": "b39a28cfa776c3a80265737820c8c816", "score": "0.42468932", "text": "def partial_update(self):\n return {}, 200", "title": "" }, { "docid": "1f7861563654fc6c7374a7cf677286a8", "score": "0.42439488", "text": "def admin_obj_serialize_fields(obj: object, field_names: Sequence[str], cls=DjangoJSONEncoder, max_serialized_field_length: Optional[int] = None) -> str:\n out: Dict[str, Any] = {}\n for k in field_names:\n val = getattr(obj, k) if hasattr(obj, k) else None\n if val is not None:\n if hasattr(val, \"pk\"):\n val = {\"pk\": val.pk, \"str\": str(val)}\n elif not isinstance(val, (Decimal, float, int, bool)):\n val = str(val)\n if max_serialized_field_length is not None and isinstance(val, str) and len(val) > max_serialized_field_length:\n val = val[:max_serialized_field_length] + \" [...]\"\n out[k] = val\n return json.dumps(out, cls=cls)", "title": "" }, { "docid": "c786c9da1e56575e940c8c9fa1c1b16e", "score": "0.42399237", "text": "def flush(cls):\n # Post in chunks to avoid overflowing the max BSON document\n # size when the Monq task is created:\n # cls.to_delete - contains solr index ids which can easily be over\n # 100 bytes. 
Here we allow for 160 bytes avg, plus\n # room for other document overhead.\n # cls.to_add - contains BSON ObjectIds, which are 12 bytes each, so\n # we can easily put 1m in a doc with room left over.\n if cls.to_delete:\n for chunk in chunked_list(list(cls.to_delete), 100 * 1000):\n cls._post(index_tasks.del_artifacts, chunk)\n\n if cls.to_add:\n for chunk in chunked_list(list(cls.to_add), 1000 * 1000):\n cls._post(index_tasks.add_artifacts, chunk)\n cls.to_delete = set()\n cls.to_add = set()", "title": "" }, { "docid": "d0ab16c60f5e6aaec59c42832586527b", "score": "0.4235899", "text": "def _recompute_object_maps(self):\n self._tag_set = set()\n for item in self.items:\n self._update_object_maps(item)", "title": "" }, { "docid": "b7f62b5d23691a24592789a391b77414", "score": "0.42336634", "text": "def update_dict_by_analytic(self, update_dict):\n for d in self._analytic_dimensions():\n update_dict.update({d: self[d].id})", "title": "" }, { "docid": "f3336a0c4b313b23ea9320da3bb4fb0d", "score": "0.42288274", "text": "def json_advanced_dumps(\n obj: EncodableObject,\n sort_keys: bool = True,\n encoders: List[Callable] = ENCODE_HOOKS,\n use_primitives: bool = True,\n allow_nan: bool = True,\n ndarray_compact: Optional[bool] = None,\n compression: bool = False,\n **jsonkwargs,\n) -> str:\n combined_encoder = AttributionSerializer(\n encoders=encoders,\n use_primitives=use_primitives,\n sort_keys=sort_keys,\n allow_nan=allow_nan,\n ndarray_compact=ndarray_compact,\n compression=compression,\n **jsonkwargs,\n )\n return combined_encoder.encode(obj)", "title": "" }, { "docid": "0f2d32bcab189888e02c950a7933435e", "score": "0.42157385", "text": "def sync_videos_foreignkeys_with_metadata(self):\r\n #Clear out the old assocations first\r\n prev_videos = self.video_set.all()\r\n for video in prev_videos:\r\n video.exam = None\r\n video.save()\r\n \r\n new_video_slugs = videos_in_exam_metadata(self.xml_metadata)['video_slugs']\r\n new_videos = Video.objects.filter(course=self.course, mode=self.mode, is_deleted=False, slug__in=new_video_slugs)\r\n for new_video in new_videos:\r\n new_video.exam = self\r\n new_video.save()\r\n\r\n video_slugs_set = map(lambda li:li.slug, list(new_videos))\r\n video_slugs_not_set = list(set(new_video_slugs)-(set(video_slugs_set)))\r\n \r\n return {'video_slugs_set':video_slugs_set, 'video_slugs_not_set':video_slugs_not_set}", "title": "" }, { "docid": "e1e911015a9787c8e815ac340da8ba74", "score": "0.42117387", "text": "def update_objects(self, objs):\n table = objs[0]._table\n dicts = map(lambda o: o.db_dict(), objs)\n self.update_many(table, dicts)", "title": "" }, { "docid": "471973dd07db3289defb269431b700f8", "score": "0.42049167", "text": "async def load_posts_keyed(db, ids, truncate_body=0):\n # pylint: disable=too-many-locals\n assert ids, 'no ids passed to load_posts_keyed'\n\n # fetch posts and associated author reps\n sql = \"\"\"SELECT post_id, community_id, author, permlink, title, body, category, depth,\n promoted, payout, payout_at, is_paidout, children, votes,\n created_at, updated_at, rshares, raw_json, json,\n is_hidden, is_grayed, total_votes, flag_weight\n FROM hive_posts_cache WHERE post_id IN :ids\"\"\"\n result = await db.query_all(sql, ids=tuple(ids))\n author_map = await _query_author_map(db, result)\n\n # TODO: author affiliation?\n ctx = {}\n posts_by_id = {}\n author_ids = {}\n post_cids = {}\n for row in result:\n row = dict(row)\n author = author_map[row['author']]\n author_ids[author['id']] = author['name']\n\n row['author_rep'] = 
author['reputation']\n post = _condenser_post_object(row, truncate_body=truncate_body)\n\n post['blacklists'] = Mutes.lists(post['author'], author['reputation'])\n\n posts_by_id[row['post_id']] = post\n post_cids[row['post_id']] = row['community_id']\n\n cid = row['community_id']\n if cid:\n if cid not in ctx:\n ctx[cid] = []\n ctx[cid].append(author['id'])\n\n # TODO: optimize\n titles = {}\n roles = {}\n for cid, account_ids in ctx.items():\n sql = \"SELECT title FROM hive_communities WHERE id = :id\"\n titles[cid] = await db.query_one(sql, id=cid)\n sql = \"\"\"SELECT account_id, role_id, title\n FROM hive_roles\n WHERE community_id = :cid\n AND account_id IN :ids\"\"\"\n roles[cid] = {}\n ret = await db.query_all(sql, cid=cid, ids=tuple(account_ids))\n for row in ret:\n name = author_ids[row['account_id']]\n roles[cid][name] = (row['role_id'], row['title'])\n\n for pid, post in posts_by_id.items():\n author = post['author']\n cid = post_cids[pid]\n if cid:\n post['community'] = post['category'] # TODO: True?\n post['community_title'] = titles[cid] or post['category']\n role = roles[cid][author] if author in roles[cid] else (0, '')\n post['author_role'] = ROLES[role[0]]\n post['author_title'] = role[1]\n else:\n post['stats']['gray'] = ('irredeemables' in post['blacklists']\n or len(post['blacklists']) >= 2)\n post['stats']['hide'] = 'irredeemables' in post['blacklists']\n\n\n sql = \"\"\"SELECT id FROM hive_posts\n WHERE id IN :ids AND is_pinned = '1' AND is_deleted = '0'\"\"\"\n for pid in await db.query_col(sql, ids=tuple(ids)):\n if pid in posts_by_id:\n posts_by_id[pid]['stats']['is_pinned'] = True\n\n return posts_by_id", "title": "" }, { "docid": "08239f8d3ec52ddadd9a7cc201f1b9c2", "score": "0.42040014", "text": "def award_id_lookup_post_delete(self, possibly_modified_award_ids):\n\n # Of those possibly_modified_award_ids, find those that remain after deleting transactions. 
Those are\n # the award_ids which have had some, but not all, transactions deleted from them.\n # This function will always append to int.award_ids_delete_modified because award_id_lookup ETL\n # level could be run more than once before awards ETL level is run.\n # Avoid SQL error if possibly_modified_award_ids is empty\n if possibly_modified_award_ids:\n # TODO: see award_id_lookup_pre_delete\n self.spark.sql(\n f\"\"\"\n INSERT INTO int.award_ids_delete_modified\n SELECT award_id\n FROM int.award_id_lookup\n WHERE award_id IN ({\", \".join(possibly_modified_award_ids)})\n \"\"\"\n )", "title": "" }, { "docid": "88681305f1e7df70340c56535c4cef53", "score": "0.42022237", "text": "def build_partial(self, keys_to_update):\n data_to_update = {}\n\n for key in keys_to_update:\n if key == 'title':\n #the description as tokens for the keyword search and as complete string for sorting\n data_to_update.update({\"title\": self.gstore_object.description})\n data_to_update.update({\"title_search\": self.gstore_object.description})\n \n elif key == 'date_added':\n data_to_update.update({\"date_added\": self.gstore_object.dateadded.strftime('%Y-%m-%d')})\n \n elif key == 'date_published':\n data_to_update.update(self.build_date_element(\"date_published\", self.gstore_object.date_published))\n \n elif key == 'valid_start':\n if self.gstore_object.begin_datetime:\n data_to_update.update(self.build_date_element(\"valid_start\", self.gstore_object.begin_datetime))\n \n elif key == 'valid_end':\n if self.gstore_object.end_datetime:\n data_to_update.update(self.build_date_element(\"valid_end\", self.gstore_object.end_datetime))\n\n elif key == 'metadata_date': \n if self.gstore_object.gstore_metadata:\n metadata_date = self.gstore_object.gstore_metadata[0].date_modified\n if metadata_date:\n data_to_update.update(self.build_date_element(\"gstore_metadata\", metadata_date)) \n elif key == 'formats':\n formats = self.gstore_object.get_formats(self.request)\n data_to_update.update({\"formats\": formats})\n \n elif key == 'services':\n services = self.gstore_object.get_services(self.request)\n data_to_update.update({\"services\": services})\n \n elif key == 'standards':\n standards = self.gstore_object.get_standards(self.request)\n data_to_update.update({\"standards\": standards})\n \n elif key == 'supported_repositories':\n repos = self.gstore_object.get_repositories()\n data_to_update.update({\"supported_repositories\": [{\"app\": k, \"repos\": v} for k,v in repos.iteritems()]})\n \n elif key == 'applications':\n data_to_update.update({\"applications\": self.gstore_object.apps_cache})\n \n elif key == 'isotopic':\n isotopic = self.gstore_object.gstore_metadata[0].get_isotopic() if self.gstore_object.gstore_metadata else ''\n data_to_update.update({\"isotopic\": isotopic})\n \n elif key == 'abstract':\n abstract = self.gstore_object.gstore_metadata[0].get_abstract() if self.gstore_object.gstore_metadata else ''\n data_to_update.update({\"abstract\": abstract})\n \n elif key == 'keywords':\n keywords = self.gstore_object.gstore_metadata[0].get_keywords() if self.gstore_object.gstore_metadata else ''\n data_to_update.update({\"keywords\": keywords})\n \n elif key == 'aliases':\n data_to_update.update({\"aliases\": self.gstore_object.aliases if self.gstore_object.aliases else []})\n \n elif key == 'embargo':\n data_to_update.update({\"embargo\": self.gstore_object.is_embargoed})\n \n elif key == 'active':\n data_to_update.update({\"active\": not self.gstore_object.inactive})\n \n elif key == 'available':\n 
data_to_update.update({\"available\": self.gstore_object.is_available})\n\n elif key == 'model_run_uuid':\n data_to_update.update({\"model_run_uuid\": self.gstore_object.model_run_uuid})\n\n elif key == 'model_run_name':\n data_to_update.update({\"model_run_name\": self.gstore_object.model_run_name})\n\n elif key == 'model_vars':\n data_to_update.update({\"model_vars\": self.gstore_object.model_vars})\n\n elif key == 'parent_model_run_uuid':\n data_to_update.update({\"parent_model_run_uuid\": self.gstore_object.parent_model_run_uuid})\n\n elif key == 'externaluserid':\n data_to_update.update({\"externaluserid\": self.gstore_object.externaluserid})\n\n elif key == 'model_set':\n data_to_update.update({\"model_set\": self.gstore_object.model_set})\n\n elif key == 'model_set_type':\n data_to_update.update({\"model_set_type\": self.gstore_object.model_set_type})\n\n elif key == 'model_set_taxonomy':\n data_to_update.update({\"model_set_taxonomy\": self.gstore_object.model_set_taxonomy})\n\n elif key == 'taxonomy':\n data_to_update.update({\"taxonomy\": self.gstore_object.taxonomy})\n if self.gstore_object.geomtype and self.gstore_object.taxonomy == 'vector':\n data_to_update.update({\"geomtype\": self.gstore_object.geomtype.lower()})\n \n elif key == 'categories':\n #nested category structure\n categories = []\n for category in self.gstore_object.categories:\n categories.append({\"theme\": str(category.theme), \"subtheme\": str(category.subtheme), \"groupname\": str(category.groupname), \"apps\": self.gstore_object.apps_cache})\n data_to_update.update({\"category_facets\": categories})\n\n #and the original structure just in case\n cat = self.gstore_object.categories[0]\n data_to_update.update({\"category\": {\"theme\": str(cat.theme), \"subtheme\": str(cat.subtheme), \"groupname\": str(cat.groupname), \"apps\": self.gstore_object.apps_cache}})\n \n elif key == 'category_hierarchy':\n #for the 1..3 structure not in place\n pass\n elif key == 'location':\n if self.gstore_object.taxonomy not in ['table']:\n area, loc = self.build_location(self.gstore_object.box)\n data_to_update.update({\"location\": loc})\n data_to_update.update({\"area\": area})\n \n elif key == 'collections':\n data_to_update.update({\"collections\": [str(c.uuid) for c in self.gstore_object.collections]}) \n \n else:\n pass\n\n self.partial = data_to_update", "title": "" }, { "docid": "d8f5c62c50075aef51818d698fc8e62e", "score": "0.419967", "text": "def _update():\n ndb.put_multi_async(platforms_to_store)\n campaign.put_async()", "title": "" }, { "docid": "31c6d3fda24ababf203b283784fe1dcc", "score": "0.41908705", "text": "def _condenser_post_object(row):\n paid = row['is_paidout']\n\n post = {}\n post['post_id'] = row['post_id']\n post['author'] = row['author']\n post['permlink'] = row['permlink']\n post['category'] = row['category']\n post['parent_permlink'] = ''\n post['parent_author'] = ''\n\n post['title'] = row['title']\n post['body'] = row['body']\n post['json_metadata'] = row['json']\n\n post['created'] = _json_date(row['created_at'])\n post['depth'] = row['depth']\n post['children'] = row['children']\n post['net_rshares'] = row['rshares']\n\n post['last_payout'] = _json_date(row['payout_at'] if paid else None)\n post['cashout_time'] = _json_date(None if paid else row['payout_at'])\n post['total_payout_value'] = _amount(row['payout'] if paid else 0)\n post['curator_payout_value'] = _amount(0)\n post['pending_payout_value'] = _amount(0 if paid else row['payout'])\n post['promoted'] = \"%.3f SBD\" % row['promoted']\n\n 
post['replies'] = []\n post['body_length'] = len(row['body'])\n post['active_votes'] = _hydrate_active_votes(row['votes'])\n post['author_reputation'] = _rep_to_raw(row['author_rep'])\n\n # import fields from legacy object\n assert row['raw_json']\n assert len(row['raw_json']) > 32\n raw_json = json.loads(row['raw_json'])\n\n if row['depth'] > 0:\n post['parent_permlink'] = raw_json['parent_permlink']\n post['parent_author'] = raw_json['parent_author']\n\n post['root_title'] = raw_json['root_title']\n post['max_accepted_payout'] = raw_json['max_accepted_payout']\n post['percent_steem_dollars'] = raw_json['percent_steem_dollars']\n post['url'] = raw_json['url']\n\n # not used by condenser, but may be useful\n #post['net_votes'] = post['total_votes'] - row['up_votes']\n #post['allow_replies'] = raw_json['allow_replies']\n #post['allow_votes'] = raw_json['allow_votes']\n #post['allow_curation_rewards'] = raw_json['allow_curation_rewards']\n #post['beneficiaries'] = raw_json['beneficiaries']\n #post['curator_payout_value'] = raw_json['curator_payout_value'] if paid else _amount(0)\n #post['total_payout_value'] = _amount(row['payout'] - amount(raw_json['curator_payout_value'])) if paid else _amount(0)\n\n return post", "title": "" }, { "docid": "8c6a96b9328f0d29f60ead4968495602", "score": "0.41718742", "text": "def recent_updates():\n\n procurements = Procurement.query.filter_by(approved=True).order_by(Procurement.added_on.desc(), Procurement.procurement_id.desc()).limit(20).all()\n return serializers.queryset_to_json(procurements)", "title": "" }, { "docid": "fe2c6d70e56636c83bdc5f83393ddc6a", "score": "0.41601953", "text": "def __update_publications_from_mongo(self) -> None:\n\n client = pymongo.MongoClient(self.host)\n db = client.Scopus\n col = db.Data\n data = col.find(batch_size=10)\n\n con = psycopg2.connect(database=self.postgre_database, user=self.postgre_user, host=self.postgre_host, password=self.postgre_password, port=self.postgre_port)\n cur = con.cursor()\n\n blank_dict = {\n '1': '', '2': '', '3': '', '4': '', '5': '', '6': '',\n '7': '', '8': '', '9': '', '10': '', '11': '', '12': '',\n '13': '', '14': '', '15': '', '16': '', '17': '', '18': '',\n 'DOI': '',\n 'IHE': [],\n 'SVM': [],\n 'Title': '',\n 'Validation': {\n 'ColorRed': 0, 'ColorBlue': 0, 'ColorGreen': 0, 'Similarity': 0,\n 'StringCount': [['1', 0.0], ['2', 0.0], ['3', 0.0], ['4', 0.0], ['5', 0.0], ['6', 0.0],\n ['7', 0.0], ['8', 0.0], ['9', 0.0], ['10', 0.0], ['11', 0.0], ['12', 0.0],\n ['13', 0.0], ['14', 0.0], ['15', 0.0], ['16', 0.0], ['17', 0.0], ['18', 0.0]],\n 'SDG_Keyword_Counts': []\n },\n 'ModelResult': '',\n 'StringResult': '',\n 'IHE_Prediction': '',\n 'SVM_Prediction': ''\n }\n\n c, l = 0, 90000\n for i in data:\n self.__progress(c, l, \"Syncing scraped publications to Django\")\n del i['_id']\n doi = i['DOI'].replace(\"\\'\", \"\\'\\'\")\n title = i['Title'] = i['Title'].replace(\"\\'\", \"\\'\\'\")\n\n cur.execute(\"SELECT exists (SELECT 1 FROM public.app_publication WHERE doi = \\'{}\\')\".format(doi))\n existing_pub = cur.fetchone()[0]\n\n if not existing_pub:\n if i['Abstract']:\n i['Abstract'] = i['Abstract'].replace(\"\\'\", \"\\'\\'\")\n if i['Source']:\n i['Source'] = i['Source'].replace(\"\\'\", \"\\'\\'\")\n if i['AuthorData']:\n for key, val in i['AuthorData'].items():\n if val['Name']:\n val['Name'] = val['Name'].replace(\"\\'\", \"\\'\\'\")\n if val['AffiliationName']:\n val['AffiliationName'] = val['AffiliationName'].replace(\"\\'\", \"\\'\\'\")\n if i['IndexKeywords']:\n for index, val in 
enumerate(i['IndexKeywords']):\n i['IndexKeywords'][index] = val.replace(\"\\'\", \"\\'\\'\")\n if i['AuthorKeywords']:\n for index, val in enumerate(i['AuthorKeywords']):\n i['AuthorKeywords'][index] = val.replace(\"\\'\", \"\\'\\'\")\n\n query = \"\"\"INSERT INTO public.app_publication (title, data, \\\"assignedSDG\\\", doi) VALUES ('{0}', '{1}', '{2}', '{3}') ON CONFLICT (doi) DO NOTHING\"\"\".format(title, json.dumps(i), json.dumps(blank_dict), doi)\n cur.execute(query)\n con.commit()\n c += 1\n print()\n client.close()\n con.close()", "title": "" }, { "docid": "5cf93a3a8f33ec00b640c3109700f96f", "score": "0.41594234", "text": "def test_update_finds_all(self):\n # A nice big prime number of posts.\n self.add_long_blog(111)\n segments = self.make_segments(\"long_blog\")\n self.assertEqual(segments.count(), 111)\n oldest = segments.earliest(\"published\")\n newest = segments.latest(\"published\")\n self.assertEqual(oldest.title, \"Post #1\")\n self.assertEqual(newest.title, \"Post #111\")", "title": "" }, { "docid": "96209b21a902f2f1d720a5bed071f9e4", "score": "0.41576985", "text": "def fix_cache(self):\n journal_id_list = []\n cursor = self.db['cache'].find()\n for article in cursor:\n journal_id_list.append(article['journal_id'])\n unique_jrl = set(journal_id_list)\n for jrl in unique_jrl:\n self.update_cache(jrl)", "title": "" }, { "docid": "5cc10710903fb12dc5c42fc5d44db5eb", "score": "0.4157234", "text": "def load_last_post(objects):\n\n pk_list = [x.last_post_id for x in objects]\n qs = Post.objects.filter(pk__in=pk_list).select_related('user')\n posts = dict((x.pk, x) for x in qs)\n for obj in objects:\n obj.last_post = posts.get(obj.last_post_id)", "title": "" }, { "docid": "12fb3b7cc7d06337ef9ee432187db5ce", "score": "0.4147182", "text": "def _update_object(self, model, data, request=None):\n if not self.update_fields:\n msg = '%s.update_fiels is not defined' % self.__class__.__name__\n raise RequestError(msg, rc.NOT_IMPLEMENTED.status_code)\n\n fks = [i for i in data if i[-3:] == '_id']\n update_fields = list(self.update_fields) + fks\n\n for att in update_fields:\n if att not in data:\n continue\n value = data.get(att)\n # many-to-many must be threated after a pk\n if isinstance(value, list):\n modelatt = getattr(self.model, att)\n if hasattr(modelatt, 'field') \\\n or (hasattr(modelatt, 'related') \\\n and hasattr(modelatt.related, 'field')):\n continue\n\n # one-to-one must get the object, do not use the ID\n if att[-3:] == '_id':\n att = att[:-3]\n # dict has priority to represents a relation\n if att in data:\n continue\n\n try:\n cls_att = model.__class__._meta.get_field_by_name(att)[0]\n relation_classes = (ForeignKey, OneToOneField,\n SingleRelatedObjectDescriptor,\n ReverseSingleRelatedObjectDescriptor)\n\n if isinstance(cls_att, relation_classes) and not value:\n value = None\n\n if isinstance(cls_att, relation_classes) and value is not None:\n related_cls = cls_att.rel.to\n if isinstance(value, dict):\n value = value['id']\n value = related_cls.objects.get(pk=int(value))\n except django.db.models.exceptions.ObjectDoesNotExist, e:\n raise\n except Exception, e:\n pass\n\n setattr(model, att, value)\n return model", "title": "" }, { "docid": "ee139304eb22c91535d79cd0e8540494", "score": "0.414527", "text": "def _update_related_codes(self, ds_db, ds_queue_codes_stat):\n\n # This case can be if on some day we got only one outdated duration\n # json input\n if not ds_queue_codes_stat:\n return None\n\n # Update current codes\n for code_db in ds_db.codes:\n cur_code_from_db 
= code_db.phone_code\n\n # Pop code data from ds_code_stat\n cur_code_data = ds_queue_codes_stat.pop(cur_code_from_db)\n\n code_db.number_of_calls = cur_code_data[\"number_of_calls\"]\n code_db.summary_duration = cur_code_data[\"summary_duration\"]\n\n self.s.add(code_db)\n\n # If there are some new codes\n for code, data in ds_queue_codes_stat.items():\n new_code = CodeStat(\n phone_code=code,\n number_of_calls=data[\"number_of_calls\"],\n summary_duration=data[\"summary_duration\"],\n daily_stat=ds_db,\n )\n self.s.add(new_code)", "title": "" }, { "docid": "f9a09c3c55dac573f4e5e76a98db334c", "score": "0.4143655", "text": "def to_update(self):\n queryset = self.published()\n return [source for source in queryset if source.can_update()]", "title": "" }, { "docid": "ca5f8a1b5247f8b263acacb2401becf2", "score": "0.4139011", "text": "def _filter_inplace(ordering, munge):\n for obj_type, objects in cp.items():\n ordering_list = getattr(ordering, obj_type, None)\n if ordering_list:\n munge_result = munge(objects, ordering_list)\n if munge_result:\n cp[obj_type] = munge_result", "title": "" }, { "docid": "5c3b894bdc0f6fb85900b54cce081c3c", "score": "0.4137119", "text": "def dump(self):\n dumped = {\"update\": self._update_object}\n if self._id is not None:\n dumped[\"id\"] = self._id\n elif self._external_id is not None:\n dumped[\"externalId\"] = self._external_id\n return dumped", "title": "" }, { "docid": "1e16a9baeb144b12fa1fbda02ec440f5", "score": "0.41366172", "text": "def test_set_issue_is_public_bulk_setting_reason(self):\n unpublish_reason = \"plágio\"\n self._make_one(attrib={\"_id\": \"012ijs9y24\", \"is_public\": True})\n self._make_one(attrib={\"_id\": \"2183ikos90\", \"is_public\": True})\n self._make_one(attrib={\"_id\": \"9298wjso89\", \"is_public\": True})\n\n ids = [\"012ijs9y24\", \"2183ikos90\", \"9298wjso89\"]\n\n controllers.set_issue_is_public_bulk(ids, is_public=False, reason=\"plágio\")\n\n issues = controllers.get_issues_by_iid(ids)\n\n for issue in issues.values():\n self.assertEqual(unpublish_reason, issue.unpublish_reason)", "title": "" }, { "docid": "1cf7937226e6f84b1e485cbc186daeae", "score": "0.41353673", "text": "async def test_updating_bigints(client: Client) -> None:\n models = [\n await client.lists.create({}),\n await client.lists.create(\n data={\n 'bigints': [539506179039297536, 281454500584095754],\n },\n ),\n ]\n\n model = await client.lists.update(\n where={\n 'id': models[0].id,\n },\n data={\n 'bigints': {\n 'push': [538075535121842179],\n },\n },\n )\n assert model is not None\n assert model.bigints == [538075535121842179]\n\n model = await client.lists.update(\n where={\n 'id': models[1].id,\n },\n data={\n 'bigints': {\n 'push': [186214420957888512],\n },\n },\n )\n assert model is not None\n assert model.bigints == [539506179039297536, 281454500584095754, 186214420957888512]\n\n model = await client.lists.update(\n where={\n 'id': models[1].id,\n },\n data={\n 'bigints': {\n 'set': [129003276736659456],\n },\n },\n )\n assert model is not None\n assert model.bigints == [129003276736659456]\n\n model = await client.lists.update(\n where={\n 'id': models[1].id,\n },\n data={\n 'bigints': [298490675715112960],\n },\n )\n assert model is not None\n assert model.bigints == [298490675715112960]", "title": "" }, { "docid": "bdb022903029566fb22846c6816ea8ea", "score": "0.41289362", "text": "def _consolidate_ipms(self):\n self.names = []\n self.ids = {}\n self.ipm_mapper = {}\n self.replaced = defaultdict(list)\n for i, ipm in enumerate(self.ipms):\n for n, 
name in enumerate(ipm.names):\n if name not in self.names:\n self.names.append(name)\n else:\n old_ipm = self.ipms[self.ipm_mapper[name]]\n ind_index = list(old_ipm.mapping.keys()).index(name)\n self.replaced[name].append((old_ipm.ids[ind_index][0], old_ipm.ids[ind_index][1]))\n self.ids[name] = ipm.ids[n]\n self.ipm_mapper[name] = i", "title": "" }, { "docid": "9a56e2765ca45785f4cbe92a362ce5a9", "score": "0.41282576", "text": "def update_objects(self):\n object_list = [v for v in self._GOs.values()]\n object_list.sort()\n for obj in object_list:\n obj[1].update(self.event_list)", "title": "" }, { "docid": "4e7c6f792ea7f951d83a0839d0cbb2f7", "score": "0.41197026", "text": "def resolve_last_modified_at(self, info):\n\n # select\n # max((value ->> 'deleted_at') :: float)\n #\n # from\n # ObjectTOC,\n # jsonb_each(additional_metadata)\n #\n # where\n # client_id = <client_id> and\n # object_id = <object_id>;\n\n deleted_at_query = (\n\n DBSession\n\n .query(\n func.max(cast(\n column('value').op('->>')('deleted_at'),\n sqlalchemy.Float)))\n\n .select_from(\n ObjectTOC,\n func.jsonb_each(ObjectTOC.additional_metadata))\n\n .filter(\n ObjectTOC.client_id == self.id[0],\n ObjectTOC.object_id == self.id[1],\n ObjectTOC.additional_metadata != JSONB.NULL))\n\n # Query for last modification time of the dictionary's perspectives, lexical entries and entities.\n\n sql_str = ('''\n\n select\n\n max(\n greatest(\n\n extract(epoch from P.created_at),\n\n (select\n max((value ->> 'deleted_at') :: float)\n\n from\n jsonb_each(OP.additional_metadata)),\n\n (select\n\n max(\n greatest(\n\n extract(epoch from L.created_at),\n\n (select\n max((value ->> 'deleted_at') :: float)\n\n from\n jsonb_each(OL.additional_metadata)),\n\n (select\n\n max(\n greatest(\n\n extract(epoch from E.created_at),\n\n (select\n max((value ->> 'deleted_at') :: float)\n\n from\n jsonb_each(OE.additional_metadata))))\n\n from\n public.entity E,\n ObjectTOC OE\n\n where\n E.parent_client_id = L.client_id and\n E.parent_object_id = L.object_id and\n OE.client_id = E.client_id and\n OE.object_id = E.object_id and\n OE.additional_metadata != 'null' :: jsonb)))\n\n from\n lexicalentry L,\n ObjectTOC OL\n\n where\n L.parent_client_id = P.client_id and\n L.parent_object_id = P.object_id and\n OL.client_id = L.client_id and\n OL.object_id = L.object_id and\n OL.additional_metadata != 'null' :: jsonb)))\n\n from\n dictionaryperspective P,\n ObjectTOC OP\n\n where\n P.parent_client_id = :client_id and\n P.parent_object_id = :object_id and\n OP.client_id = P.client_id and\n OP.object_id = P.object_id and\n OP.additional_metadata != 'null' :: jsonb\n\n ''')\n\n # Complete query for the dictionary, excluding created_at which we already have.\n\n DBSession.execute(\n 'set extra_float_digits to 3;');\n\n result = (\n\n DBSession\n\n .query(\n func.greatest(\n deleted_at_query.label('deleted_at'),\n Grouping(sqlalchemy.text(sql_str))))\n\n .params({\n 'client_id': self.id[0],\n 'object_id': self.id[1]})\n\n .scalar())\n\n if result is not None:\n\n return max(\n self.dbObject.created_at,\n result)\n\n else:\n\n return self.dbObject.created_at", "title": "" }, { "docid": "7d947f607f81a84f34d89ddf3557c158", "score": "0.41095978", "text": "def update_public_attrs(self, attrs_dict: dict) -> None:\n public_attrs = dict()\n for attr in attrs_dict:\n if attr not in self.public_attrs:\n continue\n public_attrs[attr] = attrs_dict[attr]\n self.update_or_set_public_attrs(public_attrs)", "title": "" }, { "docid": "3216dc4021000ac81ae779228a13bcbc", "score": 
"0.41069672", "text": "def update_rpki_status(\n self,\n rpsl_objs_now_valid: List[Dict[str, Any]] = [],\n rpsl_objs_now_invalid: List[Dict[str, Any]] = [],\n rpsl_objs_now_not_found: List[Dict[str, Any]] = [],\n ) -> None:\n self._check_write_permitted()\n table = RPSLDatabaseObject.__table__\n if rpsl_objs_now_valid:\n pks = {o[\"pk\"] for o in rpsl_objs_now_valid}\n stmt = table.update().where(table.c.pk.in_(pks)).values(rpki_status=RPKIStatus.valid)\n self.execute_statement(stmt)\n if rpsl_objs_now_invalid:\n pks = {o[\"pk\"] for o in rpsl_objs_now_invalid}\n stmt = table.update().where(table.c.pk.in_(pks)).values(rpki_status=RPKIStatus.invalid)\n self.execute_statement(stmt)\n if rpsl_objs_now_not_found:\n pks = {o[\"pk\"] for o in rpsl_objs_now_not_found}\n stmt = table.update().where(table.c.pk.in_(pks)).values(rpki_status=RPKIStatus.not_found)\n self.execute_statement(stmt)\n\n for rpsl_obj in rpsl_objs_now_valid + rpsl_objs_now_not_found:\n visible_previously = object_is_visible(\n rpki_status=rpsl_obj[\"old_status\"],\n scopefilter_status=rpsl_obj[\"scopefilter_status\"],\n route_preference_status=rpsl_obj[\"route_preference_status\"],\n )\n visible_now = object_is_visible(\n rpki_status=rpsl_obj[\"rpki_status\"],\n scopefilter_status=rpsl_obj[\"scopefilter_status\"],\n route_preference_status=rpsl_obj[\"route_preference_status\"],\n )\n if visible_now and not visible_previously:\n self.status_tracker.record_operation_from_rpsl_dict(\n operation=DatabaseOperation.add_or_update,\n origin=JournalEntryOrigin.rpki_status,\n rpsl_obj=rpsl_obj,\n )\n self.changed_objects_tracker.object_modified_dict(\n rpsl_obj, origin=JournalEntryOrigin.rpki_status\n )\n for rpsl_obj in rpsl_objs_now_invalid:\n visible_previously = object_is_visible(\n rpki_status=rpsl_obj[\"old_status\"],\n scopefilter_status=rpsl_obj[\"scopefilter_status\"],\n route_preference_status=rpsl_obj[\"route_preference_status\"],\n )\n visible_now = object_is_visible(\n rpki_status=rpsl_obj[\"rpki_status\"],\n scopefilter_status=rpsl_obj[\"scopefilter_status\"],\n route_preference_status=rpsl_obj[\"route_preference_status\"],\n )\n if not visible_now and visible_previously:\n self.status_tracker.record_operation_from_rpsl_dict(\n operation=DatabaseOperation.delete,\n origin=JournalEntryOrigin.rpki_status,\n rpsl_obj=rpsl_obj,\n )\n self.changed_objects_tracker.object_modified_dict(\n rpsl_obj, origin=JournalEntryOrigin.rpki_status\n )", "title": "" }, { "docid": "d9a1f02b6eadc0aa748310a72dcb2ca1", "score": "0.41067183", "text": "def test_discussion_filter_updated(self):\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'sortby': 1, 'updated_date': '05/04/2010'}\n updated_vals = (\n (1, '/1'),\n (2, '/4'),\n )\n\n for updated, url_id in updated_vals:\n qs.update({'updated': updated})\n response = self.client.get(reverse('search'), qs)\n result = json.loads(response.content)['results'][0]\n url_end = result['url'].endswith(url_id)\n assert url_end, ('URL was \"%s\", expected to end with \"%s\"' %\n (result['url'], url_id))", "title": "" }, { "docid": "76f6ec9fc87536813f12c97f57fa21d1", "score": "0.4106185", "text": "def collate_formed_groups(created_groups):\n collated_result = {}\n \n for group in created_groups:\n if group.get_size() not in collated_result:\n collated_result[group.get_size()] = 1\n else:\n collated_result[group.get_size()] += 1\n\n return collated_result", "title": "" }, { "docid": "f765e4c2e8462c72e5408794d8a71fa2", "score": "0.41017678", "text": "def sync(self):\n upds = self.reserve\n self.reserve = []\n 
upds.sort(key=get_when)\n while len(self.get_buffer()) < 10 and len(upds) > 0:\n update = upds.pop(0)\n try:\n self.post(update)\n except buffpy.exceptions.BuffpyRestException:\n print(self.name() + \" did not post: \" + get_when(update).strftime(dtformat()))\n self.reserve.extend(upds)", "title": "" }, { "docid": "447984316bf6ad4104969bfe26de0bfb", "score": "0.41001135", "text": "def encode_proposal(data, desired_share_ids=None):", "title": "" }, { "docid": "dad2743a09bfbd36860363fea278f887", "score": "0.40969074", "text": "def _convert_obj_ids_to_strings(data):\n if isinstance(data, list):\n for doc in data:\n doc['_id'] = str(doc['_id'])\n elif isinstance(data, dict):\n data['_id'] = str(data['_id'])\n\n return data", "title": "" }, { "docid": "ac5e25ac6bd5410b510ed32f320dc1a9", "score": "0.40961373", "text": "def update_all_id_we_vote(request):\n candidate_campaign_list = CandidateCampaign.objects.all()\n for candidate_campaign in candidate_campaign_list:\n candidate_campaign.save()\n\n contest_measure_list = ContestMeasure.objects.all()\n for contest_measure in contest_measure_list:\n contest_measure.save()\n\n contest_office_list = ContestOffice.objects.all()\n for contest_office in contest_office_list:\n contest_office.save()\n\n measure_campaign_list = MeasureCampaign.objects.all()\n for measure_campaign in measure_campaign_list:\n measure_campaign.save()\n\n organization_list = Organization.objects.all()\n for organization in organization_list:\n organization.save()\n\n position_list = PositionEntered.objects.all()\n for position in position_list:\n position.save()", "title": "" }, { "docid": "2353567650f96566c1d02423f17bda49", "score": "0.4095834", "text": "def update_post(prev_data, data, db_conn):\n\n schema = get_post_schema(data)\n data2 = omit(data, ('id', 'created', 'modified'))\n data2 = extend({}, prev_data, data2)\n data, errors = validate_post(data2, db_conn)\n if errors:\n return data, errors\n data, errors = update_document(schema, prev_data, data, db_conn)\n if not errors:\n add_post_to_es(data, db_conn)\n return data, errors", "title": "" }, { "docid": "35b93825ff38dd7b2ffeffec779c8378", "score": "0.40925023", "text": "def _format_primary_key_data(self, request):\n fields = ['fabric', 'items']\n \n for f in [('customer', Customer), ('project', Project), ('employee', User)]:\n try:\n pass#request.data[f[0]] = f[1].objects.get(pk=request.data[f[0]]['id'])\n except (AttributeError, KeyError, IndexError) as e:\n pass\n\n for field in fields:\n if field in request.data:\n if 'id' in request.data[field]:\n request.data[field] = request.data[field]['id']\n \n \n if field == 'items':\n for index, item in enumerate(request.data['items']):\n try:\n request.data['items'][index]['fabric'] = item['fabric']['id']\n except (KeyError, TypeError):\n pass\n \n try:\n request.data['items'][index]['product'] = {'id': item['id']}\n del request.data['items'][index]['id']\n except KeyError as e:\n request.data['items'][index]['product'] = {'id': 10436}\n \n \"\"\"\n try:\n request.data['items'][index]['image'] = item['image']['id']\n except (KeyError, TypeError) as e:\n request.data['items'][index]['image'] = None\n \"\"\"\n \n elif field == 'project':\n try:\n if \"codename\" in request.data['project'] and \"id\" not in request.data['project']:\n project = Project(codename=request.data['project']['codename'])\n project.save()\n request.data['project'] = project.id\n except TypeError:\n pass\n \n return request", "title": "" }, { "docid": "2a441a5420f74d53c7809b3b60371608", "score": 
"0.4091055", "text": "def CollectObjectIDs(ids, obj):\n if id(obj) in ids:\n return\n ids.add(id(obj))\n if isinstance(obj, (list, tuple, set, frozenset)):\n for e in obj:\n CollectObjectIDs(ids, e)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n CollectObjectIDs(ids, k)\n CollectObjectIDs(ids, v)\n return len(ids)", "title": "" }, { "docid": "08e50a03a255a1367e07725ec5584132", "score": "0.4091049", "text": "def test_set_issue_is_public_bulk(self):\n\n self._make_one(attrib={\"_id\": \"012ijs9y24\", \"is_public\": True})\n self._make_one(attrib={\"_id\": \"2183ikos90\", \"is_public\": True})\n self._make_one(attrib={\"_id\": \"9298wjso89\", \"is_public\": True})\n\n controllers.set_issue_is_public_bulk(\n [\"012ijs9y24\", \"2183ikos90\", \"9298wjso89\"], is_public=False\n )\n\n ids = [\"012ijs9y24\", \"2183ikos90\", \"9298wjso89\"]\n\n issues = controllers.get_issues_by_iid(ids)\n\n for issue in issues.values():\n self.assertFalse(issue.is_public)", "title": "" }, { "docid": "84b156261079f1c72155fc47ff185a1f", "score": "0.40908474", "text": "def post_apply(self, updated: t.Dict = {}):\n table = dynamodb.Table(self.data_store_table)\n\n for update in updated.values():\n row: t.List[str] = update.as_csv_row()\n # example row format: ('<raw_indicator>', '<indicator-id>', '<descriptor-id>', '<time added>', '<space-separated-tags>')\n # e.g (10736405276340','096a6f9...064f', '1234567890', '2020-07-31T18:47:45+0000', 'true_positive hma_test')\n new_tags = row[4].split(\" \") if row[4] else []\n\n metadata = PDQSignalMetadata.get_from_signal_and_ds_id(\n table,\n int(row[1]),\n S3ThreatDataConfig.SOURCE_STR,\n str(self.privacy_group),\n )\n\n if metadata:\n new_pending_opinion_change = self.get_new_pending_opinion_change(\n metadata, new_tags\n )\n else:\n # If this is a new indicator without metadata there is nothing for us to update\n return\n\n metadata = PDQSignalMetadata(\n signal_id=row[1],\n ds_id=str(self.privacy_group),\n updated_at=datetime.now(),\n signal_source=S3ThreatDataConfig.SOURCE_STR,\n signal_hash=row[0], # note: not used by update_tags_in_table_if_exists\n tags=new_tags,\n pending_opinion_change=new_pending_opinion_change,\n )\n # TODO: Combine 2 update functions into single function\n if metadata.update_tags_in_table_if_exists(table):\n logger.info(\n \"Updated Signal Tags in DB for indicator id: %s source: %s for privacy group: %d\",\n row[1],\n S3ThreatDataConfig.SOURCE_STR,\n self.privacy_group,\n )\n if metadata.update_pending_opinion_change_in_table_if_exists(table):\n logger.info(\n \"Updated Pending Opinion in DB for indicator id: %s source: %s for privacy group: %d\",\n row[1],\n S3ThreatDataConfig.SOURCE_STR,\n self.privacy_group,\n )", "title": "" }, { "docid": "48739b4cdcc2eac6824a9c6ee542ad71", "score": "0.40898195", "text": "def _update_object_maps(self, item):\n if item.primary not in self._primary_map.keys():\n self._primary_map[item.primary] = []\n self._primary_map[item.primary].append(item)\n\n for tag in item.tags:\n self._tag_set.add(tag)\n self._tag_set.add(item.primary)", "title": "" }, { "docid": "795c90644371cc94f73aede47ba0ad95", "score": "0.4084332", "text": "def not_postings_list(left_posting_list: dict) -> dict:\n result_dict: dict = dict()\n prev_key: int = -1\n sorted_keys = np.array(list(left_posting_list.keys()), dtype=int)\n # sorted_keys.sort()\n for current_key in sorted_keys:\n if int(current_key) > int(prev_key) + 1: # add chunk from prev_key to current_key\n keys_list: np.array(dtype=str) = np.array( # update every time\n 
range(prev_key + 1, int(current_key)), dtype=str)\n if len(keys_list) > 1:\n result_dict.update(dict.fromkeys(keys_list, 0)) # make tf zero\n else:\n result_dict[keys_list[0]] = 0 # in case no diff between prev_key and current_key\n prev_key = current_key\n\n if int(max_doc_id) > int(prev_key) + 1: # left after last key to max\n result_dict.update(dict.fromkeys(\n np.array(range(prev_key + 1, max_doc_id + 1), dtype=str), 0))\n elif max_doc_id == prev_key + 1: # eof\n result_dict[str(max_doc_id)] = 0\n return dict(sorted(result_dict.items()))", "title": "" }, { "docid": "50d57666b466afe1c6efa247dc4da663", "score": "0.4080271", "text": "def check_updated_ids(action, doc_ids):\n orig_dates = {}\n for doc_id in doc_ids:\n response = action.api.get_document(doc_id)\n if response.status_code == 200:\n orig_date = response.json()['properties']['modified_date']\n orig_dates[doc_id] = orig_date\n # orig_count = response.json()['entities'][1]['properties']['count']['character']\n else:\n print(\"Document id not found on Lingotek Cloud: \"+str(doc_id))\n return False\n for doc_id in doc_ids:\n if not check_updated(action, doc_id, orig_dates[doc_id]):\n return False\n return True", "title": "" }, { "docid": "c3648b6cfdfec40532416b0b077048f7", "score": "0.40758374", "text": "def _ManageHarnessUpdateScheduleForHosts(\n hostnames, max_concurrent_update_percentage):\n hostnames = sorted(hostnames)\n\n metadatas = ndb.get_multi(\n ndb.Key(datastore_entities.HostMetadata, hostname)\n for hostname in hostnames)\n\n if all(metadata and metadata.allow_to_update\n for metadata in metadatas):\n logging.debug('All hosts are released to start update, skipping.')\n return\n\n update_states = ndb.get_multi(\n ndb.Key(datastore_entities.HostUpdateState, hostname)\n for hostname in hostnames)\n\n max_updates = max(len(hostnames) * max_concurrent_update_percentage // 100, 1)\n\n # Three lists of HostMetadata entities to track\n # 1. Hosts that are allowed to start update, but are not in a final state.\n # Update jobs is running or expected to start running on them.\n # 2. Hosts that are not yet allowed to start update. Update jobs should not\n # have started on them yet. And hosts to be start updates should be\n # selected from this list.\n # 3. Hosts that are set to allowed already, and in a final state. 
This list\n # is only for debugging purposes.\n allowed_but_unfinished = []\n not_allowed_yet = []\n finished = []\n for metadata, update_state in zip(metadatas, update_states):\n if not metadata or not update_state:\n continue\n if not metadata.allow_to_update:\n not_allowed_yet.append(metadata)\n elif update_state.state in common.NON_FINAL_HOST_UPDATE_STATES:\n allowed_but_unfinished.append(metadata)\n else:\n finished.append(metadata)\n\n num_expect_updating = len(allowed_but_unfinished)\n num_can_be_added = max(max_updates - num_expect_updating, 0)\n\n logging.info(\n 'Scheduling with max update limit %d.\\n'\n 'Hosts update schedule summary:\\n'\n ' allowed_but_unfinished: %s\\n'\n ' not_allowed_yet: %s\\n'\n ' finished: %s\\n'\n 'Adding %d more hosts.',\n max_updates,\n [metadata.hostname for metadata in allowed_but_unfinished],\n [metadata.hostname for metadata in not_allowed_yet],\n [metadata.hostname for metadata in finished],\n num_can_be_added)\n\n if num_can_be_added <= 0:\n return\n\n # Mark hosts as allow_to_update.\n\n metadatas_to_allow = not_allowed_yet[:num_can_be_added]\n for metadata in metadatas_to_allow:\n metadata.allow_to_update = True\n _PutHostMetadata(metadata)\n\n # Touch remaining PENDING hosts, to prevent from being marked TIMED_OUT.\n\n hosts_still_not_allowed = set(\n metadata.hostname for metadata in not_allowed_yet[num_can_be_added:])\n\n for update_state in update_states:\n if (update_state and\n update_state.state == common.HostUpdateState.PENDING and\n update_state.hostname in hosts_still_not_allowed):\n _TouchHostUpdateState(update_state)", "title": "" }, { "docid": "201cf7c935162234c4248878601a77cd", "score": "0.4074068", "text": "def cache_spawn_syncers(doc_class, doc_id, modified_fields):\n try:\n document = doc_class.get(pk = doc_id)\n except doc_class.DoesNotExist:\n return\n\n for (d_class, d_id), fields in document.get_reverse_references(modified_fields).iteritems():\n cache_resync.delay(doc_class, doc_id, d_class, d_id, fields)", "title": "" }, { "docid": "c88d3491c0c44c08357fec116d1d1a7a", "score": "0.40522897", "text": "def sync_bangladesh_ogx_realized(apps, ex_api, *args, **kwargs):\n p_api = api.PodioApi(19600457) #With credentials for the VD application\n modified_apps = {} #Here it saves the EPs that have already been searched and found in PODIO, so it is not done again\n for app in apps['items']:\n print(\"Updating realizations of %s in Value Delivery space\" % app['person']['full_name'])\n if app['person']['email'] not in modified_apps:\n search = p_api.search_in_application_v2(app_id=19600457, ref_type='item', query=app['id'])\n if len(search['results']) == 1: #Found exactly one, as it should be\n # Initializes variables that may or may not be in the consideration space, to be transferred later to the VD space if they exist\n # gets the item\n item_id = search['results'][0]['id']\n p_api.updateItem(item_id, {\n 163451022:{'start_utc':app['date_realized'].replace('T', ' ').replace('Z', '').replace('+00:00', '')},\n 168656289:{'start_utc':app['experience_end_date'].replace('T', ' ').replace('Z', '').replace('+00:00', '')}\n })\n continue #TODO: Everything after this should be cleaned up\n print(app['status'])\n item = p_api.get_item(item_id, external_id=False)\n # Finds the status of the exchange\n stage = item['values'][157141798]['value']\n stage_number = int(stage.split(' - ')[0])\n breaker = False\n print(stage)\n if stage_number == 1:\n p_api.updateItem(item_id, {\n 157141798:'2 - Realized',\n 
163451022:{'start_utc':app['date_realized'].replace('T', ' ').replace('Z', '').replace('+00:00', '')}})\n print(\"EP has been realized, updated\")\n elif stage_number < 0:\n print(\"Inelegible EP was realiZed?\")\n p_api.comment('item', item_id, {'value': \"ALERT: An ep who was marked as break approval or break realization in PODIO has been realizeded in EXPA. This case should bechecked immediately @[%s](user:%s) @[%s](user:%s)\" % (di_responsible['name'],di_responsible['user_id'], ogx_responsible['name'], ogx_responsible['user_id'])})\n elif stage_number > 1: #Higher than an approval already\n p_api.comment('item', item_id, {'value': \"NOTE: An ep who was in a sate higher than realized has been marked as just realized. CHeck this out, @[%s](user:%s)\" % (di_responsible['name'],di_responsible['user_id']) })\n elif len(search['results']) == 0: #Found 0, not ideal but not unexpected, gotta create it and generate a warning\n print(\"Wow check this out\")\n\n attributes = { # This is for creating a new realization at the VD space\n 157131079:app['person']['first_name'],\n 157141792:app['person']['last_name'],\n 157141793:unicode(app['id']),\n 157141794:unicode(app['person']['id']),\n 157141800:{'value':app['person']['email'], 'type': 'work'},\n 157141801:app['person']['home_lc']['name'],\n 157141802:app['opportunity']['title'], #Nombre del proyecto\n 157141795:unicode(app['opportunity']['id']),\n 157141803:app['opportunity']['programmes']['short_name'],\n 157141804:app['opportunity']['office']['name'],\n 157141805:app['opportunity']['office']['country'], #País origen\n 157141796:{'start_utc':app['date_approved'].replace('T', ' ').replace('Z', '').replace('+00:00', '')},\n 163451022:{'start_utc':app['date_realized'].replace('T', ' ').replace('Z', '').replace('+00:00', '')},\n }\n new_item = p_api.create_item({'fields':attributes})\n if breaker:\n print(app)\n break\n else:\n breaker = True\n item_id = new_item['item_id']\n print(\"No EP was found, created in the space\")\n else: #FOund more than one, why, which one is it, help, abort\n print(\"######ERROR#####\")\n print('Error, more than one item found')\n print(\"\")\n break\n modified_apps[app['id']] = item_id\n else: #We already know this EP's PODIO ID\n print (\"Found in previously loaded approvals, just add the comment\")\n continue\n item_id = modified_apps[app['id']]", "title": "" }, { "docid": "c057f764f334ca0615bc67110c78f96d", "score": "0.40496948", "text": "def simplify_listing(self, jobject, subrredit):\n if 'error' in jobject:\n return jobject\n \"\"\"\n Simplifies the JSON Object retrieved from the Reddit API into\n post title, url, score, gilded, and comments\n :param jobject:\n :param limit:\n :return:\n \"\"\"\n listings = {\n \"subreddit\": subrredit,\n \"posts\":[],\n \"posts_size\":0\n }\n\n size = len(jobject[\"data\"][\"children\"])\n for i in range(0, size):\n post_object = jobject[\"data\"][\"children\"][i][\"data\"]\n title = ''\n link = ''\n upvotes = 0\n gilded = 0\n num_comments = 0\n if \"title\" in post_object:\n title = post_object[\"title\"]\n if \"url\" in post_object:\n if \"imgur\" in post_object[\"url\"] and \"i.imgur\" not in post_object[\"url\"]:\n link = self.alter_imgur_links(post_object[\"url\"])\n else:\n link = post_object[\"url\"]\n if \"score\" in post_object:\n upvotes = int(post_object[\"score\"])\n if \"gilded\" in post_object:\n gilded = int(post_object[\"gilded\"])\n if \"num_comments\" in post_object:\n num_comments = int(post_object[\"num_comments\"])\n\n post = {\n \"title\": title,\n 
\"link\": link,\n \"upvotes\": upvotes,\n \"num_comments\": num_comments,\n \"gilded\": gilded\n\n }\n listings[\"posts\"].append(post)\n listings[\"posts_size\"] += 1\n print(listings)\n return listings", "title": "" }, { "docid": "904fae61f230f0a23d94a3cef3adbfb9", "score": "0.40451306", "text": "def prep_drafts(data):\n\n data['id'] = data['draft_id'].astype(str) + '.' \\\n + data['team_id'].astype(str) + '.' \\\n + data['player_id'].astype(str)\n\n data['pick_time'] = pd.to_datetime(\n data['pick_time'],\n infer_datetime_format=True,\n format=DATETIME_FORMAT,\n errors='coerce'\n )\n\n return data", "title": "" }, { "docid": "65e5269cdf24af3d4f3d2d92f2db7693", "score": "0.40382427", "text": "def _replace_resource_ids_with_models(class_, object_dict):\n for relation in class_._get_relationship_properties():\n relation_class = relation.mapper.class_\n\n # only do replacement is object_dict has this property\n if relation.key in object_dict:\n if relation.uselist:\n model_ids = object_dict.get(relation.key)\n object_dict[relation.key] = [\n relation_class.query.get(id)\n for id in model_ids\n ]\n else:\n id = object_dict.get(relation.key)\n if id is None:\n object_dict[relation.key] = None\n else:\n model = relation_class.query.get(id)\n if model is None:\n raise ValueError(\n \"The relationship {} has no id {}\".format(\n relation.key,\n id))\n object_dict[relation.key] = model\n\n return object_dict", "title": "" }, { "docid": "414b8b9cac87dd009156ba67b0d1e5cd", "score": "0.40370405", "text": "def _fix_tweet_object(self, tweet_dict):\n # self.logger.info('Fixing tweet object')\n if tweet_dict.get('created_at'):\n tweet_dict['created_at'] = self._tweet_time_to_iso(\n tweet_dict['created_at'])\n self.logger.debug('Fixed tweet object: \\n {}'.format(\n json.dumps(tweet_dict, indent=2)))\n return tweet_dict", "title": "" }, { "docid": "a53e40f3b62c2a52064de956664d5791", "score": "0.40337875", "text": "def update_revisions(conn):\n sql = (\"\"\"\n SELECT id\n FROM proposals\n WHERE instance_id = 0;\n \"\"\")\n\n deleted_proposals = conn.execute(sql)\n deleted_ids = [proposal.id for proposal in deleted_proposals]\n\n # add objects to objects without revisions\n if deleted_ids:\n utils.add_to_objects_without_revisions_bulk(conn,\n deleted_ids,\n \"Proposal\",\n action='deleted',\n )", "title": "" }, { "docid": "45a46f98e151849ea205450926ee5f19", "score": "0.4027482", "text": "def save_embedded(self):\t\t\t\n\t\tself.changed_objects = []\n\t\tself.deleted_objects = []\n\t\tself.new_objects = []\n\t\t\n\t\toutput_objects = []\n\t\t\n\t\tif not self.is_valid():\n\t\t\treturn []\n\t\t\t\n\t\tfor form in self.initial_forms:\n\t\t\tembedded_instance = form.save(commit=False)\n\t\t\tif self.can_delete == True and form._raw_value(\"DELETE\"):\n\t\t\t\tself.deleted_objects.append(embedded_instance)\n\t\t\telse:\n\t\t\t\toutput_objects.append(embedded_instance)\n\t\t\t\tif form.has_changed():\n\t\t\t\t\tself.changed_objects.append((embedded_instance, form.changed_data))\n\t\t\t\n\t\tfor form in self.extra_forms:\n\t\t\tif form.has_changed() and not (self.can_delete and form._raw_value(\"DELETE\")): # it has new data and has not been marked for deletion\n\t\t\t\tembedded_instance = form.save(commit=False)\n\t\t\t\tself.new_objects.append(embedded_instance)\n\t\t\t\toutput_objects.append(embedded_instance)\n\t\t\n\t\treturn output_objects", "title": "" }, { "docid": "895d0479c1c3092f4e994d9bc72f9ec6", "score": "0.40233478", "text": "def rest_sanitize_put(poll_list, json):\n\tfor poll in 
poll_list:\n\t\tpoll.total_votes += 1\n\treturn poll_list", "title": "" }, { "docid": "fa5732b1c0863139985cc519e16cb8aa", "score": "0.4022081", "text": "def save_state(self) -> int:\n objects2attached = {}\n # ToDo find out what this is for and where it is used\n for o in self.objects:\n objects2attached[o] = (o.attachments.copy(), o.cids.copy())\n return p.saveState(self.client_id), objects2attached", "title": "" } ]
990a3fbb10a1f962868fc5cf396a5296
Translate Resource Format from gcloud values to configconnector values.
[ { "docid": "0dbf48ed7e0eef5fb333f99a5106a04f", "score": "0.5140977", "text": "def _NormalizeResourceFormat(resource_format):\n if resource_format == 'terraform':\n return 'hcl'\n return resource_format", "title": "" } ]
[ { "docid": "a5f9c7d808fa23ca55c7477f25ad0d10", "score": "0.5538266", "text": "def cr_to_config(cluster_resource: Dict[str, Any]) -> Dict[str, Any]:\n config = translate(cluster_resource[\"spec\"], dictionary=CONFIG_FIELDS)\n config[\"available_node_types\"] = get_node_types(cluster_resource)\n config[\"cluster_name\"] = cluster_resource[\"metadata\"][\"name\"]\n config[\"provider\"] = PROVIDER_CONFIG\n return config", "title": "" }, { "docid": "31df8bb8029d37a1f71cf3c72d6769f3", "score": "0.55055887", "text": "def cr_to_config(cluster_resource: Dict[str, Any]) -> Dict[str, Any]:\n config = translate(cluster_resource[\"spec\"], dictionary=CONFIG_FIELDS)\n cluster_name = cluster_resource[\"metadata\"][\"name\"]\n config[\"available_node_types\"] = get_node_types(cluster_resource,\n cluster_name)\n config[\"cluster_name\"] = cluster_name\n config[\"provider\"] = get_provider_config(cluster_name)\n return config", "title": "" }, { "docid": "e55f7d850f84f7e9e40e44c4f6bc7272", "score": "0.54871607", "text": "def transform(cls, clients, resource_config):\n\n with open(os.path.expanduser(resource_config), \"r\") as f:\n return f.read()", "title": "" }, { "docid": "8241b8ad450acd572eec0fd3fad21e61", "score": "0.51306844", "text": "def format_resources(self):", "title": "" }, { "docid": "115ee9f0f710a74547471bf6336117d9", "score": "0.5004231", "text": "def _get_auth_cloud(cloud: str, config: IO[Any] = None) -> str:\n if config:\n cfg = configparser.ConfigParser()\n cfg.read_file(config)\n\n custom = {}\n custom[\"ARM\"] = cfg[\"ENDPOINTS\"][\"Resource_Manager\"]\n custom[\"AD\"] = cfg[\"ENDPOINTS\"][\"AD\"]\n custom[\"AAD\"] = cfg[\"ENDPOINTS\"][\"AD_Graph_ResourceId\"]\n custom[\"GRAPH\"] = cfg[\"ENDPOINTS\"][\"MS_Graph\"]\n custom[\"MGMT\"] = cfg[\"ENDPOINTS\"][\"Management\"]\n return custom\n return CLOUD_MAP[cloud]", "title": "" }, { "docid": "784fe8c92747462f147854cb975d3154", "score": "0.4976655", "text": "def _translate_legacy_settings(\n mp_config: Dict[str, Any], translate: Dict[str, str]\n) -> Dict[str, Any]:\n for src, target in translate.items():\n src_value = _get_config(src, mp_config)\n _set_config(target, mp_config, src_value, True)\n _del_config(src, mp_config)\n return mp_config", "title": "" }, { "docid": "292c3c1b25e46841c93d746f43b48240", "score": "0.49288023", "text": "def resource_file_format(value):\n try:\n file_name, http_url = value.split('=', 1)\n except ValueError:\n message = (\"Incorrectly formatted resource reference. 
\"\n \"Argument values should be in the format filename=httpurl\")\n raise ValueError(message)\n return {'file_path': file_name, 'http_url': http_url}", "title": "" }, { "docid": "5fe2ea0c76880c931a5cc96da594c22a", "score": "0.49273717", "text": "def converter(val):\n if isinstance(val, cl):\n return val\n else:\n return cl[val]", "title": "" }, { "docid": "3d2fe6ea06dca8a223f456286445f86e", "score": "0.48532087", "text": "def to_python(self, value):\n if value is None:\n return None\n if isinstance(value, self.of):\n return value\n if isinstance(value, dict):\n return create_resource_from_dict(value, self.of, full_clean=False)\n msg = self.error_messages[\"invalid\"] % self.of\n raise exceptions.ValidationError(msg)", "title": "" }, { "docid": "0e39050078a795ecfa3e23003b7551ce", "score": "0.48426044", "text": "def translate(self, data={}):\n return {\n \"resource_id\": data.get(\"resourceId\", \"\"),\n \"resource_name\": data.get(\"metadata\", [\"\"])[0], # access key ID\n }", "title": "" }, { "docid": "a465724becc595889805855f974bfbd4", "score": "0.48391703", "text": "def GenerateConfig(context):\n\n resources = [{\n 'name': 'cloud-sql-proxy-external-ip',\n 'type': 'compute.v1.address',\n 'properties': {\n 'region': context.properties['region']\n }\n }]\n return {'resources': resources}", "title": "" }, { "docid": "cba0d921ba028f43741b3ce1a223ef60", "score": "0.4832624", "text": "def transform(cls, clients, resource_config):\n if \"name\" not in resource_config and \"regex\" not in resource_config:\n # NOTE(wtakase): gets resource name from OpenStack id\n glanceclient = clients.glance()\n resource_name = _name_from_id(resource_config=resource_config,\n resources=list(\n glanceclient.images.list()),\n typename=\"image\")\n resource_config[\"name\"] = resource_name\n\n # NOTE(wtakase): gets EC2 resource id from name or regex\n ec2client = clients.ec2()\n resource_ec2_id = _id_from_name(resource_config=resource_config,\n resources=list(\n ec2client.get_all_images()),\n typename=\"ec2_image\")\n return resource_ec2_id", "title": "" }, { "docid": "394a10d5b4c8eb671720ea019850445a", "score": "0.4810268", "text": "def _map_raw_example_value_to_language(self, raw_value):\n language = self.language()\n\n has_language = False\n mapping = None\n if (\"language_type_mappings\" in self.examples) and (language in self.examples[\"language_type_mappings\"]):\n has_language = True\n mapping = self.examples[\"language_type_mappings\"][language]\n\n value = \"\";\n if isinstance(raw_value, basestring):\n # differentiate between enums and strings here\n if raw_value[:2] == \"e:\":\n value = \"\\\"{}\\\"\".format(raw_value.split(\".\")[-1])\n else:\n if has_language:\n value = \"{}{}{}\".format(mapping[\"string_prefix\"], raw_value, mapping[\"string_suffix\"])\n else:\n value = \"\\\"{}\\\"\".format(raw_value)\n elif isinstance(raw_value, bool):\n if has_language:\n value = mapping[\"true_value\"] if raw_value else mapping[\"false_value\"];\n else:\n value = \"true\" if raw_value else \"false\";\n elif isinstance(raw_value, list):\n if has_language:\n value = \"{}{}{}\".format(mapping[\"array_prefix\"], raw_value, mapping[\"array_suffix\"])\n else:\n value = \"[{}]\".format(raw_value)\n else:\n value = \"{}\".format(raw_value)\n\n return value", "title": "" }, { "docid": "b22da42758a1db09118ec13ba9538ac5", "score": "0.47885987", "text": "def GenerateConfig(context):\n\n resources = [{\n 'name': 'cloud-sql-proxy-internal-ip',\n 'type': 'compute.v1.address',\n 'properties': {\n 'region': 
context.properties['region'],\n 'addressType': 'INTERNAL',\n 'purpose': 'GCE_ENDPOINT',\n 'subnetwork': '$(ref.cloud-sql-proxy-subnetwork.selfLink)'\n }\n }]\n return {'resources': resources}", "title": "" }, { "docid": "8f09401efad05039b6f6efc8d2bb0559", "score": "0.47808254", "text": "def convert(fmt: str, value: str) -> str:\n if fmt == \"time\":\n reformatter = _to_time\n elif fmt == \"date\":\n reformatter = _to_date\n elif fmt == \"string\":\n reformatter = lambda n: n and str(n)\n elif fmt == \"integer\":\n reformatter = lambda n: n and int(n)\n elif fmt == \"boolean\":\n reformatter = _to_bool\n elif fmt == \"number\":\n reformatter = lambda n: n and float(n)\n else:\n # If we don't have a specified reformatter, use the identity function\n reformatter = id\n\n try:\n return reformatter(value)\n except Exception as e:\n raise jsonschema.ValidationError(e)", "title": "" }, { "docid": "9590bc4717be087328acf8c9318a239c", "score": "0.47775352", "text": "def translate_path(self, path):\n global CONFIGURATIONS_PATH\n if type(path) != str:\n return path\n if 'CONFIGURATIONS_PATH' in str(path):\n path = path.replace('CONFIGURATIONS_PATH', CONFIGURATIONS_PATH)\n if 'DATA_PATH' in str(path):\n path = path.replace('DATA_PATH', self.DATA_PATH)\n if 'RESULTS_PATH' in str(path):\n path = path.replace('RESULTS_PATH', self.RESULTS_PATH)\n if 'NUM' in str(path):\n path = path.replace('NUM', self.NUM)\n return path", "title": "" }, { "docid": "43a1631e845e006fb599f6448152089c", "score": "0.47364503", "text": "def config(context, mapping, args):\n fn = context.resource(mapping, b'ui').config\n return _config(context, mapping, args, fn, evalstring)", "title": "" }, { "docid": "c89637d1091dcde7e90930acce3fad89", "score": "0.47036982", "text": "def _config_values_to_strings(cls, config):\n\n options = {option.key: option for option in cls.config_spec}\n dependencies = set(dependency.key for dependency in cls.dependencies)\n\n config_as_strings = {}\n for key in config:\n if key in dependencies:\n continue\n elif key == \"name\" or key == \"seed\":\n val = config[key]\n else:\n val = options[key].string_representation(config[key])\n\n reconverted_typed_value = options[key].type(val)\n current_typed_value = config[key]\n if current_typed_value != reconverted_typed_value:\n raise RuntimeError(\n f\"value changed during type conversion: '{current_typed_value}' became '{reconverted_typed_value}'\"\n )\n\n config_as_strings[key] = val\n\n return config_as_strings", "title": "" }, { "docid": "5cac521caa96797144a64d6fff42259b", "score": "0.46931568", "text": "def GenerateConfig(context):\n name_prefix = context.properties['namePrefix']\n machine_type = ''.join(['https://www.googleapis.com/compute/v1/projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']])\n \n\n resources = {\n 'type': 'compute.v1.instance',\n 'name': name_prefix,\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': machine_type,\n 'image': context.properties['image'],\n }\n }\n return {'resources': resources}", "title": "" }, { "docid": "4f0eb9b9e343110a97c5e9d485480528", "score": "0.46801832", "text": "def transform_config_name(config_name):\n return config_name", "title": "" }, { "docid": "1bfb975cb8de18c117b1ffa1c8cdbcca", "score": "0.4678424", "text": "def get_values(self):\n res = super(ResConfigSettings, self).get_values()\n res.update(\n currency_update=self.env['ir.config_parameter'].sudo().get_param('currency_update'),\n 
service_provider=self.env['ir.config_parameter'].sudo().get_param('service_provider')\n )\n return res", "title": "" }, { "docid": "14ffddd75f14bcbb798bdeb29bb3467c", "score": "0.46752363", "text": "def generate_config(context):\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n config_name = context.properties.get('config')\n\n props = {\n 'variable': properties.get('name', properties.get('variable')),\n 'parent': properties['parent'],\n # TODO: uncomment after gcp type is fixed\n # 'project': project_id,\n }\n\n optional_properties = ['text', 'value']\n props.update({\n p: properties[p]\n for p in optional_properties if p in properties\n })\n\n resources = [{\n 'name': context.env['name'],\n # https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables\n 'type': 'gcp-types/runtimeconfig-v1beta1:projects.configs.variables',\n 'properties': props,\n 'metadata': {\n 'dependsOn': [config_name]\n }\n }]\n\n outputs = [{\n 'name': 'updateTime',\n 'value': '$(ref.{}.updateTime)'.format(context.env['name'])\n }]\n\n return {'resources': resources, 'outputs': outputs}", "title": "" }, { "docid": "806cbf8cf827bd25208d02aaf30eb366", "score": "0.46696132", "text": "def generate_config(context):\n\n project_id = context.env['project']\n name = context.properties.get('name', context.env['name'])\n\n if context.properties['destinationType'] == 'pubsub':\n destination = 'pubsub.googleapis.com/projects/{}/topics/{}'.format(\n project_id,\n context.properties['destinationName']\n )\n elif context.properties['destinationType'] == 'storage':\n destination = 'storage.googleapis.com/{}'.format(\n context.properties['destinationName']\n )\n elif context.properties['destinationType'] == 'bigquery':\n destination = 'bigquery.googleapis.com/projects/{}/datasets/{}'.format(\n project_id,\n context.properties['destinationName']\n )\n\n properties = {\n 'name': name,\n 'sink': name,\n 'destination': destination,\n 'uniqueWriterIdentity': context.properties['uniqueWriterIdentity']\n }\n\n sink_filter = context.properties.get('filter')\n if sink_filter:\n properties['filter'] = sink_filter\n\n resources = [\n {\n 'name': context.env['name'],\n 'type': 'logging.v2.sink',\n 'properties': properties\n }\n ]\n\n return {'resources': resources}", "title": "" }, { "docid": "dbfa20e750e28d78cf34e2896b11bb40", "score": "0.46485394", "text": "def _patch_fmt_config(conf, ctx):\n if isinstance(conf, str):\n return conf.format(**ctx)\n if isinstance(conf, dict):\n conf.update({k: _patch_fmt_config(v, ctx) for k, v in conf.items()})\n return conf\n if isinstance(conf, list):\n return [_patch_fmt_config(v, ctx) for v in conf]\n return conf", "title": "" }, { "docid": "e7280b5dc8a07bc7642a4789ffb28c71", "score": "0.46286938", "text": "def generate_config(context):\n\n properties = context.properties\n name = properties.get('name', context.env['name'])\n project = properties.get('project', context.env['project'])\n target = properties['target']\n\n policy = {}\n\n autoscaler = {\n 'type': None, # Will be set up at a later stage.\n 'name': context.env['name'],\n 'properties': {\n 'name': name,\n 'project': project,\n 'autoscalingPolicy': policy,\n 'target': target\n }\n }\n\n policy_props = ['coolDownPeriodSec',\n 'minNumReplicas',\n 'maxNumReplicas',\n 'customMetricUtilizations',\n 'loadBalancingUtilization',\n 'cpuUtilization']\n\n for prop in policy_props:\n set_optional_property(policy, properties, prop)\n\n is_regional = 
'region' in properties\n location = properties['region'] if is_regional else properties['zone']\n location_output = set_autoscaler_location(autoscaler, is_regional, location)\n\n set_optional_property(autoscaler['properties'], properties, 'description')\n\n return {\n 'resources': [autoscaler],\n 'outputs': [\n {\n 'name': 'name',\n 'value': name\n },\n {\n 'name': 'selfLink',\n 'value': '$(ref.{}.selfLink)'.format(context.env['name'])\n }\n ] + [location_output]\n }", "title": "" }, { "docid": "28eda5082edf2ef8912608cde15eb812", "score": "0.4623517", "text": "def labels_str_to_rule_format(labels_string: str, gc_api: RESTManagementAPI) -> Dict[str, List[Dict[str, List[str]]]]:\r\n structured_labels = {\"or_labels\": list()}\r\n # Normalize spaces\r\n labels_string = labels_string.replace(\", \", \",\").replace(\r\n \" ,\", \",\").replace(\"& \", \"&\").replace(\" &\", \"&\")\r\n labels_string = labels_string.replace(\": \", \":\").replace(\" :\", \":\").strip()\r\n\r\n for or_label in labels_string.split(','):\r\n and_labels = {\"and_labels\": list()}\r\n for and_label in or_label.split('&'):\r\n key, value = and_label.split(':')\r\n and_labels[\"and_labels\"].append(get_label_id(key, value, gc_api))\r\n structured_labels[\"or_labels\"].append(and_labels)\r\n return structured_labels", "title": "" }, { "docid": "89160b4517e35020652a16620d5df5c4", "score": "0.4573189", "text": "def _get_value_from_conf_file(self, kind, converter=None):\n file_name = self.get_conf_file_name(kind)\n msg = _('Error while reading %s')\n\n try:\n with open(file_name, 'r') as f:\n try:\n return converter and converter(f.read()) or f.read()\n except ValueError, e:\n msg = _('Unable to convert value in %s')\n except IOError, e:\n msg = _('Unable to access %s')\n\n LOG.debug(msg % file_name)\n return None", "title": "" }, { "docid": "501562fcdcb47413890f6a6f94196275", "score": "0.4561057", "text": "def _decode_cfg_value(cls, value):\n # Configs parsed from raw yaml will contain dictionary keys that need to be\n # converted to CfgNode objects\n if isinstance(value, dict):\n return cls(value)\n # All remaining processing is only applied to strings\n if not isinstance(value, str):\n return value\n # Try to interpret `value` as a:\n # string, number, tuple, list, dict, boolean, or None\n try:\n value = literal_eval(value)\n # The following two excepts allow v to pass through when it represents a\n # string.\n #\n # Longer explanation:\n # The type of v is always a string (before calling literal_eval), but\n # sometimes it *represents* a string and other times a data structure, like\n # a list. In the case that v represents a string, what we got back from the\n # yaml parser is 'foo' *without quotes* (so, not '\"foo\"'). literal_eval is\n # ok with '\"foo\"', but will raise a ValueError if given 'foo'. 
In other\n # cases, like paths (v = 'foo/bar' and not v = '\"foo/bar\"'), literal_eval\n # will raise a SyntaxError.\n except ValueError:\n pass\n except SyntaxError:\n pass\n return value", "title": "" }, { "docid": "617d8ba4630b8b5dccf8094438264703", "score": "0.45576397", "text": "def transform(cls, clients, resource_config):\n\n file_type_dict = {}\n for file_path in resource_config:\n file_path = os.path.expanduser(file_path)\n with open(file_path, \"r\") as f:\n file_type_dict[file_path] = f.read()\n\n return file_type_dict", "title": "" }, { "docid": "ec0fa5c994ff8db6e091c286d4e37482", "score": "0.45281568", "text": "def test_construct_from_properties_with_host_and_scheme(self):\n self.config.set(\"ConnectSDK\", \"connect.api.endpoint.scheme\", \"http\")\n\n communicator_config = CommunicatorConfiguration(self.config)\n\n self.assertEqual(\"http://eu.sandbox.api-ingenico.com\", communicator_config.api_endpoint.geturl())", "title": "" }, { "docid": "1bb8aacfb0168e5cc673e5deeeaaefa4", "score": "0.45243716", "text": "def generate_config(context):\n\n resources = []\n properties = context.properties\n project_id = properties.get('projectId', context.env['project'])\n name = properties.get('config', context.env['name'])\n\n properties['postgres']['image'] = 'gcr.io/{}/{}'.format(project_id, \\\n properties['postgres']['image'])\n properties['worker']['image'] = 'gcr.io/{}/{}'.format(project_id, \\\n properties['worker']['image'])\n properties['webserver']['image'] = 'gcr.io/{}/{}'.format(project_id, \\\n properties['webserver']['image'])\n\n outputs = [\n { 'name': 'region', 'value': properties['region'] },\n { 'name': 'zone', 'value': properties['zone'] },\n { 'name': 'postgres', 'value': properties['postgres'] },\n { 'name': 'worker', 'value': properties['worker'] },\n { 'name': 'webserver', 'value': properties['webserver'] }\n ]\n\n return {'resources': resources, 'outputs': outputs}", "title": "" }, { "docid": "2fe442f64a30a020657d54e841ccce97", "score": "0.4488771", "text": "def generate_config(context):\n\n recordset = {\n 'name': context.env['name'],\n # https://cloud.google.com/dns/docs/reference/v1/resourceRecordSets\n 'type': 'gcp-types/dns-v1:resourceRecordSets',\n 'properties': {\n 'name': context.properties['dnsName'],\n 'managedZone': context.properties['zoneName'],\n 'records': context.properties['resourceRecordSets'],\n }\n }\n\n return {'resources': [recordset]}", "title": "" }, { "docid": "8f01b5d7f3790b935a9a25c37f47172b", "score": "0.4486447", "text": "def GetFormatLinksForTemplate(api_query, hostname):\r\n query_id = api_query.key()\r\n format_links = {}\r\n format_links_list = {}\r\n\r\n for transform, config in co.SUPPORTED_FORMATS.items():\r\n format_links_list.update({\r\n config.get('label'): '%s%s?id=%s&format=%s' % (\r\n hostname, co.LINKS['public_query'], query_id, transform)\r\n })\r\n\r\n format_links['format_links'] = format_links_list\r\n\r\n return format_links", "title": "" }, { "docid": "243ee8c638f5dfc531e2343a4e355657", "score": "0.44853792", "text": "def test_getString_with_python_formating(self):\n config_file = StringIO(\n u'[section]\\n'\n u'value: do %(some)s value\\n'\n u'',\n )\n config = FileConfigurationProxy(\n configuration_file=config_file)\n config.load()\n self.assertEqual(\n u'do %(some)s value',\n config.getString(u'section', u'value'))", "title": "" }, { "docid": "de2b534b4bd4a224401fa61558228701", "score": "0.44709867", "text": "def get_resource_config (self, resource_key, schema=None):\n\n if resource_key in self._resource_aliases 
:\n logger.warning (\"using alias '%s' for deprecated resource key '%s'\" \\\n % (self._resource_aliases[resource_key], resource_key))\n resource_key = self._resource_aliases[resource_key]\n\n if resource_key not in self._resource_configs:\n error_msg = \"Resource key '%s' is not known.\" % resource_key\n raise PilotException(error_msg)\n\n resource_cfg = copy.deepcopy (self._resource_configs[resource_key])\n\n if not schema :\n if 'schemas' in resource_cfg :\n schema = resource_cfg['schemas'][0]\n\n if schema:\n if schema not in resource_cfg :\n raise RuntimeError (\"schema %s unknown for resource %s\" \\\n % (schema, resource_key))\n\n for key in resource_cfg[schema] :\n # merge schema specific resource keys into the\n # resource config\n resource_cfg[key] = resource_cfg[schema][key]\n\n return resource_cfg", "title": "" }, { "docid": "af83a978af086ce6b2c6623c32cbe85f", "score": "0.446761", "text": "def format_create_config(request_data: dict):\n formatted_data = {\n \"id\": str(uuid.uuid4()),\n \"organization\": request_data.get('organization'),\n \"repo\": request_data.get('repository'),\n \"pipeline_steps\": request_data.get('config'),\n \"status\": Status.pending_status.value,\n \"created_by\": request_data.get('user'),\n \"created_date_time\": datetime.now().strftime(\n StatConf.date_format.value\n ),\n \"updated_by\": request_data.get('user'),\n \"updated_date_time\": datetime.now().strftime(\n StatConf.date_format.value\n ),\n \"verified\": Status.verified_status_no.value,\n \"outdated\": Status.outdated_status_no.value\n }\n return formatted_data", "title": "" }, { "docid": "d9387cc8050452b49f9cda5a2bfd76a3", "score": "0.4442421", "text": "def generate_config(context):\n\n properties = context.properties\n project_name = properties.get('name', context.env['name'])\n project_id = properties.get('projectId', project_name)\n\n # Ensure that the parent ID is a string.\n properties['parent']['id'] = str(properties['parent']['id'])\n\n resources = [\n {\n 'name': '{}-project'.format(context.env['name']),\n # https://cloud.google.com/resource-manager/reference/rest/v1/projects/create\n 'type': 'gcp-types/cloudresourcemanager-v1:projects',\n 'properties':\n {\n 'name': project_name,\n 'projectId': project_id,\n 'parent': properties['parent'],\n 'labels' : properties.get('labels', {})\n }\n },\n {\n 'name': '{}-billing'.format(context.env['name']),\n # https://cloud.google.com/billing/reference/rest/v1/projects/updateBillingInfo\n 'type': 'deploymentmanager.v2.virtual.projectBillingInfo',\n 'properties':\n {\n 'name':\n 'projects/$(ref.{}-project.projectId)'.format(context.env['name']),\n 'billingAccountName':\n 'billingAccounts/' +\n properties['billingAccountId']\n }\n }\n ]\n\n api_resources, api_names_list = activate_apis(context)\n resources.extend(api_resources)\n resources.extend(create_service_accounts(context, project_id))\n\n resources.extend(create_shared_vpc(context))\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'projectId',\n 'value': '$(ref.{}-project.projectId)'.format(context.env['name'])\n },\n {\n 'name': 'projectNumber',\n 'value': '$(ref.{}-project.projectNumber)'.format(context.env['name'])\n },\n {\n 'name': 'serviceAccountDisplayName',\n 'value':\n '$(ref.{}-project.projectNumber)@cloudservices.gserviceaccount.com'.format(context.env['name']) # pylint: disable=line-too-long\n },\n {## This is a workaround to avoid the need of string concatenation in case of referenving to this output.\n 'name': 'containerSA',\n 'value': 
'serviceAccount:service-$(ref.{}-project.projectNumber)@container-engine-robot.iam.gserviceaccount.com'.format(context.env['name'])\n },\n {\n 'name': 'containerSADisplayName',\n 'value': 'service-$(ref.{}-project.projectNumber)@container-engine-robot.iam.gserviceaccount.com'.format(context.env['name'])\n },\n {\n 'name':\n 'resources',\n 'value':\n [resource['name'] for resource in resources]\n }\n ]\n }", "title": "" }, { "docid": "1f8f9304288f2307ce8f262873b7dcb3", "score": "0.44404075", "text": "def get_resource_id(self, value):\n return super().to_representation(value)", "title": "" }, { "docid": "98458b6cc743c7054c5050be61cc1475", "score": "0.44366366", "text": "def _generate_lookup_configs(self):\n for key, values in self.data.lists():\n for value in values:\n yield LookupConfig(key, six.moves.reduce(\n lambda a, b: {b: a},\n (key.replace('!', '').split(LOOKUP_SEP) + [value])[::-1]\n ))", "title": "" }, { "docid": "071c66b5e000aaa3d130e76f2ee92cd0", "score": "0.4433352", "text": "def ConvertConfig(self, request, global_params=None):\n config = self.GetMethodConfig('ConvertConfig')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "071c66b5e000aaa3d130e76f2ee92cd0", "score": "0.4433352", "text": "def ConvertConfig(self, request, global_params=None):\n config = self.GetMethodConfig('ConvertConfig')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "400b3979540929b98a288f685fc47934", "score": "0.44332087", "text": "def interpolate(self, alternative):\n # ConfigObj with ConfigSpec does not seem to work as intended\n # so have to implement the idea with for loops (messy)\n output = {}\n for key in self.Config.keys():\n output[key] = {}\n if key.startswith('DW_'):\n dw = key.split('DW_')[1]\n for name_key, name_config in self.Config[key].items():\n if isinstance(name_config, dict):\n output[key][name_key] = {}\n output[key][name_key]['name'] = name_key\n spec = ConfigSpecCreator.construct_dw_spec(self, dw, {name_key: name_config})\n for param_key, param in self.Config[key][name_key].items():\n if isinstance(param, dict):\n output[key][name_key][param_key] = {}\n for sub_key, sub in param.items():\n output[key][name_key][param_key][sub_key] = \\\n self.validator.check(spec[name_key][param_key][sub_key], sub)\n else:\n output[key][name_key][param_key] = self.validator.check(spec[name_key][param_key], param)\n else:\n output[key][name_key] = self.validator.check(self.meta_dw.get_any_check(dw, name_key), name_config)\n else:\n output[key] = self.Config[key]\n if alternative:\n alt_output = {k: v for k, v in output.items() if not k.startswith('DW_')}\n alt_output.update({k.removeprefix('DW_'): v for k, v in output.items() if k.startswith('DW_')})\n output = alt_output\n return output", "title": "" }, { "docid": "43a2bc7ae709f9b2944cff2f7cbca48a", "score": "0.44331792", "text": "def opt_resource(self, res):\n url_part, _sep, import_name = res.partition('=')\n self['resources'][url_part] = import_name", "title": "" }, { "docid": "1524c3a3cf3956ad2e0f4e1ff9e3478a", "score": "0.44321972", "text": "def _parse_plugin_configuration(cls, v, values):\n if \"type\" in values:\n return VerificationType(values[\"type\"]).metadata.schema(**v)\n else:\n return v", "title": "" }, { "docid": "cd8aa0156b46f7ad5164e77c32189ec8", "score": "0.4429879", "text": "def adapt_value(cls, prop: PropertiesLike, value: Any) -> Any:\n\n prop = Properties.get(prop)\n return cls.general_map[prop].str_to_value(value)", 
"title": "" }, { "docid": "7bef26619fb76087ff5bd9c7361d11fe", "score": "0.44231808", "text": "def to_config(self):\n return {\n keys.DatasourceKeys.NAME: self.name,\n keys.DatasourceKeys.SOURCE: self._source,\n keys.DatasourceKeys.ID: self._id\n }", "title": "" }, { "docid": "a3922719f423d891095b7f5c9f1e82c9", "score": "0.44216752", "text": "def values(config, key):", "title": "" }, { "docid": "7f3adb650ec76935d1b682ab3a7d789c", "score": "0.44163606", "text": "def test_construct_from_properties_with_host_scheme_port(self):\n self.config.set(\"ConnectSDK\", \"connect.api.endpoint.scheme\", \"http\")\n self.config.set(\"ConnectSDK\", \"connect.api.endpoint.port\", \"8080\")\n\n communicator_config = CommunicatorConfiguration(self.config)\n\n self.assertEqual(\"http://eu.sandbox.api-ingenico.com:8080\", communicator_config.api_endpoint.geturl())", "title": "" }, { "docid": "0b967cea3b86a1de3482a01cacfb0573", "score": "0.44152966", "text": "def decode_config(dct):\r\n if Config.json_type_key in dct:\r\n configs = Config(\r\n dct['workspace'], \r\n dct['collars_table'], \r\n dct['date_field'], \r\n dct['globalid_field'], \r\n dct['related_guid_field'], \r\n dct['output_table'])\r\n return configs\r\n else:\r\n return dct", "title": "" }, { "docid": "4c84d2cf1642f60d1f84f3e93cf31686", "score": "0.44082645", "text": "def _make_resource(self, resdict):\n # name' should have been defined by the caller, so if it's\n # missing it's an internal coherency error\n assert 'name' in resdict, (\n \"Invalid resource definition '%s': missing required key 'name'.\"\n % resdict)\n\n # default values\n resdict.setdefault('enabled', True)\n if not resdict['enabled']:\n gc3libs.log.info(\n \"Dropping computational resource '%s'\"\n \" because of 'enabled=False' setting\"\n \" in configuration file.\",\n resdict['name'])\n return None\n\n # minimal sanity check\n for key in self._resource_required_keys:\n if key not in resdict:\n raise gc3libs.exceptions.ConfigurationError(\n \"Missing required parameter '{key}'\"\n \" in definition of resource '{name}'.\"\n .format(key=key, name=resdict['name']))\n for key in self._resource_keys_value_required:\n if key in resdict and not resdict[key]:\n raise gc3libs.exceptions.ConfigurationError(\n \"Found empty configuration key `{key}`\"\n \" in definition of resource '{name}'.\"\n .format(key=key, name=resdict['name']))\n\n if resdict['type'] in self._removed_resource_types:\n resdict['enabled'] = False\n gc3libs.log.warning(\n \"Dropping computational resource '{name}':\"\n \" resource type '{type}' is no longer supported.\"\n \" Please update your configuration file.\"\n .format(**resdict))\n return None\n\n if __debug__:\n gc3libs.log.debug(\n \"Creating resource '%s' defined by: %s.\",\n resdict['name'], str.join(', ', [\n (\"%s=%r\" % (k, v)) for k, v in sorted(resdict.iteritems())\n ]))\n\n for auth_param in 'auth', 'vm_auth':\n if auth_param in resdict:\n resdict[auth_param] = self.make_auth(resdict[auth_param])\n\n try:\n # valid strings can be, e.g., `shellcmd+ssh` or `sge`\n resource_type = resdict['type'].split('+')[0]\n cls = self._get_resource_constructor(resource_type)\n # check that required parameters are given, and try to\n # give a sensible error message if not; if we do not\n # do this, users see a traceback like this::\n #\n # gc3.gc3libs: ERROR: Could not create resource \\\n # 'schroedinger-via-ssh': __init__() takes at least 10 \\\n # non-keyword arguments (9 given). 
Configuration file \\\n # problem?\n #\n # which gives no clue about what to correct!\n args, varargs, keywords, defaults = inspect.getargspec(\n cls.__init__)\n if defaults is not None:\n # `defaults` is a list of default values for the last N args\n defaulted = dict((argname, value)\n for argname, value in zip(reversed(args),\n reversed(defaults)))\n else:\n # no default values at all\n defaulted = {}\n for argname in args[1:]: # skip `self`\n if argname not in resdict and argname not in defaulted:\n raise gc3libs.exceptions.ConfigurationError(\n \"Missing required configuration parameter '%s'\"\n \" for resource '%s'\" % (argname, resdict['name']))\n\n # finally, try to construct backend class...\n return cls(**dict(resdict))\n\n except Exception as err:\n gc3libs.log.error(\n \"Could not create resource '%s': %s. Configuration file\"\n \" problem?\" % (resdict['name'], str(err)))\n raise", "title": "" }, { "docid": "48ada46bb7a9664704f8ea93db4ba53b", "score": "0.44045466", "text": "def show_cfg(resource_url, escape=...):\n ...", "title": "" }, { "docid": "551db155ae146036b745b86548a68ec2", "score": "0.43882695", "text": "def build_convertor(cfg):\n return build_from_cfg(cfg, CONVERTORS)", "title": "" }, { "docid": "daa76f69532dd141363f6cc0d3f11375", "score": "0.43778312", "text": "def get_config():", "title": "" }, { "docid": "fa87703fc31f0889204812e61966d999", "score": "0.43665248", "text": "def set_values(self):\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param('service_provider', self.service_provider)\n self.env['ir.config_parameter'].sudo().set_param('currency_update', self.currency_update)", "title": "" }, { "docid": "bae5b9ec6cb21f163b8f908eb7c8a0d5", "score": "0.43538365", "text": "def transform(cls, clients, resource_config):\n resource_id = resource_config.get(\"id\")\n if not resource_id:\n glanceclient = clients.glance()\n resource_id = _id_from_name(resource_config=resource_config,\n resources=list(\n glanceclient.images.list()),\n typename=\"image\")\n return resource_id", "title": "" }, { "docid": "23474e7c1e9f0ae2c0f0cfa298464279", "score": "0.4351763", "text": "def _reformat_enum(rest_param):\n\n type = rest_param['type']\n options = rest_param['options']\n size = rest_param['size']\n\n return {'type':type,'options':options,'size':size}", "title": "" }, { "docid": "e69fb72c3687bf35e0090b3dd07d0673", "score": "0.43480092", "text": "def _translate_valid_const(self,const:dict):\n \n logger.debug(f\"checking constraint {const}\")\n if const[\"const_name\"] not in self.valid_const:\n logger.warning(f\"ignoring invalid constraint {const} \")\n return None\n valid_arg = self.valid_const[const[\"const_name\"]]\n if 'blocks' in const:\n const['blocks'] = const[\"blocks\"].replace(']','').replace('[','').split(',')\n self._check_type(const['blocks'],valid_arg['blocks'])\n if 'nets' in const:\n const['nets'] = const[\"nets\"].replace(']','').replace('[','').split(',')\n self._check_type(const['nets'],valid_arg['nets'])\n if 'pins1' in const:\n const['pins1'] = const[\"pins1\"].replace(']','').replace('[','').split(',')\n self._check_type(const['pins1'],valid_arg['pins2'])\n if 'pins2' in const:\n const['pins2'] = const[\"pins2\"].replace(']','').replace('[','').split(',')\n self._check_type(const['pins2'],valid_arg['pins2'])\n if 'ports' in const:\n const['ports'] = const[\"ports\"].replace(']','').replace('[','').split(',')\n self._check_type(const['ports'],valid_arg['ports'])\n if 'pairs' in const:\n groups=[]\n for blocks in 
const[\"pairs\"].split('],'):\n groups.append(blocks.replace(']','').replace('[','').split(','))\n const['pairs'] = groups\n self._check_type(const['pairs'],valid_arg['pairs'])\n if 'name' in const:\n self._check_type(const['name'],valid_arg['name'])\n if 'net1' in const:\n self._check_type(const['net1'],valid_arg['net1'])\n if 'net2' in const:\n self._check_type(const['net2'],valid_arg['net2'])\n if 'style' in const:\n self._check_type(const['style'],valid_arg['style'])\n if 'abs_distance' in const:\n const['abs_distance']=int(const['abs_distance'])\n if 'criticality' in const:\n const['abs_distance'] = int(const['criticality'])\n if 'multiplier' in const:\n const['multiplier'] = int(const['multiplier'])\n if 'weight' in const:\n const['weight'] = int(const['weight'])\n if 'direction' in const:\n self._check_type(const['direction'],valid_arg['direction'])\n if 'location' in const:\n self._check_type(const['location'],valid_arg['location'])\n if 'unit_cap' in const:\n self._check_type(const['unit_cap'],valid_arg['unit_cap'])\n if 'shield' in const:\n self._check_type(const['shield'],valid_arg['shield']) \n if 'num_units' in const:\n const['num_units'] = [int(x) for x in const[\"num_units\"].replace(']','').replace('[','').split(',')]\n self._check_type(const['num_units'],valid_arg['num_units']) \n if 'dummy' in const:\n const['dummy'] = (const['dummy']==True)\n return const", "title": "" }, { "docid": "31d526c65b55dd1f34793db4aac0fcd0", "score": "0.43455446", "text": "def _prep_sge_resource(resource):\n resource = resource.strip()\n k, v = resource.split(\"=\")\n if k in set([\"ar\"]):\n return \"#$ -%s %s\" % (k, v)\n else:\n return \"#$ -l %s\" % resource", "title": "" }, { "docid": "369ebdb8eaeccd5b0011c13250c14890", "score": "0.43443277", "text": "def _create_config(root_resource, cluster_name, type, tag , properties):\n cpath = paths.CLUSTERS_CONFIG_PATH % cluster_name\n data = {\"type\":type, \"tag\":tag, \"properties\":properties}\n resp = root_resource.put(path=cpath , payload=data)\n return utils.ModelUtils.create_model(status.StatusModel, resp, root_resource, \"NO_KEY\")", "title": "" }, { "docid": "c2fc1398311bdfcb741658130f2b6c17", "score": "0.43384135", "text": "def _GetOptionDecoderConstructions(cls):\n result = super()._GetOptionDecoderConstructions()\n result.update({\n 'cloud': (option_decoders.EnumDecoder, {\n # Uncomment when there are other cloud implementations\n # 'valid_values': provider_info.VALID_CLOUDS\n 'valid_values': ['GCP']\n }),\n })\n return result", "title": "" }, { "docid": "b7a918156146ee51e7ef8ccc804de5b7", "score": "0.43323964", "text": "def test_protocol_constants_should_return_values_from_settings(self):\n\n protocol_constants_settings = {\n 'CONCENT_MESSAGING_TIME': settings.CONCENT_MESSAGING_TIME,\n 'FORCE_ACCEPTANCE_TIME': settings.FORCE_ACCEPTANCE_TIME,\n 'MINIMUM_UPLOAD_RATE': settings.MINIMUM_UPLOAD_RATE,\n 'DOWNLOAD_LEADIN_TIME': settings.DOWNLOAD_LEADIN_TIME,\n 'PAYMENT_DUE_TIME': settings.PAYMENT_DUE_TIME,\n 'ADDITIONAL_VERIFICATION_CALL_TIME': settings.ADDITIONAL_VERIFICATION_CALL_TIME,\n }\n\n expected_protocol_constants = {name.lower(): value for name, value in protocol_constants_settings.items()}\n\n response = self.client.get(\n reverse('core:protocol_constants'),\n )\n\n self.assertEqual(response.json(), expected_protocol_constants)", "title": "" }, { "docid": "f6f2deeb6f635b4c9c8efd9c22d0d241", "score": "0.4321222", "text": "def manage_runtime_params(param1):\n if get_arcpy_runtime() == \"python\":\n returnparam1 = r\"Database 
Connections\\GISCloudPROD_pods_os.sde\"\n elif get_arcpy_runtime() == \"arcmap\":\n returnparam1 = param1.valueAsText\n return returnparam1", "title": "" }, { "docid": "3fecbc3a6bdbb878ec4d435688adabf4", "score": "0.4301711", "text": "def relation_configs_from_yaml():\n # mapping {relation => config}\n relation_definitions = declarative.rdflib_load_relcfg(Fixtures.us_geo_rel_cfg.yaml)\n\n config_attr_casts = Fixtures.us_geo_rel_cfg.expects.attr_casts\n\n parsed_next_to_cfg = relation_definitions[Fixtures.us_geo_rel_cfg.relations.next_to]\n for config_attr in config_attr_casts:\n cast = config_attr_casts[config_attr]\n assert (\n cast(parsed_next_to_cfg[config_attr]) ==\n cast(Fixtures.us_geo_rel_cfg.expects.next_to[config_attr])\n )\n\n parsed_far_from_cfg = relation_definitions[Fixtures.us_geo_rel_cfg.relations.far_from]\n for config_attr in config_attr_casts:\n cast = config_attr_casts[config_attr]\n assert (\n cast(parsed_far_from_cfg[config_attr]) ==\n cast(Fixtures.us_geo_rel_cfg.expects.far_from[config_attr])\n )", "title": "" }, { "docid": "1c1a65c71dbf827d99a83235eef4612f", "score": "0.42905265", "text": "def model_to_rest_resource(self, model, verbose=False):\n return Resource(model, ASSOCIATION_RULES_FIELDS).to_dict(verbose)", "title": "" }, { "docid": "1c1a65c71dbf827d99a83235eef4612f", "score": "0.42905265", "text": "def model_to_rest_resource(self, model, verbose=False):\n return Resource(model, ASSOCIATION_RULES_FIELDS).to_dict(verbose)", "title": "" }, { "docid": "d7dee8e8bb65b69968ecdbe8c0f50163", "score": "0.42882705", "text": "def resource_to_parameters(self, resource):\n parameters = {}\n for key, value in iteritems(resource):\n if key in ('apiVersion', 'kind', 'status'):\n continue\n elif key == 'metadata' and isinstance(value, dict):\n for meta_key, meta_value in iteritems(value):\n if meta_key in ('name', 'namespace', 'labels', 'annotations'):\n parameters[meta_key] = meta_value\n elif key in self.helper.argspec and value is not None:\n parameters[key] = value\n elif isinstance(value, dict):\n self._add_parameter(value, [to_snake(key)], parameters)\n return parameters", "title": "" }, { "docid": "7fa7a27a07bd3ac811b97e38993b6e41", "score": "0.42872646", "text": "def scan_resource_conf(self, conf):\n\n if 'Properties' in conf.keys():\n if 'Protocol' in conf['Properties'].keys():\n if conf['Properties']['Protocol'] == 'HTTPS' or conf['Properties']['Protocol'] == 'TLS':\n return CheckResult.PASSED\n else:\n if 'DefaultActions' in conf['Properties'].keys():\n if conf['Properties']['DefaultActions'][0]['Type'] == 'redirect':\n if conf['Properties']['DefaultActions'][0]['RedirectConfig']['Protocol'] == \"HTTPS\":\n return CheckResult.PASSED\n return CheckResult.FAILED", "title": "" }, { "docid": "11349cfa73c975034b900b102120277a", "score": "0.42822754", "text": "def transform(cls, clients, resource_config):\n resource_id = resource_config.get(\"id\")\n if resource_id:\n return resource_id\n else:\n neutronclient = clients.neutron()\n for net in neutronclient.list_networks()[\"networks\"]:\n if net[\"name\"] == resource_config.get(\"name\"):\n return net[\"id\"]\n\n raise exceptions.InvalidScenarioArgument(\n \"Neutron network with name '{name}' not found\".format(\n name=resource_config.get(\"name\")))", "title": "" }, { "docid": "32eb804b7d44adebd17b63cfeaea4c95", "score": "0.42822498", "text": "def get_config(self):", "title": "" }, { "docid": "2e3dab53513e483ea28c54349e57b455", "score": "0.42782378", "text": "def apply_config(namespace, list_resource):\n oc(\"apply\", 
\"-f\", \"-\", \"-n\", namespace, _in=json.dumps(list_resource))", "title": "" }, { "docid": "0a816e6fe2b74fbdcc08b4526efe4d70", "score": "0.42778665", "text": "def encode_read_config():\n return encode_command(CommandType.ReadConfig)", "title": "" }, { "docid": "8ae2ecd77af32a991473077dd071519e", "score": "0.42705652", "text": "def create_config(rest_base, suffix, cs_puid, member_id, vlans):\n member_ids = []\n for member in vlans:\n member_ids.append({'@odata.id': member['@odata.id']})\n\n return {\n '@odata.context': '{0}$metadata#{1}/Links/Members/{2}/Links/SimpleNetwork/Links/Members/{3}/Links/VLANs/$entity'.format(rest_base, suffix, cs_puid, member_id),\n '@odata.id': '{0}{1}/{2}/SimpleNetwork/{3}/VLANs'.format(rest_base, suffix, cs_puid, member_id),\n '@odata.type': '#VLanNetworkInterface.1.00.0.VLANNetworkInterfaceCollection',\n 'Name': 'VLAN Network Interface Collection',\n 'Links': {\n 'Members@odata.count': len(member_ids),\n 'Members': member_ids\n }\n }", "title": "" }, { "docid": "55cf06d9fc0715a1e37104abd8991410", "score": "0.42652622", "text": "def config_section_data():\n\n config_data = u\"\"\"[fn_google_cloud_scc]\n# base url to the google cloud console\ngoogle_cloud_base_url=https://console.cloud.google.com\n# path to google application credentials JSON file\ngoogle_application_credentials_path=\n# organization id of your google cloud organization (found in the cloud console UI)\ngoogle_cloud_organization_id=\n\n# boolean to send SOAR ID as a Security Mark when case is sent to SOAR\n# change to false or remove to turn off\nadd_soar_id_as_security_mark=True\n\n# optional findings filter -- used when poller is active and is default if no filter is provided on manual actions\n# Example: findings_filter=category=MFA_NOT_ENFORCED AND state=ACTIVE\n# findings_filter=\n\n# Optional override value for templates used for creating/updating/closing SOAR cases\n#soar_create_case_template=\n#soar_update_case_template=\n#soar_close_case_template=\n\n# Number of seconds between poller cycles. 
A value of 0 disables the poller\npolling_interval=10\n# Number of minutes to lookback for queries the first time the poller runs.\npolling_lookback=120\n\"\"\"\n return config_data", "title": "" }, { "docid": "9f0d15e33cfbaf76f03215d7f571a169", "score": "0.42634994", "text": "def cloud_config(self) -> \"ConfigMapFileReference\":\n return self.__cloud_config", "title": "" }, { "docid": "bae1b032532613b8a738718f46d3e483", "score": "0.4262912", "text": "def test_config_match_spec(self, actual_connector_spec: ConnectorSpecification, connector_config: SecretDict):\n # Getting rid of technical variables that start with an underscore\n config = {key: value for key, value in connector_config.data.items() if not key.startswith(\"_\")}\n\n try:\n jsonschema.validate(instance=config, schema=actual_connector_spec.connectionSpecification)\n except jsonschema.exceptions.ValidationError as err:\n pytest.fail(f\"Config invalid: {err}\")\n except jsonschema.exceptions.SchemaError as err:\n pytest.fail(f\"Spec is invalid: {err}\")", "title": "" }, { "docid": "d5dffd165dc76f40b97777e354f30273", "score": "0.42628083", "text": "def replaceConstants(value, constants):\n formatted_array = []\n split_values = value.split()\n for val in split_values:\n captured_patterns = re.findall('\\$\\{\\w*\\}', val)\n if not captured_patterns:\n formatted_array.append('\\\"{}\\\"'.format(val))\n elif captured_patterns:\n val_format = re.sub(r'\\$\\{(\\w*)\\}', r'%(\\1)s', val)\n val_format = '\\\"{}\\\" % CONSTANTS'.format(val_format)\n formatted_array.append(val_format)\n\n return \", \".join(formatted_array)", "title": "" }, { "docid": "e46dbcfe588bb55ee2dec23c8362a399", "score": "0.42597544", "text": "def get_config(self):\n config = {\"name\": self._name}\n if self.clipnorm is not None:\n config[\"clipnorm\"] = self.clipnorm\n if self.clipvalue is not None:\n config[\"clipvalue\"] = self.clipvalue\n if self.global_clipnorm is not None:\n config[\"global_clipnorm\"] = self.global_clipnorm\n return config", "title": "" }, { "docid": "f9c998c3104f09642a73197dfe331205", "score": "0.4256806", "text": "def _GenConfig(self, cfg):\n # Some setting names may have a + or - suffix. 
These indicate that the\n # settings modify the default values.\n merged = self.default.copy()\n for setting, vals in cfg.items():\n option, operator = (setting.split(None, 1) + [None])[:2]\n vals = set(vals)\n default = set(self.default.get(option, []))\n # If there is an operator, updated values accordingly.\n if operator == \"+\":\n vals = default.union(vals)\n elif operator == \"-\":\n vals = default.difference(vals)\n merged[option] = list(vals)\n return rdf_protodict.AttributedDict(**merged)", "title": "" }, { "docid": "fc118a11a81017b5debb7714c5568630", "score": "0.4243485", "text": "def get_config(self, handle, as_json=True, timestamp_format=APITimestampFormat.NANOSECOND):\n return self._xjtrans(\"/clips/%s/config\" % handle, \"GET\", None, as_json, timestamp_format)", "title": "" }, { "docid": "dda9d3774960f872207a5fb73c155a4d", "score": "0.4241009", "text": "def add_format(self, keys, values):\n \n self.format = dict(zip(keys.split(\":\"), values.split(\":\")))", "title": "" }, { "docid": "25b7b948daf104b651802d1378368416", "score": "0.42369938", "text": "def _transform(self, resource_from_api):\n for org in resource_from_api:\n # org_name is the unique identifier for the org, formatted as\n # \"organizations/<organization_id>\".\n org_name = org.get('name')\n org_id = org_name[len('%s/' % self.RESOURCE_NAME):]\n\n yield {'org_id': org_id,\n 'name': org_name,\n 'display_name': org.get('displayName'),\n 'lifecycle_state': org.get('lifecycleState'),\n 'raw_org': parser.json_stringify(org),\n 'creation_time': parser.format_timestamp(\n org.get('creationTime'),\n self.MYSQL_DATETIME_FORMAT)}", "title": "" }, { "docid": "6625a1d0b16db7d9ef11b76932af2b14", "score": "0.42342365", "text": "def converter(val):\n if isinstance(val, cl):\n return val\n else:\n return cl(**val)", "title": "" }, { "docid": "01457ef2c3bfb1c8f6d85a442f8ae931", "score": "0.42337826", "text": "def override_resource_config(\n self, instance_configs: Union[List[InstanceConfig], Dict[str, List[InstanceConfig]]]\n ):\n if isinstance(instance_configs, dict):\n self._validate_dict_argument(\n name=\"instance_configs\",\n value=instance_configs,\n allowed_keys=list(self.estimator_dict.keys()),\n )\n self.instance_configs_dict = instance_configs\n else:\n self.instance_configs = instance_configs\n if self.estimator_dict is not None and self.estimator_dict.keys():\n estimator_names = list(self.estimator_dict.keys())\n self.instance_configs_dict = {estimator_names[0]: instance_configs}", "title": "" }, { "docid": "17875a07020fc0f3f7baf4c8cf7d3a74", "score": "0.4232142", "text": "def _convert_syncbn(self, cfg: ConfigType):\n if isinstance(cfg, dict):\n for item in cfg:\n if item == 'norm_cfg':\n cfg[item]['type'] = cfg[item]['type']. 
\\\n replace('naiveSyncBN', 'BN')\n else:\n self._convert_syncbn(cfg[item])", "title": "" }, { "docid": "33dc98ba8981a21dfb3b805324cb3a22", "score": "0.42285395", "text": "def _convert_to_config(self, inventory_list, include_unused_adapters):\n cpc_uri = self.get_property('object-uri')\n cpc_uris = [cpc_uri]\n\n config_dict = OrderedDict()\n\n config_dict['se-version'] = self.prop('se-version')\n config_dict['available-features-list'] = self.prop(\n 'available-features-list', [])\n config_dict['cpc-properties'] = {\n 'auto-start-list': self.prop('auto-start-list'),\n 'description': self.prop('description'),\n 'management-world-wide-port-name':\n self.prop('management-world-wide-port-name'),\n }\n config_dict['capacity-groups'] = [\n dict(group.properties) for group in\n self.capacity_groups.list(full_properties=True)]\n\n partitions = extractByParent(\n RC_PARTITION, cpc_uris, inventory_list)\n # This item is required by the \"Import DPM Configuration\" operation\n config_dict['partitions'] = partitions\n partition_uris = [x['object-uri'] for x in partitions]\n\n adapters = extractAdapters(cpc_uri, inventory_list)\n if adapters:\n config_dict['adapters'] = adapters\n adapter_uris = [x['object-uri'] for x in adapters]\n\n nics = extractByParent(\n RC_NIC, partition_uris, inventory_list)\n if nics:\n config_dict['nics'] = nics\n\n hbas = extractByParent(\n RC_HBA, partition_uris, inventory_list)\n if hbas:\n config_dict['hbas'] = hbas\n\n virtual_functions = extractByParent(\n RC_VIRTUAL_FUNCTION, partition_uris, inventory_list)\n if virtual_functions:\n config_dict['virtual-functions'] = virtual_functions\n\n virtual_switches = extractByParent(\n RC_VIRTUAL_SWITCH, cpc_uris, inventory_list)\n if virtual_switches:\n config_dict['virtual-switches'] = virtual_switches\n\n storage_sites = extractByValueInListProperty(\n RC_STORAGE_SITE, cpc_uri, 'cpc-uris', inventory_list)\n if storage_sites:\n config_dict['storage-sites'] = storage_sites\n storage_site_uris = [x['object-uri'] for x in storage_sites]\n\n storage_subsystems = extractByPropertyInListValue(\n RC_STORAGE_SUBSYSTEM, 'storage-site-uri', storage_site_uris,\n inventory_list)\n if storage_subsystems:\n config_dict['storage-subsystems'] = storage_subsystems\n storage_subsystem_uris = [x['object-uri'] for x in storage_subsystems]\n\n storage_fabrics = extractByPropertyInListValue(\n RC_STORAGE_FABRIC, 'cpc-uri', cpc_uris, inventory_list)\n if storage_fabrics:\n config_dict['storage-fabrics'] = storage_fabrics\n\n storage_switches = extractByPropertyInListValue(\n RC_STORAGE_SWITCH, 'storage-site-uri', storage_site_uris,\n inventory_list)\n if storage_switches:\n config_dict['storage-switches'] = storage_switches\n\n storage_control_units = extractByPropertyInListValue(\n RC_STORAGE_CONTROL_UNIT, 'parent', storage_subsystem_uris,\n inventory_list)\n if storage_control_units:\n config_dict['storage-control-units'] = storage_control_units\n storage_control_unit_uris = [x['object-uri']\n for x in storage_control_units]\n\n storage_paths = extractByPropertyInListValue(\n RC_STORAGE_PATH, 'parent', storage_control_unit_uris,\n inventory_list)\n if storage_paths:\n config_dict['storage-paths'] = storage_paths\n\n storage_ports = extractByPropertyInListValue(\n RC_STORAGE_PORT, 'parent', adapter_uris, inventory_list)\n if storage_ports:\n config_dict['storage-ports'] = storage_ports\n\n network_ports = extractByPropertyInListValue(\n RC_NETWORK_PORT, 'parent', adapter_uris, inventory_list)\n if network_ports:\n config_dict['network-ports'] = 
network_ports\n\n storage_groups = extractByPropertyInListValue(\n RC_STORAGE_GROUP, 'cpc-uri', cpc_uris, inventory_list)\n if storage_groups:\n config_dict['storage-groups'] = storage_groups\n storage_group_uris = [x['object-uri'] for x in storage_groups]\n\n storage_volumes = extractByPropertyInListValue(\n RC_STORAGE_VOLUME, 'parent', storage_group_uris, inventory_list)\n if storage_volumes:\n config_dict['storage-volumes'] = storage_volumes\n\n storage_templates = extractByPropertyInListValue(\n RC_STORAGE_TEMPLATE, 'cpc-uri', cpc_uris, inventory_list)\n if storage_templates:\n config_dict['storage-templates'] = storage_templates\n storage_template_uris = [x['object-uri'] for x in storage_templates]\n\n storage_template_volumes = extractByPropertyInListValue(\n RC_STORAGE_TEMPLATE_VOLUME, 'parent', storage_template_uris,\n inventory_list)\n if storage_template_volumes:\n config_dict['storage-template-volumes'] = storage_template_volumes\n\n virtual_storage_resources = extractByPropertyInListValue(\n RC_VIRTUAL_STORAGE_RESOURCE, 'parent', storage_group_uris,\n inventory_list)\n if virtual_storage_resources:\n config_dict['virtual-storage-resources'] = virtual_storage_resources\n\n tape_links = extractByPropertyInListValue(\n RC_TAPE_LINK, 'cpc-uri', cpc_uris, inventory_list)\n if tape_links:\n config_dict['tape-links'] = tape_links\n tape_link_uris = [x['object-uri'] for x in tape_links]\n\n tape_libraries = extractByPropertyInListValue(\n RC_TAPE_LIBRARY, 'cpc-uri', cpc_uris, inventory_list)\n if tape_libraries:\n config_dict['tape-libraries'] = tape_libraries\n\n virtual_tape_resources = extractByParent(\n RC_VIRTUAL_TAPE_RESOURCE, tape_link_uris, inventory_list)\n if virtual_tape_resources:\n config_dict['virtual-tape-resources'] = virtual_tape_resources\n\n classname_for_partition_links = 'partition-link'\n partition_links = extractByPropertyInListValue(\n classname_for_partition_links, 'cpc-uri', cpc_uris, inventory_list)\n if partition_links:\n config_dict['partition-links'] = partition_links\n\n certificates = extractByPropertyInListValue(\n RC_CERTIFICATE, 'parent', cpc_uris, inventory_list)\n if certificates:\n _add_encoded(self.manager.console, certificates)\n config_dict['certificates'] = certificates\n\n if not include_unused_adapters:\n _remove_unreferenced_adapters(config_dict)\n\n sort_lists(config_dict)\n\n return config_dict", "title": "" }, { "docid": "5e25ef7a35938201e051cbc4b790e662", "score": "0.42283645", "text": "def GenerateConfig(context):\n\n resources = [{\n 'name': 'qubole-bastion-host',\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': '/zones/'+context.properties['zone']+'/machineTypes/'+context.properties['bastion-vm-type'],\n 'canIpForward': True,\n 'tags': {\n 'items': [\n 'qubole-bastion-host'\n ]\n },\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': 'projects/'+'debian-cloud/global/'+'images/family/debian-9'\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.qubole-dedicated-vpc.selfLink)',\n 'subnetwork': '$(ref.qu-vpc-public-subnetwork.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT',\n 'natIP': '$(ref.qubole-bastion-external-ip.address)'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''+getBootstrapAsArray(context.properties['public_ssh_key'], context.properties['qubole_public_key'])\n }]\n }\n }\n }]\n return {'resources': 
resources}", "title": "" }, { "docid": "ab63df23f263162844beebedca0c4565", "score": "0.42264277", "text": "def __init__(__self__, *,\n resource_group_name: pulumi.Input[str],\n connector_name: Optional[pulumi.Input[str]] = None,\n credentials_key: Optional[pulumi.Input[str]] = None,\n credentials_secret: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n report_id: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[Union[str, 'ConnectorStatus']]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if connector_name is not None:\n pulumi.set(__self__, \"connector_name\", connector_name)\n if credentials_key is not None:\n pulumi.set(__self__, \"credentials_key\", credentials_key)\n if credentials_secret is not None:\n pulumi.set(__self__, \"credentials_secret\", credentials_secret)\n if display_name is not None:\n pulumi.set(__self__, \"display_name\", display_name)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if report_id is not None:\n pulumi.set(__self__, \"report_id\", report_id)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "title": "" }, { "docid": "9efc6ced110290c2ca1ce61d1ca85b17", "score": "0.42099494", "text": "def test_different_resource_formats(self):\r\n #javaproperties\r\n fh = open('%s/../lib/javaproperties/complex.properties'%os.path.split(__file__)[0],)\r\n self.resource.i18n_method = 'PROPERTIES'\r\n self.resource.save()\r\n resp = self.test_resource_edit(fh)\r\n self.assertTemplateUsed(resp, 'resources/resource_detail.html')\r\n self.assertEqual(SourceEntity.objects.filter(resource=self.resource).count(), 25)\r\n\r\n #Qt\r\n fh = open('%s/../lib/qt/en.ts'%os.path.split(__file__)[0],)\r\n self.resource.i18n_method = 'QT'\r\n self.resource.save()\r\n resp = self.test_resource_edit(fh)\r\n self.assertTemplateUsed(resp, 'resources/resource_detail.html')\r\n self.assertEqual(SourceEntity.objects.filter(resource=self.resource).count(), 43)\r\n\r\n #Joomla\r\n fh = open('%s/../lib/joomla_ini/example1.5.ini'%os.path.split(__file__)[0],)\r\n self.resource.i18n_method = 'INI'\r\n self.resource.save()\r\n resp = self.test_resource_edit(fh)\r\n self.assertTemplateUsed(resp, 'resources/resource_detail.html')\r\n self.assertEqual(SourceEntity.objects.filter(resource=self.resource).count(), 1)\r\n\r\n #Desktop\r\n fh = open('%s/../lib/desktop/data/okular.desktop'%os.path.split(__file__)[0],)\r\n self.resource.i18n_method = 'DESKTOP'\r\n self.resource.save()\r\n resp = self.test_resource_edit(fh)\r\n self.assertTemplateUsed(resp, 'resources/resource_detail.html')\r\n self.assertEqual(SourceEntity.objects.filter(resource=self.resource).count(), 2)\r\n\r\n\r\n #bad file\r\n fh = open('%s/../lib/pofile/wrong.pot'%os.path.split(__file__)[0],)\r\n self.resource.i18n_method = 'POT'\r\n self.resource.save()\r\n resp = self.test_resource_edit(fh)\r\n self.assertContains(resp, 'Syntax error in po file', status_code=200)\r\n #Since source entities will not be updated\r\n self.assertEqual(SourceEntity.objects.filter(resource=self.resource).count(), 2)\r\n self.resource.i18n_method = 'PO'\r\n self.resource.save()", "title": "" }, { "docid": "ad7f497332bd141bd3154bd0736c38a3", 
"score": "0.42089364", "text": "def _decode(self):\n charset = 'utf-8'\n\n # TODO: add to translation in 1.4?\n message = u'''\n\"%(name)s\" configuration variable is a string, but should be\nunicode. Use %(name)s = u\"value\" syntax for unicode variables.\n\nAlso check your \"-*- coding -*-\" line at the top of your configuration\nfile. It should match the actual charset of the configuration file.\n'''\n \n decode_names = (\n 'sitename', 'logo_string', 'navi_bar', 'page_front_page',\n 'page_category_regex', 'page_dict_regex', 'page_form_regex',\n 'page_group_regex', 'page_template_regex', 'page_license_page',\n 'page_local_spelling_words', 'acl_rights_default',\n 'acl_rights_before', 'acl_rights_after',\n )\n \n for name in decode_names:\n attr = getattr(self, name, None)\n if attr:\n # Try to decode strings\n if isinstance(attr, str):\n try:\n setattr(self, name, unicode(attr, charset)) \n except UnicodeError:\n raise error.ConfigurationError(message %\n {'name': name})\n # Look into lists and try to decode string inside them\n elif isinstance(attr, list):\n for i in xrange(len(attr)):\n item = attr[i]\n if isinstance(item, str):\n try:\n attr[i] = unicode(item, charset)\n except UnicodeError:\n raise error.ConfigurationError(message %\n {'name': name})", "title": "" }, { "docid": "d0b012759594a32f3c1066f02f26dc44", "score": "0.42070284", "text": "def mct_config_str(config):\n int_parameters = {\n 'overlap': 6,\n 'r1_left_cut': 10,\n 'r1_right_cut': 10,\n 'r2_left_cut': 10,\n 'r2_right_cut': 10,\n 'quality_threshold': 20,\n 'length_threshold': 30,\n 'total_read_pairs_min': 1,\n 'total_read_pairs_max': 6000000,\n 'mapq_threshold': 10,\n 'num_upstr_bases': 0,\n 'num_downstr_bases': 2,\n 'compress_level': 5,\n 'dna_cov_min_threshold': 3,\n 'rna_cov_min_threshold': 3\n }\n\n float_parameters = {\n 'mc_rate_max_threshold': 0.5,\n 'mc_rate_min_threshold': 0.9\n }\n bool_parameters = {'unmapped_fastq': False}\n\n str_parameters = {\n 'mode': 'mc',\n 'barcode_version': 'required',\n 'r1_adapter': 'AGATCGGAAGAGCACACGTCTGAAC',\n 'r2_adapter': 'AGATCGGAAGAGCGTCGTGTAGGGA',\n 'bismark_reference': 'required',\n 'hisat3n_dna_reference': 'required',\n 'hisat3n_rna_reference': 'required',\n 'hisat3n_repeat_index_type': 'no-repeat',\n 'reference_fasta': 'required',\n 'star_reference': 'required',\n 'gtf_path': 'required',\n 'feature_type': 'gene',\n 'id_type': 'gene_id',\n 'nome_flag_str': 'required'\n }\n if 'hisat3n_dna_reference' in config:\n del str_parameters['bismark_reference']\n del str_parameters['star_reference']\n else:\n del str_parameters['hisat3n_dna_reference']\n del str_parameters['hisat3n_rna_reference']\n del str_parameters['hisat3n_repeat_index_type']\n\n typed_config = {}\n for k, default in int_parameters.items():\n if k in config:\n typed_config[k] = int(config[k])\n else:\n if default != 'required':\n typed_config[k] = default\n else:\n raise ValueError(f'Required parameter {k} not found in config.')\n\n for k, default in float_parameters.items():\n if k in config:\n typed_config[k] = float(config[k])\n else:\n if default != 'required':\n typed_config[k] = default\n else:\n raise ValueError(f'Required parameter {k} not found in config.')\n\n for k, default in bool_parameters.items():\n if k in config:\n v = config[k]\n if v.lower().startswith('t'):\n v = True\n else:\n v = False\n typed_config[k] = v\n else:\n if default != 'required':\n typed_config[k] = default\n else:\n raise ValueError(f'Required parameter {k} not found in config. 
'\n f'You can print the newest mapping config template via \"yap default-mapping-config\".')\n # judge unmapped_fastq specifically\n unmapped_param_str = '--un' if typed_config['unmapped_fastq'] else ''\n typed_config['unmapped_param_str'] = f\"'{unmapped_param_str}'\"\n\n for k, default in str_parameters.items():\n if k in config:\n typed_config[k] = f\"'{config[k]}'\"\n else:\n if default != 'required':\n typed_config[k] = f\"'{default}'\"\n else:\n raise ValueError(f'Required parameter {k} not found in config. '\n f'You can print the newest mapping config template via \"yap default-mapping-config\".')\n\n config_str = \"\"\n for k, v in typed_config.items():\n config_str += f\"{k} = {v}\\n\"\n return config_str", "title": "" }, { "docid": "e1f4a0ba6eb3fc65c6cc53ae9f8e67bf", "score": "0.4201005", "text": "def transform(cls, clients, resource_config):\n resource_id = resource_config.get(\"id\")\n if not resource_id:\n cinderclient = clients.cinder()\n resource_id = _id_from_name(resource_config=resource_config,\n resources=cinderclient.\n volume_types.list(),\n typename=\"volume_type\")\n return resource_id", "title": "" }, { "docid": "5632e2878a0cc9d6b5fd37d7104fbec2", "score": "0.4200134", "text": "def encode_update_config(data):\n return encode_command(CommandType.UpdateConfig, data)", "title": "" }, { "docid": "a25a10a4e045f983be197ea7cd99b91d", "score": "0.41967875", "text": "def format_case_style(self, s: str, class_: T[Definition]) -> str:\n mapping = self.case_style_mapping()\n v = mapping.get(class_, \"keep\")\n\n if isinstance(v, str):\n case_style_name = cast(str, v)\n case_style = CaseStyle.from_name(case_style_name)\n converter = case_style.converter()\n return converter(s)\n elif isinstance(v, tuple):\n case_style_names = cast(Tuple[str, ...], v)\n for case_style_name in case_style_names:\n case_style = CaseStyle.from_name(case_style_name)\n converter = case_style.converter()\n s = converter(s)\n return s\n elif callable(v):\n converter = cast(CaseStyleConverter, v)\n return converter(s)\n else:\n raise InternalError(f\"invalid case_style mapping value {v}\")", "title": "" }, { "docid": "ccb0e597b55964992e495bde8e5fca8c", "score": "0.41967192", "text": "def canonicalize_reward_cfg(reward_cfg: RewardCfg, data_root: str) -> RewardCfg:\n kind, path = reward_cfg\n if path != \"dummy\":\n path = os.path.join(data_root, path)\n return (kind, path)", "title": "" }, { "docid": "a711cc9406e831020747a77ef9b17409", "score": "0.41954875", "text": "def prepare(ctx, resource_config, **_):\n # Save the parameters\n ctx.instance.runtime_properties['resource_config'] = resource_config", "title": "" }, { "docid": "a711cc9406e831020747a77ef9b17409", "score": "0.41954875", "text": "def prepare(ctx, resource_config, **_):\n # Save the parameters\n ctx.instance.runtime_properties['resource_config'] = resource_config", "title": "" }, { "docid": "2a4f052d77473322aa4e70d5fb01e38c", "score": "0.4188311", "text": "def cast_to_gcp_resources(resources_to_cast):\n\n cast_resources = []\n for resource_to_cast in resources_to_cast:\n resource_id = resource_to_cast['resourceId']['id']\n resource_type = resource_to_cast['resourceId']['type']\n cast_resource = create_resource(resource_id, resource_type)\n cast_resources.append(cast_resource)\n return cast_resources", "title": "" }, { "docid": "c7aaa437ad961c376775d472f9096d14", "score": "0.41761163", "text": "def _build_resource(self, filter_fields):\n resource = {\n 'tableReference': {\n 'projectId': self._project,\n 'datasetId': self._dataset_id,\n 
'tableId': self.table_id},\n }\n for f in filter_fields:\n if f in self.custom_resource_fields:\n self.custom_resource_fields[f](self, resource)\n else:\n api_field = _snake_to_camel_case(f)\n resource[api_field] = getattr(self, f)\n return resource", "title": "" }, { "docid": "4c4216b5ba77ac56574e629a303db98c", "score": "0.41699377", "text": "def test_schema_charmhub_registry_url_bad_format(create_config, check_schema_error):\n create_config(\n \"\"\"\n type: charm # mandatory\n charmhub:\n registry-url: stuff.com\n \"\"\"\n )\n check_schema_error(\n dedent(\n \"\"\"\\\n Bad charmcraft.yaml content:\n - invalid or missing URL scheme in field 'charmhub.registry-url'\"\"\"\n )\n )", "title": "" }, { "docid": "40815a6bbffa5891ed39c1eeae3e3198", "score": "0.41639066", "text": "def magic_fixes(self, config):\n\n # Infer token plugin if a token was given\n if (\n ('auth' in config and 'token' in config['auth'])\n or ('auth_token' in config and config['auth_token'])\n or ('token' in config and config['token'])\n ):\n config.setdefault('token', config.pop('auth_token', None))\n\n # Infer passcode if it was given separately\n # This is generally absolutely impractical to require setting passcode\n # in the clouds.yaml\n if 'auth' in config and 'passcode' in config:\n config['auth']['passcode'] = config.pop('passcode', None)\n\n # These backwards compat values are only set via argparse. If it's\n # there, it's because it was passed in explicitly, and should win\n config = self._fix_backwards_api_timeout(config)\n if 'endpoint_type' in config:\n config['interface'] = config.pop('endpoint_type')\n\n config = self._fix_backwards_auth_plugin(config)\n config = self._fix_backwards_project(config)\n config = self._fix_backwards_interface(config)\n config = self._fix_backwards_networks(config)\n config = self._handle_domain_id(config)\n\n for key in BOOL_KEYS:\n if key in config:\n if type(config[key]) is not bool:\n config[key] = get_boolean(config[key])\n\n for key in CSV_KEYS:\n if key in config:\n if isinstance(config[key], str):\n config[key] = config[key].split(',')\n\n # TODO(mordred): Special casing auth_url here. We should\n # come back to this betterer later so that it's\n # more generalized\n if 'auth' in config and 'auth_url' in config['auth']:\n config['auth']['auth_url'] = config['auth']['auth_url'].format(\n **config\n )\n\n return config", "title": "" } ]
aa2ee5f1f74bfde6f685a36a0bd012c2
This is a function that prints colored text to the terminal from bash evaluate statements. Useful for debugging. This should monkeypatch the logging module.
[ { "docid": "6bd01b8d59927d5dbaae1f4bea3109f4", "score": "0.59094536", "text": "def tprint(self, msg, color, indent=0):\n\n if isinstance(color, TCOLOR):\n color = color.value\n else:\n if isinstance(color, str):\n color_map = {k.name: k.value for k in TCOLOR}\n color = color_map[color]\n\n spaces = \"----\" * indent\n print(f\"\"\"echo -e \"\\033[1;{color}{spaces}{msg}\\033[0m\";\"\"\")\n print(\"echo \\n;\")", "title": "" } ]
[ { "docid": "225953f55941efe14144cfd23f423c83", "score": "0.7001603", "text": "def pp(text, value):\n print(text, color.highlight(str(value)), end='')", "title": "" }, { "docid": "66c955a449659697e657d941a867d827", "score": "0.6782162", "text": "def pl_red(cls, text):\n cls.println(cls.red(text))", "title": "" }, { "docid": "203a84ba40ad8e116ee2eb6146d1e8e6", "score": "0.66082275", "text": "def inner(text):\n return \"\\033[%s\" % _colors[name][0] + text + \"\\033[%s\" % _colors[name][1]", "title": "" }, { "docid": "09f23f46f02454849a4dab434aba51d9", "score": "0.65946275", "text": "def debug(text):\n\n print()\n cprint(text, \"yellow\", attrs=[\"bold\"])", "title": "" }, { "docid": "412181bf7172dc9d405d4fe7ba395db0", "score": "0.65269417", "text": "def print_color(string, cor):\n\t#string(str, int, float, bool)\n\t#cor(cores em inglês)\n\tglobal install_termcolor\n\t\n\tif install_termcolor and sys.platform == \"linux\":\n\t\tprint(colored(str(string), cor))\n\telse:\n\t\tprint(string)", "title": "" }, { "docid": "6993f658177b8f4971fa73eaea36f52c", "score": "0.64050895", "text": "def print_colored(message, color):\n # using term color to print string in specified color\n print(colored(message, color))", "title": "" }, { "docid": "bfb79757fd4ea2eea66eb89112bf5188", "score": "0.6401346", "text": "def std(msg):\n cprint(msg, \"white\")", "title": "" }, { "docid": "2404680dd6bdb4847a546cfee65414ac", "score": "0.6382881", "text": "def colored_text(line: str, color: int):\n if color == 0:\n print(\"\\033[91m{}\\033[00m\".format(line))\n elif color == 1:\n print(\"\\033[96m{}\\033[00m\".format(line))\n elif color == 2:\n print(\"\\033[92m{}\\033[00m\".format(line))\n elif color == 3:\n print(\"\\033[93m{}\\033[00m\".format(line))\n elif color == 4:\n print(\"\\033[95m{}\\033[00m\".format(line))\n elif color == 5:\n print(\"\\033[31m{}\\033[00m\".format(line))", "title": "" }, { "docid": "a3563e05ee654d89fc0dc30c4d835eac", "score": "0.6379009", "text": "def __printincolor(self, text, clr, end=' '):\r\n try:\r\n # This shell will be IDLE shell\r\n self.shell_connect.write(text + end, self.colormap[clr][0])\r\n except AttributeError:\r\n # This will work for other terminals like linux terminal, powershell, cmd etc.,\r\n cprint(text, self.colormap[clr], attrs=['bold'], file=sys.stderr, end=end)\r\n except Exception as e:\r\n # It will print if any other exception occurs\r\n cprint(\" \".join(e.args), attrs=['blink'], file=sys.stderr, end=end)", "title": "" }, { "docid": "8841060058d0e81a4669fab986ae6587", "score": "0.6366483", "text": "def pl_green(cls, text):\n cls.println(cls.green(text))", "title": "" }, { "docid": "7dca00284d8acbe7613363562f809f99", "score": "0.63660693", "text": "def print_yellow(string):\n print(Fore.YELLOW + string)", "title": "" }, { "docid": "8220b737d76ea6b97ae26fb7f9d6c6a3", "score": "0.6352585", "text": "def colour_print(text: str, effect: str) -> None:\n output_string = \"{0}{1}{2}\".format(effect, text, RESET)\n print(output_string)", "title": "" }, { "docid": "9dc46896ec04311474cd8bc0199bbfa3", "score": "0.63483155", "text": "def red(text):\n return PrettyPrinter._format(PrettyPrinter.Colors.RED, text)", "title": "" }, { "docid": "1fe0bef16ee721c85ce7806bc11ea535", "score": "0.63449365", "text": "def in_red(s: str) -> str:\n return f\"\\033[1;31;40m{str(s)}\\033[0m\"", "title": "" }, { "docid": "2b54e4f8a0c6201348e813385d95a814", "score": "0.63427395", "text": "def cprint(text, color=None, on_color=None, attrs=None, **kwargs):\r\n\r\n print((colored(text, color, 
on_color, attrs)), **kwargs)", "title": "" }, { "docid": "2b54e4f8a0c6201348e813385d95a814", "score": "0.63427395", "text": "def cprint(text, color=None, on_color=None, attrs=None, **kwargs):\r\n\r\n print((colored(text, color, on_color, attrs)), **kwargs)", "title": "" }, { "docid": "d7ec6be03081f4172956e6fa234cee43", "score": "0.63330346", "text": "def cout(text, col='93'):\n print '\\033[' + col + 'm' + text + '\\033[0m'", "title": "" }, { "docid": "d62768796059c2812f69e365f073e1a4", "score": "0.6316109", "text": "def test_termcolor():\n _banner('termcolor')\n\n import termcolor\n print termcolor.colored('hello', 'red'), termcolor.colored('world',\n 'green')", "title": "" }, { "docid": "45c974ad88b51d37bf5c923aa9738446", "score": "0.6274227", "text": "def status(s):\r\n print('\\033[1m{0}\\033[0m'.format(s))", "title": "" }, { "docid": "24d9ac5ac4a97264ba256bd89ddcd5df", "score": "0.62571645", "text": "def log(self, msg, color='green'):\n print(colorize(msg, color, bold=True))", "title": "" }, { "docid": "a39276944f6be6af91dbef80073384ec", "score": "0.6248132", "text": "def ccprint(text, color=None, verbose=True):\n if not verbose:\n return\n\n if color is not None:\n try:\n print(colors[color] + text + '\\033[0m')\n except KeyError:\n raise ValueError(\"Unrecognized color\")\n else:\n print(text)", "title": "" }, { "docid": "f3effda2b766e418237dcfc813dbd0df", "score": "0.6191213", "text": "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "title": "" }, { "docid": "a0d00be76526d8214d943c4c5a2dd0b8", "score": "0.6188115", "text": "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "a0d00be76526d8214d943c4c5a2dd0b8", "score": "0.6188115", "text": "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "a0d00be76526d8214d943c4c5a2dd0b8", "score": "0.6188115", "text": "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "75f0df336390576d3d067a26e7bc45d8", "score": "0.61643475", "text": "def printcolors() -> None:\n from efro.error import CleanError\n from efro.terminal import TerminalColor, Clr\n\n if Clr.RED == '':\n raise CleanError('Efro color terminal output is disabled.')\n\n clrnames = {getattr(Clr, s): s for s in dir(Clr) if s.isupper()}\n\n # Print everything in Clr (since that's what users should be using\n # but do it in the order of TerminalColor (since Clr is just a class\n # so is unordered)\n for value in TerminalColor:\n if value is TerminalColor.RESET:\n continue\n shortname = f'Clr.{clrnames[value.value]}'\n longname = f'({value.name})'\n print(f'{shortname:<12} {longname:<20} {value.value}'\n f'The quick brown fox jumps over the lazy dog.'\n f'{TerminalColor.RESET.value}')", "title": "" }, { "docid": "79b20e33795ebf9d95ec7fd2ce9d2bea", "score": "0.6147331", "text": "def green(text):\n return PrettyPrinter._format(PrettyPrinter.Colors.GREEN, text)", "title": "" }, { "docid": "0bf1639163a1d12584b180ceffd5463e", "score": "0.6145214", "text": "def Debug(text, tip):\n if tip == 0:\n print(colorama.Fore.WHITE, end=\"\")\n elif tip == 1:\n print(colorama.Fore.YELLOW, end = \"\")\n elif tip == 2:\n print(colorama.Fore.RED, end=\"\")\n\n print(text)\n\n print(colorama.Fore.WHITE, end=\"\")", "title": "" }, { "docid": "cb347960d7a3515510961585909a9de1", "score": "0.6113111", "text": "def YELLOW() -> str:\n return '\\033[33m'", "title": "" }, { "docid": "ec3a52cbf99a3cab61d24931f384e39a", "score": "0.61032164", "text": "def lsassy_highlight(self, msg):\r\n if 
self.no_color:\r\n return msg\r\n return \"\\033[1;33m{}\\033[0m\".format(msg)", "title": "" }, { "docid": "862d47ef1ac737b543c5097b584be1cc", "score": "0.6089623", "text": "def GREEN() -> str:\n return '\\033[32m'", "title": "" }, { "docid": "50fc871ba05d41b1a4ce26bb042fb866", "score": "0.60704046", "text": "def dprint(msg):\n print console.colorize('red', 'debug: ' + str(msg))", "title": "" }, { "docid": "7631b58b1f1ebf80c6ca2464e944af54", "score": "0.6068349", "text": "def pretty_print_synthesis(passed, failed):\n\n print(f\"[{colored('==', 'red' if failed else 'blue')}] Synthesis:\", end='')\n color_tested = colored(f\"{passed + failed:2d}\", 'blue')\n print(f\" Tested: {color_tested} | \", end='')\n color_passed = colored (f\"{passed:2d}\", 'green')\n print(f\"Passing: {color_passed} | \", end='')\n color_failed = colored (f\"{failed:2d}\", 'red' if failed else 'blue')\n print(f\"Failing: {color_failed}\")", "title": "" }, { "docid": "022e98c2a1675e51c1d31a82934f6f24", "score": "0.60680765", "text": "def print_info(text, r=0, g=0, b=255):\r\n print(\"\\033[38;2;{};{};{}m{} \\033[38;2;255;255;255m\".format(r, g, b, text))", "title": "" }, { "docid": "85d5c3a8934440e11715c0805355f0dc", "score": "0.60256696", "text": "def cprint(*args, color=33, **kwargs):\n start_escape = '\\x1b[{}m'.format(color)\n\n args = [start_escape] + list(args) + ['\\x1b[0m']\n\n return print(*args, **kwargs)", "title": "" }, { "docid": "6261a45b9dac3aaf914d3cd534b0aa16", "score": "0.60220546", "text": "def color(color, string):\n return \"\\033[\" + str(color) + \"m\" + string + \"\\033[0m\"", "title": "" }, { "docid": "ee0a18174a17fd8ada6ce5aa6a744352", "score": "0.59973687", "text": "def warning_print(text: str) -> None:\n print(f'\\033[93m{text}\\033[0m')", "title": "" }, { "docid": "4698d41a8e9744d98112a9aa4f036d1e", "score": "0.59886074", "text": "def colorize(self, color, message):\n return \"\\033[{0}m{1}\\033[0m\".format(COLORS[color], message)", "title": "" }, { "docid": "ddf6b5b5c585805422ceecf8dc8d7504", "score": "0.59752315", "text": "def eprint(to_print):\n print(\n u\"{0}{1}{2}\".format(Colors.RED, to_print, Colors.ENDC), file=sys.stderr\n )", "title": "" }, { "docid": "db158159b7a74df0e35b46370eba20a2", "score": "0.5973186", "text": "def print_error(text, r=255, g=0, b=0):\r\n print(\"\\033[38;2;{};{};{}m{} \\033[38;2;255;255;255m\".format(r, g, b, text))", "title": "" }, { "docid": "d418c6074b7211f1139d7a8da6f2ce91", "score": "0.596226", "text": "def _printColor(r, g, b, fg=True):\n try:\n if fg:\n print(f'\\033[38;2;{r};{g};{b}m', end='')\n else:\n print(f'\\033[48;2;{r};{g};{b}m', end='')\n except:\n _resetColor()", "title": "" }, { "docid": "d1e3f4e122400412aa8688530bfeed81", "score": "0.5939385", "text": "def display(message, color):\n print(color + bcolors.BOLD + message + bcolors.ENDC)", "title": "" }, { "docid": "3229fe2f40df4013a9cab9f4321a853b", "score": "0.59317833", "text": "def getGreenText(text, shouldPrint=True):\n if not printInColor:\n print(text)\n return\n\n greenText = Fore.GREEN + text + Style.RESET_ALL\n if shouldPrint:\n print(greenText)\n\n return greenText\n #print(f\"{Fore.GREEN}Heeloo{Style.RESET_ALL}\")", "title": "" }, { "docid": "463e6038e5429d58e0a7890bc6e6c873", "score": "0.5923501", "text": "def print_with_color(s, color=Fore.WHITE, brightness=Style.NORMAL, **kwargs):\n print(f\"{brightness}{color}{s}{Style.RESET_ALL}\", **kwargs)", "title": "" }, { "docid": "b2ebf1441bd2bfd6510e909481b13896", "score": "0.591907", "text": "def color_print(\r\n *args,\r\n sep=' 
',\r\n end='\\n',\r\n file=stdout,\r\n flush=False,\r\n fg='white',\r\n bg='black'\r\n):\r\n message = \"\"\r\n\r\n for i, arg in enumerate(args):\r\n message += str(arg)\r\n if i < len(args) - 1:\r\n message += sep\r\n \r\n print(color_string(message, fg, bg), end=end, file=file, flush=flush)", "title": "" }, { "docid": "90518e871d7635c6cf0d60b0a4f53f29", "score": "0.5919026", "text": "def getRedText(text, shouldPrint=True):\n if not printInColor:\n print(text)\n return\n\n redText = Fore.RED + text + Style.RESET_ALL\n if shouldPrint:\n print(redText)\n\n return redText", "title": "" }, { "docid": "dd8c6710b758223f149e38aac963936d", "score": "0.5910864", "text": "def colorify(color: str, *, text=None) -> str:\n\n color = color.upper()\n\n if not text:\n to_color = PyFunceble.cli.storage.ASCII_PYFUNCEBLE\n else:\n to_color = text\n\n if not hasattr(colorama.Fore, color):\n raise ValueError(f\"<color> ({color!r}) is not supported.\")\n\n color_to_apply = getattr(colorama.Fore, color)\n result = []\n\n if (\n PyFunceble.facility.ConfigLoader.is_already_loaded()\n and PyFunceble.storage.CONFIGURATION.cli_testing.display_mode.colour\n ):\n for line in to_color.split(\"\\n\"):\n result.append(f\"{color_to_apply}{line}{colorama.Fore.RESET}\")\n\n return \"\\n\".join(result)\n return to_color", "title": "" }, { "docid": "3ebcd99c2b958a337b4354e51ca2ea39", "score": "0.5900491", "text": "def colored_hook(home_dir):\n\n def hook(type_, value, tb):\n def colorize(text, color, own=0):\n \"\"\"Returns colorized text.\"\"\"\n endcolor = \"\\x1b[0m\"\n codes = {\n \"green\": \"\\x1b[0;32m\",\n \"green_own\": \"\\x1b[1;32;40m\",\n \"red\": \"\\x1b[0;31m\",\n \"red_own\": \"\\x1b[1;31m\",\n \"yellow\": \"\\x1b[0;33m\",\n \"yellow_own\": \"\\x1b[1;33m\",\n \"black\": \"\\x1b[0;90m\",\n \"black_own\": \"\\x1b[1;90m\",\n \"cyan\": \"\\033[1;36m\",\n }\n return codes[color + (\"_own\" if own else \"\")] + text + endcolor\n\n for filename, line_num, func, text in traceback.extract_tb(tb):\n basename = os.path.basename(filename)\n own = (home_dir in filename) or (\"/\" not in filename)\n\n print(colorize(\"\\\"\" + basename + '\"', \"green\", own) + \" in \" + func)\n print(\"%s: %s\" % (\n colorize(\"%5d\" % line_num, \"red\", own),\n colorize(text, \"yellow\", own)))\n print(\" %s\" % colorize(filename, \"black\", own))\n\n print(colorize(\"%s: %s\" % (type_.__name__, value), \"cyan\"))\n return hook", "title": "" }, { "docid": "8f6d6238494097b6fbb24ba8a1beba7d", "score": "0.58976305", "text": "def log(color=None, message='Your message goes here!'):\n NOW = datetime.datetime.now()\n CODING = { 'Red': 'emask', 'Yellow': 'kill', 'Blue': '_sse', 'Green': ': md'}\n if color in CODING:\n print (NOW.strftime(\"%m/%d/%y %I:%M%p\") + \" - %s (%s) \") % (message, CODING[color])\n else:\n print (NOW.strftime(\"%m/%d/%y %I:%M%p\") + \" - %s\") % (message)", "title": "" }, { "docid": "b73b3114b1d0774776182f8bdab8d769", "score": "0.5895167", "text": "def colored(fmt, us) -> str:\n return \" \\033[9{0}m{1} \\033[3{0}m\\033[90m{2}\\033[3{0}m +{3}\\033[0m\" \\\n .format(c, name, fmt, humanize(us))", "title": "" }, { "docid": "33d8e966229752bc6e30e6cc281c3c98", "score": "0.5886258", "text": "def colorprint(cls, message, color=\"\"):\n print(color + message + Style.RESET_ALL)", "title": "" }, { "docid": "53f36a55b32a61d45c596b4a6b071ed1", "score": "0.5869857", "text": "def main():\n myformat1 = ('%(asctime)s - %(levelname)-8s - ' \n '%(module)s:%(funcName)s:%(lineno)d - %(message)s')\n \n myformat2 = ('$COLOR%(levelname)s 
-$RESET %(asctime)s - ' \n '$BOLD$COLOR%(name)s$RESET - %(message)s')\n \n myformat3 = ('$COLOR%(asctime)s - %(levelname)-8s - ' \n '%(module)s:%(funcName)s:%(lineno)d - %(message)s$RESET')\n \n myformat4 = ('$BG-GREEN$COLOR%(asctime)s - %(levelname)-8s - ' \n '%(module)s:%(funcName)s:%(lineno)d - %(message)s$RESET')\n\n logging.basicConfig(\n level = logging.DEBUG,\n format = myformat1,\n filename = 'colorlog.test.log',\n filemode = 'w' \n ) \n \n console = logging.StreamHandler()\n console.setFormatter(ColorFormatter(use_color=True, format_string=myformat3))\n console.setLevel(logging.DEBUG) \n logging.getLogger().addHandler(console)\n\n mylog = logging.getLogger('TESTLOGGER')\n mylog.debug(\"This is a test debug message\")\n mylog.info(\"This is a test info message\")\n mylog.warn(\"This is a test warn message\")\n mylog.error(\"This is a plain vanilla error message\")\n mylog.error(\"$BG-WHITEThis is a test error message - with white background forced in the message\")\n mylog.critical(\"$BG-GREENThis is a test critical message with green background forced in the message\")\n mylog.critical(\"This is a test critical message with a default red background set in the class\")", "title": "" }, { "docid": "a4c8a85a9191a8b7ce7da482f3816eed", "score": "0.58663577", "text": "def _pcolor(text, color, indent=0):\n esc_dict = {\n 'black':30, 'red':31, 'green':32, 'yellow':33, 'blue':34, 'magenta':35,\n 'cyan':36, 'white':37, 'none':-1\n }\n if esc_dict[color] != -1:\n return (\n '\\033[{color_code}m{indent}{text}\\033[0m'.format(\n color_code=esc_dict[color],\n indent=' '*indent,\n text=text\n )\n )\n return '{indent}{text}'.format(indent=' '*indent, text=text)", "title": "" }, { "docid": "68b446d463dc5f076fa433123cdef2fe", "score": "0.58489084", "text": "def __call__(self, message: str) -> str:\n\n return '\\033[{}m{}\\033[0m'.format(self.value, message)", "title": "" }, { "docid": "990d68256f37f668d266675c38f3fcd5", "score": "0.58453196", "text": "def print_msg(self, message, key_words=None):\n if not self.disable_color_print:\n color_begin = '\\033[1;31m'\n color_end = '\\033[0m'\n if key_words:\n message = message.replace(key_words, color_begin + key_words + color_end)\n print(message)", "title": "" }, { "docid": "607f6db94833f492c8463d87bf196e90", "score": "0.58345556", "text": "def test_basic():\n _banner('core')\n\n testString = \"This is a test.\"\n\n colors = Colors()\n\n # Simple example\n colors.cprint(['FG_GREEN', 'BG_RED'], testString)\n\n # Bad example (unsupported font attributes)\n colors.cprint(['FG_CYAN', 'BG_WHITE', 'INVERSE_ON', 'BOLD_ON',\n 'ITALICS_ON', 'NOTHING'], testString)\n\n # Print without trailing newline\n colors.cprin(['FG_GREEN'], \"This \")\n colors.cprin(['UNDERLINE_ON'], \"is\")\n colors.cprin(['NORMAL'], \" \")\n colors.cprin(['NORMAL'], \"an \")\n colors.cprin(['BG_RED', 'FG_WHITE'], \"ERROR\")\n colors.cprin(['NORMAL'], \".\")\n print\n\n # Fancy example\n testNumbers = range(30, 170, 3)\n for index, item in enumerate(testNumbers):\n print \"%-30s %-30s\" % (\n \"\" + colors.colorize(['BG_CYAN', 'FG_WHITE'], str(index)),\n \"\" + colors.colorize(['FG_MAGENTA'],str(item)))", "title": "" }, { "docid": "2e9462b30087c2cad036556e437f9357", "score": "0.581174", "text": "def decorate_color(color: str):\n\n def debug_message(logger, message: str) -> None:\n logger.debug(f\"\\033{color} {message}\\033[00m\")\n\n return debug_message", "title": "" }, { "docid": "8869d3fdc76361223691327e5f52f721", "score": "0.58084995", "text": "def c(colour: t.Union[str, 
t.Iterable[str]], message: str) -> str:\n if isinstance(colour, str):\n colour = [colour]\n escape = '\\033[' + ';'.join(colour) + 'm'\n reset = '\\033[0m'\n return escape + message + reset", "title": "" }, { "docid": "5399fec72df1fe6fe77333ea2b4f23ee", "score": "0.580743", "text": "def colorize(msg, color, nocolor=False):\n # The nocolor is added to shut off completely. You may ask the point of this\n # someone want to pipe the output, but the asci characters will also printed\n if nocolor:\n return msg\n else:\n colors = {'green' : '\\x1b[32;01m%s\\x1b[0m',\n 'red' : '\\x1b[31;01m%s\\x1b[0m',\n 'yellow' : '\\x1b[33;01m%s\\x1b[0m',\n 'bold' : '\\x1b[1;01m%s\\x1b[0m',\n 'none' : '\\x1b[0m%s\\x1b[0m',\n }\n return colors[color if sys.stdout.isatty() else 'none'] % msg", "title": "" }, { "docid": "c80a008eeed0c525d8d84be6bdd98622", "score": "0.5804183", "text": "def printColor(string_to_print,color):\n if color== \"green\":\n print bcolors.OKGREEN + string_to_print + bcolors.ENDC\n elif color== \"red\":\n print bcolors.FAIL + string_to_print + bcolors.ENDC\n else:\n print bcolors.FAIL + \"Bad color argument to print\" + bcolors.ENDC", "title": "" }, { "docid": "fdf948169ce1185ae89f7671855d7692", "score": "0.57893676", "text": "def success(text):\n\n cprint(\"[+] {}\".format(text).ljust(80), \"cyan\")", "title": "" }, { "docid": "839c8978d777689fcbb2b12f3b9f2b91", "score": "0.5784462", "text": "def escape(self, s):\n if self.color:\n return \"\\033[%sm\" % s\n else:\n return \"\"", "title": "" }, { "docid": "26e9ce2218a2c09c72adf6a4beb223b7", "score": "0.57820994", "text": "def info(msg):\n cprint(msg, \"cyan\")", "title": "" }, { "docid": "c14ce3d5de4f8a1a3cf98bedafce87d6", "score": "0.5774559", "text": "def print_info(text):\n print(COLORS['info'](text))", "title": "" }, { "docid": "bbaa7bebfc8389101e9fa5d417169998", "score": "0.5764394", "text": "def colored(text, color): #, on_color=None, attrs=None):\n # Copyright (c) 2008-2011 Volvox Development Team\n #\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n #\n # The above copyright notice and this permission notice shall be included in\n # all copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n # THE SOFTWARE.\n #\n # Author: Konstantin Lepa <konstantin.lepa@gmail.com>\n COLORS = dict(list(zip(['grey',\n 'red',\n 'green',\n 'yellow',\n 'blue',\n 'magenta',\n 'cyan',\n 'white',],\n list(range(30, 38)))))\n\n RESET = '\\033[0m'\n\n if os.getenv('ANSI_COLORS_DISABLED') is None:\n fmt_str = '\\033[%dm%s'\n\n text = fmt_str % (COLORS[color], text)\n\n text += RESET\n\n return text", "title": "" }, { "docid": "7524d6783a2d11b90fe3827b8fd1a040", "score": "0.5739957", "text": "def __insert_color(txt,s,c):\n# print '<span style=\"color: %s;\">'%c+\\\n# txt +'</span>'\n return '<span class=\"%s\">'%c+\\\n txt +'</span>'", "title": "" }, { "docid": "72da9b83b2220db5a9038d6cbcbd5045", "score": "0.57355475", "text": "def alert(string):\n return '\\033[91m{0}\\033[0m'.format(string)", "title": "" }, { "docid": "fc8c5e223226758ac9a34ced0da2646a", "score": "0.573461", "text": "def _colorize(stack_status, args):\n if not args.color:\n return stack_status\n end = \"0m\"\n status_to_color = {\"COMPLETE\": \"0;32m\", \"FAILED\": \"0;31m\", \"IN_PROGRESS\": \"10;33m\"}\n for status_label in status_to_color:\n if status_label in stack_status:\n return \"\\033[%s%s\\033[%s\" % (status_to_color[status_label], stack_status, end)", "title": "" }, { "docid": "0b17eb2a106a5586eb2af478fff8a81f", "score": "0.57270294", "text": "def format(c):\n os.system(\"black .\")", "title": "" }, { "docid": "6d49b8b632856ab1992ec92845dd31e5", "score": "0.5724723", "text": "def _print(text):\n if VERBOSE:\n print(text)", "title": "" }, { "docid": "d08d6d8c0f22424f165b13faeab00506", "score": "0.5721955", "text": "def displayhook_hack(string):\n # This function is all so the last line (or single lines) will\n # implicitly print as they should, unless they are an assignment.\n # If anybody knows a better way to do this, please tell me!\n \n # The essential problem seems to be that exec executes the code as\n # if the code was in a file. However, we want the last statement\n # to print out as if it was interactive. So we have to generate\n # the code as if it was an interactive statement (by compiling a\n # \"single\" interactive statement) and executing that code object.\n\n # There is a patch on trac that uses the ast module to print out\n # each line's output or the last line's output. 
Alternatively, we\n # could fork a python process and feed the code in as standard\n # input and just capture the stdout.\n\n string = string.splitlines()\n i = len(string)-1\n if i >= 0:\n # skip lines that are either empty or start with whitespace\n while len(string[i])==0 or string[i][0] in ' \\t':\n i -= 1\n final_lines = unicode_str('\\n'.join(string[i:]))\n if not final_lines.startswith('def '):\n try:\n compile(final_lines + '\\n', '', 'single')\n string[i] = \"exec compile(%r + '\\\\n', '', 'single')\" % final_lines\n string = string[:i+1]\n except SyntaxError, msg:\n pass\n return '\\n'.join(string)", "title": "" }, { "docid": "232173fd230f536f88d9c19da2c3d89a", "score": "0.5718671", "text": "def red(text):\n return '<span foreground=\"red\">{}</span>'.format(text)", "title": "" }, { "docid": "7c4c6667e07076bdddd8173c6a27a26c", "score": "0.56833994", "text": "def print_color(message: str, color: str, verbose: bool = True):\n if verbose:\n print(colored(message, color, attrs=[\"bold\"]))", "title": "" }, { "docid": "ba0cae904033f323df8d371dc080b3f2", "score": "0.5683238", "text": "def colorize(message):\n with _PASTEL.colorized():\n return _PASTEL.colorize(message)", "title": "" }, { "docid": "71c6eb874e363edda3c3be2adfc66d31", "score": "0.56780434", "text": "def print_failure(msg):\n\n print(\"\\033[91m\" + msg + \"\\033[0m\")", "title": "" }, { "docid": "b9376721fa1fbaf6afe46ccfea1306de", "score": "0.56777346", "text": "def message_color1(msg):\n return f\"\\033[48;5;232m\\033[38;5;12m{msg}\\033[0m\"", "title": "" }, { "docid": "3d33e82df6ece642b5a915d3ba75be8a", "score": "0.56586015", "text": "def show_colors():\n\n max_name_length = max(\n [len(color_name) for color_name in COLORS]\n )\n\n def colorize(msg, fg, bg):\n _fg = '\\x1b[38;2;{};{};{}m'.format(*fg)\n _bg = '\\x1b[48;2;{};{};{}m'.format(*bg)\n _reset = '\\x1b[0m'\n return '{}{}{}{}'.format(_fg, _bg, msg, _reset)\n\n for color_name, (r, g, b) in COLORS.items():\n color_name = color_name.ljust(max_name_length)\n white_on_color = colorize(color_name, fg=(255, 255, 255), bg=(r, g, b))\n black_on_color = colorize(color_name, fg=(0, 0, 0), bg=(r, g, b))\n print('{} {} {}'.format(color_name, white_on_color, black_on_color))", "title": "" }, { "docid": "b61a5662c76f21292d415320e9497112", "score": "0.5653296", "text": "def make_bold_red(text):\n return '\\033[91m' + text + '\\033[0m'", "title": "" }, { "docid": "be0f443b2525e4db012bb66dacab2f5f", "score": "0.56509507", "text": "def print_script_output(_text, _type):\n if _type == 'error':\n print('\\033[91m' + _text + '\\033[m')\n elif _type == 'job_done':\n print('\\033[92m' + _text + '\\033[m')", "title": "" }, { "docid": "8eab94248f75162bb4bcb0d8f8e79bd0", "score": "0.5650016", "text": "def shim_print(main_msg, additional_msg=\"\", level=None):\n if not debug and not level:\n return\n\n cols, rows = shutil.get_terminal_size()\n stars = round((cols - len(\" NEAT PYTHON SHIM \")) / 2)\n minusen = False\n\n starsRight = \"*\" * stars\n if (stars * 2) + len(\" NEAT PYTHON SHIM \") > cols:\n starsRight = \"*\" * (stars - 1)\n\n\n color = colors[level]\n print(color)\n print(f'{\"*\" * stars} NEAT PYTHON SHIM {starsRight}')\n print()\n print(f\"🐍 ({datetime.datetime.now()}) - [{main_msg}{Fore.RESET}{color}]\")\n if additional_msg:\n print(f'{Fore.LIGHTMAGENTA_EX}{additional_msg}{Fore.RESET}{color}')\n print()\n print(\"*\" * cols)\n print(Fore.RESET)\n #mport time; time.sleep(5)", "title": "" }, { "docid": "2ea8da79c4d1f4235b68194cb6804fce", "score": "0.5644468", "text": "def 
print_term_colors(only = None):\n only = only.split() if only else 'system color-cube gray-ramp'.split()\n RGB = _ANSITerm.term_colors()\n ESC = _ANSITerm.ESC\n RESET = ESC + '0m'\n fg = ESC + '38;5;{0}m {0:03d}#{1}'\n\n def reset():\n print(RESET, end = '')\n\n def pfmt(i):\n print(fg.format(i, RGB[i]), sep = '', end = '')\n\n if 'system' in only:\n print('System colors:')\n for i in xrange(16):\n pfmt(i)\n if 3 == divmod(i, 4)[1]: print()\n reset()\n\n if 'color-cube' in only:\n print('Color cube (6x6x6):')\n for i in xrange(16, 232):\n pfmt(i)\n if 3 == divmod(i, 6)[1]: print()\n reset()\n\n if 'gray-ramp' in only:\n print('Gray-scale ramp:')\n for i in xrange(232, 256):\n pfmt(i)\n if 3 == divmod(i, 6)[1]: print()\n reset()", "title": "" }, { "docid": "198cb456fe4cf4d5db714e773a9e0006", "score": "0.5627874", "text": "def html_output(colors):\n t = ('<div class=\"color\">'\n '<span class=\"count\">%(occurrences)s</span>'\n '<span class=\"value\" style=\"background: #%(c)s; color: #%(p)s\">'\n '#%(c)s'\n '</span>'\n '</div>')\n lines = []\n lines.append('<h1>%s color(s) found</h1>' % len(colors))\n for color, occurrences in colors:\n lines.append(t % {\n 'c': color,\n 'p': hsv_to_hex(rotate_value(hex_to_hsv(color))),\n 'occurrences': occurrences,\n })\n print HTML_TEMPLATE % '\\n'.join(lines)", "title": "" }, { "docid": "9b51040ca221c80f1447d75ddd4957c1", "score": "0.5614793", "text": "def printer(text, color=None, **kwargs):\n if nocolor:\n # import sys\n # sys.stdout.write(text + \"\" if (\"end\" in kwargs and kwargs[\"end\"] == \"\") else '\\n')\n # sys.stdout.flush()\n print(text, **kwargs)\n else:\n if color is None:\n cprint(text, **kwargs)\n else:\n cprint(text, color, **kwargs)", "title": "" }, { "docid": "7a2d98c8da75b977179215f15b1acfd7", "score": "0.5606178", "text": "def BLUE() -> str:\n return '\\033[34m'", "title": "" }, { "docid": "48aa84cf2a94845ef0c8adc229406165", "score": "0.55807745", "text": "def logger(msg):\n\t# pending more intelligent logging\n\tnow = dt.datetime.now().strftime('%H:%M:%S')\n\tcaller = sys._getframe(1).f_code.co_name\n\n\t# https://pypi.org/project/colorama/\n\t# https://misc.flogisoft.com/bash/tip_colors_and_formatting\n\t# https://gist.github.com/chrisopedia/8754917\n\t# \n\t# red = '\\033[31m'\n\t# reset = '\\033[30m'\n\n\tprint('\\033[31m %s \\033[0m-\\033[36m %s()\\033[0m - %s' % (now, caller, msg))", "title": "" }, { "docid": "0f44f887102c9f4ae55d875c4525ce97", "score": "0.5579339", "text": "def display(target_dict, message: str):\n \n message = color_target_names(target_dict, message)\n \n print(message)", "title": "" }, { "docid": "432f693641e0eddc3691e59d5687f2ef", "score": "0.55784017", "text": "def tests_color_on_color_functions_wrap_text_with_multiple_escape_codes():\n string = 'This is sample text.'\n desired_string = sgr.create(code.BG_WHITE, code.RED) + 'This is sample text.' + sgr.reset()\n assert color.red_on_white(string) == desired_string\n\n desired_string = sgr.create(code.BG_BLUE, code.WHITE) + 'This is sample text.' 
+ sgr.reset()\n assert color.white_on_blue(string) == desired_string", "title": "" }, { "docid": "ec3d5082d89ffad9bfdae1f42dad62c6", "score": "0.55606055", "text": "def highlight_white(text):\n return u'\\x1b[1;37;7m%s\\x1b[22;27;39m' % text", "title": "" }, { "docid": "d7fd0cc288509b716d262c8346b5ff6c", "score": "0.5558769", "text": "def printInfo(msg):\n if not QUIET_OUTPUT:\n print(bold(green(f'{msg}')))", "title": "" }, { "docid": "81fb0f490d3feeb989c8fbc356b369b2", "score": "0.55587107", "text": "def color(string, color, do_color):\n\n if do_color:\n return '\\033[{}m{}\\033[0m'.format(color, string)\n else:\n return string", "title": "" }, { "docid": "a1630b612a99125344df112f148f5aae", "score": "0.5550213", "text": "def colorize(code: str, message: str) -> str:\n return f'{code}{message}{RESET}'", "title": "" }, { "docid": "0a09ad86b2ef409fb4f74008360c9a24", "score": "0.55452317", "text": "def tests_color_functions_wrap_text_with_escape_code():\n string = 'This is sample text.'\n desired_string = sgr.create(code.RED) + 'This is sample text.' + sgr.reset()\n assert color.red(string) == desired_string\n\n desired_string = sgr.create(code.UNDERLINE) + 'This is sample text.' + sgr.reset()\n assert color.underline(string) == desired_string\n\n desired_string = sgr.create(code.BG_RED) + 'This is sample text.' + sgr.reset()\n assert color.bg_red(string) == desired_string", "title": "" }, { "docid": "a5aebcad3e3ca47ad2bcb3b5c3c74e70", "score": "0.55302197", "text": "def success(string):\n print(colored('SUCCESS: ' + string, 'green'))", "title": "" }, { "docid": "b723e583327f37ccb874b9dc9294576f", "score": "0.55266255", "text": "def message_color(msg):\n global color_lst_indx\n answer = \"\"\n\n for i in range(len(msg)):\n\n if color_lst_indx >= len(color_lst):\n color_lst_indx = 0\n \n answer += f\"\\033[48;5;232m\\033[38;5;{color_lst[color_lst_indx]}m{msg[i]}\\033[0m\"\n color_lst_indx += 1\n\n return answer", "title": "" }, { "docid": "01f4b51a36bfade5be465dc5c3ffb60f", "score": "0.5509357", "text": "def log(cls, message):\n\n cls.colorprint(\"[log] %s\" % message, Fore.CYAN)", "title": "" }, { "docid": "c1c40778eaa50c49b3d99141926d078c", "score": "0.5508543", "text": "def bold_print(string: str) -> None:\n print(\"\\033[1m\" + string + \"\\033[0m\")", "title": "" }, { "docid": "17d4f0817472aff3cdf17aa5df50ddbd", "score": "0.5507362", "text": "def okay(msg):\n cprint(msg, \"green\")", "title": "" }, { "docid": "3603169a496d43a8f4a38d6be7efef10", "score": "0.5499348", "text": "def RED() -> str:\n return '\\033[31m'", "title": "" }, { "docid": "df073e942be515f3cb3b362dfcaf827d", "score": "0.5495739", "text": "def stdOutCallback(self, text, color='black'):\n # Replace newlines with <br>\n text = text.replace('\\n', '<br>')\n self.ui.textConsole.insertHtml(\\\n QString('<br><span style=\"color:%2\">%1</span>').arg(text).arg(color))\n # Scroll to bottom \n cursor = self.ui.textConsole.textCursor()\n cursor.movePosition(QTextCursor.End)\n self.ui.textConsole.setTextCursor(cursor)\n self.ui.textConsole.update()", "title": "" }, { "docid": "55ec414a0c91408ea8c8234abc694ace", "score": "0.5488058", "text": "def test_all_colors():\n cp = Printer()\n for color_name in COLOR_NAMES:\n out = StringIO()\n cp.msg('msg', color_name, file=out)\n out.seek(0)\n assert out.read() == cp.colors[color_name] + 'msg' + '\\033[0m'", "title": "" } ]
30aa2d13687dba10443f4215686391e6
Returns the next valid NMEA string that contains the pattern
[ { "docid": "48c59ba11064ea30a83fab334ec66bb8", "score": "0.6115745", "text": "def get_nmea_pkt(self, pattern):\n pattern_bytes = bytes(pattern, 'utf-8')\n while True:\n line = self.device_tx_rx.readline()\n # logging.debug(\"L80GPS:readline returned - \"+str(line))\n if line == b'':\n raise NMEAPacketNotFoundError(\n \"Timed out before valid '{}'.\".format(pattern))\n elif not l80gps_checksum_is_valid(line):\n continue\n elif pattern_bytes in line:\n return str(line, 'utf-8')", "title": "" } ]
[ { "docid": "f94f8c7a2891abe1746fe91e9b7ab2a9", "score": "0.577851", "text": "def AcceptPattern(self) -> str:", "title": "" }, { "docid": "c4cefcff5d25eb9a5bcb6e0328c023ca", "score": "0.57108873", "text": "def onePattern(pattern):", "title": "" }, { "docid": "005a64d1021715c2a591f9b2e3beb300", "score": "0.5530082", "text": "def regen(pattern: str):\n\n def generator() -> str:\n return rstr.xeger(pattern)\n\n return generator", "title": "" }, { "docid": "0fa247f11c59bfbe55a98203e461d4c2", "score": "0.5502948", "text": "def match(self, pattern: Pattern[str]) -> str:\r\n m = pattern.match(self.s, self.i)\r\n if m is not None:\r\n return m.group(0)\r\n else:\r\n return ''", "title": "" }, { "docid": "981cd576fca39c6a9755b5cc3c879a2c", "score": "0.54906684", "text": "def match_4(string):\n if re.search(r'^[cmf][a][n].*', string):\n return True\n else:\n return False", "title": "" }, { "docid": "71fe89986dcc4d9df672e01b95c7b6a7", "score": "0.5431575", "text": "def get_pattern():\n return '-phon'", "title": "" }, { "docid": "afffa8b1babee87fd885edd4fd490141", "score": "0.5388481", "text": "def parse_aba(self, s):\n m = re.search(r\"(aba|routing)(\\-|\\s+)?(code|number|num|no|nr|#)?(\\.)?\\s*(:)?\\s*(\\w+)\", s, flags=re.I)\n if m:\n return m.group(6)\n\n return ''", "title": "" }, { "docid": "0b9a218668aa0310ab3cde94297b5b05", "score": "0.5368933", "text": "def pattern(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pattern\")", "title": "" }, { "docid": "a3a7db491cf86746c97b9d9b5d3098fa", "score": "0.5350561", "text": "def non_repeated_substring(S, pattern):\n pass", "title": "" }, { "docid": "b067c1eac380d20df39536951b89a2c9", "score": "0.5332734", "text": "def get_next_token() -> str:\n token = EthAddressChecker.token[EthAddressChecker.token_index]\n EthAddressChecker.token_index = ((EthAddressChecker.token_index + 1) %\n len(EthAddressChecker.token))\n return token", "title": "" }, { "docid": "8c2ec73b9ebea93bf1ca4b484bcbd47f", "score": "0.5288799", "text": "def get_valid_string(prompt):", "title": "" }, { "docid": "025f1de99793b22ae887d789584e84fe", "score": "0.52580327", "text": "def HatchPattern(self) -> str:", "title": "" }, { "docid": "57481409b206bb03337e39b595a0254c", "score": "0.52526957", "text": "def regex_string():\n raise ValueError(\"This method is not implemented by the child class.\")", "title": "" }, { "docid": "4458d791b9a044baabd390c6ae373af6", "score": "0.51608276", "text": "def get_first_match(self, s: str):\n s += \" \"\n for (pattern, unit) in self.patterns:\n matches = re.findall(pattern, s)\n if matches:\n match = matches.pop(0).strip()\n return match, unit", "title": "" }, { "docid": "12df986f5554865d3c1e3c4bc63aa6c9", "score": "0.5156448", "text": "def get_pattern():\n return '-orepet'", "title": "" }, { "docid": "2e176b310fa9bc16558f338bbf76b441", "score": "0.5153585", "text": "def decode_nmea(self, strings):\n for msg in strings:\n msg_dict = ais_nmea_regex.search(msg).groupdict()\n\n if msg_dict['checksum'] != nmea_checksum_hex(msg):\n raise AisUnpackingException('Checksum failed')\n\n try:\n msgs = [ais_nmea_regex.search(line).groupdict() for line in strings]\n except AttributeError:\n raise AisUnpackingException('one or more NMEA lines did were malformed (1)' )\n if None in msgs:\n raise AisUnpackingException('one or more NMEA lines did were malformed')\n\n bits = []\n for msg in msgs:\n msg['fill_bits'] = int(msg['fill_bits'])\n bv = binary.ais6tobitvec(msg['body'])\n if int(msg['fill_bits']) > 0:\n bv = bv[:-msg['fill_bits']]\n 
bits.append(bv)\n bits = binary.joinBV(bits)\n self.decode_bits(bits)", "title": "" }, { "docid": "5b9855c985e2dc5eb720dab58b9cb352", "score": "0.513065", "text": "def isNtSequence(sequence):\n return re.match(r\"^[acgtn\\s]+$\", sequence, re.I)", "title": "" }, { "docid": "df8401129f7ab90fc5da41bbb94431fb", "score": "0.5093273", "text": "def fromString(str: unicode) -> ghidra.app.plugin.assembler.sleigh.sem.AssemblyPatternBlock:\n ...", "title": "" }, { "docid": "bd65fbc7370cbd41d1c4f83a3c1895a8", "score": "0.5091617", "text": "def get_input_pattern():\n return '-phon'", "title": "" }, { "docid": "97fee4af54194d046b41fb53ce6353db", "score": "0.5089016", "text": "def test_valid_full(self):\n record = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'\n\n result = self.grammar.parseString(record)[0]\n\n self.assertEqual('IPA', result.record_type)\n self.assertEqual(1234, result.transaction_sequence_n)\n self.assertEqual(23, result.record_sequence_n)\n self.assertEqual('AC', result.agreement_role_code)\n self.assertEqual('I-000000229-7', result.ipi_base_n)\n self.assertEqual(1234567890, result.ipi_name_n)\n self.assertEqual('A12345678', result.ip_n)\n self.assertEqual('LAST NAME', result.ip_last_name)\n self.assertEqual('FIRST NAME', result.ip_writer_first_name)\n self.assertEqual(9, result.pr_society)\n self.assertEqual(20.5, result.pr_share)\n self.assertEqual(10, result.mr_society)\n self.assertEqual(30, result.mr_share)\n self.assertEqual(11, result.sr_society)\n self.assertEqual(023.12, result.sr_share)", "title": "" }, { "docid": "782aac2d61defbf0214c6656734cf4fe", "score": "0.5077233", "text": "def pattern(self) -> \"str\":\n return self._attrs.get(\"pattern\")", "title": "" }, { "docid": "782aac2d61defbf0214c6656734cf4fe", "score": "0.5077233", "text": "def pattern(self) -> \"str\":\n return self._attrs.get(\"pattern\")", "title": "" }, { "docid": "782aac2d61defbf0214c6656734cf4fe", "score": "0.5077233", "text": "def pattern(self) -> \"str\":\n return self._attrs.get(\"pattern\")", "title": "" }, { "docid": "782aac2d61defbf0214c6656734cf4fe", "score": "0.5077233", "text": "def pattern(self) -> \"str\":\n return self._attrs.get(\"pattern\")", "title": "" }, { "docid": "e2fdb8ba4313197096e691ec1e6ac1d0", "score": "0.5076868", "text": "def get_input_pattern():\n return '-token'", "title": "" }, { "docid": "c40f66b2ce2869174caaa41333617383", "score": "0.50552034", "text": "def generate_pattern():\n pattern_len = 0\n # keep asking for input until it's in range\n while(pattern_len < 3 or pattern_len > 5):\n str_input = raw_input(\n \"How Long should the pattern be? (min 3, max 5): \")\n # verify it is a number\n if(str_input.isdigit()):\n pattern_len = int(str_input)\n # Copy all valid possibilities and shuffle in random order.\n # Don't just pick random indecies because Mastermind requires uniqueness\n used_chars = VALID_CHARS\n random.shuffle(used_chars)\n # Take first up to our max len\n correct_pattern = used_chars[:pattern_len]\n return correct_pattern", "title": "" }, { "docid": "a807c1cbd1ad109a1ab6306d6a431f0c", "score": "0.5047847", "text": "def make_pattern(self, fullname):\n parts = fullname.split('.')\n return r'_ZN?' 
+ r''.join([r'\\d+{}'.format(p) for p in parts])", "title": "" }, { "docid": "d8b237a01b079037d416b7283b8038d4", "score": "0.5025754", "text": "def test_valid_minimum(self):\n record = 'IPA0000123400000023AS00000000000 A12345678LAST NAME 00000 00000 00000'\n\n result = self.grammar.parseString(record)[0]\n\n self.assertEqual('IPA', result.record_type)\n self.assertEqual(1234, result.transaction_sequence_n)\n self.assertEqual(23, result.record_sequence_n)\n self.assertEqual('AS', result.agreement_role_code)\n self.assertEqual(None, result.ipi_base_n)\n self.assertEqual(0, result.ipi_name_n)\n self.assertEqual('A12345678', result.ip_n)\n self.assertEqual('LAST NAME', result.ip_last_name)\n self.assertEqual(None, result.ip_writer_first_name)\n self.assertEqual(None, result.pr_society)\n self.assertEqual(0, result.pr_share)\n self.assertEqual(None, result.mr_society)\n self.assertEqual(0, result.mr_share)\n self.assertEqual(None, result.sr_society)\n self.assertEqual(0, result.sr_share)", "title": "" }, { "docid": "598b912f0d1c47a5c68a71bfe149c4d4", "score": "0.4950538", "text": "def PatternName(self) -> str:", "title": "" }, { "docid": "97546a882b57bd7dc5490fa8a8416041", "score": "0.49402416", "text": "def _line_pattern(pattern: str) -> Pattern[str]:\n pattern = pattern.replace(\" \", r\"[ ]+\")\n pattern = pattern.replace(\"INT\", r\"[0-9]+\")\n pattern = pattern.replace(\"VALUE\", r\"[0-9.]+[%kMGTPEZY]?\")\n return re.compile(\"^\" + pattern + \"$\")", "title": "" }, { "docid": "924d94c6c0482fd12a6921fb248fe24a", "score": "0.49285832", "text": "def get_seq(self, pattern):\n return self.seq[pattern]", "title": "" }, { "docid": "9d8484bac67643503eba6c5c2db57937", "score": "0.49114734", "text": "def rest_of_ORF(dna):\n # YOUR IMPLEMENTATION HERE\n endcut = len(dna) #first assumes entire sequence is one reading frame\n aasequence = coding_strand_to_AA(dna) #finds amino acid sequence\n for i in range(len(aasequence)): \n if aasequence[i] == 'i': # checks if first letter of \"invalid base\" is present in aa sequence\n return 'invalid dna string'\n elif aasequence[i] == '|': #Checks for stop codon\n endcut = i*3;\n break\n return dna[0:endcut]", "title": "" }, { "docid": "f1d0f458063c383fc7e5b7f1f232eada", "score": "0.4887156", "text": "def _get_regex_simple(self) -> str:\n if not self._final_states or not self._start_state:\n return \"\"\n if len(self._final_states) != 1 or len(self._start_state) != 1:\n raise ValueError(\"The automaton is not simple enough!\")\n if self._start_state == self._final_states:\n # We are suppose to have only one good symbol\n for symbol in self._input_symbols:\n out_states = self._transition_function(\n list(self._start_state)[0], symbol)\n if out_states:\n return \"(\" + str(symbol.value) + \")*\"\n return \"epsilon\"\n start_to_start, start_to_end, end_to_start, end_to_end = \\\n self._get_bi_transitions()\n return get_regex_sub(start_to_start,\n start_to_end,\n end_to_start,\n end_to_end)", "title": "" }, { "docid": "913d83199cb64283952fa5805504daad", "score": "0.48795497", "text": "def nextString(strn):\r\n temp_str = strn\r\n if strn[-1] == \"_\":\r\n temp_str = temp_str[:-1] + alpha[1]\r\n elif strn[-1] != \"_\" and strn[-1] != \"z\":\r\n temp_str = temp_str[:-1] + chr((ord(strn[-1])+1))\r\n else:\r\n if len(strn) > 1:\r\n temp_str = nextString(strn[:-1])\r\n temp_str = temp_str + \"_\"\r\n else:\r\n temp_str = \"a\"\r\n return temp_str", "title": "" }, { "docid": "44ea75e40b60b0e229541662afc51e16", "score": "0.48775673", "text": "def pattern(word):\n pattern = 
\"\"\n\n def get_type(word, pos):\n if word[pos] in ['a', 'e', 'i', 'o', 'u']:\n return 'V'\n else:\n return 'C'\n\n pattern = current_type = get_type(word, 0)\n\n for i in range(1, len(word)):\n new_type = get_type(word, i)\n if new_type != current_type:\n pattern += new_type\n current_type = new_type\n\n return pattern", "title": "" }, { "docid": "54e71fb97ea9561918f0e60aa26cc4b2", "score": "0.48767567", "text": "def pattern_gen(length):\n if length > MAX_PATTERN_LENGTH:\n raise MaxLengthException(\n \"ERROR: Pattern length exceeds maximum of {0}\".format(MAX_PATTERN_LENGTH)\n )\n\n result = \"\"\n for p in pattern():\n if len(result) < length:\n result += p\n else:\n return result[:length]\n\n # If we end up here we've exhausted all characters so truncate the pattern\n return result[:length]", "title": "" }, { "docid": "43e996db382a20cda8279ad4570d0933", "score": "0.48606306", "text": "def prePare(aStr):\n \n headTest = \"*** START OF THIS PROJECT GUTENBERG EBOOK\"\n tailTest = \"*** END OF THIS PROJECT GUTENBERG EBOOK\"\n\n head = aStr.split(headTest)\n tail = head[1].split(tailTest)\n\n return tail[0]", "title": "" }, { "docid": "ac1538c1b34042dcdf3ff989064dd4e4", "score": "0.48594612", "text": "def nm_to_n(s):\r\n return s.split(\"!\")[0]", "title": "" }, { "docid": "6fa1fd18c44be48664f9e1d0f0dc41b3", "score": "0.48390988", "text": "def extractPattern(self, line):\n if re.search(self.tsAndProto, line):\n x = re.split(self.tsAndProto, line)\n return self.extractGroups(x[1])\n \n elif re.search(self.tsAndProto2, line):\n x = re.split(self.tsAndProto2, line)\n return self.extractGroups(x[1])\n elif re.match(r'^\\s*$', line):\n return None\n else:\n print 'BAD PARSING ERROR. Did format change\\n%s' % line\n #raise Exception('In case you want to be more strict about format')\n return None", "title": "" }, { "docid": "3b6350ff93c115aee5fedd33fe91c83f", "score": "0.48374867", "text": "def get_regex_unica_code():\n unica_msg = ERRORS[NotAllowedParameterValueException._unica_code]['details']\n return '^{0}'.format(unica_msg.format('(.*)', '(.*)'))", "title": "" }, { "docid": "346df1c30e14020a78c9c5912f079235", "score": "0.48111528", "text": "def test_pe_validation_invalid_digit_verification():\n\n invalid_number = '032141830'\n assert pe.start(invalid_number) is False", "title": "" }, { "docid": "466012540d4dda0290ecc5b47d9667b0", "score": "0.47937033", "text": "def extract(string):\n if len(string) == 1:\n return string\n elif len(string) == 2 and string[1] == '$': # end of reads looks like G$\n return string[0] \n elif len(string) == 3 and string[0] == '^': # begining of reads is like ^FG\n return string[2]\n else: \n return string", "title": "" }, { "docid": "e4ff4183c9e86530b748d45b9dfae40c", "score": "0.47816855", "text": "def decode_nmea(self, nmea_str: str, ens_index: int):\n\n # Verify the NMEA string is good\n if Nmea.check_nmea_checksum(nmea_str):\n # Add each message to the list\n # Decode the data\n if 'gga' in nmea_str or 'GGA' in nmea_str:\n self.gga[ens_index] += nmea_str\n self.last_gga = nmea_str\n if 'gsa' in nmea_str or 'GSA' in nmea_str:\n self.gsa[ens_index] += nmea_str\n if 'vtg' in nmea_str or 'VTG' in nmea_str:\n self.vtg[ens_index] += nmea_str\n self.last_vtg = nmea_str\n if 'dbt' in nmea_str or 'DBT' in nmea_str:\n self.dbt[ens_index] += nmea_str\n if 'hdt' in nmea_str or 'HDT' in nmea_str:\n self.hdt[ens_index] += nmea_str\n self.last_hdt = nmea_str", "title": "" }, { "docid": "f9b7c28401e3e5e3d92bc367911b1296", "score": "0.47800258", "text": "def 
answer_pattern(pattern, args):\n if pattern not in PATTERNS:\n return None\n if len(args) != 1:\n return None\n\n return {\n 'plaintxt': ''\n }", "title": "" }, { "docid": "65dbb5eea68a5348dad0a1eff2b878a3", "score": "0.47784787", "text": "def test_firstNonStrict(self):\n self.assertEqual(self.RNA('').firstNonStrict(), None)\n self.assertEqual(self.RNA('A').firstNonStrict(), None)\n self.assertEqual(self.RNA('ACGUACGUcgaucagu').firstNonStrict(), None)\n self.assertEqual(self.RNA('N').firstNonStrict(), 0)\n self.assertEqual(self.RNA('-').firstNonStrict(), 0)\n self.assertEqual(self.RNA('ACGUcgAUGUGCAUcagu-').firstNonStrict(),18)", "title": "" }, { "docid": "456df93bb374a68dcb64ea97df4df8f1", "score": "0.47687158", "text": "def ParseNext(self, text, start, end):\n # First, look for the first one or two degree types.\n degree_m1 = self.degrees_re.search(text[start:end], re.IGNORECASE)\n if not degree_m1:\n return None\n\n\n degree_m2 = self.degrees_re.search(text[next_start:end], re.IGNORECASE)\n\n # Next, look for the major before the second (if any) degree type.\n majors = []\n m = self.degrees_re.search(text[start:(start + next_start)], re.IGNORECASE)\n while m:\n print \"major: \", m.group(1)\n canonical = self.names_to_majors[m.group(1)]\n majors.append(canonical)\n next_start = start + m.end()\n m = self.degrees_re.search(text[(start + next_start):end], re.IGNORECASE)\n\n degrees = []\n m = self.degrees_re.search(text[start:end], re.IGNORECASE)\n while m:\n print \"degree: \", m.group(1)\n canonical = self.degrees_to_canonical[m.group(1)]\n degrees.append(canonical)\n next_start = start + m.end()\n m = self.degrees_re.search(text[(start + next_start):end], re.IGNORECASE)\n\n dates = []\n m = self.degrees_re.search(text[start:end], re.IGNORECASE)\n while m:\n print \"degree: \", m.group(1)\n canonical = self.degrees_to_canonical[m.group(1)]\n degrees.append(canonical)\n next_start = start + m.end()\n m = self.degrees_re.search(text[(start + next_start):end], re.IGNORECASE)\n \n # If the degree type and major were separated by little-enough\n #space, add it to the list.\n if np.abs(degree_m1.end() - major_m1.end()) < 25:\n pass", "title": "" }, { "docid": "8cfe761aa109f2fe9201387598dcfd53", "score": "0.4764065", "text": "def is_valid_card(numb: str) -> str:\n\n pattern = '^([456][0-9]{3})-?([0-9]{4})-?([0-9]{4})-?([0-9]{4})$'\n match = re.fullmatch(pattern, numb)\n if match is None:\n return False\n return True", "title": "" }, { "docid": "f6f2587163b21c6367de00787168a608", "score": "0.47516102", "text": "def return_type(codice):\n if re.search('[AB]{2}[0-9]{5}', codice):\n return \"s\" #special\n return \"n\" #normal", "title": "" }, { "docid": "3f48adac920d070c00b0c719b88f3f55", "score": "0.47412145", "text": "def get_regex_unica_code():\n unica_msg = ERRORS[BadParameterValueException._unica_code]['details']\n return '^{0}'.format(unica_msg.format(''))", "title": "" }, { "docid": "7b4e211ef2b499e200e362fed1a9fc4f", "score": "0.4740358", "text": "def test_sp_validation_digit_verification_with_13_digits():\n\n invalid_number = 'P011004248003'\n assert sp.start(invalid_number) is False", "title": "" }, { "docid": "0c542c732ef6421306b760769d12a583", "score": "0.47359058", "text": "def demo4():\r\n pat = r\"h.{3}l\"\r\n s = \"apple hot s zucchini hovel python\"\r\n m = re.search(pat,s)\r\n if m == None:\r\n print(\"The pattern '{}' was not found in string '{}'\".format(\r\n pat,\r\n s\r\n ))\r\n else:\r\n # there was a match\r\n print(\"The pattern '{}' was found in string '{}'; the match 
was '{}' at character {} to {}\".format(\r\n pat,\r\n s,\r\n m.group(),\r\n m.start(),\r\n m.end()\r\n ))", "title": "" }, { "docid": "87fde0a7002497cf62d754a4089ee8ea", "score": "0.47162625", "text": "def scan_ident(self):\r\n \r\n lista = [self.actualizar_Caracter()]\r\n \r\n while self.caracter.isalnum():\r\n lista.append(self.actualizar_Caracter())\r\n \r\n string_Lista = ''.join(lista)\r\n \r\n if string_Lista in PalabrasReservadas:\r\n \r\n return Token(18, string_Lista, self.linea)\r\n #print(\"Imprimiendo tipo \"+TOKENS[0]+\" imprimiendo lo que es: \"+string_Lista +\" imprimiendo linea:\" + str(self.linea) )\r\n \r\n return Token(0, string_Lista, self.linea)", "title": "" }, { "docid": "85f8a6ee80141bb6384f8e2bfb809983", "score": "0.47142023", "text": "def get_next_password_candidate(string):\n return re.sub(r\"([a-y])(z*)$\", lambda x: chr(ord(x.group(1)) + 1) + len(x.group(2)) * \"a\", string)", "title": "" }, { "docid": "12d8917b18f3c304e589205e4dd401c7", "score": "0.4710493", "text": "def get_next_bot_phrase(self):\n logging.error('Unimplemented')\n return \"\"", "title": "" }, { "docid": "059148d18686b792dbbd6845ca2b15f9", "score": "0.4708414", "text": "def cal_semantic_pattern(self):\r\n\r\n\r\n pattern = ['please check', 'pls check', 'you should',\r\n 'you can try', 'you could try', 'check out',\r\n 'in short', 'the most important', 'i\\'d recommend',\r\n 'in summary', 'keep in mind', 'i suggest']\r\n\r\n lower_plain_text = self.raw_text.lower()\r\n\r\n for p in pattern:\r\n if lower_plain_text.find(p) != -1:\r\n self.semantic_pattern = 1\r\n break", "title": "" }, { "docid": "44bed5feabd1572216c4258fbfa392e3", "score": "0.469927", "text": "def valid_visa_format(visa_code):\n valid_regex = re.compile(r'^[\\dA-Za-z]{5}-[\\dA-Za-z]{5}$')\n valid_regex_match = valid_regex.search(visa_code)\n valid = True\n if valid_regex_match is None:\n valid = False\n return valid", "title": "" }, { "docid": "5474b11fa4c0aa4017bd62370f4165aa", "score": "0.46976018", "text": "def parse_seqid(temp_string):\n temp_string=temp_string.replace(\"_DNA_end_none\",\"\") #replace end string with nothing \n start_index=temp_string.index(\"strt_\")\n return temp_string[start_index+len(\"strt_\"):] #return same names\n\n #-----------------------------\n #Added June 14 2017\n #------------------------------", "title": "" }, { "docid": "e4325914da300036adce5ffb97117c19", "score": "0.46940646", "text": "def pattern_finditer_test(self):\n text = 'You can try to find an ant in this string'\n print('text =', text)\n pattern = re.compile('an?\\w') # find 'an' either with or without a following word \n print('pattern =', pattern)\n result = pattern.finditer(text)\n print('pattern.finditer(text):', result)\n for match in result:\n # Start index of match (integer)\n sStart = match.start()\n # Final index of match (integer)\n sEnd = match.end()\n # Complete match (string)\n sGroup = match.group()\n # Print match\n print('Match \"{}\" found at: [{},{}]'.format(sGroup, sStart,sEnd))\n return", "title": "" }, { "docid": "5f24940ab0ff3b0d8d279e7a8ece7967", "score": "0.46939394", "text": "def code(invoerstring):", "title": "" }, { "docid": "ea6be0c6b24ac571094bff9e6c561b96", "score": "0.4693698", "text": "def is_valid_sequence_start(text, i):\n return i > 0 and (text[i].isdigit() and not text[i - 1].isdigit())", "title": "" }, { "docid": "d1bd08c9d01a6b999ac0d9ea7cfb961b", "score": "0.4692746", "text": "def _parseSequence(line):\n return ''.join(re.findall(SEQ_PATTERN, line))", "title": "" }, { "docid": 
"c1e4fb05641920db879ebc53e9bea312", "score": "0.46927062", "text": "def test_am_validation_right_size_valid_number():\n\n valid_number = '100000010'\n assert am.start(valid_number) is True", "title": "" }, { "docid": "0a1819f713648db8735f17ea9ebf4fcc", "score": "0.46910796", "text": "def genAlt(att, pattern, extent=''):\n if len(pattern) == 0:\n return ConstGenerator(att)\n # handle '.' shortcut and other special (character) classes\n if pattern == '.':\n pattern = '[' + RE_DOT + ']'\n elif pattern[:2] == '[:' and pattern[-2:] == ':]':\n desc = pattern[2:-2]\n if desc in RE_POSIX_CC:\n pattern = '[' + RE_POSIX_CC[desc] + ']'\n else:\n # allow something like [:count start=10 format=08X step=2:]\n g = createGenerator(att, None, getParams(desc), msg=pattern)\n return Pattern.repeat(att, g, extent)\n if pattern[0] == '(':\n # alternative, AltGenerator\n l = Pattern.altSplit(pattern)\n assert len(l) > 0\n if len(l) == 1:\n g = Pattern.genCat(att, l[0])\n else:\n g = AltGenerator(att, gens=[Pattern.genCat(att, p) for p in l])\n return Pattern.repeat(att, g, extent)\n elif pattern[0] == '[':\n # character class, CharGenerator...\n if pattern[1] == '^':\n assert len(pattern)>3 and pattern[-1] == ']'\n notch = CharsGenerator.parseCharSequence(att, pattern[2:-1])\n allch = CharsGenerator.parseCharSequence(att, RE_DOT)\n difch = ''.join(sorted(list(set(allch) - set(notch))))\n g = CharsGenerator(att, chars=difch)\n else:\n assert len(pattern)>2 and pattern[-1] == ']'\n g = CharsGenerator(att, params={'chars':pattern[1:-1]})\n g.size, g.lenmin, g.lenmax = 0xffffffff, 1, 1\n return Pattern.repeat(att, g, extent)\n else:\n # possibly repeated constant text\n return Pattern.repeat(att, ConstGenerator(att, cst=pattern), extent)", "title": "" }, { "docid": "3f3c54c377533213186ecba73a068beb", "score": "0.46900722", "text": "def matches(self, string):\n return self.simulateNfa(string)", "title": "" }, { "docid": "5efaabfd34a74ab1133c9617520d9671", "score": "0.4689997", "text": "def _get_next_string_data(self, offset):\n while offset < len(self._firmware):\n if self._firmware[offset] != '\\x00':\n start_address = offset\n end_address = offset\n while offset <= len(self._firmware):\n offset += 1\n if self._firmware[offset] == '\\x00':\n end_address = offset\n break\n data = self._firmware[start_address:end_address]\n return data, start_address, end_address\n else:\n offset += 1\n return None, None, None", "title": "" }, { "docid": "a65f195f14ca53f52701248219ea9cc5", "score": "0.4689868", "text": "def test_ma_validation_invalid_digit_verification():\n\n invalid_number = '120000387'\n assert ma.start(invalid_number) is False", "title": "" }, { "docid": "3942db08f9a8c80a532fcab047bc598f", "score": "0.46675974", "text": "def getRegEx(pattern):\n\t# Translate OSC-address syntax to python 're' syntax\n\tpattern = pattern.replace(\".\", r\"\\.\")\t\t# first, escape all '.'s in the pattern.\n\tpattern = pattern.replace(\"(\", r\"\\(\")\t\t# escape all '('s.\n\tpattern = pattern.replace(\")\", r\"\\)\")\t\t# escape all ')'s.\n\tpattern = pattern.replace(\"*\", r\".*\")\t\t# replace a '*' by '.*' (match 0 or more characters)\n\tpattern = pattern.translate(OSCtrans)\t\t# change '?' to '.' 
and '{,}' to '(|)'\n\n\treturn re.compile(pattern)", "title": "" }, { "docid": "1f9d34736184bc9e6e799415944e4a75", "score": "0.46664906", "text": "def get_next_valid_password(string):\n while not is_valid_password(string):\n string = get_next_password_candidate(string)\n return string", "title": "" }, { "docid": "a37db467ac4ec268416e10e4824f0edd", "score": "0.4664165", "text": "def prob2():\n return re.compile(r\"\\^\\{@\\}\\(\\?\\)\\[\\%\\]\\{\\.\\}\\(\\*\\)\\[\\_\\]\\{\\&\\}\\$\")", "title": "" }, { "docid": "8dc6f551f9f1a6dae18864d908b669a9", "score": "0.4658099", "text": "def find_genders(\n code: str,\n pattern: Pattern[str] = re.compile(r\"{{Pn\\|?w?}} ''([fm])[singvol ]*''\"),\n) -> List[str]:\n return uniq(pattern.findall(code))", "title": "" }, { "docid": "be4f716a023b8c8343622f85ada3e77e", "score": "0.4656044", "text": "def dna_rv_complement(pattern):\n project_d = {'A': 'T', 'C': 'G'}\n project_d.update({v: k for k, v in project_d.items()})\n rv_pattern = ''\n for i in range(-1, -len(pattern)-1, -1):\n rv_pattern = rv_pattern + project_d[pattern[i]]\n return rv_pattern", "title": "" }, { "docid": "55c4aa2071715d6f5b05d6eb4b0ae2c2", "score": "0.4646462", "text": "def readAmfString(self, stringWithoutMarker, noCache=False):\n \n if (self.verbose and stringWithoutMarker):\n print \"String(wm) \",\n \n ref = self.readUint29()\n \n if ref == None: return None\n if (ref & 1) == 0:\n if (self.verbose) : print \"Ref: %d\" %(ref>>1),\n return self.getString(ref >> 1);\n length = ref >> 1;\n if length == 0:\n return \"\"\n \n if (self.verbose) : print \" (%d) Len: %d\" % (len(self.stringList), (ref>>1)),\n s = self.readBytes(length)\n \n if not noCache: # for flash11 support\n self.addString(s)\n return s", "title": "" }, { "docid": "a851f2e2bef5f5311b16fd35eb903662", "score": "0.46457624", "text": "def test_pe_validation_valid_number_is_really_valid():\n\n valid_number = '032141840'\n assert pe.start(valid_number)", "title": "" }, { "docid": "8bc99a6df80a674b1f8f9bad9b6b9a76", "score": "0.46442527", "text": "def match_8(string):\n if re.search(r'.*[a]+[bc]+.*', string):\n return True\n else:\n return False", "title": "" }, { "docid": "795723ffb7750dec0bd5f8dd6d3fbe45", "score": "0.46436915", "text": "def test_UniqueNameSequence_StringInSequence_multi_char():\n g = control_flow_graph_generator.UniqueNameSequence('a')\n assert g.StringInSequence(26) == 'aa'\n assert g.StringInSequence(27) == 'ab'\n assert g.StringInSequence(28) == 'ac'", "title": "" }, { "docid": "0347dce40243a09bf50d63ae58ed087c", "score": "0.464305", "text": "def test_sp_validation_start_different_P_with_13_digits():\n\n invalid_number = 'U172030964897'\n assert sp.start(invalid_number) is False", "title": "" }, { "docid": "c0bf91484ac7e85245f1cdbeb1445858", "score": "0.46428016", "text": "def match_4(string):\n #Your Code Here\n pattern = ...\n\n #Do not edit following code\n prog = re.compile(pattern)\n return prog.search(string) is not None", "title": "" }, { "docid": "4a3315c13c2f285a0a2424a988f30a49", "score": "0.46423587", "text": "def testFoundAtEnd(self):\n seq = Seq(\"ACGT\")\n self.assertEqual([2], findPrimer(\"GT\", seq))", "title": "" }, { "docid": "89fa80f4b8360f574061ebb609443260", "score": "0.46396443", "text": "def test_valid_common(self):\n record = 'IPA0000000000000002AC00250165006000000000000066 SOCIETY MUSIC 61 0500061 1000061 10000'\n\n result = self.grammar.parseString(record)[0]\n\n self.assertEqual('IPA', result.record_type)\n self.assertEqual(0, result.transaction_sequence_n)\n 
self.assertEqual(2, result.record_sequence_n)\n self.assertEqual('AC', result.agreement_role_code)\n self.assertEqual(None, result.ipi_base_n)\n self.assertEqual(250165006, result.ipi_name_n)\n self.assertEqual('66', result.ip_n)\n self.assertEqual('SOCIETY MUSIC', result.ip_last_name)\n self.assertEqual(None, result.ip_writer_first_name)\n self.assertEqual(61, result.pr_society)\n self.assertEqual(50, result.pr_share)\n self.assertEqual(61, result.mr_society)\n self.assertEqual(100, result.mr_share)\n self.assertEqual(61, result.sr_society)\n self.assertEqual(100, result.sr_share)", "title": "" }, { "docid": "d7ef3cab8f9e6c770d9c7c40131acf01", "score": "0.4639484", "text": "def read_until_regexp(self, *expected):\n if not expected:\n raise RuntimeError('At least one pattern required')\n if self._is_valid_log_level(expected[-1]):\n loglevel = expected[-1]\n expected = expected[:-1]\n else:\n loglevel = None\n success, output = self._read_until_regexp(*expected)\n self._log(output, loglevel)\n if not success:\n expected = [exp if is_string(exp) else exp.pattern\n for exp in expected]\n raise NoMatchError(expected, self._timeout, output)\n return output", "title": "" }, { "docid": "bf786163ccfdc6a0c740df4f3e2132b4", "score": "0.46387336", "text": "def _parse_next_sequence_token_from_exception(self, ex):\n parsed_token = None\n if \"sequenceToken: \" in str(ex):\n # Format for DataAlreadyAcceptedExceptions\n parsed_token = str(ex).split(\"sequenceToken: \")[1]\n elif \"sequenceToken is: \" in str(ex):\n # Format for InvalidSequenceTokenException\n parsed_token = str(ex).split(\"sequenceToken is: \")[1]\n else:\n parsed_token = None\n\n if parsed_token == \"null\":\n # This happens when sending log events with a token to\n # a stream that doesn't expect a token.\n parsed_token = None\n return parsed_token", "title": "" }, { "docid": "eb0948fbade1aac0508a515310073cd2", "score": "0.46363676", "text": "def parseSerialNumber(self, output):\r\n if output:\r\n serialNumber = re.search('\\*SE\\s+([\\w \\-]+?)[\\r\\n]', output)\r\n if not serialNumber:\r\n raise ValueError, \"HMC not found\"\r\n return serialNumber.group(1).strip()", "title": "" }, { "docid": "a75dcbc8d353c12b1083d5b05653973c", "score": "0.46353972", "text": "def _parse_string(regex, source):\n return re.search(regex, source).group(1)", "title": "" }, { "docid": "d835e5d44060d9d578ce2e88ad80d60e", "score": "0.46347848", "text": "def regex():\n regex_list = []\n for command in ['DRIVE_CURRENT', 'DRIVE_VOLTAGE', 'TEMP_BEARING', 'TEMP_MOTOR', 'ROTATION_SPEED_ACTUAL']:\n regex_list.append(r'(?P<%s>00110%03d06\\d{9})\\r' % (command, getattr(InstrumentCommand, command)))\n return ''.join(regex_list)", "title": "" }, { "docid": "253fb4b89bb6a5234b275d0aceb267c7", "score": "0.46308357", "text": "def test_pa_validation_invalid_digit_verification():\n\n invalid_number = '159999990'\n assert pa.start(invalid_number) is False", "title": "" }, { "docid": "48ff8804fa2c38e9e7f26f4f20e1b125", "score": "0.46256134", "text": "def ttest_rn_validation_invalid_digit_verification():\n\n invalid_number = '200400405'\n assert rn.start(invalid_number) is False", "title": "" }, { "docid": "2f92341164c37a962efc6b783e303da9", "score": "0.46164072", "text": "def test_regex(self):\n return self.test_str.regex", "title": "" }, { "docid": "61de75d4701c2d45e3791a6839b788b4", "score": "0.4614663", "text": "def test_ba_validation_right_size_invalid_number_8_digits_and_second_digit_different_6_7_9():\n\n invalid_number = '12345665'\n assert ba.start(invalid_number) is 
False", "title": "" }, { "docid": "23c8958ada5b1a3447609ef125efbcb4", "score": "0.460868", "text": "def test_se_validation_invalid_digit_verification():\n\n invalid_number = '271234566'\n assert se.start(invalid_number) is False", "title": "" }, { "docid": "23469285f4d679d14e5db8ac97745fb3", "score": "0.46001017", "text": "def substring_checkc():\n nitrogenase = load_nitrogenase_seq()\n metagenome = load_metagenome()#So, this actually is a list of tubles with the name of a seqquence and then the sequence.\n \n for i in metagenome:\n print(i)\n pass", "title": "" }, { "docid": "f8294731bc283812355b5ab4cad0aab3", "score": "0.4597118", "text": "def getSyndrome(pattern):\n X22 = 1<<22 # vector representation of X^22\n X11 = 1<<11 # vector representation of X^11\n MASK12 = (1<<23)-(1<<11) # auxiliary vector for testing\n GENPOL = 0xc75 # generator polynomial, g(x) = x^11+x^10+x^6+x^5+x^4+x^2+1\n \n aux = X22\n if pattern >= X11:\n while pattern & MASK12: \n while not (aux & pattern): \n aux = aux >> 1\n pattern ^= (aux/X11) * GENPOL\n return pattern", "title": "" }, { "docid": "f507834c65173f01135182e61f1c97b5", "score": "0.45961547", "text": "def _readline(self, buffer: str) -> Tuple[str, str]:\n m = self._regex.match(buffer)\n\n if m is not None:\n start, end = m.span()\n return buffer[start:end], buffer[end:]\n else:\n return '', buffer", "title": "" }, { "docid": "7fd786f7de588716a997b5f129db9ead", "score": "0.45951673", "text": "def test_ap_validation_check_digit():\n\n invalid_number = '030123456'\n assert ap.start(invalid_number) is False", "title": "" }, { "docid": "6eac4983d035014939a1c88461051346", "score": "0.45933494", "text": "def next(self):\n next_token = self._next_token or get_next(self._string, self._idx, self._extended)\n\n self._idx = next_token['idx']\n self._next_token = False\n self.parse_string += str(next_token['token'].get('token'))\n\n return next_token['token']", "title": "" }, { "docid": "4235dbc20d1a996dedc7939ffb57b607", "score": "0.45932174", "text": "def search_next(self, pattern, case_sensitive=False, regexp=False):\n pass # implemented in Ada", "title": "" }, { "docid": "26a3cc514c3368ee07692bf3664ef518", "score": "0.45905387", "text": "def decode_nmea(self, nmea_str: str, ens_index: int, num_ens: int):\n\n gga_list = []\n vtg_list = []\n dbt_list = []\n hdt_list = []\n\n # Verify the NMEA string is good\n if Nmea.check_nmea_checksum(nmea_str):\n # Add each message to the list\n # Decode the data\n if 'gga' in nmea_str or 'GGA' in nmea_str:\n gga_list.append(nmea_str)\n #self.decode_gga(nmea_str, ens_index)\n #self.gga_index += 1\n if 'vtg' in nmea_str or 'VTG' in nmea_str:\n vtg_list.append(nmea_str)\n #self.decode_vtg(nmea_str, ens_index)\n #self.vtg_index += 1\n if 'dbt' in nmea_str or 'DBT' in nmea_str:\n dbt_list.append(nmea_str)\n #self.decode_dbt(nmea_str, ens_index)\n #self.dbt_index += 1\n if 'hdt' in nmea_str or 'HDT' in nmea_str:\n hdt_list.append(nmea_str)\n #self.decode_hdt(nmea_str, ens_index)\n #self.hdt_index += 1\n\n # Initialize the array based on the number of messages found\n empty_arr = RtbRowe.nans(len(gga_list))\n self.corr_qual[ens_index] = empty_arr", "title": "" }, { "docid": "292e47d6038d5ebf78f04233ec95df10", "score": "0.45903683", "text": "def fromPattern(pat: ghidra.app.plugin.processors.sleigh.pattern.DisjointPattern, minLen: int, context: bool) -> ghidra.app.plugin.assembler.sleigh.sem.AssemblyPatternBlock:\n ...", "title": "" }, { "docid": "998aa25ba4c1348f5524e377bb793baf", "score": "0.45897546", "text": "def 
test_ba_validation_right_size_invalid_number_9_digits_and_second_digit_different_6_7_9():\n\n invalid_number = '123456749'\n assert ba.start(invalid_number) is False", "title": "" }, { "docid": "0548c1fc66c5877d5461e611fb179516", "score": "0.45846367", "text": "def _st25_parse_nucl(self, sequence_lines, alphabet=generic_nucleotide):\r\n\r\n active_sequence_string = \"\"\r\n\r\n for line in sequence_lines:\r\n\r\n if \":\" in line:\r\n continue\r\n\r\n for char in line.strip():\r\n if char.lower() in self.allowed_nucl_char:\r\n active_sequence_string += char\r\n\r\n return Seq(active_sequence_string, alphabet=alphabet)", "title": "" }, { "docid": "427d3f003ba166d6c858d1e0a3283922", "score": "0.45833868", "text": "def until(self, token):\n if token == 'braces':\n pattern = re.compile(r'{|}')\n elif token == 'parens':\n pattern = re.compile(r'\\(|\\)')\n else:\n pattern, _ = self.patterns[token]\n\n m = pattern.search(self.string, self.pos)\n\n if m:\n scanned = m.group(0)\n self.advance(m)\n\n return self.string[self.lastpos:self.pos - 1], scanned\n else:\n rest = self.string[self.pos:]\n self.pos = len(self.string)\n\n return rest, ''", "title": "" } ]
79bf5f0140d5fc9c5aab83a7346697ae
Creates new task and adds it to self.tasks list.
[ { "docid": "58f29043119db2d1a970602d9f076cad", "score": "0.7127945", "text": "def add_task(self, name, deadline, description):\n self.tasks.append(Task(name, deadline, description))", "title": "" } ]
[ { "docid": "fff715377043acf3b789f9b33833fa82", "score": "0.78593135", "text": "def add_task(self, task):\n self.tasks.append(task)", "title": "" }, { "docid": "8cab1af511cd0fbcd1134dec97afbf29", "score": "0.78399205", "text": "def create_task(self, task):\n data = dict(task=task)\n path = '/{version}/tasks'.format(version=self.api_version)\n return super().post(path, data)", "title": "" }, { "docid": "fc9c887819e09608f1c21d3b5ff67345", "score": "0.7670519", "text": "def add_task(self, task):\r\n self._tasks.append(task)\r\n return task", "title": "" }, { "docid": "fc9c887819e09608f1c21d3b5ff67345", "score": "0.7670519", "text": "def add_task(self, task):\r\n self._tasks.append(task)\r\n return task", "title": "" }, { "docid": "fc9c887819e09608f1c21d3b5ff67345", "score": "0.7670519", "text": "def add_task(self, task):\r\n self._tasks.append(task)\r\n return task", "title": "" }, { "docid": "07abd8af30f3a87777aa30b753d71e13", "score": "0.7541821", "text": "def create_task(self, **kwargs):\n return self._create(_task.Task, **kwargs)", "title": "" }, { "docid": "ee758ecff632ce0d85af0c173f5936ff", "score": "0.7421763", "text": "def create_task(self, options=None):\n r = self._get('/task/new')\n task = Task(r.get('taskid'), options, self.addr) if r else None\n return task", "title": "" }, { "docid": "08dcebc7b5f7c13809294a75e1f782cd", "score": "0.74187815", "text": "def add_task(self, task):\n self.__tasks__.append(TaskEntry(task))\n self.logger.write(logging.INFO,\n \"A new task '{}' has been added\".format(task.name))", "title": "" }, { "docid": "05e47c121a633476e6aa43ae8439aadf", "score": "0.7348197", "text": "def create_task(self, task):\n response = self.session.put(f\"{self.base_url}/\", json=task.to_json())\n\n check_status(response, 200, \"Unable to create task\")\n\n return Task().from_json(response.json())", "title": "" }, { "docid": "2f061496b0518472e34c5b035d624683", "score": "0.73399925", "text": "def add_new_task(self, task_info):\n\n task_name = task_info['task_name']\n #task_description = task_info['task_description'] #TODO: Enable this\n task_input_directory = task_info['input_directory']\n task_output_directory = task_info['output_directory'] #TODO: Enable this\n project = task_info['project']\n #estimated_completion = task_info['estimated_completion']\n\n new_task = Tasklists(\n taskname=task_name,\n #task_description=None, # TODO: Enable this\n #task_output_directory=task_output_directory,\n project=project,\n task_input_directory=task_input_directory,\n task_output_directory=task_output_directory\n #estimated_completion=estimated_completion\n )\n\n self.session.add(new_task)\n self.session.commit()", "title": "" }, { "docid": "7967347c1a0aec6b6855d5ccfe8e20ed", "score": "0.72669715", "text": "def create_new_task(self):\n task_name = self.TaskLineEdit.text()\n project = self.ProjectComboBox.currentText()\n project_id = self.queries.get_project_id_by_name(project)\n blocks = self.BlocksComboBox.currentText()\n blocked_by = self.BlockedByComboBox.currentText()\n input_directory = self.InputDirectoryLineEdit.text()\n output_directory = self.OutputDirectoryLineEdit.text()\n #estimated_completion = self.EstimatedCompletionCalendar.selectedDate()\n\n if len(task_name) > 0 and len(project) > 0:\n new_task = {\n 'task_name': task_name,\n 'project': project,\n 'project_id': project_id,\n 'blocks': blocks,\n 'blocked_by': blocked_by,\n 'input_directory': input_directory,\n 'output_directory': output_directory,\n #'estimated_completion': estimated_completion\n }\n\n 
self.queries.add_new_task(new_task)", "title": "" }, { "docid": "d55732d3f5e9b16f9936ce489e7256a7", "score": "0.72003585", "text": "def create(self):\n self.desired_task_definition.create()\n self.from_aws(self.desired_task_definition.arn)", "title": "" }, { "docid": "8137bf4c8d60f374681f49c1f45b5408", "score": "0.71455806", "text": "def add_task(self, task: Task) -> None:\n if not self._task_exists(task.task_id):\n self._tasks[task.task_id] = task\n self._task_states[task.task_id] = TaskState.PENDING\n self._pending_queue.put_nowait(task)\n logging.info(f\"Task {task.task_id} was added to the registry\")\n else:\n with self._lock:\n if self._get_state(task.task_id) == TaskState.CANCELED:\n self._set_state(task.task_id, TaskState.RUNNING)", "title": "" }, { "docid": "951c76c4ee2730b41e5f0573107c19b9", "score": "0.7144233", "text": "def add_task(self, task):\n if not self.play.get('tasks'):\n self.play['tasks'] = []\n if isinstance(task, Task):\n self.play['tasks'].append(task.as_obj())\n elif isinstance(task, list):\n lst = [t.as_obj() for t in task]\n self.play['tasks'].extend(lst)", "title": "" }, { "docid": "ab6718491d4c8d2d1a20dda39be2985b", "score": "0.7131818", "text": "def insert(self, kwargs): # pylint: disable=no-self-use\n task = Tasks(**kwargs)\n DB_SESSION.add(task) # pylint: disable=maybe-no-member\n DB_SESSION.commit() # pylint: disable=maybe-no-member\n return task", "title": "" }, { "docid": "c29b906e1b87397890b2c7a38d6b218d", "score": "0.7131764", "text": "def test_create_task(self):\n self._create_task()", "title": "" }, { "docid": "351c4f7054d3a1ed53c1d49034254e41", "score": "0.711248", "text": "def add_new_tasks(self, tasks):\n \tself.tasks.extend(tasks)", "title": "" }, { "docid": "00d76b701bc626c0a9562dd12a1c009a", "score": "0.7066551", "text": "def create(self, task):\n with self._lock:\n task[\"id\"] = str(uuid.uuid4())\n task[\"status\"] = State.PENDING\n task[\"start\"] = time()\n task[\"count\"] = 0\n self._tasks[task[\"id\"]] = task\n self._payloads[task[\"id\"]] = []\n return task[\"id\"]", "title": "" }, { "docid": "9ae628e41b93ea8e295a7768c7744cbb", "score": "0.70598555", "text": "def addTask(ptask):", "title": "" }, { "docid": "72e4168d4a7c216a7b82bb5107dc0a30", "score": "0.7036343", "text": "def add(self, task):\n raise NotImplementedError()", "title": "" }, { "docid": "1fd5b819afaa87e693da33689adc37ce", "score": "0.70169646", "text": "def create_task(self):\n return self.app.post(\n 'add/',\n data=dict(\n name='Goto the bank',\n due_date='02/05/2014',\n priority='1',\n posted_date='02/04/2014',\n status=1\n ),\n follow_redirects=True\n )", "title": "" }, { "docid": "55fbad6a137e5fe846b7f632a7ece94b", "score": "0.70100033", "text": "def createTask(self):\n if os.path.isdir(self.taskRoot):\n raise TasksException(\"Task already exists\")\n os.makedirs(self.taskRoot + \"/docs\")\n os.makedirs(self.taskRoot + \"/logs\")\n self.config[\"STATUS\"] = \"Created\"\n self.config[\"CREATE_DATE\"] = str(datetime.datetime.now().strftime(\"%b %d %Y %H:%M\")) + \" \" + time.tzname[0]\n open(self.configFile, \"w\").write(json.dumps(self.config, indent=4))\n Tasks.refreshTasksData()\n return self", "title": "" }, { "docid": "19e2a95c6768e56e738e25c577f1be78", "score": "0.6992296", "text": "def add_task(self, func, *args, **kwargs):\n task = {\n 'module': func.__module__,\n 'name': func.__name__,\n 'args': args,\n 'kwargs': kwargs\n }\n self.tasks.append(task)\n self.save()", "title": "" }, { "docid": "02731e69fba50556db58b92bfb0e059a", "score": "0.69786096", "text": "def 
_add(self, task):\n self._lock.acquire()\n self._tasks[task._id] = task\n self._complete.clear()\n task._state = Pycos._Scheduled\n self._scheduled.add(task)\n if self._polling and len(self._scheduled) == 1:\n self._notifier.interrupt()\n self._lock.release()", "title": "" }, { "docid": "cf1084707502a776e1cd42715f382e74", "score": "0.6959324", "text": "def __new_task(self, task_id, prev_task, run_time, data, pipeline):\n parser = task_parser.Factory.from_id(task_id, prev_task, data, pipeline=pipeline)\n task = parser.create_new(data)\n self.__add_task(parser, task, run_time, data)", "title": "" }, { "docid": "09ff57017049a97c5690a01c5e048ecd", "score": "0.69218904", "text": "def add_task(body): # noqa: E501\n if connexion.request.is_json:\n body = Task.from_dict(connexion.request.get_json()) # noqa: E501\n return 'do some magic!'", "title": "" }, { "docid": "fb380af052002f507e30ffbfe118a8a0", "score": "0.68909115", "text": "def add_task(self, task):\n message = serialize(task)\n self._provider.add(self.name, message)", "title": "" }, { "docid": "33cf396600f8a7cca92016eb3c44ac31", "score": "0.68721807", "text": "def add(cls, task_title, task_description, status_id, tackyboard_id, deadline=None):\n\n new_task = Task(\n task_title=task_title,\n task_description=task_description,\n status_id=status_id,\n tackyboard_id=tackyboard_id,\n deadline=deadline,\n )\n\n try:\n db.session.add(new_task)\n db.session.commit() # use flush() to access the unique primary key `task_id`\n except Exception as e:\n return {\"error\": {\"Database add error\": e}}\n\n return new_task", "title": "" }, { "docid": "1466dd5682b21dfdd838df4194b13223", "score": "0.68613154", "text": "def add_task(self, task):\n __task_id = 0\n if (isinstance(task, Task)):\n __task_id = self._task_id_counter.next()\n task.set_id(__task_id)\n self._tasks[__task_id]=task\n return __task_id", "title": "" }, { "docid": "e1ddff476805d97bea161b74c843dd30", "score": "0.683223", "text": "def add_task(workflow_id, task_id):\n\treturn _workflows.add_task(_workflows.get_or_404(workflow_id), _tasks.get_or_404(task_id))", "title": "" }, { "docid": "639f3ee008e2d0866485293dbaafe7cb", "score": "0.6821208", "text": "def add_task(self, task=None):\n if type(task) != Task:\n raise TypeError(\"Object type is not Task\")\n\n task.key = self.__storage.get_free_key_task()\n\n if task.key:\n if task.parent:\n parent_task = self.__storage.get_task(task.parent)\n if not parent_task:\n self.error_key_task(task.parent)\n else:\n parent_task.subtasks.append(task.key)\n task.parent = parent_task.key\n task.host = parent_task.host\n task.admins = parent_task.admins.copy()\n for admin in task.admins:\n self.__storage.save_message(admin, \"Admin {0} was added to task N{1}\".format(admin, task.key))\n Logger.get_logger().debug(\"Task N{0} was added how subtask to task N{1}\".format(task.key,\n parent_task.key))\n self.__storage.save_task(parent_task)\n\n self.__storage.save_task(task)\n self.__storage.save_message(task.host, \"User {0} was added how host to task N{1}\".format(task.host, task.key))\n Logger.get_logger().debug(\"Task N{0} was added to self.__storage\".format(task.key))\n return task.key\n else:\n Logger.get_logger().error(\"The free number for the task was not found\")\n raise Exception(\"The task limit is exceeded\")", "title": "" }, { "docid": "90ca0c08ac30dd0f666f5c0d2b7fb38f", "score": "0.6816578", "text": "def add_running_task(task):\n TaskRunner.tasks.append(task)\n log.debug(TaskRunner.tasks)", "title": "" }, { "docid": 
"b2235a76c48e536d75b3f291881a6cbf", "score": "0.6815958", "text": "def create_task(self, backend):\n pass", "title": "" }, { "docid": "8674b64c11ea68e5fdc8cd55eaaed20d", "score": "0.6799532", "text": "def __add_task(self, parser, task, run_time, data):\n time = run_time + parser.time()\n data.game.tasks.insert(time, task)", "title": "" }, { "docid": "429832e7dd13ab0d78176ebc5ce35b4e", "score": "0.6773638", "text": "def add_task(self, host, port, task):\n server = self.servers[(host, port)]\n server[\"tasks\"].append(task)", "title": "" }, { "docid": "e6c9b537300ed514612e8474979cc8f4", "score": "0.6749381", "text": "def _save_task(self):\n self._new_task = self._project.create_task()\n self._new_task.name = self._entry_task_name.get()\n self._new_task.notes = self._entry_task_notes.get(\"1.0\", \"end-1c\")\n self._new_task.priority = self._lb_priority.get(\"active\")\n self.update_tasks()\n self._disable_bttns()\n self.task_window.destroy()", "title": "" }, { "docid": "5a1f55298fd0d5c9cb998bfccabee018", "score": "0.67387116", "text": "def add_task(self, name, deps, actions, slots_required):\n new_task = ParallelTask(self.alloc_task_uuid(), name, deps, actions, slots_required)\n for dep_task in deps:\n self.task_insts[dep_task].add_successor(new_task)\n\n self.task_insts[new_task.uuid] = new_task\n self.pending_tasks[new_task.uuid] = new_task\n\n self.unallocated_tasks_available = True\n \n logger.debug(f\"Added Task {new_task.uuid}: deps={deps}, actions={actions}, slots_required={slots_required}\")\n return new_task.uuid\n\n # See if we have spare runners", "title": "" }, { "docid": "fa298c9db560ae523f89de45abc965ed", "score": "0.6736201", "text": "def test_ops_v1_tasks_create(self):\n pass", "title": "" }, { "docid": "6b33cf7b5aca4bc72a49721c5483186c", "score": "0.6729903", "text": "def add_task(self, func, *args, **kargs):\n\t\tself.tasks.put((func, args, kargs))", "title": "" }, { "docid": "6b33cf7b5aca4bc72a49721c5483186c", "score": "0.6729903", "text": "def add_task(self, func, *args, **kargs):\n\t\tself.tasks.put((func, args, kargs))", "title": "" }, { "docid": "d806cc85becdf121e27eb211a7aaa7f9", "score": "0.6727944", "text": "def new(self, task):\n self.all().update({task.id: task})", "title": "" }, { "docid": "37a5f59ad58af17f2347a776264efad1", "score": "0.6711736", "text": "def add_a_task(self, task_name, task_content):\n if task_name == TPEnum.URL_FETCH:\n self.client.push()\n elif task_name == TPEnum.HTM_PARSE:\n self._parse_queue.put(task_content, block=True)\n self.update_number_dict(TPEnum.HTM_NOT_PARSE, +1)\n elif task_name == TPEnum.ITEM_SAVE:\n self._save_queue.put(task_content, block=True)\n self.update_number_dict(TPEnum.ITEM_NOT_SAVE, +1)\n else:\n logging.error(\"%s add_a_task error: parameter task_name[%s] is invalid\", self.__class__.__name__, task_name)\n exit()\n return", "title": "" }, { "docid": "97a87f14fae48af5505c9c862ca6b162", "score": "0.67085445", "text": "def new_task(tt):\n if not isinstance(tt, TaskType):\n raise Exception(\"Illegal tt class: not TaskType\")\n\n t = Task()\n t.creation_time = datetime.utcnow()\n t.modification_time = datetime.utcnow()\n t.status = 'defined'\n t.task_type = tt\n return t", "title": "" }, { "docid": "9a723f7dec98ebecf0bae0f58e266fa3", "score": "0.6698205", "text": "def create(self, *args, **kwargs):\n raise NotImplementedError(\n f\"Current {self._type} task does not support 'create' operation!\"\n )", "title": "" }, { "docid": "6fbfc602f6fa3b5083b92adf65189431", "score": "0.6687505", "text": "def _create_task(self, 
name):\n new_sg_task = self._sg.create(\n \"Task\",\n {\"content\": name, \"sg_sync_in_jira\": True, \"project\": self._sg_project},\n )\n jira_key = self._get_jira_key(new_sg_task)\n return new_sg_task, jira_key", "title": "" }, { "docid": "7819353d64210340c5fac960fac0d9ef", "score": "0.6675913", "text": "def post(self, id):\n payload = marshal(api.payload, task_request)\n tasks_service.create_task(id, payload)\n return {'status': \"Task created successfully\"}", "title": "" }, { "docid": "1588973cd13aaef0ad3e91a4eb8b1d0d", "score": "0.6674868", "text": "def add_task(self, func, *args, **kwargs):\n task = {\"func\": func, \"args\": args, \"kwargs\": kwargs, \"status\": \"waiting\"}\n _id = len(self.tasks)\n if self.log_path is not None:\n task[\"log_path\"] = os.path.join(self.log_path, \"task-{}.log\".format(_id))\n else:\n task[\"log_path\"] = None\n self.tasks[_id] = task\n self.waiting.put(_id)", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "7e8a41f0acbc007601ed1d286ad19f92", "score": "0.6661255", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "d59874e167a2f3ce4139e8bec7001c12", "score": "0.6650333", "text": "def create(cls, task_id, **options):\n return cls(task_id, **options)", "title": "" }, { "docid": "628f596a8eec6313e305f85d5dbd48ba", "score": "0.66447455", "text": "def make_tasks(self):\n raise NotImplementedError()", "title": "" }, { "docid": "3f1a1545f7200a7b6dd4240d84c0e07e", "score": "0.6633358", "text": "def add_task(request):\n if request.method == 'POST':\n form = forms.CreateTaskForm(data=request.POST, user=request.user)\n if form.is_valid():\n name, priority, whitelist, blacklist, max_links, expire, mime_type, start_links = \\\n [form.cleaned_data[x] for x in ['name', 'priority', 'whitelist', 'blacklist',\n 'max_links', 'expire', 'mime_type', 'start_links']]\n\n Task.objects.create_task(request.user, name, priority, expire, start_links, whitelist, blacklist, max_links, mime_type)\n messages.success(request, 'New task created.')\n return redirect('list_tasks')\n else:\n form = forms.CreateTaskForm(user=request.user)\n return render(request, 'tasks/add.html', {'form': form})", "title": "" }, { "docid": "67f75e5d828a3439f285f070cdcaa127", "score": "0.6614777", "text": "async def new(self, ctx, *, content: str):\n 
author_data = await ctx.author_data\n await author_data.add_task(content)\n await ctx.send(\"Successfully added task\")", "title": "" }, { "docid": "e5bdf38f3c2c44bb9184212fc896dcff", "score": "0.66058224", "text": "def Add(self, request, now):\n\n if self._LocateTaskByName(request.task_name()) is not None:\n raise apiproxy_errors.ApplicationError(\n taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)\n if request.task_name() in self.task_name_archive:\n raise apiproxy_errors.ApplicationError(\n taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)\n\n now_sec = calendar.timegm(now.utctimetuple())\n task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()\n task.set_task_name(request.task_name())\n task.set_eta_usec(request.eta_usec())\n task.set_creation_time_usec(_SecToUsec(now_sec))\n task.set_retry_count(0)\n task.set_method(request.method())\n\n if request.has_url():\n task.set_url(request.url())\n for keyvalue in request.header_list():\n header = task.add_header()\n header.set_key(keyvalue.key())\n header.set_value(keyvalue.value())\n if request.has_description():\n task.set_description(request.description())\n if request.has_body():\n task.set_body(request.body())\n if request.has_crontimetable():\n task.mutable_crontimetable().set_schedule(\n request.crontimetable().schedule())\n task.mutable_crontimetable().set_timezone(\n request.crontimetable().timezone())\n if request.has_retry_parameters():\n task.mutable_retry_parameters().CopyFrom(request.retry_parameters())\n if request.has_tag():\n task.set_tag(request.tag())\n self._InsertTask(task)", "title": "" }, { "docid": "3bfa252ca63f54de9e9a7f827c3aaa17", "score": "0.65635407", "text": "def add_task(self, func, *args, **kargs):\n self.logger.info(\"Adding a task: %s to the Queue %s. 
Currently in Queue: %s\" % (\n func, id(self.tasks), self.get_queue_length()))\n\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "d1f963991405efacd6703f023ca1a212", "score": "0.65578514", "text": "def add_task(self, task):\n assert isinstance(task, Pipeline)\n self.tasks.append(task)\n return self", "title": "" }, { "docid": "f54a035c0249adeb133ae29752b7e0f6", "score": "0.6553048", "text": "def add_task(task, **flags):", "title": "" }, { "docid": "1763ac00d607333fc9a60ec19071dff0", "score": "0.65426207", "text": "def register_task_definition(self):\n if not self.active_task_definition:\n self.desired_task_definition.create()\n self.from_aws()", "title": "" }, { "docid": "6bdac086bf75fb56f7d7d0e728760cb0", "score": "0.6535785", "text": "def add_task(self, text=None, date=None, interval=None, done=False):\n tasks = self.get_tasks()\n tasks.append({\n 'text': text,\n 'date': date,\n 'interval': interval,\n 'done': done\n })\n self.save_tasks(tasks)", "title": "" }, { "docid": "fc74f9ea7673eb58b2432f7bde558c12", "score": "0.65277237", "text": "def insert_task(self, taskdict):\n # Decrease task_ix so we go back to the current task when this is done.\n if not self.on_temporary_task:\n self.task_ix -= 1\n self.on_temporary_task = True\n self.active_task = self._make_task(taskdict)\n self.active_task.start()\n self.log('info', \"Running intermediate task: {}\".format(taskdict['kind']))", "title": "" }, { "docid": "e46f4167c94cfa4461bc90f2f5380d86", "score": "0.65116864", "text": "def _add_task(self, task):\n if task.name in self._G:\n raise ValueError('DAGs cannot have Tasks with repeated names, '\n 'there is a Task with name \"{}\" '\n 'already'.format(task.name))\n\n if task.name is not None:\n self._G.add_node(task.name, task=task)\n else:\n raise ValueError('Tasks must have a name, got None')", "title": "" }, { "docid": "9fe72840a7bd7278d693356258956979", "score": "0.64853436", "text": "def add_task(self,task,requires=(),**kws):\n self._tasks[task.id()] = (task,kws)\n for req in requires:\n task.requires(req)\n if req.id() not in self.task_list():\n self.add_task(req,())\n return task", "title": "" }, { "docid": "130eab258e4567cbb8ff2eff51437f20", "score": "0.6478391", "text": "def add(self, task, flush=True):\n newid = self.store.save(task)\n self.tasks[newid] = task\n if flush:\n self.flush()\n return newid", "title": "" }, { "docid": "d7f839fd1aa8f47ad46291b5cd98c679", "score": "0.64593595", "text": "def create_task():\n\n\t# Get form data\n\ttask_data = {\n\t\t'user_id': g.user.id,\n\t\t'description': request.form.get('description'),\n\t\t'due_date': request.form.get('due_date'),\n\t\t'priority': request.form.get('priority')\n\t}\n\n\t# Input validation\n\t# Check if any field is null\n\trequired = []\n\tfor key, value in task_data.iteritems():\n\t\tif not value:\n\t\t\trequired.append(key)\n\n\tif required: # If any field is null\n\t\treturn error_handlers.bad_request( (\",\").join(required) + \" required\")\n\t\n\telse: # Input valid\n\n\t\tnew_task_id = tasks.create_task(task_data)\n\t\tresponse={}\n\t\t\n\t\tif new_task_id:\n\t\t\tlinks = '%s/%s' % (request.url, new_task_id)\n\t\t\tresponse['data'] = {\n\t\t\t\t\t'id' : new_task_id,\n\t\t\t 'type': 'tasks',\n\t\t\t 'attributes': task_data,\n\t\t\t 'links' : {\n\t\t\t \t'self': links\n\t\t\t }\n\t\t\t}\n\t\t\tstatus = 201\n\t\telse:\n\t\t\tstatus = 202\n\n\t\tresp = jsonify(response)\n\n\t\t# Header JSONAPI\n\t\tresp.mimetype = 'application/vnd.api+json' \n\t\tresp.location = links\n\t\tresp.status_code = 
status\n\n\t\treturn resp", "title": "" }, { "docid": "fc4f80101d46da673e780c60ef43ea08", "score": "0.64589447", "text": "def create_task():\n html_code_dict = {'Created': 201, 'Bad_Request': 400} # Maps messages to HTML codes\n\n if not request.json or 'title' not in request.json:\n abort(html_code_dict['Bad_Request'])\n task = {\n 'id': TASKS[-1]['id'] + 1,\n 'title': request.json['title'],\n 'description': request.json.get('description', \"\"),\n 'done': False\n }\n TASKS.append(task)\n\n # Write task to file\n task_nbr = task['id']\n file_path = PurePath(os.environ['TASK_PATH'], f\"T{task_nbr}.txt\")\n with open(file_path, 'w') as taskfile:\n taskfile.write(f'id = {task[\"id\"]}\\n')\n taskfile.write(f\"title = {task['title']}\\n\")\n taskfile.write(f\"Description = {task['description']}\\n\")\n taskfile.write(f\"Done = {task['done']}\\n\")\n return ff.convert_json({'task': task}), html_code_dict['Created']", "title": "" }, { "docid": "d632e74a041f66e3de2e4a21babda638", "score": "0.64474773", "text": "def create_task(request):\n json_data = json.loads(request.raw_post_data)\n if 'task' in json_data:\n task_string = json_data['task']\n categories = utils.get_categories(task_string)\n task = Task.objects.create(\n user=request.user, task=task_string,\n is_complete=False, created=datetime.today())\n task.save()\n for category in categories:\n # Add category if it doesn't exist\n obj, created = Category.objects.get_or_create(name=category)\n task.categories.add(obj)\n\n return HttpResponse(serializers.serialize('json', [task]))", "title": "" }, { "docid": "dadf07a4d1c2a30cc060dab55fbbd31f", "score": "0.64233404", "text": "def add_task(request):\n user = request.user\n description = request.POST.get('description')\n new_task = Task(owner=user, description=description)\n new_task.save()\n return HttpResponse(serializers.serialize('json', [new_task]))", "title": "" }, { "docid": "97f5085736315b3a13bd89b57c7980be", "score": "0.6419718", "text": "async def add(self, ctx, *, task: str):\n async with self.config.user(ctx.author).todos() as todos:\n todo_id = len(todos)\n todos.append([ctx.message.jump_url, task]) # using a list to support future todo edit\n await ctx.send(f\"Your todo has been added successfully with the id: **{todo_id}**\")", "title": "" }, { "docid": "e305be817b7067933828f3a37e8193f9", "score": "0.64025146", "text": "def add_startup_task(self, task: TaskType) -> None:\n self.settings.startup_tasks.append(task)", "title": "" }, { "docid": "d7a98169008cdec64bf48220127f0ea4", "score": "0.6401097", "text": "def _test_create_task(self):\n # Create a task and make sure it gets synced across\n self._sg_task, self._jira_key = self._create_task(\"Test\")\n print(\n \"Test Issue can be found at {0}/browse/{1}\".format(\n os.environ[\"SGJIRA_JIRA_SITE\"], self._jira_key\n )\n )", "title": "" }, { "docid": "42c1a36d1f8668b5f60dd43e73388a32", "score": "0.63893974", "text": "def insert_task(self, task: Task) -> None:\n with self.lock:\n self.__remove_finished_task()\n if not any(task.task_type == each.task_type for each in self.q):\n self.q.append(task)", "title": "" }, { "docid": "4b3c7880324b2745f0286a85df390edf", "score": "0.6384241", "text": "def add_task(self, task):\n if not self._valid_name(task.name):\n raise ValueError(task.name)\n\n self._tasks[task.name] = task\n\n incomplete_dependencies = set()\n\n for dependency in task.dependencies:\n if not self._valid_name(dependency) or dependency in self._failed:\n # there may already be tasks dependent on this one.\n 
self._cascade_failure(task.name)\n\n break\n\n if dependency not in self._completed:\n incomplete_dependencies.add(dependency)\n else: # task hasn't failed\n try:\n self._graph.add(task.name, incomplete_dependencies)\n except ValueError:\n self._cascade_failure(task.name)", "title": "" }, { "docid": "6eabfeffc3cf0e7460ad746c015a6feb", "score": "0.6382066", "text": "def _InsertTask(self, task):\n assert self._lock.locked()\n eta = task.eta_usec()\n name = task.task_name()\n bisect.insort_left(self._sorted_by_eta, (eta, name, task))\n if task.has_tag():\n bisect.insort_left(self._sorted_by_tag, (task.tag(), eta, name, task))\n bisect.insort_left(self._sorted_by_name, (name, task))\n self.task_name_archive.add(name)", "title": "" }, { "docid": "c53f4e2d5b8633bf95a05b13a1f1dfaf", "score": "0.6376573", "text": "def add_task(request):\n data = request.DATA\n user = request.user\n try:\n task = Task.objects.create_task(user=user, name=data['name'], priority=int(data['priority']),\n expire=data['expire'], start_links=data['start_links'],\n mime_type=data['mime_type'], whitelist=data['whitelist'],\n blacklist=data['blacklist'], max_links=int(data['max_links']))\n except KeyError as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n except (QuotaException, ValidationError) as e:\n return Response(e.message, status=status.HTTP_412_PRECONDITION_FAILED)\n except Exception as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n return Response({'id': task.id}, status=status.HTTP_201_CREATED)", "title": "" }, { "docid": "8e268a4af789e072e87619e382207785", "score": "0.6360413", "text": "def add(self, task: Coroutine) -> None:\n self.queue.append(task)", "title": "" }, { "docid": "0f96f22865a4f4afe40eea6289934f4d", "score": "0.63523626", "text": "def add_task(self, command, *args, channel=None, **kwargs):\n if channel is None:\n channel = self.default_user['im_id']\n\n if command in self.task_commands:\n self.slack.chat.post_message(\n text='Added task \"{}\"'.format(command),\n channel=channel)\n func = self.task_commands[command]\n self.tasks.append(partial(func, *args, channel=channel, **kwargs))\n else:\n self.slack.chat.post_message(\n text='Task command {} not understood'.format(command),\n channel=channel)", "title": "" }, { "docid": "15069e3d774104985a32400ea11ddf60", "score": "0.633236", "text": "def test_create_task(self):\n pass", "title": "" }, { "docid": "29dae2605a2018a65c0c621d55934406", "score": "0.63215303", "text": "def create_task(self, path, prefix, frames, ext, count, status=''):\n\n\t\t# Check if matching item already exists\n\t\tfor item in self.tasks:\n\t\t\tif item['path'] == path \\\n\t\t\tand item['prefix'] == prefix \\\n\t\t\tand item['ext'] == ext:\n\t\t\t\tif item['frames'] == frames:\n\t\t\t\t\tverbose.detail(\"Task item already exists.\")\n\t\t\t\telse:\n\t\t\t\t\tverbose.detail(\"Task item already exists but frame ranges differ. 
Updating item with new frame range.\")\n\t\t\t\t\titem['frames'] = frames\n\t\t\t\t\titem['count'] = count\n\t\t\t\treturn\n\n\t\t# Create new item\n\t\tnew_item = {\n\t\t\t'path': path, \n\t\t\t'prefix': prefix, \n\t\t\t'frames': frames, \n\t\t\t'ext': ext, \n\t\t\t'count': count, \n\t\t\t'status': status, \n\t\t}\n\t\tself.tasks.append(new_item)", "title": "" }, { "docid": "d7a7e071c1714d28f122212c8b4d39df", "score": "0.63176304", "text": "def new_task(self, context, payload):\n\n return SalesforceApi.get_task_data(payload['new'][0])", "title": "" }, { "docid": "c8f9c62a31c1204182ee26b0d6548709", "score": "0.63130516", "text": "def test_api_create_task(self):\n self.client.force_login(user=self.user)\n response = self.client.post(\n '/tasks/',\n data={'name': \"Test task\"},\n format='json',\n HTTP_AUTHORIZATION=self.token,\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "12165156fcb71dfa994c3bcc284d6137", "score": "0.6291682", "text": "def setup_task(self):\n\n # Get the schedule from the period\n schedule = schedules[self.period]\n\n # Schedule. We may have an interval schedule or a crontab schedule\n if isinstance(schedule, cm.IntervalSchedule):\n # Get or create the schedule\n schedule, created = cm.IntervalSchedule.objects.get_or_create(every=schedule.every, period=schedule.period)\n schedule.save()\n\n # Create the task\n self.task = cm.PeriodicTask.objects.create(\n name=self.task_name,\n task='retrieve_prices',\n interval=schedule,\n args=json.dumps([self.id])\n )\n elif isinstance(schedule, cm.CrontabSchedule):\n # Get or create the schedule\n schedule, created = cm.CrontabSchedule.objects.get_or_create(hour=schedule.hour, minute=schedule.minute,\n day_of_week=schedule.day_of_week,\n day_of_month=schedule.day_of_month)\n schedule.save()\n\n # Create the task\n self.task = cm.PeriodicTask.objects.create(\n name=self.task_name,\n task='retrieve_prices',\n crontab=schedule,\n args=json.dumps([self.id])\n )\n\n self.save()", "title": "" }, { "docid": "9c8991a9de4ca8126fa36ef87620426b", "score": "0.62868047", "text": "def append_task(self, taskname, task, since):\n if task.running and task.timeout > 0:\n return\n if not task.enabled:\n return\n taskinfo = TaskInfo(taskname, (SYSTEM_TASK if task.isinternal else REGULAR_TASK),\n task.get_schedule_time(since), task.priority)\n\n # for system tasks and tasks-without-timeout-set, task is appended again\n # in run_*_task() method, for tasks-with-timeout-set, a timeout task\n # with same name is appended in run_task()\n if taskinfo in self.queue:\n return\n self.queue.put(taskinfo)\n event_manager = component.get(\"EventManager\")\n event_manager.emit(mirror.event.TaskEnqueueEvent(taskname))", "title": "" }, { "docid": "e9d0e4c96d9f3f7b9a28a006c0314557", "score": "0.62827045", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "e9d0e4c96d9f3f7b9a28a006c0314557", "score": "0.62827045", "text": "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "title": "" }, { "docid": "3e62f066f01b0d9d2aa0da4a11cf62ed", "score": "0.62584263", "text": "def add_task(token, description, complete):\n new_task = {\"description\": description, \"complete\": complete}\n\n add_request = requests.post(SERVER_URL + \"/task\", headers=_gen_oauth_header(token), json=new_task)\n\n return add_request.json()", "title": "" }, { "docid": "2f535119dad77b34f32d94e9b7131b14", "score": "0.6257844", "text": "def add_task(self, 
task, priority=0):\n if len(self.pq) == self.length:\n self.pop_task()\n count = next(self.counter)\n entry = [- priority, count, task]\n self.entry_finder[task] = entry\n heapq.heappush(self.pq, entry)", "title": "" }, { "docid": "c0fde43fc5afc74dae00f86baf316e70", "score": "0.6252775", "text": "def create_task(self, e_id, task_type, floor=None, floor_from=None, \n\t\tfloor_to=None, count=None, specific_index=None):\n\t\t# two formats of task keys\n\t\tif task_type in ['hold', 'open door', 'close door']:\n\t\t\t# stationary tasks\n\t\t\tkey = (task_type, floor, round(self.env.now, 1))\n\t\telse:\n\t\t\t# move tasks\n\t\t\tif floor_from == floor_to:\n\t\t\t\traise ValueError(\"Floor from and to should not be same\")\n\n\t\t\tkey = (task_type, floor_from, floor_to, round(self.env.now, 1))\n\n\t\t# add key to list of task keys\n\t\tif specific_index is None:\n\t\t\tself.elevators[e_id].task_keys.append(key)\n\t\t\n\t\telse:\n\t\t\tself.elevators[e_id].task_keys.insert(specific_index, key)\n\n\t\t# add key, Task Object pair to task dictionary\n\t\ttry:\n\t\t\tself.elevators[e_id].tasks[key] = \\\n\t\t\t\tTask(self.elevators[e_id], task_type, floor, \n\t\t\t\t\tfloor_from, floor_to, count)\n\t\texcept ValueError as err:\n\t\t\tprint(f'Error occured creating task {task_type},'+\\\n\t\t\tf'floor {floor}, floor_from {floor_from} floor_to {floor_to}')", "title": "" }, { "docid": "8b12637e08e52f9a8a35a78eff8b86fd", "score": "0.6252192", "text": "def _create_task(self, coroutine: Coroutine[Any, Any, None]) -> None:\n task = self.loop.create_task(coroutine)\n task.add_done_callback(lambda f: self._active_events.remove(f))\n self._active_events.add(task)", "title": "" }, { "docid": "48f22e168957ecbdd7152a7d315014d6", "score": "0.624356", "text": "def create_task(self, task_struct: dict) -> str:\n return requests.post(self._url_prefix + 'task/', json=task_struct).json()['payload']['task_id']", "title": "" }, { "docid": "40ab0270c40c1ec0c2ed39c2cc7dbb6d", "score": "0.62312853", "text": "def append_tasks(self):\n now = time.time()\n for taskname in self.tasks:\n task = self.tasks[taskname]\n self.append_task(taskname, task, since = now)", "title": "" }, { "docid": "de9de657951a8f16fcc2bcb435e0ea99", "score": "0.6217577", "text": "def insertNewTask(task_form):\n counter = 0\n task_name = task_form.get('task_name')\n for trans in task_form.get('feat_sel'):\n for estim in task_form.get('estimator'):\n task_config = {}\n task_config['proj_name'] = task_form['proj_name']\n task_config['train_data'] = task_form['train_data']\n task_config['enable_test'] = task_form['enable_test']\n task_config['test_data'] = task_form['test_data']\n task_config['label'] = task_form['label']\n task_config['feat_sel'] = trans\n task_config['estimator'] = estim\n task_config['cv_type'] = task_form['cv_type']\n task_form['task_config'] = task_config\n \n task_form['task_id'] = 'TASK' + time.strftime('%y%m%d%H%M%S') + '{:02d}'.format(counter)\n\n model_abbrs = {\n 'Analysis of Variance': 'anova',\n 'Principal Component Analysis': 'pca',\n 'Recursive Feature Elimination': 'rfe',\n 'None': 'none',\n 'Support Vector Machine': 'svm',\n 'Random Forest': 'rf',\n 'Linear Discriminative Analysis': 'lda',\n 'Logistic Regression': 'lr',\n 'K Nearest Neighbor': 'knn',\n 'Support Vector Regression': 'svr',\n 'Elastic Net': 'en',\n 'Ordinary Least Square': 'ols',\n 'Lasso Regression': 'lasso',\n 'Ridge Regression': 'ridge'\n }\n if task_name:\n task_form['task_name'] = task_name + '_' + model_abbrs[trans] + '_' + model_abbrs[estim]\n else:\n 
task_form['task_name'] = task_form['task_id'] + '_' + model_abbrs[trans] + '_' + model_abbrs[estim]\n\n taskDao = TaskDao()\n taskDao.insertNewTask(task_form)\n\n new_ml_celery_task.delay(\n taskid=task_form['task_id'],\n projid=task_form['proj_id'],\n tasktype=task_form['task_type'],\n traindata=task_config['train_data'],\n enabletest=task_config['enable_test'],\n testdata=task_config['test_data'],\n label=task_config['label'],\n featsel=task_config['feat_sel'],\n estimator=task_config['estimator'],\n cv=task_config['cv_type']\n )\n\n counter += 1\n \n return", "title": "" }, { "docid": "d994cc70232d8358946817de78f0fba8", "score": "0.62091035", "text": "def task(self, f):\n self._tasks.add(f)\n return f", "title": "" }, { "docid": "93fb2f6a6dba7ded6a73aa7b5ef8d4d1", "score": "0.6207871", "text": "def create_task(self, task):\n transformer = TaskRabbitTaskTransformer()\n transformer.set_task(task)\n raw_task_dict = transformer.get_raw_task()\n\n # TODO: do this check in the base class\n raw_task_dict.get(TASK_RABBIT_FIELD.TASK).pop(FIELD.ID)\n\n new_raw_task_dict = self._post(\n TASK_RABBIT.PROTOCOL,\n TASK_RABBIT.DOMAIN,\n TASK_RABBIT.TASKS_PATH,\n raw_task_dict)\n\n # now update private description with TR's task id\n tr_id = new_raw_task_dict.get(FIELD.ID)\n path = unicode(\"{}/{}\").format(TASK_RABBIT.TASKS_PATH, tr_id)\n blurb = TaskRabbitTaskTransformer.get_jackalope_blurb(\n TASK_RABBIT.VENDOR_IN_HTML,\n tr_id)\n private_description = unicode(\"{}\\n{}\").format(\n new_raw_task_dict.get(FIELD.PRIVATE_DESCRIPTION),\n blurb)\n\n updated_fields_dict = {FIELD.PRIVATE_DESCRIPTION: private_description}\n new_raw_task_dict = self._put(\n TASK_RABBIT.PROTOCOL,\n TASK_RABBIT.DOMAIN,\n path,\n updated_fields_dict)\n\n new_transformer = TaskRabbitTaskTransformer()\n new_transformer.set_raw_task(new_raw_task_dict)\n\n return self._ready_spec(new_transformer.get_task())", "title": "" }, { "docid": "e272eb6b3442fca77cb31223de14ce91", "score": "0.6204585", "text": "def addTaskHolder(self, taskHolder):\n assert (isinstance(taskHolder, TaskHolder)), \\\n \"Invalid task holder object\"\n\n self.__taskHolders.append(taskHolder)", "title": "" } ]
8f480a818118d060dfba28738950ec74
Softens an image using one of several filters.
[ { "docid": "5d3cfea736e76f8320ee6dfb7e5d4a77", "score": "0.0", "text": "def __blur(src, type, radius):\n if(type is BlurType.Box_Blur):\n ksize = int(2 * round(radius) + 1)\n print(src)\n print(ksize)\n return cv2.blur(src, (ksize, ksize))\n elif(type is BlurType.Gaussian_Blur):\n ksize = int(6 * round(radius) + 1)\n return cv2.GaussianBlur(src, (ksize, ksize), round(radius))\n elif(type is BlurType.Median_Filter):\n ksize = int(2 * round(radius) + 1)\n return cv2.medianBlur(src, ksize)\n else:\n return cv2.bilateralFilter(src, -1, round(radius), round(radius))", "title": "" } ]
[ { "docid": "87599eb62b460574ed33b965f0c6fdc1", "score": "0.6943461", "text": "def custom_filter(image):\n image = image.filter(ImageFilter.Kernel(\n size=(3, 3), kernel=(1, 0, 1, 0, 0, 0, 1, 0, 1)))\n return image", "title": "" }, { "docid": "da7ead8256ef484dbcd90737a1f125bd", "score": "0.6538806", "text": "def filter(image, kernel, output=None): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "49c3529ed509b83ba555ef9622222498", "score": "0.6448564", "text": "def reduce_image(image):\r\n img1 = np.copy(image)\r\n ddepth = -1\r\n k = [[ 0.0625, 0.25, 0.375, 0.25, 0.0625]]\r\n reducing_kernel = np.dot(np.asarray(k).T,np.asarray(k))\r\n out = cv2.filter2D(img1,ddepth,reducing_kernel)\r\n even = out[[slice(None, None, 2) for _ in range(out.ndim)]]\r\n return even\r\n pass", "title": "" }, { "docid": "3ac10ce681692c2e07122430a1a7f86d", "score": "0.64125204", "text": "def highPassFilter(self, image: numpy.uint8) -> None:\n image = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2GRAY)\n cv2.imshow('test', image)\n kernal_3x3 = numpy.array([[-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]])\n sharpenImgUsingKernal = ndimage.convolve(input=image, weights=kernal_3x3);\n cv2.imshow(\"sharpened image using kernal\", sharpenImgUsingKernal);\n\n blurredImage = cv2.GaussianBlur(src=image, ksize=(11, 11), sigmaX=0)\n sharpnedImage = image - blurredImage\n cv2.imshow('sharpened using image reduction', sharpnedImage)\n return None", "title": "" }, { "docid": "716adda5aeb23a1a719da922d9bc1cf3", "score": "0.62992173", "text": "def filter_image(image, image_filter):\n image = image.filter(ImageFilter.UnsharpMask())\n\n if image_filter == 'max':\n image = image.filter(ImageFilter.MaxFilter())\n elif image_filter == 'median':\n image = image.filter(ImageFilter.MedianFilter())\n elif image_filter == 'custom-max':\n image = custom_filter(image)\n image = image.filter(ImageFilter.MaxFilter())\n elif image_filter == 'custom-median':\n image = custom_filter(image)\n image = image.filter(ImageFilter.MedianFilter())\n\n return image", "title": "" }, { "docid": "de98773d81da3a81a706e7141f2e322b", "score": "0.6255168", "text": "def whitening(image):\n K = whitening_filt(size=image.shape)\n white = FTfilter(image, K)\n # normalizing energy\n # white /= white.max()# std() # np.sqrt(sum(I**2))\n return white", "title": "" }, { "docid": "a0436e3cef80191b5b869ba5719c37e0", "score": "0.6242009", "text": "def my_imfilter(image, kernel):\n ##################\n # Your code here #\n dimensionProduct = kernel.shape[0] * kernel.shape[1]\n\n # Check if any of the dimensions is even\n if (dimensionProduct % 2) == 0:\n raise Exception('All the dimensions must be odd!')\n\n if len(image.shape) == 2:\n filteredImage = convol2d(image, kernel)\n else:\n trimShape = np.array([image.shape[0], image.shape[1]])\n\n redImage = np.zeros(trimShape)\n blueImage = np.zeros(trimShape)\n greenImage = np.zeros(trimShape)\n\n for i in range(0, image.shape[0]):\n for j in range(0, image.shape[1]):\n redImage[i, j] = image[i, j, 0]\n greenImage[i, j] = image[i, j, 1]\n blueImage[i, j] = image[i, j, 2]\n\n filteredRedImage = convol2d(redImage, kernel)\n filteredGreenImage = convol2d(greenImage, kernel)\n filteredBlueImage = convol2d(blueImage, kernel)\n\n filteredImage = np.zeros(image.shape)\n \n for i in range(0, image.shape[0]):\n for j in range(0, image.shape[1]):\n filteredImage[i, j] = [filteredRedImage[i, j], filteredGreenImage[i, j], filteredBlueImage[i, j]]\n\n return filteredImage\n ################", "title": "" }, { 
"docid": "64f32caa3ad236d25153441de7c718e8", "score": "0.6155304", "text": "def low_pass_filter(self, sigma=2):\n self.img = _gaussian(self.img, sigma=sigma)", "title": "" }, { "docid": "38875368e8b6d2419a5bc10f258a8603", "score": "0.6131724", "text": "def general_enhancement(img, method_type):\n if method_type[0] == \"guidedFilter\":\n # Guided Filter : Edge preserving filtering\n if len(method_type) == 3:\n img_filtered = cv2.guidedFilter(img, method_type[1], method_type[2])\n else:\n radius = max(5, 0.3*int(len(img)))\n # eps**2 is similar to sigmaColor in bilateralFilter\n eps = 10\n img_filtered = cv2.guidedFilter(img, radius, eps)\n elif method_type[0] == \"bilateralFilter\":\n # bilateralFilter : Edge preserving filtering\n if len(method_type) == 4:\n img_filtered = cv2.guidedFilter(img, method_type[1], method_type[2], method_type[3])\n else:\n \"\"\" \n Filter size: Large filters (d > 5) are very slow, so it is recommended to use d = 5 for real-time applications, \n and perhaps d = 9 for offline applications that need heavy noise filtering.\n \n Sigma values: For simplicity, you can set the 2 sigma values to be the same. \n If they are small (< 10), the filter will not have much effect, \n whereas if they are large (> 150), they will have a very strong effect, making the image look “cartoonish”.\n \"\"\"\n # The kernel size. This is the neighborhood where the local variance will be calculated,\n # and where pixels will contribute (in a weighted manner).\n d = 30\n # Filter sigma in the color space. A larger value of the parameter means that farther colors within\n # the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger\n # areas of semi-equal color\n sigmaColor = 50\n # Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels\n # will influence each other as long as their colors are close enough (see sigmaColor ).\n # When d>0 , it specifies the neighborhood size regardless of sigmaSpace .\n # Otherwise, d is proportional to sigmaSpace .\n sigmaSpace = 0\n \n img_filtered = cv2.bilateralFilter(img, d, sigmaColor, sigmaSpace)", "title": "" }, { "docid": "ed7a2699464153134be40c6d81ad1274", "score": "0.6056553", "text": "def ep_filter(img, filter_type, sigma_s, sigma_r, iterations):\n\n # Normalize the image\n img_norm = img/255\n\n # Get the transformed signal for use in the filters\n # In the RF filter, we do not need to integrate the domain transform because\n # it uses the derivatives directly\n if filter_type == 'RF':\n [hor_differences, ver_differences] = domain_transform(\n img_norm, sigma_s, sigma_r, False)\n else:\n [hor_transform, ver_transform] = domain_transform(\n img_norm, sigma_s, sigma_r, True)\n\n # Initialize the H sigma to be used next\n sigma_h = sigma_s\n\n # Initialize the output image\n img_out = img_norm\n\n progress = iterations * 2\n step = 100 / progress\n elapsed = step\n\n # Aplly the choosen filter\n for i in range(iterations):\n # Calculate the current sigma H using equation 14 of the paper\n cur_sigma_h = sigma_h * \\\n math.sqrt(3) * (2**(iterations-(i+1))) / \\\n math.sqrt(4**iterations - 1)\n\n # Apply the filter\n if filter_type == 'RF':\n img_out = ft.recursive_filtering(\n img_out, hor_differences, cur_sigma_h)\n elif filter_type == 'IC':\n img_out = ft.interpolated_convolution(\n img_out, hor_transform, cur_sigma_h)\n elif filter_type == 'NC':\n img_out = ft.normalized_convolution(\n img_out, hor_transform, cur_sigma_h)\n else:\n raise ValueError(\"Unknown filter 
specified\")\n\n # Transpose the imagem so we can apply the filter vertically\n img_out = image_transpose(img_out)\n\n progress -= 1\n print(\"%.0f\" % elapsed, end=\"%...\")\n elapsed += step\n sys.stdout.flush()\n\n if filter_type == 'RF':\n img_out = ft.recursive_filtering(\n img_out, np.transpose(ver_differences), cur_sigma_h)\n elif filter_type == 'IC':\n img_out = ft.interpolated_convolution(\n img_out, np.transpose(ver_transform), cur_sigma_h)\n else:\n img_out = ft.normalized_convolution(\n img_out, np.transpose(ver_transform), cur_sigma_h)\n\n # Transpose it back\n img_out = image_transpose(img_out)\n\n progress -= 1\n print(\"%.0f\" % elapsed, end=\"%...\")\n elapsed += step\n sys.stdout.flush()\n\n print()\n return img_out", "title": "" }, { "docid": "b73bca4c3727973816c92f5bce1dac87", "score": "0.6048507", "text": "def x_snow_filter(image, snow_fract, image_filter):\n x = np.array(image).copy()\n x = add_snow(x, snow_fract)\n x = Image.fromarray(x)\n x = filter_image(x, image_filter)\n return x", "title": "" }, { "docid": "2d1204a958a3651a2b7686f1207b0989", "score": "0.60377574", "text": "def choose_adaptive_filter(self, image, filter_number, window_size, noise_variance):\n\n if filter_number == 32:\n filtered_image = self.adaptive_median_filter(image, window_size, noise_variance)\n else:\n filtered_image = self.adatptive_local_filter(image, window_size, noise_variance)\n\n return filtered_image", "title": "" }, { "docid": "51be87a7b1c48f814fb88e22743796ea", "score": "0.6019857", "text": "def apply_blur(img):\n img = tf.reshape(img,[1,256,256,3])\n blur = _gaussian_kernel(3, 2, 3, img.dtype)\n img = tf.nn.depthwise_conv2d(img, blur, [1,1,1,1], 'SAME')\n return tf.reshape(img,[256,256,3])", "title": "" }, { "docid": "6e934f10803c60294b27a7c4218002fd", "score": "0.6018687", "text": "def preprocess_img(img: np.array) -> np.array:\n #img = cv2.bilateralFilter(img, 5, 50, 50) TODO: change parameters.\n return img", "title": "" }, { "docid": "006aeb39d7df22742a0a382669ac9111", "score": "0.60107744", "text": "def reduce_image(image):\n out = None\n kernel = custom_kernel(0.375)\n outimage = cv2.filter2D(image,-1,kernel)\n #outimage = scipy.signal.convolve2d(image,kernel,'same')\n out = outimage[::2,::2]\n return out\n\n #raise NotImplementedError", "title": "" }, { "docid": "c2a47ff921107a6ff24f45e94bb88114", "score": "0.6004069", "text": "def filtering(self):\r\n \"///////////////////////////////// 1\"\r\n R, C = self.image.shape\r\n sizenum = self.filter_size\r\n sizenum = int(.5 * sizenum - .5)\r\n pad_image = np.zeros((R + (2*sizenum), C + (2*sizenum)))\r\n pad_newimage = np.zeros((R + (2*sizenum), C + (2*sizenum)))\r\n\r\n iimag = np.zeros((R, C))\r\n Rp , Cp = pad_image.shape\r\n #print(self.image.shape, \" \", pad_image.shape, \" \", sizenum)\r\n kernel = []\r\n for x in range(R):\r\n for y in range(C):\r\n pad_image[x+sizenum][y+sizenum] = self.image[x][y]\r\n \"///////////////////////////////// 2\"\r\n for x in range(sizenum+1,Rp - sizenum):\r\n for y in range(sizenum+1,Cp - sizenum):\r\n kernel.clear()\r\n #print(x, y)\r\n for xk in range(-sizenum,sizenum+1):\r\n for yk in range(-sizenum,sizenum+1):\r\n kernel.append(pad_image[x+xk][y+yk])\r\n \"\"\" used when i thought size was fixed\r\n kernel.append(pad_image[x-1][y-1])\r\n kernel.append(pad_image[x-1][y])\r\n kernel.append(pad_image[x-1][y+1])\r\n kernel.append(pad_image[x][y-1])\r\n kernel.append(pad_image[x][y])\r\n kernel.append(pad_image[x][y+1])\r\n kernel.append(pad_image[x+1][y-1])\r\n 
kernel.append(pad_image[x+1][y])\r\n kernel.append(pad_image[x+1][y+1])\r\n \"\"\"\r\n # trail ############################################\r\n \"///////////////////////////////// 3\"\r\n if self.filter_name == 'alpha_trimmed':\r\n Fvalue = self.filter(kernel, self.alpha_d)\r\n elif self.filter_name == 'contra_harmonic':\r\n Fvalue = self.filter(kernel, self.order)\r\n else:\r\n Fvalue = self.filter(kernel)\r\n \"///////////////////////////////// 4\"\r\n pad_newimage[x][y] = Fvalue\r\n \"///////////////////////////////// 5\"\r\n\r\n for x1 in range(R):\r\n for y1 in range(C):\r\n iimag[x1][y1] = pad_newimage[x1+sizenum][y1+sizenum]\r\n return iimag", "title": "" }, { "docid": "03fce7f720fdec4d8a75f4836f754e4d", "score": "0.5980035", "text": "def apply_smoothing(image, kernel_size=15):\r\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)", "title": "" }, { "docid": "fc996f1f3327133c0d34daca0f2dc9ab", "score": "0.5936044", "text": "def filter_im(self, *args):\n logger.debug(\"Interface.filter_image\")\n logger.debug(\"do_filter, default=%s\" % self.default_filter)\n if self.default_filter == \"ContrastMask\":\n self.filter_ContrastMask()\n elif self.default_filter == \"AutoWB\":\n self.filter_AutoWB()\n else:\n logger.error(\"Unknown filter: %s\", config.SelectedFilter)", "title": "" }, { "docid": "ffc36d1f8b577fe5a1833f2a23acca95", "score": "0.5932335", "text": "def add_filter(img, filter_type, value=10):\n if filter_type == \"black_and_white\":\n # Convert to black and white scale\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n (_, filtered_img) = cv2.threshold(grey_img,\n 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n elif filter_type == \"gaussian\":\n filtered_img = cv2.GaussianBlur(img, (5, 5), 0)\n else:\n raise ValueError(\"Unknown filter: {}!\".format(filter_type))\n \"\"\"\n elif filter_type == \"box\":\n filtered_img = cv2.blur(img, (5,5))\n elif filter_type == \"median\":\n filtered_img = cv2.medianBlur(img,5)\n elif filter_type == \"bilateral\":\n filtered_img = cv2.bilateralFilter(img,9,75,75)\n \"\"\"\n return filtered_img", "title": "" }, { "docid": "b8a57a6d2402d441c424f5d92f488b60", "score": "0.5931971", "text": "def apply_filter(request):\n\t\n\timage_id = request.POST.get('image_id')\n\timage_filter = request.POST.get('image_filter')\n\timage = Image.objects.get(id=image_id)\n\t\n\tfiltered_image_url = settings.MEDIA_ROOT + '/' + eval(\"image.\" + image_filter + \".url\")\n\tpil_image = PILImage.open(filtered_image_url)\n\tpil_image.save(image.original_image.path)\n\t\n\t# Apply filter to thumbnail\n\tthumbnail_url = settings.MEDIA_ROOT + '/' + image.thumbnail.url\n\tif image_filter == 'original_filter':\n\t\toriginal_filter_thumbnail_url = settings.MEDIA_ROOT + '/' + image.original_filter_thumbnail.url\n\t\tthumbnail_image = PILImage.open(original_filter_thumbnail_url)\n\t\tthumbnail_image.save(thumbnail_url)\n\telse:\n\t\tthumbnail_image = PILImage.open(thumbnail_url)\n\t\tthumbnail_image = eval('do_' + image_filter + '(thumbnail_image)')\n\t\tthumbnail_image.save(thumbnail_url)\n\t\n\treturn HttpResponse('ok')", "title": "" }, { "docid": "3e7ad0ab239ecdf4367c2080fd91dc8d", "score": "0.59239465", "text": "def apply_filter(file_path, filter_name):\n i = Image.open(file_path)\n i.thumbnail((500, 500))\n i = i.filter(filter_types_dict.get(filter_name))\n i.save(file_path)", "title": "" }, { "docid": "cbba7a3ea4f39e50a815e70125e33681", "score": "0.5912495", "text": "def denoise_image(self,img):\n if(self.median_filter_width>0):\n if 
self.median_filter_width % 2 == 0: self.median_filter_width += 1 #guard agains even integers!\n #return cv2.blur(img,self.median_filter_width)\n return cv2.blur(img,(self.median_filter_width,self.median_filter_width))\n else:\n return img", "title": "" }, { "docid": "03b83d8dca523bc565a515aa6f5b3b03", "score": "0.59089524", "text": "def reduce_image(image, filter_vec):\r\n blur_im = convolve(convolve(image, filter_vec), filter_vec.T)\r\n return blur_im[::2, ::2]", "title": "" }, { "docid": "820e347674078d79370fec1a699906eb", "score": "0.59061927", "text": "def image_processor(input_image):\n blur = cv2.GaussianBlur(input_image, (3, 3), 0)\n\n denoised = cv2.fastNlMeansDenoising(blur, None, 15, 7, 49)\n denoised = cv2.fastNlMeansDenoising(denoised, None, 15, 7, 21)\n denoised = cv2.fastNlMeansDenoising(denoised, None, 15, 7, 21)\n \n kernel_sharpen = np.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]]) \n sharpened = cv2.filter2D(denoised, -1, kernel_sharpen)\n \n #_, thr = cv2.threshold(sharpened, 150, 255, 0)\n img_clip = np.where(sharpened < 150, 0, sharpened)\n \n return img_clip", "title": "" }, { "docid": "a546633c452afe68628a1382db0dc30d", "score": "0.5891889", "text": "def low_pass(img):\r\n if not DO_LOW_PASS:\r\n return img\r\n img = PIL_to_cv2(img)\r\n img = cv.filter2D(img, -1, lpf)\r\n\r\n img = cv2_to_PIL(img)\r\n return img", "title": "" }, { "docid": "072c7ca8a57f32f561f3f4a3756cdd18", "score": "0.5854998", "text": "def preprocess(img):\r\n img = crop(img)\r\n img = low_pass(img)\r\n img = shrink(img)\r\n return img", "title": "" }, { "docid": "19a2ff838ec99a5b66ba0da27dbac147", "score": "0.58421266", "text": "def prewitt(image,gray_img,alpha = 1, beta=0.6):\r\n h, w = gray_img.shape\r\n\t\t\r\n\t# define filters\r\n horizontal = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]) # s2\r\n vertical = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]) # s1\r\n\t\t\r\n\t# define images with 0s\r\n newgradientImage = np.zeros((h, w))\r\n\t\t\r\n\t# offset by 1\r\n for i in range(1, h - 1):\r\n for j in range(1, w - 1):\r\n horizontalGrad = (horizontal[0, 0] * gray_img[i - 1, j - 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[0, 1] * gray_img[i - 1, j]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[0, 2] * gray_img[i - 1, j + 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[1, 0] * gray_img[i, j - 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[1, 1] * gray_img[i, j]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[1, 2] * gray_img[i, j + 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[2, 0] * gray_img[i + 1, j - 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[2, 1] * gray_img[i + 1, j]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t (horizontal[2, 2] * gray_img[i + 1, j + 1])\r\n verticalGrad = (vertical[0, 0] * gray_img[i - 1, j - 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[0, 1] * gray_img[i - 1, j]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[0, 2] * gray_img[i - 1, j + 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[1, 0] * gray_img[i, j - 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[1, 1] * gray_img[i, j]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[1, 2] * gray_img[i, j + 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[2, 0] * gray_img[i + 1, j - 1]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[2, 1] * gray_img[i + 1, j]) + \\\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t (vertical[2, 2] * gray_img[i + 1, j + 1])\r\n\t\t\r\n # Edge Magnitude\r\n mag = np.sqrt(pow(horizontalGrad, 2.0) + pow(verticalGrad, 2.0))\r\n newgradientImage[i - 1, j - 1] = 
mag\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\r\n # Creating Prewitt enhanced RGB image\r\n image1 = pil.fromarray(image)\r\n newgradientImage1 = pil.fromarray(newgradientImage)\r\n pil_overlay=pil.blend(image1.convert('RGBA'),newgradientImage1.convert('RGBA'),0.5)\r\n return newgradientImage,np.float32(pil_overlay)", "title": "" }, { "docid": "150a1dfba5ffeed015efa5959b03ceda", "score": "0.58373785", "text": "def apply_filter_channel(image: np.array, kernel: np.array) -> np.array:\n kernel = np.flipud(np.fliplr(kernel))\n output = np.zeros_like(image)\n offset = kernel.shape[0] // 2\n image_padded = np.zeros((image.shape[0] + 2 * offset, image.shape[1] + 2 * offset))\n image_padded[offset:-offset, offset:-offset] = image\n for x in range(image.shape[1]):\n for y in range(image.shape[0]):\n result = int((kernel * image_padded[y:y + kernel.shape[0], x:x + kernel.shape[0]]).sum()\n if result < 0:\n result = 0\n elif result > 255:\n result = 255\n output[y][x] = result", "title": "" }, { "docid": "34d1a33441b0189bab1141b234d540d1", "score": "0.5836979", "text": "def filtering(self):\n\n # np.set_printoptions(threshold=np.nan)\n\n # 1 compute the fft of the image\n dft = np.fft.fft2(self.image)\n\n # 2. shift the fft to center the low frequencies\n shiftedDFT = np.fft.fftshift(dft)\n\n # 3. get the mask\n filterName = self.filter.__name__\n\n if filterName == \"get_butterworth_low_pass_filter\" or filterName == \"get_butterworth_high_pass_filter\":\n mask = self.filter(self.image.shape, self.cutoff, self.order)\n else:\n mask = self.filter(self.image.shape, self.cutoff)\n\n # 4 Convolution theorem)\n row, col = self.image.shape\n filterShiftedDFT = np.zeros(self.image.shape, dtype=np.complex)\n for u in range(row):\n for v in range(col):\n filterShiftedDFT[u, v] = mask[u, v] * shiftedDFT[u, v]\n\n # 5 compute the inverse shift\n filterImageDFT = np.fft.ifftshift(filterShiftedDFT)\n\n # 6 compute the inverse fourier transform\n filteredImage = np.fft.ifft2(filterImageDFT)\n\n # 7 magnitude\n fcsShiftedDFT = self.processDFT(shiftedDFT)\n fcsFilterShiftedDFT = self.processDFT(filterShiftedDFT)\n\n\n #im = Image.fromarray(filterShiftedDFT.real)\n #im.show()\n\n return [filteredImage.real, fcsShiftedDFT.real, fcsFilterShiftedDFT.real]", "title": "" }, { "docid": "9bb9df5754b9869a7428037f91ca299b", "score": "0.58056724", "text": "def sepia_filter(filters_input:list)->list:\n sepia_output = []\n for single_array in filters_input:\n # Apply a transformation where we multiply each pixel\n # bgr with the matrix transformation for the sepia\n lmap = np.matrix([[ 00.272, 0.534, 0.131],\n [ 0.349, 0.686, 0.168],\n [ 0.393, 0.769, 0.189]])\n filt = np.array([x * lmap.T for x in single_array] )\n # Check which entries have a value greather than 255 and set it to 255\n filt[np.where(filt>255)] = 255\n sepia_output.append(filt)\n return sepia_output", "title": "" }, { "docid": "d9fde5310b03ba4f0438825cd1467950", "score": "0.5803839", "text": "def expand_image(image):\r\n img1 = np.copy(image)\r\n x = 2*img1.shape[0]\r\n y = 2*img1.shape[1]\r\n temp_img = np.zeros((x,y))\r\n temp_img[[slice(None, None, 2) for _ in range(temp_img.ndim)]] = img1\r\n ddepth = -1\r\n k = [[ 0.125, 0.5, 0.75, 0.5, 0.125]]\r\n my_kernel = np.dot(np.asarray(k).T,np.asarray(k))\r\n out = cv2.filter2D(temp_img,ddepth,my_kernel)\r\n return out\r\n #even = out[[slice(None, None, 2) for _ in range(out.ndim)]]\r\n\r\n pass", "title": "" }, { "docid": "ddaba90ce4ce1b5d81ec5714b9d905e6", "score": "0.57910293", "text": "def filter(self, filtername, x=0, 
y=0, width=0, height=0, show_filtered_img=False):\n\n \"\"\"\n 'inverse',\n 'inverse_pix',\n 'blue',\n 'red',\n 'green',\n 'blacknwhite'\n\n #################\n list of filters\n \"\"\"\n start_time = time.time()\n self.x = x\n self.y = y\n self.width = width if width != 0 else self.size[0]\n self.height = height if height != 0 else self.size[1]\n self.filtername = filtername\n print(self.size[0]*self.size[1], \"Pixels\\n\")\n # self.im.pixels[100,100] = (255,255,255)\n\n # massive list and maps\n list_start_time = time.time()\n\n print(\"Processing\")\n list(map(lambda _filter: self.im.putdata([(self.filterdict[_filter](self.pix[x_cord, y_cord]))for y_cord in range(round(self.y), min(\n self.size[1], round(self.y+self.height))) for x_cord in range(round(self.x), min(self.size[0], round(self.x+self.width)))]), self.filtername))\n \n # for _filter in self.filtername:\n # _filter_time = time.time()\n # print(self.filterdict[_filter].__name__, \"Processing\")\n\n # pixels_arr = [(self.filterdict[_filter](self.pix[x_cord, y_cord]))for y_cord in range(round(self.y), min(\n # self.size[1], round(self.y+self.height))) for x_cord in range(round(self.x), min(self.size[0], round(self.x+self.width)))]\n \n # self.im.putdata([(self.filterdict[_filter](self.pix[x_cord, y_cord]))for y_cord in range(round(self.y), min(\n # self.size[1], round(self.y+self.height))) for x_cord in range(round(self.x), min(self.size[0], round(self.x+self.width)))])\n\n print(\"list comprehension finished in\", time.time()-list_start_time )\n\n self.save()\n if show_filtered_img == True:\n self.show()\n print(str(filtername), \"Complete in\", time.time()-start_time)\n # print(self.imag.size)", "title": "" }, { "docid": "e50b4a8ee007a28d159cc5855f2dccda", "score": "0.57572097", "text": "def forward(self, x, filters):\n n, filter_prod, upsampling_square, h, w = filters.size()\n kh, kw = self.filter_size\n expanded_input = F.conv2d(x, self.expansion_filter, padding=(kh // 2, kw // 2), groups=3)\n expanded_input = expanded_input.view(n, 3, filter_prod, h, w).permute(0, 3, 4, 1, 2)\n filters = filters.permute(0, 3, 4, 1, 2)\n out = torch.matmul(expanded_input, filters)\n return out.permute(0, 3, 4, 1, 2).view(n, 3 * upsampling_square, h, w)", "title": "" }, { "docid": "a3d79ce1b6ab24380b09d8aad7480f10", "score": "0.57439154", "text": "def preprocess_fn(image: np.ndarray):\n if image.ndim == 3 and image.shape[2] == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = np.squeeze(image)\n image = cv2.resize(image, (64, 64))\n image = image.astype(np.float)\n image = (image - 127.0) / 128\n image = np.expand_dims(image, axis=-1)\n return image", "title": "" }, { "docid": "174236aeab7e8dbc8c5f8b6140237271", "score": "0.5737519", "text": "def convolve_image(img, filters, plot=False):\n result = list()\n for f in filters:\n m = sg.convolve(img, f, \"same\")\n if plot:\n plt.imshow(m)\n result.extend(m)\n return np.array(result)", "title": "" }, { "docid": "5ce9c81320a12610796f92a8302566ca", "score": "0.5735412", "text": "def forward(self, x, filters):\n n, filter_prod, upsampling_square, h, w = filters.size()\n kh, kw = self.filter_size\n expanded_input = F.conv2d(\n x, self.expansion_filter.to(x), padding=(kh // 2, kw // 2), groups=3) # (n, 3*filter_prod, h, w)\n expanded_input = expanded_input.view(n, 3, filter_prod, h, w).permute(0, 3, 4, 1,\n 2) # (n, h, w, 3, filter_prod)\n filters = filters.permute(0, 3, 4, 1, 2) # (n, h, w, filter_prod, upsampling_square]\n out = torch.matmul(expanded_input, filters) # (n, h, w, 3, 
upsampling_square)\n return out.permute(0, 3, 4, 1, 2).view(n, 3 * upsampling_square, h, w)", "title": "" }, { "docid": "5f37a4b0507f7d43493cae2ac258448c", "score": "0.5729279", "text": "def apply(self, src, dst):\n cv2.filter2D(src, -1, self._kernel, dst)", "title": "" }, { "docid": "260a3136235b1229d5fa77a3418e4103", "score": "0.5703087", "text": "def inception(x, filters):\n if len(filters) != 4:\n raise ValueError('filters should have 4 components')\n if len(filters[1]) != 2 or len(filters[2]) != 2:\n raise ValueError('incorrect spec of filters')\n\n branch1x1 = conv2d_bn(x, filters[0], (1, 1))\n\n branch3x3 = conv2d_bn(x, filters[1][0], (1, 1))\n branch3x3 = conv2d_bn(branch3x3, filters[1][1], (3, 3))\n\n # branch5x5 is implemented with two 3x3 conv2d's\n branch5x5 = conv2d_bn(x, filters[2][0], (1, 1))\n branch5x5 = conv2d_bn(branch5x5, filters[2][1], (3, 3))\n branch5x5 = conv2d_bn(branch5x5, filters[2][1], (3, 3))\n\n # use AveragePooling2D here\n branchpool = layers.AveragePooling2D(\n pool_size=(3, 3), strides=(1, 1), padding='same')(x)\n branchpool = conv2d_bn(branchpool, filters[3], (1, 1))\n\n concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3\n x = layers.concatenate(\n [branch1x1, branch3x3, branch5x5, branchpool], axis=concat_axis)\n return x", "title": "" }, { "docid": "5ad012d6fb64c06ee94e66ff64290a60", "score": "0.56840354", "text": "def preprocessing(img):\n kernel1 = np.ones((3, 3), np.uint8)\n kernel2 = np.ones((5, 5), np.uint8)\n gray = img[:, :, 0]\n gray = cv2.medianBlur(gray, 5)\n gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel1, iterations=4)\n gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel2, iterations=3)\n return gray", "title": "" }, { "docid": "d396603a14426361521262b26047f7c8", "score": "0.56680375", "text": "def my_blur(img, kernel):\n tmp1 = scipy.ndimage.filters.convolve1d(img, kernel, axis= -1, mode=\"reflect\")\n return scipy.ndimage.filters.convolve1d(tmp1, kernel, axis=0, mode=\"reflect\")", "title": "" }, { "docid": "4b954977b76b1723e3c76ed64f0bbd0c", "score": "0.5667778", "text": "def apply_filters(input, nb_iter=2, sigma_r=60, flat_tresh=10, sharp_tresh=60, min_sharp_tresh=10,\n sharp_rate=.75, med_k_size=3, bil_k_size=3, sigma_s=5, early_stop = True, stop_criterion = 1., show=False):\n\n filt_loss = []\n x = pd.Series(Filters.median_filter(\n input.to_numpy(), kernel_size=med_k_size))\n x.index = input.index\n last = input\n level = sharp_tresh\n if show:\n plt.figure(figsize=(20, 8))\n for k in range(nb_iter):\n if k % 2 == 0:\n flattened = pd.Series(Filters.flatten(\n x.to_numpy(), treshold=flat_tresh))\n flattened.index = x.index\n x = flattened\n if show:\n plt.plot(x, color='b', alpha=.25 + k * .75 / nb_iter)\n x = Filters.bilat_sharp(\n x, bil_k_size=bil_k_size, sigma_s=sigma_s, sigma_r=sigma_r, sharpen=level)\n error = x - last\n if early_stop and len(filt_loss) > 0 and filt_loss[-1]<stop_criterion and error.std() < stop_criterion :\n break\n filt_loss.append(error.std())\n last = x\n\n if level > min_sharp_tresh:\n level = int(level * sharp_rate)\n\n flattened = pd.Series(Filters.flatten(\n last.to_numpy(), treshold=flat_tresh))\n flattened.index = last.index\n last = flattened\n\n energy_loss = input - last\n if show:\n plt.show()\n return energy_loss, filt_loss, last", "title": "" }, { "docid": "28613e2f544abbec9e6b341b46769887", "score": "0.56611025", "text": "def inception_17(base_tensor, filters, name=None):\n f1, f31, f317, f371, f51, f517, f571, f5_17, f5_71, fp1 = filters\n # conv1\n tower_0 = 
conv2d_bn(base_tensor, f1, (1, 1))\n # conv3\n tower_1 = conv2d_bn(base_tensor, f31, (1, 1))\n tower_1 = conv2d_bn(tower_1, f317, (1, 7))\n tower_1 = conv2d_bn(tower_1, f371, (7, 1))\n # conv5\n tower_2 = conv2d_bn(base_tensor, f51, (1, 1))\n tower_2 = conv2d_bn(tower_2, f517, (1, 7))\n tower_2 = conv2d_bn(tower_2, f571, (7, 1))\n tower_2 = conv2d_bn(tower_2, f5_17, (1, 7))\n tower_2 = conv2d_bn(tower_2, f5_71, (7, 1))\n # maxpool\n tower_3 = AveragePooling2D((3, 3),(1, 1), padding='same')(base_tensor)\n tower_3 = conv2d_bn(tower_3, fp1, (1, 1))\n # merge\n output = Concatenate(axis=-1, name=name)([tower_0, tower_1, tower_2, tower_3])\n return output", "title": "" }, { "docid": "b02432e5fb8a8e50aca193b64e0fd75f", "score": "0.5658213", "text": "def _preprocess(self, pipeline, imgs):\n # Pipeline empty\n if len(pipeline) == 0:\n return imgs\n\n # Checking valid input for filter(s)\n for fn in pipeline:\n if fn not in filters:\n print(\"Error: Filter '%s' was not found in filters\\n\" % fn)\n exit(1)\n \n # Applying filter(s) to each image\n new_imgs = {'Infected': [], 'Uninfected': []}\n for c in imgs:\n for img in imgs[c]:\n fImg = img\n for fn in pipeline:\n f = filters[fn]\n fImg = f(fImg)\n \n new_imgs[c].append(fImg)\n\n return new_imgs", "title": "" }, { "docid": "a5b77d2ec5e89e79ccc35218e30038c6", "score": "0.56535995", "text": "def expand_image(image):\n out = None\n kernel = custom_kernel(0.375)\n outimage = np.zeros((image.shape[0]*2, image.shape[1]*2), dtype=np.float64)\n outimage[::2,::2]=image[:,:]\n out = 4*cv2.filter2D(outimage,-1,kernel) #scipy.signal.convolve2d(outimage,kernel,'same')\n return out", "title": "" }, { "docid": "a53548d3e2fdb59f7c4ef2d8f7e72eba", "score": "0.56528574", "text": "def adjustBrightnessAndContrast(img,brightness,contrast):\r\n # create an empty image of same dimension\r\n filtered_image = image.EmptyImage(img.getWidth(), img.getHeight())\r\n \r\n # for each pixel in the image, img\r\n for row in range(img.getHeight()):\r\n for col in range(img.getWidth()):\r\n \r\n # get the pixel value\r\n p = img.getPixel(col, row)\r\n\r\n # apply contrast filter to the pixel\r\n p = brightnessAndContrastTransform(p, brightness, contrast)\r\n \r\n # set the corresponding pixel in the filtered image\r\n filtered_image.setPixel(col,row, p)\r\n \r\n return filtered_image", "title": "" }, { "docid": "dc42b59d90632d84bd252401c71ea91c", "score": "0.5647101", "text": "def filter(self):\n if sum(self.mask_code) == 0:\n self.mask_code[0] = 1\n mask_code = np.asarray(self.mask_code)\n idx = np.squeeze(np.argwhere(mask_code)).tolist()\n idx = [idx] if not isinstance(idx, list) else idx\n weights = self.layer.get_weights()\n self.layer.num_features = sum(mask_code)\n for name, weight in weights.items():\n self.layer.set_weights(name, weight[idx])", "title": "" }, { "docid": "4b9b3ff122ee98b9219c4119e624c6a0", "score": "0.56193614", "text": "def filter(self, method, mode, median_size=5, kernel_size=5, fwhm_size=5,\n btw_cutoff=0.2, btw_order=2, gauss_mode='conv'):\n if method == 'hp':\n self.image = frame_filter_highpass(self.image, mode, median_size,\n kernel_size, fwhm_size,\n btw_cutoff, btw_order)\n elif method == 'lp':\n self.image = frame_filter_lowpass(self.image, mode, median_size,\n fwhm_size, gauss_mode)\n else:\n raise ValueError('Filtering mode not recognized')\n print('Image successfully filtered')", "title": "" }, { "docid": "523d879c43b0ff8bc924d6f6bb6049c6", "score": "0.5615851", "text": "def adjustImage(image):\n image = Image.fromarray(image)\n #brighten\n 
image = image.point(lambda p: p * 1.2)\n image = ImageOps.grayscale(image)\n image = ImageOps.equalize(image)\n #image = ImageOps.autocontrast(image)\n\n image = ImageOps.colorize(image, (0,0,0), (255,255,255))\n return image", "title": "" }, { "docid": "ab72797d62a80f87ce61c5b76c706b86", "score": "0.5597074", "text": "def apply_blur(image, image2, x, y, gussiankernel, erosionkernel):\n\n height, width = image.shape[:2]\n filter = np.zeros((height, width))\n filter[x,y] = 1\n # Erosion to reduce blur size\n filter = cv2.GaussianBlur(filter, (gussiankernel, gussiankernel), 0)\n kernel = np.ones((erosionkernel, erosionkernel), np.uint8)\n filter = cv2.erode(filter, kernel, iterations=1)\n alpha = np.zeros([height, width, 3], dtype='float64')\n alpha[:, :, 0] = filter\n alpha[:, :, 1] = filter\n alpha[:, :, 2] = filter\n im_copy = (alpha * image2 + (1 - alpha) *image).astype('uint8')\n\n return im_copy", "title": "" }, { "docid": "b72fadaf8865a140211e579490bc6b17", "score": "0.55969113", "text": "def smooth_image(self, img):\n if self.smoothing_sigma > 0:\n return scipy.ndimage.gaussian_filter(\n img, sigma=self.smoothing_sigma / self.pixel_width\n )\n return img", "title": "" }, { "docid": "c3a9fd23684ffb1a62804689ac95adfa", "score": "0.55910397", "text": "def blur(self, img) -> np.array:\n n = self.random_choice(np.arange(1, 4))\n kernel = np.ones((n, n), np.float32) / n**2\n img = cv2.filter2D(img, -1, kernel)\n return img", "title": "" }, { "docid": "fdad86ae50fbac2c13cc9a274d324d73", "score": "0.55901444", "text": "def inception_s2(x, filters):\n if len(filters) != 2:\n raise ValueError('filters should have 2 components')\n if len(filters[0]) != 2 or len(filters[1]) != 2:\n raise ValueError('incorrect spec of filters')\n\n branch3x3 = conv2d_bn(x, filters[0][0], (1, 1))\n branch3x3 = conv2d_bn(branch3x3, filters[0][1], (3, 3), strides=(2, 2))\n\n branch5x5 = conv2d_bn(x, filters[1][0], (1, 1))\n branch5x5 = conv2d_bn(branch5x5, filters[1][1], (3, 3))\n branch5x5 = conv2d_bn(branch5x5, filters[1][1], (3, 3), strides=(2, 2))\n\n # use MaxPooling2D here\n branchpool = layers.MaxPooling2D(\n pool_size=(3, 3), strides=(2, 2), padding='same')(x)\n\n concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3\n x = layers.concatenate(\n [branch3x3, branch5x5, branchpool], axis=concat_axis)\n return x", "title": "" }, { "docid": "d4dbccf63f33843ed8c63617ac813180", "score": "0.5574838", "text": "def adjustGrey(img):\r\n # create an empty image of same dimension\r\n filtered_image = image.EmptyImage(img.getWidth(), img.getHeight())\r\n \r\n # for each pixel in the image, img\r\n for row in range(img.getHeight()):\r\n for col in range(img.getWidth()):\r\n \r\n # get the pixel value\r\n p = img.getPixel(col, row)\r\n\r\n # apply contrast filter to the pixel\r\n p = greyTransform(p)\r\n \r\n # set the corresponding pixel in the filtered image\r\n filtered_image.setPixel(col,row, p)\r\n \r\n return filtered_image", "title": "" }, { "docid": "b052f98e7808140b200f22182cf426f6", "score": "0.5572079", "text": "def median_filtering(img):\n smooth_img = np.zeros(img.shape)\n smooth_img[:, :, 0] = signal.medfilt2d(img[:, :, 0])\n smooth_img[:, :, 1] = signal.medfilt2d(img[:, :, 1])\n smooth_img[:, :, 2] = signal.medfilt2d(img[:, :, 2])\n return smooth_img", "title": "" }, { "docid": "4f6ebc8189a8a892552c2000a8607aec", "score": "0.55664283", "text": "def _softening(band):\n from astropy import units\n assert band in filters, \"Band {} not in {}\".format(band,filters)\n _zp = 
filters[band].get('zeropoint')\n assert 'b' in _zp\n _b = _zp['b'] * units.one\n return _b", "title": "" }, { "docid": "9ecd770a3ce3564014e1b843c7996e1e", "score": "0.5551045", "text": "def apply_threshold_filter(image, thres=85):\n gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n filter_image, mask = cv.threshold(gray_image, thres, 255, cv.THRESH_BINARY_INV)\n filter_image = cv.bitwise_and(image, image, mask=mask)\n return filter_image, mask", "title": "" }, { "docid": "a2956b10704c17b9328cfbd522258f9e", "score": "0.5542067", "text": "def apply_filter2D_to_image(self, processed_img, kernel_size, kernel_type=0, depth=-1):\n verify_valid_image(processed_img)\n verify_valid_kernel_size(kernel_size)\n verify_valid_depth(depth)\n if kernel_type == 0:\n transformed_image = process_image_filtering_with_rect_kernel(processed_img, kernel_size, depth)\n elif kernel_type == 1:\n transformed_image = process_image_filtering_with_ellipse_kernel(processed_img, kernel_size, depth)\n elif kernel_type == 2:\n transformed_image = process_image_filtering_with_cross_kernel(processed_img, kernel_size, depth)\n else:\n return raise_invalid_kernel_type(kernel_type)\n return transformed_image", "title": "" }, { "docid": "edd64bb71d671addbb4df3c17426281e", "score": "0.55276734", "text": "def use_filter(signal, weight, which):\n import pywt\n\n if which == 1:\n filtered = gaussian_filter(signal, weight)\n return filtered\n elif which == 2:\n filtered = moving_average(signal, weight)\n return filtered\n elif which == 3:\n filtered = thresh_MAD(signal)\n return filtered\n else:\n return signal", "title": "" }, { "docid": "1235d0669e4e2e8df38eeb4e42e62be8", "score": "0.55192405", "text": "def basic_image_ops(image, brighten=1.0, sharpen=None, saturation=None):\n if brighten is not None and brighten != 1.0:\n logging.info('Applying brighten {}'.format(brighten))\n image = ImageEnhance.Brightness(image).enhance(brighten)\n if sharpen is not None:\n logging.info('Applying sharpen {}'.format(sharpen))\n image = ImageEnhance.Sharpness(image).enhance(sharpen)\n if saturation is not None:\n logging.info('Applying saturation {}'.format(saturation))\n image = ImageEnhance.Color(image).enhance(saturation)\n return image", "title": "" }, { "docid": "64111988628de8223a2b63033e911752", "score": "0.55142766", "text": "def short_cut(x, num_filters, num_blocks, filter_half=False):\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Mish(num_filters // 2 if filter_half else num_filters, (1, 1)),\n DarknetConv2D_BN_Mish(num_filters, (3, 3)))(x)\n x = layers.Add()([x, y])\n return x", "title": "" }, { "docid": "25acc87364d2bedd9dddfb0b8c5fe598", "score": "0.5508105", "text": "def edit_im(self, *args):\n logger.debug(\"Interface.edit_image\")\n logger.debug(\"do_filter, default=%s\" % self.default_filter)\n if self.default_filter == \"ContrastMask\":\n self.filter_ContrastMask()\n elif self.default_filter == \"AutoWB\":\n self.filter_AutoWB()\n else:\n logger.error(\"Unknown filter: %s\", config.SelectedFilter)", "title": "" }, { "docid": "c059af11fc8bab56a399d516df0ab8e3", "score": "0.55039334", "text": "def current(wf, sigma=5):\n\n wfc = ndimage.filters.gaussian_filter1d(wf, sigma=sigma, order=1) # lol\n\n return wfc", "title": "" }, { "docid": "9d7273420d943f737c55b2c0935f531d", "score": "0.5493017", "text": "def apply_filter(original_image_filepath, filter, output_path):\n\n original_image = img.imread(original_image_filepath)\n\n print(\"Starting application of filter\")\n\n # Get dimensions of the input image\n 
ROWS = original_image.shape[0]\n COLS = original_image.shape[1]\n\n print(\"Size of input image is: {}x{}\".format(ROWS, COLS))\n\n print(\"Beginning matrix multiplication\")\n d = np.zeros((ROWS, COLS, 3), dtype=int).tolist()\n for k in range(3):\n for i in range(ROWS - 2):\n for j in range(COLS - 2):\n s = 0\n for ii in range(3):\n for jj in range(3):\n s += original_image[i + ii][j + jj][k] * filter[ii][jj]\n d[i + 1][j + 1][k] = int(s)\n d = np.array(d)\n\n print(\"Done with matrix multiplication\")\n\n print(\"Saving file with '_edited.jpg' postfix\")\n edited_image = np.clip(d, 0, 255)\n edited_image = edited_image.astype('uint8')\n\n edited_image_filename = os.path.basename(original_image_filepath).split('.')[0] + \"_edited.jpg\"\n edited_image_filepath = os.path.join(output_path, edited_image_filename)\n\n img.imsave(edited_image_filepath, edited_image)\n print(\"Saved filtering file\")\n\n return edited_image_filename", "title": "" }, { "docid": "00b830745b89a8fa9a10a9beab153791", "score": "0.54928726", "text": "def filter_3(img, kernel=None) -> np.ndarray:\n if kernel is None: # default is a low pass\n kernel = np.ones((3, 3)) / 9\n filtered_img = ndimage.convolve(img, kernel)\n return filtered_img", "title": "" }, { "docid": "7b3de72f318489adbb478a0b85555658", "score": "0.5490322", "text": "def whitenning(img):\n # implementar whitening aqui\n b, g, r = cv2.split(img)\n bw = pca_scikit(b)\n gw = pca_scikit(g)\n rw = pca_scikit(r)\n\n img = cv2.merge((bw, gw, rw))\n return img", "title": "" }, { "docid": "e7938b825865bc72c46d7f741ccc540b", "score": "0.54902864", "text": "def sharpener(imname, alpha, savename='', show=True, grey=False, crop_edge = None, clip=True, gaus_ksize = 20, gaus_std = 3):\n\n\n\n # Read Image\n im = skio.imread(imname)\n\n # Convert to Double\n im = sk.img_as_float(im)\n\n #Kernel\n gauss1d = cv2.getGaussianKernel(gaus_ksize, gaus_std)\n gauss2d = gauss1d @ gauss1d.T\n\n if ~grey:\n blur_im = three_d_convolve(im, gauss2d)\n else:\n blur_im = convolve2d(im, gauss2d, mode='same')\n\n high_fq_im = im.copy()\n if ~grey:\n high_fq_im[:,:,0] = im[:,:,0] -blur_im[:,:,0]\n high_fq_im[:,:,1] = im[:,:,1] -blur_im[:,:,1]\n high_fq_im[:,:,2] = im[:,:,2] -blur_im[:,:,2]\n output = im + alpha * high_fq_im\n\n if clip:\n output = np.clip(output, a_min = 0, a_max = 1)\n\n if crop_edge != None:\n output = crop_edges(output, crop_edge)\n\n if show:\n skio.imshow(output)\n\n if savename == '':\n savename = f'outputs/{imname}_sharpened.jpg'\n skio.imsave(savename, output)", "title": "" }, { "docid": "09ba413450b7dccfe9d88f1f0d94a1f4", "score": "0.5483316", "text": "def sharpen_filter(image, a, sigma):\n blurred = blur_filter(image, sigma)\n sharper = np.clip(image * (1.0 + a) - blurred * a, 0, 1.0)\n return sharper", "title": "" }, { "docid": "c577cd5a08f3665c0ae87ee151d3118d", "score": "0.5478777", "text": "def filter(self):\n if sum(self.mask_code) == 0:\n self.mask_code[0] = 1\n mask_code = np.asarray(self.mask_code)\n idx_in = np.squeeze(np.argwhere(mask_code)).tolist()\n idx_in = [idx_in] if not isinstance(idx_in, list) else idx_in\n self.layer.in_features = sum(mask_code)\n weights = self.layer.get_weights()\n out_size = self.layer.out_features\n for name, weight in weights.items():\n if 'kernel' in name or 'weight' in name:\n if is_torch_backend():\n self.layer.set_weights(name, weight[:, idx_in])\n out_size = weight.shape[0]\n else:\n self.layer.set_weights(name, weight[idx_in, :])\n out_size = weight.shape[1]\n # fineTune out_feature value\n if self.layer.out_features 
== out_size:\n return\n idx_out = list(np.random.permutation(out_size)[:self.layer.out_features])\n for name, weight in self.layer.get_weights().items():\n if 'kernel' in name:\n self.layer.set_weights(name, weight[:, idx_out])\n else:\n self.layer.set_weights(name, weight[idx_out])\n self.layer.out_features = out_size", "title": "" }, { "docid": "f3fb181f88be2e71e6f9350194a29c2f", "score": "0.54673445", "text": "def FilterImg(ev):\n ev.XFormRand.Gen(ev.XForm)\n oimg = ev.Images[ev.ImageIdx.Cur]\n\n insz = ev.Vis.Geom.In.Mul(2)\n ibd = oimg.Bounds()\n isz = ibd.Size()\n irng = isz.Sub(insz)\n st = image.Point()\n st.X = rand.Intn(irng.X)\n st.Y = rand.Intn(irng.Y)\n ed = st.Add(insz)\n simg = oimg.SubImage(image.Rectangle(Min= st, Max= ed))\n img = ev.XForm.Image(simg)\n ev.Vis.Filter(img)", "title": "" }, { "docid": "f5443c0c918746e7f11a71a38a804a48", "score": "0.5452717", "text": "def whitening_filt(size, temporal=True, f_0=white_f_0, alpha=white_alpha, N=white_N):\n fx, fy, ft = np.mgrid[-1:1:1j*size[0], -1:1:1j*size[1], -1:1:1j*size[2]]\n if temporal:\n rho = np.sqrt(fx**2+ fy**2 + ft**2)\n else:\n rho = np.sqrt(fx**2+ fy**2)\n low_pass = np.exp(-(rho/f_0)**alpha)\n K = (N**2 + rho**2)**.5 * low_pass\n return K", "title": "" }, { "docid": "55e3f03b2e77e48dc7c85604fa0b6ae8", "score": "0.544992", "text": "def adjustBrightness(img, brightness):\r\n # create an empty image of same dimension\r\n filtered_image = image.EmptyImage(img.getWidth(), img.getHeight())\r\n \r\n # for each pixel in the image, img\r\n for row in range(img.getHeight()):\r\n for col in range(img.getWidth()):\r\n \r\n # get the pixel value\r\n p = img.getPixel(col, row)\r\n\r\n # apply contrast filter to the pixel\r\n p = brightnessTransform(p, brightness)\r\n \r\n # set the corresponding pixel in the filtered image\r\n filtered_image.setPixel(col,row, p)\r\n \r\n return filtered_image", "title": "" }, { "docid": "c41749da00e7de32ed186a4de0908555", "score": "0.54447144", "text": "def filter_bilateral( img_in, sigma_s, sigma_v, reg_constant=1e-8 ):\n\n # check the input\n if not isinstance( img_in, np.ndarray ) or img_in.dtype != 'float32' or img_in.ndim != 2:\n raise ValueError('Expected a 2D np.ndarray with float32 elements')\n\n\n # make a simple Gaussian function taking the squared radius\n gaussian = lambda r2, sigma: (np.exp( -0.5*r2/sigma**2 )*3).astype(int)*1.0/3.0\n\n # define the window width to be the 3 time the spatial std. dev. to \n # be sure that most of the spatial kernel is actually captured\n win_width = int( 3*sigma_s+1 )\n\n # initialize the results and sum of weights to very small values for\n # numerical stability. not strictly necessary but helpful to avoid\n # wild values with pathological choices of parameters\n wgt_sum = np.ones( img_in.shape )*reg_constant\n result = img_in*reg_constant\n\n # accumulate the result by circularly shifting the image across the\n # window in the horizontal and vertical directions. 
within the inner\n # loop, calculate the two weights and accumulate the weight sum and \n # the unnormalized result image\n for shft_x in range(-win_width,win_width+1):\n for shft_y in range(-win_width,win_width+1):\n # compute the spatial weight\n w = gaussian( shft_x**2+shft_y**2, sigma_s )\n\n # shift by the offsets\n off = np.roll(img_in, [shft_y, shft_x], axis=[0,1] )\n\n # compute the value weight\n tw = w*gaussian( (off-img_in)**2, sigma_v )\n\n # accumulate the results\n result += off*tw\n wgt_sum += tw\n\n # normalize the result and return\n return result/wgt_sum", "title": "" }, { "docid": "856bc655009b7beb3ec8c7de2852ca93", "score": "0.54342467", "text": "def inception_35(base_tensor, filters, name=None):\n f1, f31, f33, f51, f53, f533, fp1 = filters\n # conv1\n tower_0 = conv2d_bn(base_tensor, f1, (1, 1))\n # conv3\n tower_1 = conv2d_bn(base_tensor, f31, (1, 1))\n tower_1 = conv2d_bn(tower_1, f33, (3, 3))\n # conv5\n tower_2 = conv2d_bn(base_tensor, f51, (1, 1))\n tower_2 = conv2d_bn(tower_2, f53, (3, 3))\n tower_2 = conv2d_bn(tower_2, f533, (3, 3))\n # maxpool\n tower_3 = AveragePooling2D((3, 3),(1, 1), padding='same')(base_tensor)\n tower_3 = conv2d_bn(tower_3, fp1, (1, 1))\n # merge\n output = Concatenate(axis=-1, name=name)([tower_0, tower_1, tower_2, tower_3])\n return output", "title": "" }, { "docid": "3c718868dc71a680423b645d08581b22", "score": "0.54334074", "text": "def main():\n old_img = SimpleImage(\"images/smiley-face.png\")\n old_img.show()\n\n blurred_img = blur(old_img)\n for i in range(BLUR_TIMES):\n blurred_img = blur(blurred_img)\n blurred_img.show()", "title": "" }, { "docid": "3c718868dc71a680423b645d08581b22", "score": "0.54334074", "text": "def main():\n old_img = SimpleImage(\"images/smiley-face.png\")\n old_img.show()\n\n blurred_img = blur(old_img)\n for i in range(BLUR_TIMES):\n blurred_img = blur(blurred_img)\n blurred_img.show()", "title": "" }, { "docid": "d2f7f2c660f7d4368a085ad8c16d20dd", "score": "0.54298896", "text": "def stack_filter(base_filt, stack_filt):\n device = torch.device(\"cuda:0\") if base_filt.is_cuda else torch.device(\"cpu\")\n kb = base_filt.shape[-1]\n ks = stack_filt.shape[-1]\n new_filt = torch.zeros(stack_filt.shape[0], base_filt.shape[1], base_filt.shape[2]+(ks-1), base_filt.shape[3]+(ks-1))\n new_filt = new_filt.to(device)\n for out_chan in range(stack_filt.shape[0]):\n for in_chan in range(stack_filt.shape[1]): # same as out_chan in base_filt/new_filt\n for row in range(stack_filt.shape[2]):\n for col in range(stack_filt.shape[3]):\n new_filt[out_chan:out_chan+1, :, row:row+kb, col:col+kb] += base_filt[in_chan]*stack_filt[out_chan, in_chan, row, col]\n return new_filt", "title": "" }, { "docid": "fdcdeaa8383cca75ce14ab557d0eaadf", "score": "0.5425898", "text": "def passFilter(img, mask_size, lowPass=False):\n freqDomain = fourierTransform(img)\n x, y = img.shape\n if lowPass:\n mask = np.zeros((x, y, 2), np.uint8)\n mask[x / 2 - mask_size / 2:x / 2 + mask_size / 2, y / 2 - mask_size / 2:\n y / 2 + mask_size / 2] = 1\n else:\n mask = np.ones((x, y, 2), np.uint8)\n mask[x / 2 - mask_size / 2:x / 2 + mask_size / 2, y / 2 - mask_size / 2:\n y / 2 + mask_size / 2] = 0\n\n # apply mask and inverse DFT\n maskedFreqDomain = freqDomain * mask\n return invFourierTransform(maskedFreqDomain)", "title": "" }, { "docid": "672001999dbab0959e54cbbd2590efa5", "score": "0.5417338", "text": "def apply_filter(data, filter_type='gaussian'):\n\n if filter_type == 'gaussian':\n print '\\nApply a Gaussian filter...'\n sigma = 1 # standard 
deviation for Gaussian kernel\n data_filtered = scipy.ndimage.filters.gaussian_filter(data, sigma)\n return data_filtered", "title": "" }, { "docid": "47814a6152019be2fde8b429e1010b0b", "score": "0.541635", "text": "def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11,\r\n filter_sigma=1.5, k1=0.01, k2=0.03):\r\n if img1.shape != img2.shape:\r\n raise RuntimeError('Input images must have the same shape (%s vs. %s).', img1.shape, img2.shape)\r\n\r\n if img1.ndim != 4:\r\n raise RuntimeError('Input images must have four dimensions, not %d', img1.ndim)\r\n\r\n img1 = img1.astype(np.float64)\r\n img2 = img2.astype(np.float64)\r\n\r\n _, height, width, _ = img1.shape\r\n\r\n # Filter size can't be larger than height or width of images.\r\n size = min(filter_size, height, width)\r\n\r\n # Scale down sigma if a smaller filter size is used.\r\n sigma = size * filter_sigma / filter_size if filter_size else 0\r\n\r\n if filter_size:\r\n window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))\r\n mu1 = signal.fftconvolve(img1, window, mode=\"valid\")\r\n mu2 = signal.fftconvolve(img2, window, mode=\"valid\")\r\n\r\n sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')\r\n sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')\r\n sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')\r\n else:\r\n # Empty blur kernel so no need to convolve.\r\n mu1, mu2 = img1, img2\r\n sigma11 = img1 * img1\r\n sigma22 = img2 * img2\r\n sigma12 = img1 * img2\r\n\r\n mu11 = mu1 * mu1\r\n mu22 = mu2 * mu2\r\n mu12 = mu1 * mu2\r\n sigma11 -= mu11\r\n sigma22 -= mu22\r\n sigma12 -= mu12\r\n\r\n # Calculate intermediate values used by both ssim and cs_map.\r\n c1 = (k1 * max_val) ** 2\r\n c2 = (k2 * max_val) ** 2\r\n v1 = 2.0 * sigma12 + c2\r\n v2 = sigma11 + sigma22 + c2\r\n ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))\r\n cs = np.mean(v1 / v2)\r\n\r\n return ssim, cs", "title": "" }, { "docid": "d6c44440286bd46487f972559c5a154a", "score": "0.54129136", "text": "def apply(self, source_image, destination_image):\n cv2.filter2D(source_image, -1, self._kernel, destination_image)", "title": "" }, { "docid": "ddc27ae733771c937ed4e5cb04bb66fc", "score": "0.5400559", "text": "def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11,\r\n filter_sigma=1.5, k1=0.01, k2=0.03):\r\n if img1.shape != img2.shape:\r\n raise RuntimeError('Input images must have the same shape (%s vs. 
%s).',\r\n img1.shape, img2.shape)\r\n if img1.ndim != 4:\r\n raise RuntimeError('Input images must have four dimensions, not %d',\r\n img1.ndim)\r\n\r\n img1 = img1.astype(np.float64)\r\n img2 = img2.astype(np.float64)\r\n _, height, width, _ = img1.shape\r\n\r\n # Filter size can't be larger than height or width of images.\r\n size = min(filter_size, height, width)\r\n\r\n # Scale down sigma if a smaller filter size is used.\r\n sigma = size * filter_sigma / filter_size if filter_size else 0\r\n\r\n if filter_size:\r\n window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))\r\n mu1 = signal.fftconvolve(img1, window, mode='valid')\r\n mu2 = signal.fftconvolve(img2, window, mode='valid')\r\n sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')\r\n sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')\r\n sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')\r\n else:\r\n # Empty blur kernel so no need to convolve.\r\n mu1, mu2 = img1, img2\r\n sigma11 = img1 * img1\r\n sigma22 = img2 * img2\r\n sigma12 = img1 * img2\r\n\r\n mu11 = mu1 * mu1\r\n mu22 = mu2 * mu2\r\n mu12 = mu1 * mu2\r\n sigma11 -= mu11\r\n sigma22 -= mu22\r\n sigma12 -= mu12\r\n\r\n # Calculate intermediate values used by both ssim and cs_map.\r\n c1 = (k1 * max_val) ** 2\r\n c2 = (k2 * max_val) ** 2\r\n v1 = 2.0 * sigma12 + c2\r\n v2 = sigma11 + sigma22 + c2\r\n ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))\r\n cs = np.mean(v1 / v2)\r\n return ssim, cs", "title": "" }, { "docid": "c5cf5f2cce2e892c7fa79e31e67fae83", "score": "0.53852534", "text": "def two_perfect_codes() -> ImageStack:\n img = np.zeros((3, 2, 20, 50, 50), dtype=np.float32)\n\n # code 1\n img[0, 0, 5, 20, 35] = 10\n img[1, 1, 5, 20, 35] = 10\n img[2, 0, 5, 20, 35] = 10\n\n # code 1\n img[0, 0, 5, 40, 45] = 10\n img[1, 1, 5, 40, 45] = 10\n img[2, 0, 5, 40, 45] = 10\n\n # blur points\n gaussian_filter(img, (0, 0, 0.5, 1.5, 1.5), output=img)\n\n return ImageStack.from_numpy(img)", "title": "" }, { "docid": "4d8759a2daf1826a8fbf718dccf94985", "score": "0.5379533", "text": "def execute(self, image: sitk.Image, params: pymia_fltr.IFilterParams = None) -> sitk.Image:\n\n img_arr = sitk.GetArrayFromImage(image)\n\n # STUDENT: implementation of normalization\n if self.norm_method == 'z':\n print('Normalization method: Z-Score')\n mask = sitk.GetArrayFromImage(self.mask)\n mean = img_arr[mask == 1].mean()\n std = img_arr[mask == 1].std()\n img_arr = (img_arr - mean) / std\n\n elif self.norm_method == 'ws':\n print('Normalization method: White Stripe')\n indices = self.white_stripe(img_arr)\n plt.figure()\n # plt.title('White Stripe Mask of of ID ' + self.id_)\n plt.imshow(indices[100, :, :])\n plt.axis('off')\n plt.savefig('./mia-result/plots/WS_Mask_' + self.T_ + '_' + self.id_ + '.png')\n plt.close()\n # Normalization step\n mean = np.mean(img_arr[indices])\n std = np.std(img_arr[indices])\n img_arr = (img_arr - mean) / std\n\n elif self.norm_method == 'hm':\n print('Normalization method: Histogram Matching')\n # self.save_hist(img_arr) # optional for inspection\n img_arr = self.do_hist_norm(img_arr)\n # self.save_hist(img_arr, normalized=True) # optional for inspection\n\n elif self.norm_method == 'fcm':\n print('Normalization method: FCM White Matter Aligning')\n threshold = 0.6\n fcm_clusters = self.fcm_mask(img_arr, maxiter=30)\n # Create mask with white matter cluster\n if self.T_ is 'T1w':\n wm_mask = fcm_clusters[..., 2] > threshold\n elif self.T_ is 'T2w':\n wm_mask = fcm_clusters[..., 0] > 
threshold\n else:\n print('Wrong entry for image contrast')\n wm_mask = None\n # Plot clusters for visual inspection\n clusters = np.zeros(img_arr.shape)\n for i in range(3):\n clusters[fcm_clusters[..., i] > threshold] = i + 1\n plt.figure()\n # plt.title('White Matter Mask of of ID ' + self.id_)\n plt.imshow(clusters[100, :, :])\n plt.axis('off')\n plt.savefig('./mia-result/plots/WM_Mask_' + self.T_ + '_' + self.id_ + '.png')\n plt.close()\n # Normalization step\n wm_mean = img_arr[wm_mask == 1].mean()\n img_arr = (img_arr/wm_mean)\n\n elif self.norm_method == 'no':\n print('No Normalization')\n\n else:\n print('Normalization method not known. Pre processing runs without normalization.')\n\n # conversion to simpleITK image\n img_out = sitk.GetImageFromArray(img_arr)\n img_out.CopyInformation(image)\n\n return img_out", "title": "" }, { "docid": "1d1a8e2e054760d6caa589c53911d2c1", "score": "0.53780764", "text": "def stem(inputs, alpha, n_filters, \n filter_size):\n # Convolutional block\n x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)\n x = layers.Conv2D(n_filters, (filter_size, filter_size), strides=(2, 2), padding='valid')(x)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU()(x)\n\n # Depthwise Separable Convolution Block\n x = depthwise_block(x, 64, alpha, (1, 1))\n return x", "title": "" }, { "docid": "b3220041f7df8616c4f9d826f10362dc", "score": "0.53773856", "text": "def adjustContrast(img, contrast):\r\n # create an empty image of same dimension\r\n filtered_image = image.EmptyImage(img.getWidth(), img.getHeight())\r\n \r\n # for each pixel in the image, img\r\n for row in range(img.getHeight()):\r\n for col in range(img.getWidth()):\r\n \r\n # get the pixel value\r\n p = img.getPixel(col, row)\r\n\r\n # apply contrast filter to the pixel\r\n p = contrastTransform(p, contrast)\r\n \r\n # set the corresponding pixel in the filtered image\r\n filtered_image.setPixel(col,row, p)\r\n \r\n return filtered_image", "title": "" }, { "docid": "a06e5c12e46f545f042ba5464204f46f", "score": "0.537625", "text": "def onedim_pixtopix_variations(f_flat, filt='gaussian', filter_width=25):\n \n pix_sens = {}\n smoothed_flat = {}\n \n while filt.lower() not in ['g','gaussian','s','savgol','m','median']:\n print(\"ERROR: filter choice not recognised!\")\n filt = raw_input(\"Please try again: ['(G)aussian','(S)avgol','(M)edian']\")\n \n #loop over all orders\n for ord in sorted(f_flat.keys()): \n if filt.lower() in ['g','gaussian']:\n #Gaussian filter\n smoothed_flat[ord] = ndimage.gaussian_filter(f_flat[ord], filter_width) \n pix_sens[ord] = f_flat[ord] / smoothed_flat[ord]\n elif filt.lower() in ['s','savgol']:\n print('WARNING: SavGol filter not implemented yet!!!')\n break\n elif filt.lower() in ['m','median']:\n print('WARNING: Median filter not implemented yet!!!')\n break\n else:\n #This should never happen!!!\n print(\"ERROR: filter choice still not recognised!\")\n break\n \n return smoothed_flat, pix_sens", "title": "" }, { "docid": "cbd19d73f344151e60f1de48e7a824f0", "score": "0.53686017", "text": "def kernel_fn(kernels):\n var1, nngp, var2, ntk, is_height_width, marginal, cross = (\n kernels.var1, kernels.nngp, kernels.var2, kernels.ntk,\n kernels.is_height_width, kernels.marginal, kernels.cross)\n\n if cross > M.OVER_PIXELS and not is_height_width:\n filter_shape_nngp = filter_shape[::-1]\n strides_nngp = strides[::-1]\n else:\n filter_shape_nngp = filter_shape\n strides_nngp = strides\n\n if cross == M.OVER_PIXELS:\n def conv_nngp(x):\n if _is_array(x):\n x = 
_conv_nngp_4d(x, filter_shape_nngp, strides_nngp, padding)\n x = _affine(x, W_std, b_std)\n return x\n elif cross in [M.OVER_POINTS, M.NO]:\n def conv_nngp(x):\n if _is_array(x):\n x = _conv_nngp_5or6d_double_conv(x, filter_shape_nngp,\n strides_nngp, padding)\n x = _affine(x, W_std, b_std)\n return x\n\n is_height_width = not is_height_width\n else:\n raise NotImplementedError(\n \"Only implemented for `OVER_PIXELS`, `OVER_POINTS` and `NO`;\"\n \" supplied {}\".format(cross))\n\n if marginal == M.OVER_PIXELS:\n def conv_var(x):\n x = _conv_var_3d(x, filter_shape_nngp, strides_nngp, padding)\n x = _affine(x, W_std, b_std)\n return x\n elif marginal in [M.OVER_POINTS, M.NO]:\n def conv_var(x):\n if _is_array(x):\n x = _conv_nngp_5or6d_double_conv(x, filter_shape_nngp,\n strides_nngp, padding)\n x = _affine(x, W_std, b_std)\n return x\n else:\n raise NotImplementedError(\n \"Only implemented for `OVER_PIXELS`, `OVER_POINTS` and `NO`;\"\n \" supplied {}\".format(marginal))\n\n var1 = conv_var(var1)\n var2 = conv_var(var2)\n nngp = conv_nngp(nngp)\n ntk = conv_nngp(ntk) + nngp - b_std**2 if ntk is not None else ntk\n\n return kernels._replace(\n var1=var1, nngp=nngp, var2=var2, ntk=ntk, is_gaussian=True,\n is_height_width=is_height_width, marginal=marginal, cross=cross)", "title": "" }, { "docid": "79293f1c4560f110433fdeec3375f174", "score": "0.5362118", "text": "def apply_erode_filter(t, iterations, kernel):\n b_w_img = cv2.cvtColor(t, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(b_w_img, 127, 255, cv2.THRESH_BINARY)\n kernel = np.ones((kernel, kernel), np.uint8)\n img = cv2.erode(thresh, kernel, iterations=iterations)\n return img", "title": "" }, { "docid": "cb00609bcd18099ccdb4081e58584454", "score": "0.5350219", "text": "def dn_image_p(img):\n img = img.filter(ImageFilter.MedianFilter(size=3))\n return img.filter(ImageFilter.GaussianBlur(radius=1))", "title": "" }, { "docid": "fb50f769ae92fd8bcd76a32225b99d4b", "score": "0.5342731", "text": "def plotWhiteNoiseActivateFilters(output_path):\n\tintRecordFrequent = 20\n\tintNumberSteps = 160\n\tintIterationSteps = 160\n\n\tdictLayer = dict([layer.name, layer] for layer in model.layers)\n\tinputImage = model.input\n\tlistLayerNames = [layer for layer in dictLayer.keys() if \"activation\" in layer or \"conv2d\" in layer][:8]\n\tlistCollectLayers = [dictLayer[name].output for name in listLayerNames]\n\n\tfilter_num = [32, 32, 32, 64, 64, 64, 128, 128]\n\n\tcnt = 2\n\tfn = listCollectLayers[cnt]\n\t#for cnt, fn in enumerate(listCollectLayers):\n\tlistFilterImages = []\n\tintFilters = filter_num[cnt]\n\tfor i in range(intFilters):\n\t\tarrayInputImage = np.random.random((1, 48, 48, 1)) # random noise\n\t\ttensorTarget = K.mean(fn[:, :, :, i])\n\n\t\ttensorGradients = makeNormalize(K.gradients(tensorTarget, inputImage)[0])\n\t\ttargetFunction = K.function([inputImage, K.learning_phase()], [tensorTarget, tensorGradients])\n\n\t\t# activate filters\n\t\tlistFilterImages.append(trainGradAscent(intIterationSteps, arrayInputImage, targetFunction, intRecordFrequent))\n\n\tfor it in range(8):\n\t\t#print(\"In the #{}\".format(it))\n\t\tfig = plt.figure(figsize=(16, 17))\n\t\tfor i in range(intFilters):\n\t\t\tax = fig.add_subplot(intFilters/8, 8, i+1)\n\t\t\tarrayRawImage = listFilterImages[i][it][0].squeeze()\n\t\t\tax.imshow(deprocessImage(arrayRawImage), cmap=\"Blues\")\n\t\t\tplt.xticks(np.array([]))\n\t\t\tplt.yticks(np.array([]))\n\t\t\tplt.xlabel(\"{:.3f}\".format(listFilterImages[i][it][1]))\n\t\t\tplt.tight_layout()\n\tfig.suptitle(\"Filters 
of layer {} (# Ascent Epoch {} )\".format(listLayerNames[cnt], it*intRecordFrequent))\n\tplt.savefig(\"fig2_1\")\n\tplt.savefig(os.path.join(output_path, \"fig2_1\"))\n\tos.rename(os.path.join(output_path, \"fig2_1.png\"), os.path.join(output_path, \"fig2_1.jpg\"))", "title": "" }, { "docid": "390bd12f1195c3fc4bde05cf768522e8", "score": "0.53397626", "text": "def reduce(image, kernel):\n blurred_im = convolve(image, kernel, mode='constant')\n blurred_im = convolve(blurred_im, kernel.T, mode='constant')\n reduced_im = np.ndarray((int(image.shape[0] / 2), int(image.shape[1] / 2)))\n reduced_im[:] = blurred_im[::2, ::2]\n return reduced_im", "title": "" }, { "docid": "42ce66035353ee9644234a67a8a3cfb4", "score": "0.5332255", "text": "def visualize_filters(model, layer_no=0, filter_dir='filters', final_size=(250,250)):\n weights = model.layers[layer_no].get_weights()[0]\n scaler = MinMaxScaler((0,255))\n print 'weights.shape:', weights.shape\n assert weights[0].shape[0] == 1, ('Expected 1-deep dimension for each weight, do you have 3d instead of 2d?')\n filters = [scaler.fit_transform(filter_array[0,:,:]) for filter_array in weights]\n # sort by variance\n # filters.sort(key=np.var)\n for idx, filt in enumerate(filters):\n assert len(filt.shape) == 2, ('Expected 2D filter, got:', filt.shape)\n img_path = os.path.join(filter_dir, str(idx)) + '.png'\n # scale up filter so you can see it on presentation slides, etc\n final = cv2.resize(filt, dsize=final_size, interpolation=cv2.INTER_NEAREST)\n cv2.imwrite(img_path, final, )\n print 'wrote %i filters to directory %s/.' % (len(filters), filter_dir)", "title": "" }, { "docid": "835c11fa8e52c33315263d8d22cc9d18", "score": "0.5322281", "text": "def post_processing(image):\n # rescale to dtype\n image = exposure.rescale_intensity(image, in_range='image', out_range='dtype')\n\n #TODO: Check for compatibility in general case (applied just for Homomorphic filter)\n # # remove local minima in histogram\n # hist = np.histogram(image, 512)\n #\n # # minVal is the first local minima in histogram\n # minInd = argrelextrema(hist[0], np.less)\n # minVal = hist[1][minInd[0][0]]\n #\n # # maxVal is the highest intensity in which we have more than 25 occurrence\n # maxInd = np.where(hist[0] > 25)\n # maxVal = hist[1][maxInd[0][-1]]\n #\n # image = exposure.rescale_intensity(image, in_range=(minVal, maxVal), out_range='dtype')\n\n return image", "title": "" }, { "docid": "829591aefeb9fd15f20c9457495ac8df", "score": "0.5322165", "text": "def process_base_image(img, kernel_size=(5, 5), show_image=False):\n processed_image = img.copy()\n processed_image = cv2.cvtColor(processed_image, cv2.COLOR_BGR2GRAY)\n processed_image = cv2.GaussianBlur(processed_image, kernel_size, 0)\n if show_image:\n display_img(processed_image, 'Gray Scale Image')\n return processed_image", "title": "" }, { "docid": "bb7fe3f938df7f4aa9da369079e07a28", "score": "0.5322071", "text": "def wiener_filter(self, H, gamma, mode=\"gamma\"):\n G = self.image_fft\n m, n = self.image_fft.shape\n\n H_complex_conj = np.conj(H)\n\n M = np.zeros(G.shape, dtype='complex')\n\n # Wiener filter without statistical properties of the image.\n if mode == \"gamma\":\n for u in range(m):\n for v in range(n):\n M[u, v] = H_complex_conj[u, v] / np.abs(H[u, v])**2 + gamma\n \n # Wiener filter with statistical properties of the image.\n if mode == \"spectrum\":\n\n # Identify the first zeros of the optical transfer function.\n u0, v0 = self.find_first_zeros(H)\n\n # Fourier spectrum of the degraded image.\n frequencies, 
Sgg = scipy.signal.periodogram(self.image, scaling='density')\n del frequencies\n\n # Identify some frequencies u2 > u0 and v2 > v0, beyond which the spectrum is flat.\n u2, v2 = self.find_values_beyond_flat_power_spectrum(H, u0, v0)\n \n # Fourier spectrum of noise.\n Svv = self.noise_spectrum(Sgg, u2, v2)\n\n # Pseudo-Fourier spectrum of unknown image.\n Sff, alpha, u1, v1 = self.unknown_image_spectrum(H, Sgg, Svv, u0, v0, u2, v2)\n\n # Finally, apply filter.\n for u in range(m):\n for v in range(n):\n if u < u1 and v < v1:\n M[u, v] = 1 / H[u, v]\n else:\n exp_term = np.exp(alpha * (np.sqrt(u**2 + v**2) - np.sqrt(u1**2 + u2**2))) - 1\n second_term = (Svv / Sff[u1, v1]) * exp_term\n M[u, v] = H_complex_conj[u, v] / np.abs(H[u, v])**2 + second_term \n \n return np.fft.ifft2(np.multiply(G, M))", "title": "" }, { "docid": "301241c5079bbfde0ea0b0b3187eb85d", "score": "0.53183186", "text": "def edges(image, filter):\r\n # Should work for non-RGB modes as wel (eg. \"L\")\r\n if image.mode != 'RGB':\r\n image = image.convet('RGB')\r\n\r\n # Dictionary which provides matrixes based on user's choice of filtration\r\n\r\n Prewitt = {\r\n\r\n '---' : [[-1,-1,-1],[0,0,0],[1,1,1]],\r\n '|' : [[-1,0,1],[-1,0,1],[-1,0,1]],\r\n '\\\\' : [[0,1,1],[-1,0,1],[-1,-1,0]],\r\n '/' : [[-1,-1,0],[-1,0,1],[0,1,1]]\r\n\r\n }\r\n\r\n try:\r\n filtr = Prewitt[filter]\r\n except KeyError:\r\n sys.exit('Filter not found')\r\n\r\n new_pic = Image.new('RGB', (image.width , image.height), color=0)\r\n\r\n R_channel = list()\r\n G_channel = list()\r\n B_channel = list()\r\n\r\n for i in range(image.width - 2):\r\n for j in range(image.height -2):\r\n\r\n all_channels_list = list()\r\n for k in range(3):\r\n pixel_value = list()\r\n for z in range(3):\r\n\r\n colour = image.getpixel((i+k, j + z))\r\n pixel_value.append(colour)\r\n all_channels_list.append(pixel_value)\r\n\r\n # No need to write the same logic for each colour channel, so additional function was made\r\n\r\n R_channel.append(colour_channel('R',all_channels_list, filtr))\r\n G_channel.append(colour_channel('G', all_channels_list, filtr))\r\n B_channel.append(colour_channel('B', all_channels_list, filtr))\r\n\r\n # Pixel value cannot exceed 255, so they should be normalized\r\n\r\n R_channel = normalization(R_channel)\r\n G_channel = normalization(G_channel)\r\n B_channel = normalization(B_channel)\r\n\r\n\r\n # new picture is drawn\r\n m = 0\r\n for i in range(image.width - 2):\r\n for j in range(image.height - 2):\r\n coordinate = (i, j)\r\n\r\n new_pic.putpixel(coordinate, (R_channel[m], G_channel[m], B_channel[m]))\r\n m += 1\r\n\r\n\r\n\r\n return new_pic", "title": "" }, { "docid": "278b5269133cd3a38ffa97177d0696eb", "score": "0.53137547", "text": "def init_filter_layer(self):\n\n\n # maybe the two functions do exactly the same...\n\n if self.filter_type in [\"out\",\"fix\"] :\n weight_init = self.get_filter_weights_en_dur()\n elif self.filter_type == \"unfix\":\n weight_init = self.get_filter_weights()\n C_in = 1\n stride = 1\n must_be_5 = 5\n padding = int(0.5 * ((C_in - 1) * stride - C_in + must_be_5)) + 23\n weight_init = weight_init.view((1, 1, -1))\n lowpass = torch.nn.Conv1d(C_in, self.output_dim, self.N, stride=1, padding=padding, bias=False)\n\n if self.filter_type == \"unfix\": # we let the weights move\n lowpass.weight = torch.nn.Parameter(weight_init,requires_grad=True)\n\n else : # \"out\" we don't care the filter won't be applied, or \"fix\" the wieghts are fixed\n lowpass.weight = torch.nn.Parameter(weight_init,requires_grad=False)\n\n 
lowpass = lowpass.double()\n self.lowpass = lowpass", "title": "" }, { "docid": "0a7b5bb36fbddd5a25fe81a9b108f190", "score": "0.5310421", "text": "def lightFiltering(_img, mode=0, method=0, solid_pct=0, grad_low_pct=0,grad_high_pct=100, grad_axis=0, grad_start=0, img_type='uint8'):\n # error handling\n if mode not in [0, 1]:\n if str(mode).upper() not in ['BRIGHTEN', 'DARKEN']:\n raise ValueError('lightFiltertering: mode must be a value of 0/brighten or 1/darken')\n if method not in [0, 1]:\n if str(method).upper() not in ['SOLID', 'GRADIENT']:\n raise ValueError('lightFiltertering: method must be a value of 0/solid or 1/gradient')\n if grad_axis > 1 or grad_axis < 0:\n raise ValueError('lightFiltertering: grad_axis must be a value of 0 (horizontal) or 1 (vertical)')\n if grad_start > 1 or grad_start < 0:\n raise ValueError('lightFiltertering: grad_start must be a value of 0 (top/left) or 1 (bottom/right)')\n\n img = copy.deepcopy(_img)\n \n \"\"\"\n Solid\n \"\"\"\n # if solid, just add/subtract from all channels\n if method == 0:\n\n \n # if percentage is 0, do not apply anything\n if solid_pct == 0:\n solid_pct = 0.0001\n \n # lambda function for updating brightness/darkness\n solid_update_b = lambda x: min(255, x + (x * (solid_pct/100)))\n solid_update_d = lambda x: max(0, x + (x * (solid_pct/100)*-1))\n \n if mode == 0:\n update = np.vectorize(solid_update_b)\n elif mode == 1:\n update = np.vectorize(solid_update_d)\n \n upd_img = update(img)\n\n \n \"\"\"\n Gradient\n \"\"\"\n # if gradient, needs a more complex approach \n if method == 1:\n \n # create a numpy array with same shape, but with percentages from low-high in selected order\n orig_shape = img.shape\n \n # define values for height (h), width (w)\n h = orig_shape[0]\n w = orig_shape[1]\n \n # determine which direction to grade on\n # g is the gradient number - when writing the value incrementation add this here\n # o_g means off gradient, and is the other value\n if grad_axis == 0:\n g = h\n o_g = w\n else:\n g = w\n o_g = h\n \n # define the grid of multiplicable numbers\n # if grad_start is at 0, lower - higher. 
If at 1, higher - lower\n # also create the grid of multiplicable numbers for the gradient step\n if grad_start == 0:\n grad_inc = (grad_high_pct-grad_low_pct)/(o_g)\n grad_grid = np.mgrid[grad_low_pct:grad_high_pct:grad_inc]\n elif grad_start == 1:\n grad_dec = (grad_low_pct-grad_high_pct)/(o_g)\n grad_grid = np.mgrid[grad_high_pct:grad_low_pct:grad_dec]\n \n # the above grid is a shape of (w or h, 1)\n # we must expand to form a shape of (h, w)\n # horizontal\n if grad_axis == 0:\n grad_grid = np.tile(grad_grid,(g,1))\n #vertical\n elif grad_axis == 1:\n _grads = []\n for i in range(g):\n _grads.append(grad_grid)\n _grads = tuple(_grads)\n gr = np.stack(_grads)\n grad_grid = gr.T\n \n #update the shape so it is broadcastable to the lambda\n grad_grid = grad_grid.reshape((h,w,1))\n \n #define lambdas for updating values - x is from the \n grad_update_b = lambda x, y: min(255, x + (x * (y/100)))\n grad_update_d = lambda x, y: max(0, x + (x * (y/100)*-1))\n if mode == 0:\n update = np.vectorize(grad_update_b)\n elif mode == 1:\n update = np.vectorize(grad_update_d)\n # now multiply this grid with the original image\n upd_img = update(img, grad_grid)\n \n \n # final output of the image \n \n\t# only mess with this if you know what you are doing.\n if img_type == 'uint8':\n upd_img = upd_img.astype(np.uint8)\n return upd_img", "title": "" }, { "docid": "750c02b705e60649361823b9d03814c0", "score": "0.5309802", "text": "def transition_layer(X, nb_filters, compression):\n\n init = K.initializers.he_normal(seed=None)\n\n Normal1 = K.layers.BatchNormalization()(X)\n FirstActivation = K.layers.Activation('relu')(Normal1)\n filters = int(nb_filters * compression)\n FirstConvo = K.layers.Conv2D(\n filters=filters,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=init\n )(FirstActivation)\n\n avgpool = K.layers.AveragePooling2D(\n pool_size=(2, 2),\n strides=(2, 2),\n padding='same',\n )(FirstConvo)\n\n return avgpool, filters", "title": "" }, { "docid": "f35b5d78651585b40ee6d0e4ae653082", "score": "0.5303289", "text": "def gray_filter():\n # Initialize dimensions from weigths shape\n weights = np.array([0.21, 0.72, 0.07])\n weights = np.flip(weights) # Convert to BGR format\n\n return weights", "title": "" }, { "docid": "edca9b5e5cee705549decdbed1433c98", "score": "0.5291315", "text": "def adaptive_median_filter(image, w_max, w_0 = 3):\n assert w_max % 2 == 1, \"window size must be odd\"\n\n if len(image.shape)==2:\n return adaptive_median_filter_single_channel(image, w_max, w_0)\n\n elif len(image.shape)==3:\n # We assume the image is opened with opencv so is BGR\n blue_channel = image[:, :, 0]\n green_channel = image[:, :, 1]\n red_channel = image[:, :, 2]\n\n # Apply filtering on each channel\n filtered_blue_channel = adaptive_median_filter_single_channel(blue_channel, w_max, w_0)\n filtered_green_channel = adaptive_median_filter_single_channel(green_channel, w_max, w_0)\n filtered_red_channel = adaptive_median_filter_single_channel(red_channel, w_max, w_0)\n\n return np.dstack((filtered_blue_channel, filtered_green_channel, filtered_red_channel))\n \n else:\n raise ValueError(\"Input dimension {} is not supported (expecting 3D RGB or 2D Gray)\".format(image.shape))", "title": "" } ]
657f9833689eadd32ede8cb67f11aec8
Get the timestamp for user's site enrollment (utc aware)
[ { "docid": "7a30fdc32da68e4891f7ca477d3a5bdf", "score": "0.7706258", "text": "def get_user_site_enrollment_timestamp(user_uuid, site_uuid):\n user_site_metadata_response = baseline_service.get_user_site_metadata_by_user_sites(user_uuid, [site_uuid])\n if user_site_metadata_response['upsideCode'] != USER_SITE_METADATA_FOUND_RESPONSE_CODE:\n return None\n return user_site_metadata_response['payload'][0]['enrollTimestamp']", "title": "" } ]
[ { "docid": "efdc97fdc119fbe9011a06d2c4f6c7bf", "score": "0.66244805", "text": "def get_timestamp():\n from datetime import datetime\n from time import mktime\n\n return str(mktime(datetime.utcnow().timetuple()))[:-2]", "title": "" }, { "docid": "78f0512e3b85471fedeac4dd7a1a02ad", "score": "0.66021425", "text": "def get_timestamp():\n dt = datetime.datetime.utcnow()\n return dt.strftime(\"%Y%m%d-%H%M%S-UTC\")", "title": "" }, { "docid": "3a6155975a110d1c56727bf80f9900e0", "score": "0.6591599", "text": "def get_current_timestamp(self):\n if self.use_utc == True:\n curr_ts = strip_tzinfo(\n round_down_to_nearest_day(self.base.utc_ts)\n )\n elif self.use_utc == False:\n curr_ts = strip_tzinfo(\n round_down_to_nearest_day(self.base.est_ts)\n )\n return curr_ts", "title": "" }, { "docid": "9a827ea0308b6256ce75f6f0222ffa8e", "score": "0.64536035", "text": "def get_timestamp():\n return datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%dT%H:%M:%SZ')", "title": "" }, { "docid": "7e8750065ec6338249c2fffbbfeac499", "score": "0.6453372", "text": "def _get_timestamp(self):\n return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())", "title": "" }, { "docid": "b51dc2741a1d3b33df7df11caf38bd28", "score": "0.64501363", "text": "def getTimeStamp():\n cdtTime = datetime.datetime.utcnow() + datetime.timedelta(hours=-5)\n return str(long(time.mktime(cdtTime.timetuple())))", "title": "" }, { "docid": "0cc401b18af064d52237a4228ba12e35", "score": "0.64217305", "text": "def utcnow_ts():\r\n if utcnow.override_time is None:\r\n # NOTE(kgriffs): This is several times faster\r\n # than going through calendar.timegm(...)\r\n return int(time.time())\r\n\r\n return calendar.timegm(utcnow().timetuple())", "title": "" }, { "docid": "efdf76bf2314aaf0f3dd77304e22c8a7", "score": "0.64002615", "text": "def timestamp_utcnow():\n return int(datetime.datetime.timestamp(datetime.datetime.utcnow()))", "title": "" }, { "docid": "022ecf409a07792e79fae9dfdb8d625c", "score": "0.6384065", "text": "def getUserSignupDate(self):\n userinfo = self.getUserInfo()\n timestamp = int(float(userinfo[\"signupTimeSec\"]))\n return time.strftime(\"%m/%d/%Y %H:%M\", time.gmtime(timestamp))", "title": "" }, { "docid": "3027627695ee868ad82d25ad9f0f17bb", "score": "0.6363829", "text": "def calculate_expiration_timestamp():\n expiration_timestamp = datetime.now(utc) + timedelta(days=7)\n return expiration_timestamp", "title": "" }, { "docid": "76971f7f1b51bfbd19a399a48650c194", "score": "0.635468", "text": "def _current_timestamp(self):\n return (datetime.datetime.now(pytz.utc)-self.UNIX_EPOCH).total_seconds()", "title": "" }, { "docid": "589a7ea1fa3147e862017fe65d25c5b4", "score": "0.63407", "text": "def utc_unix_timestamp():\n return int(time.mktime(datetime.utcnow().timetuple()))", "title": "" }, { "docid": "63161f0cbb94ddfc62974b21ae6ad8c5", "score": "0.62917376", "text": "def get_storage_timestamp(self, userid):", "title": "" }, { "docid": "c6198faca41398021ea04ac1a0074f0a", "score": "0.6285664", "text": "def get_timestamp():\n return datetime.now().strftime('%Y%m%d%H%M%S')", "title": "" }, { "docid": "66111c8829922406b9edf2a2685f62a9", "score": "0.62711424", "text": "def restore_timestamp_in_utc(self) -> str:\n return pulumi.get(self, \"restore_timestamp_in_utc\")", "title": "" }, { "docid": "52b6f76e70f2257985a30e5e9c10c3b7", "score": "0.6267729", "text": "def current_timestamp(config):\n tz = config['global']['newsroom_timezone']\n mytz = pytz.timezone(tz)\n return int(datetime.now(tz=mytz).strftime('%s'))", "title": "" }, { 
"docid": "fca2274ccfb855d3e180f5f5fbc04597", "score": "0.62374604", "text": "def get_time(self):\n\n return int(time.mktime(time.localtime()))", "title": "" }, { "docid": "7f556cbbe3c469bf8aae1e38caa604ad", "score": "0.62328225", "text": "async def get_timestamp(self) -> int:\n\n epoch = datetime.utcfromtimestamp(0)\n the_time = (datetime.utcnow() - epoch).total_seconds()\n return the_time", "title": "" }, { "docid": "a5491571f225d5df26df9734fe9e1adc", "score": "0.62116724", "text": "def get_timestamp(self): # pragma: no cover\n return self.info.timestamp", "title": "" }, { "docid": "de2b09605f7464b91b2c64e648b25867", "score": "0.62044626", "text": "def _get_current_utc_value():\r\n x = datetime.now()\r\n y = datetime.utcnow()\r\n x = x.replace(second=0, microsecond=0)\r\n y = y.replace(second=0, microsecond=0)\r\n return (x - y).total_seconds()/3600", "title": "" }, { "docid": "cdb9cb41b2525079ef5362718fdb4520", "score": "0.6191803", "text": "def get_current_unix_time():\n return int(t.time())", "title": "" }, { "docid": "5a63f412fda4fdbfe3273f5e22116a5a", "score": "0.6157651", "text": "def timestamp(self):\n return self._logged_dict[TIMESTAMP_FIELD]", "title": "" }, { "docid": "f353c33870f1ecb6b2b633521e0c2b93", "score": "0.61437505", "text": "def get_datetime(self):\n return UNIXEPOCH", "title": "" }, { "docid": "ab470025873344b861dbb1e46f96e2c2", "score": "0.6140052", "text": "def get_time_stamp():\n\n return datetime.now()", "title": "" }, { "docid": "86146203bfd43a7c745320030f613b8b", "score": "0.61292094", "text": "def get_server_time(self):\n stamp = self.client.get_server_time()['serverTime']\n return datetime.utcfromtimestamp(stamp/1000).strftime('%Y-%m-%d %H:%M:%S')", "title": "" }, { "docid": "95ed6cbcc0d28e76323932358154dd96", "score": "0.6111804", "text": "def get_login_time_of_user(user_id):\n return (Auth.query.filter_by(user_id=user_id).first()).get_login_time()", "title": "" }, { "docid": "f49c965b67abcf558a1277f8c12c0505", "score": "0.6094638", "text": "def generate_timestamp():\n timestamp = round(datetime.now().replace(tzinfo=timezone.utc).timestamp())\n return timestamp", "title": "" }, { "docid": "3754f5d46e244f350170e15e3efc0d80", "score": "0.6087619", "text": "def _get_timestamp(self):\n if self > MAX:\n return MAX.timestamp\n else:\n return int(time.mktime(self.dt.timetuple()))", "title": "" }, { "docid": "36af97e074b5b31aa9953cf20250cc78", "score": "0.6082523", "text": "def timestamp(self):\n time = datetime.now(utc)\n return time.strftime(self.timeformat)", "title": "" }, { "docid": "83bf861ec5e5a339f127f5a9a02418af", "score": "0.60745186", "text": "def get_timestamp():\n return firestore.firestore.SERVER_TIMESTAMP", "title": "" }, { "docid": "f97174b142bca71bb692ce882beac787", "score": "0.6068989", "text": "def get_timestamp():\n return int(time.time() * 1000)", "title": "" }, { "docid": "fa979cc36008ccf3766371cc24c86b01", "score": "0.60648423", "text": "def get_id_timestamp(uid):\n return ulid.from_str(uid).timestamp().datetime", "title": "" }, { "docid": "d43471cffc4a646c8a2867c16bcbd603", "score": "0.60621524", "text": "def nowTimestamp():\n return activity.Time._now", "title": "" }, { "docid": "f288280baa007dba9220350c908fc4c6", "score": "0.60608405", "text": "def get_time(self):\n return self.unix_time", "title": "" }, { "docid": "4f7ef525fc960b0c5cdd939cb2b7af92", "score": "0.60384095", "text": "def get_datetime_utc():\n return datetime.datetime.utcnow()", "title": "" }, { "docid": "10a2fe7228b94aba1a7266430f4a9dac", "score": "0.6030031", "text": "def 
_loom_timestamp(self):\n return datetime.datetime.utcnow().strftime(\"%Y%m%dT%H%M%S.%fZ\")", "title": "" }, { "docid": "abf6056efc63cab234a730c1e9f3745d", "score": "0.60174125", "text": "def get_suite_timestamp():\n return datetime.datetime.now().strftime (\"%Y-%m-%dT%H:%M:%S\")", "title": "" }, { "docid": "d52ab7f7e85a367241caba22b825892f", "score": "0.6012361", "text": "def get_wm_user_time(window):\n return util.PropertyCookieSingle(util.get_property(window,\n '_NET_WM_USER_TIME'))", "title": "" }, { "docid": "434ea3a7d5e97481b026c0677173780c", "score": "0.5998592", "text": "def _get_time_now(self):\n return timeutils.utcnow()", "title": "" }, { "docid": "0861560a8d066a2c3f365d444650283c", "score": "0.599297", "text": "def now_utc():\n return datetime_utcnow()", "title": "" }, { "docid": "a557950d2cc50b1f8f13aaa948967554", "score": "0.59806144", "text": "def _timestamp(self):\n return self._time()", "title": "" }, { "docid": "48d6505af8ef1e0d148e4c5e60a1b6bd", "score": "0.59756804", "text": "def timestamp(self):\n return self.enforcement.timestamp", "title": "" }, { "docid": "98d3e6c8d59f9c27f599ee398fb88513", "score": "0.5965708", "text": "def timestamp() -> datetime.datetime:\n tz = pytz.timezone('UTC')\n return tz.localize(datetime.datetime.utcnow())", "title": "" }, { "docid": "49630530cf4ad985f3a8750b20858901", "score": "0.59624785", "text": "def utc(self):\n return self._utc", "title": "" }, { "docid": "49630530cf4ad985f3a8750b20858901", "score": "0.59624785", "text": "def utc(self):\n return self._utc", "title": "" }, { "docid": "b32eaf169628944fa8726d54d568ba82", "score": "0.59617144", "text": "def get_timestamp(dt=None):\n if dt is None:\n dt = datetime.utcnow()\n return dt.timestamp()", "title": "" }, { "docid": "aef7bb30bfff46149b928a9a880ce6b4", "score": "0.5946648", "text": "def get_logout_time_of_user(user_id):\n return (Auth.query.filter_by(user_id=user_id).first()).get_logout_time()", "title": "" }, { "docid": "e6eebdd9632299ba0df70d3050541082", "score": "0.5940632", "text": "def _running_timestamp(self):\n return (datetime.datetime.now(pytz.utc)-self.init_datetime).total_seconds()", "title": "" }, { "docid": "464af02e1d469345f994d08272611fff", "score": "0.59395653", "text": "def get_now():\n return utc_now_str()", "title": "" }, { "docid": "66efa5b9a3f1c386a35383fd54f5ca2c", "score": "0.5936904", "text": "def utc_time():\n return str(datetime.utcnow()) + 'Z'", "title": "" }, { "docid": "8eb773bd2d936626cadcc8bfbc5f0f63", "score": "0.59361637", "text": "def get_utcnow():\n return datetime.utcnow()", "title": "" }, { "docid": "07fe3a11e13a67ea0835325d0931fcff", "score": "0.5917889", "text": "def restore_timestamp_in_utc(self) -> Optional[str]:\n return pulumi.get(self, \"restore_timestamp_in_utc\")", "title": "" }, { "docid": "fd6d2f19e4ec61600ac617b9d8576748", "score": "0.59126174", "text": "def get_timestamp(self):\n return self._timestamp", "title": "" }, { "docid": "4b0d6991fb2f5083a1237af48683f924", "score": "0.59065753", "text": "def _get_date(self):\r\n return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')", "title": "" }, { "docid": "9fd67ef7b2f943aafda1bc5bea90e85a", "score": "0.5901927", "text": "def local_time(self):\n return time.mktime(time.gmtime())", "title": "" }, { "docid": "40cc664673c0fa376354271e6c8b4608", "score": "0.5895518", "text": "def smc_time(self):\n return millis_to_utc(int(self.make_request(resource=\"smc_time\").get(\"value\")))", "title": "" }, { "docid": "1e43816cc25189fc43e4bc9b55a258d8", "score": "0.5894749", "text": "def 
now_timestamp():\n return long(time.time()*10000)", "title": "" }, { "docid": "4b0645503792d0315cf6a741e45eddcd", "score": "0.5891652", "text": "def get_time_stamp(self):\n return self.__time_stamp", "title": "" }, { "docid": "9e11239ae30fe9772c4231f45b1180f0", "score": "0.5889814", "text": "def getCurrentTime():\n\n\treturn datetime.utcnow()", "title": "" }, { "docid": "b2d39479a9a8da961d7987a65ec1ed4a", "score": "0.58845615", "text": "def get_timestamp():\n return f'{datetime.utcnow().replace(microsecond=0).isoformat()}Z'", "title": "" }, { "docid": "c96cfba6efbb9d3b0946fbcac761c06d", "score": "0.5881592", "text": "def get_submission_timestamp():\n if BATCHMODE:\n return os.environ[\"CLUSTER_SUBMISSION_TIMESTAMP\"]\n else:\n from datetime import datetime\n\n logger.info(\"Local mode - returning current time\")\n return datetime.now().strftime(TIMESTAMP_FMT)", "title": "" }, { "docid": "b0e8f9c1e868a87a6eda77741b3afcfd", "score": "0.5877407", "text": "def _expiration_datetime(self):\n return datetime.datetime.now() - datetime.timedelta(seconds=nonce.SKEW)", "title": "" }, { "docid": "e400d774c95952e8ea8d1e063b302cac", "score": "0.5873307", "text": "def timestamp(self):\n return self.global_timestamp[self.time_start:self.time_end]", "title": "" }, { "docid": "e72a74e03bc3fbfd33472fcf95886700", "score": "0.5871648", "text": "def get_collection_timestamp(self, userid, collection):", "title": "" }, { "docid": "7e37fd016bcc3f867627b3e80f33d790", "score": "0.58545095", "text": "def getTimeStamp(self) -> int:\n ...", "title": "" }, { "docid": "0d53a87196f55d4ec081e3e05bc69bf1", "score": "0.585047", "text": "def ncsa_utcnow():\n return time.strftime(NCSA_FORMAT, time.gmtime())", "title": "" }, { "docid": "307955a3d2f962db3cb571b099fd5800", "score": "0.58486086", "text": "def log_time():\n return datetime.utcnow().isoformat(' ', 'seconds') + ' UTC'", "title": "" }, { "docid": "33bd926df4ffdf77260b817acb6ff24f", "score": "0.58340997", "text": "def GetTimeStamp():\n\n now = datetime.datetime.now()\n return now.strftime('%Y%m%d%H%M%S')", "title": "" }, { "docid": "d9128d7904c0c58491b879f3defd57ba", "score": "0.5829729", "text": "def getTime(self) -> datetime:\n\n try:\n resp = self.authAPI(\"GET\", \"api/v1/timestamp\")\n epoch = int(resp[\"data\"] / 1000)\n\n return datetime.fromtimestamp(epoch)\n except:\n return None", "title": "" }, { "docid": "255f83f1d497578071a1d6b876737fec", "score": "0.5829027", "text": "def unix_timestamp(self):\n\n return time.mktime(self.timestamp.timetuple())", "title": "" }, { "docid": "c0a9c5ea2b50eb4e38ae2de1f6398071", "score": "0.58260316", "text": "def event_timestamp(self) -> str:\n return pulumi.get(self, \"event_timestamp\")", "title": "" }, { "docid": "41d02d519a2810203154c1a075c16ee5", "score": "0.5817177", "text": "def _get_time(self):\n return datetime.utcnow().strftime(self.DATE_FORMAT)", "title": "" }, { "docid": "e2272b8e83048b5b6a682a7ad43d2adc", "score": "0.5816761", "text": "def timestamp():\n\n\treturn time.strftime(\"%Y%m%d-%H%M%S\", time.localtime())", "title": "" }, { "docid": "0a5995928cc4c345ede22f1d6b5c86ff", "score": "0.5799011", "text": "def time_stamp():\n return time.clock()", "title": "" }, { "docid": "4944d6495169e82c598522f6d2f8b974", "score": "0.5794121", "text": "def getTimeStamp(self):\n timestamp = (float(self._timeStampSec) + float(igtl_frac_to_nanosec(self._timeStampFraction)) / 10 ** 9)\n return timestamp", "title": "" }, { "docid": "0d630c54a06ceb449970d89fe3e2e357", "score": "0.578464", "text": "def utc():\n return 
datetime.datetime.utcnow()", "title": "" }, { "docid": "a9d834bbddb7107c4a854476db3b4eeb", "score": "0.57723033", "text": "def get_unix_from_time(self):\n return self.get_atts()['unix_from_time']", "title": "" }, { "docid": "f1a376780ffe65116d0dc7a93490d464", "score": "0.5761781", "text": "def utc(self):\n return Date(time.gmtime(self.timestamp))", "title": "" }, { "docid": "6d59098e971a88cc08868ffaa392990d", "score": "0.5759946", "text": "def expiration_time_if_not_activated_utc(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"expiration_time_if_not_activated_utc\")", "title": "" }, { "docid": "3c9abfed3115dd2adb99b57045cd7427", "score": "0.5757729", "text": "def utc_now():\n return datetime.utcnow()", "title": "" }, { "docid": "1da904f59e35583d83c9b1705ed48bdf", "score": "0.5755897", "text": "def now_ts():\n return _datetime.now(timezone.utc)", "title": "" }, { "docid": "a47707001d608d82f9050d51acec6980", "score": "0.5745696", "text": "def utcnow():\r\n if utcnow.override_time:\r\n try:\r\n return utcnow.override_time.pop(0)\r\n except AttributeError:\r\n return utcnow.override_time\r\n return datetime.datetime.utcnow()", "title": "" }, { "docid": "1f3dd8260130d9175a602e8365785264", "score": "0.5742916", "text": "def get_collection_timestamps(self, userid):", "title": "" }, { "docid": "5473c898b2e95e5fcf0d8c42ecb941c6", "score": "0.57410234", "text": "def unix_time():\n return int(time())", "title": "" }, { "docid": "77f532e7800e7454edab5399a161b5b3", "score": "0.57401115", "text": "def unixTime():\n\n return time.time()", "title": "" }, { "docid": "07718a7b483670d69493ec54e1f80ae2", "score": "0.5738896", "text": "def getTimestamp(self):\r\n\t\treturn self.context['timestamp']", "title": "" }, { "docid": "97f09d9f473a28fd12d658b3499dd21c", "score": "0.5731104", "text": "def timenow():\n return timestr(datetime.datetime.utcnow())", "title": "" }, { "docid": "d7a90119f4cbb4cf0cf4679121626e2f", "score": "0.57258064", "text": "def utc_now():\n import arrow\n\n return arrow.utcnow().datetime", "title": "" }, { "docid": "bb721d6cb2d102532c256c4cbb2b3a5d", "score": "0.5724105", "text": "def now():\n return dt_util.utcnow()", "title": "" }, { "docid": "bac382189164f0c4c8ac1ff4f56a79b3", "score": "0.5720717", "text": "def get_nightly_run_timestamp():\n portal = api.portal.get()\n last_run = IAnnotations(portal).get('last_nightly_run')\n\n if last_run:\n return last_run", "title": "" }, { "docid": "73c678504e6206cb79d6c7d2cccf9c29", "score": "0.5700419", "text": "def TimeStamp():\r\n pass", "title": "" }, { "docid": "8c0a9be7db9898a53df9f3c5e6df9938", "score": "0.5698749", "text": "def utcnow():\n return Delorean()", "title": "" }, { "docid": "38ccd47e5c3902326f7b798279fe4673", "score": "0.5679612", "text": "def start_hour_utc(self) -> int:\n return pulumi.get(self, \"start_hour_utc\")", "title": "" }, { "docid": "2596d10c6e473e1f1229579b77802be6", "score": "0.5676077", "text": "def generate_timestamp():\r\n return int(time.time())", "title": "" }, { "docid": "2596d10c6e473e1f1229579b77802be6", "score": "0.5676077", "text": "def generate_timestamp():\r\n return int(time.time())", "title": "" }, { "docid": "f0214b213ccc7ba957610ee636069758", "score": "0.56728464", "text": "def get_last_crawled_txn_timestamp(site_crawled_txns):\n return pd.to_datetime(site_crawled_txns['transactionTimestamp']).max()", "title": "" }, { "docid": "3daae15752165b8d15a60b7a45d9dccf", "score": "0.5662196", "text": "def GetTimestamp(self):\n return _btk.btkIMUsExtractor_GetTimestamp(self)", "title": "" }, 
{ "docid": "34af0aa08fe1399bd0a420d461dbe9f3", "score": "0.5659325", "text": "def timestamp():\n return time.time()", "title": "" }, { "docid": "8c00de5b88d31e2bad2b5ff72d69cae9", "score": "0.5654923", "text": "def get_timestamp(dt):\n return (dt - datetime.datetime(1970, 1, 1)).total_seconds()", "title": "" }, { "docid": "49c65205b99f27fd8dc87681fe1ac73a", "score": "0.5654886", "text": "def last_timestamp(self) -> str:\n return pulumi.get(self, \"last_timestamp\")", "title": "" } ]
2668d5281c958faa32046850e7d7c55c
Creates a new association between place and amenity
[ { "docid": "8705a0904d83101d81d23cdca73d3e8e", "score": "0.71571684", "text": "def create_place_amenity(place_id, amenity_id):\n place = storage.get('Place', place_id)\n if place:\n amenity = storage.get('Amenity', amenity_id)\n if amenity:\n if amenity in place.amenities:\n return jsonify(amenity.to_dict())\n else:\n place.amenities.append(amenity)\n storage.save()\n response = jsonify(amenity.to_dict())\n response.status_code = 201\n return response\n else:\n abort(404)\n else:\n abort(404)", "title": "" } ]
[ { "docid": "6c177291a926dea00d249873ca4bcba6", "score": "0.73436147", "text": "def create_link_place_amenity(place_id, amenity_id):\n place = models.storage.get(Place, place_id)\n if not place:\n abort(404)\n amenity = models.storage.get(Amenity, amenity_id)\n if not amenity:\n abort(404)\n if models.storage_t == \"db\":\n if amenity in place.amenities:\n return jsonify(amenity.to_dict()), 200\n place.amenities.append(amenity)\n else:\n if amenity_id in place.amenity_ids:\n return jsonify(amenity.to_dict()), 200\n place.amenity_ids.append(amenity_id)\n models.storage.save()\n return jsonify(amenity.to_dict()), 201", "title": "" }, { "docid": "ebf0f9b9290196f39b63a2bee142f88b", "score": "0.6774756", "text": "def link_amenity_to(place_id, amenity_id):\n place = storage.get(\"Place\", place_id)\n amenity = storage.get(\"Amenity\", amenity_id)\n if place is None or amenity is None:\n abort(404)\n if amenity in place.amenities:\n return jsonify(amenity.to_json()), 200\n try:\n place.amenities.append(amenity)\n place.save()\n return jsonify(amenity.to_json()), 201\n except:\n abort(404)", "title": "" }, { "docid": "9f8ea8ad1df5e7a4b7da4c391f2661e2", "score": "0.67745334", "text": "def post_place_amenity(place_id, amenity_id):\n place = storage.get(\"Place\", place_id)\n if place is None:\n abort(404)\n amenity = storage.get(\"Amenity\", amenity_id)\n if amenity is None:\n abort(404)\n if amenity in place.amenities:\n return amenity.to_dict(), 200\n place.amenities.append(amenity)\n storage.save()\n return amenity.to_dict(), 201", "title": "" }, { "docid": "7afae7b66702963f19ee86d272014c76", "score": "0.6767658", "text": "def api_addAmenityToPlace(place_id, amenity_id):\n place = storage.get('Place', place_id)\n amenity = storage.get('Amenity', amenity_id)\n if amenity is None or place is None:\n flask.abort(404)\n if amenity in place.amenities:\n return flask.jsonify(amenity.to_dict())\n place.amenities.append(amenity)\n storage.save()\n return flask.make_response(flask.jsonify(amenity.to_dict()), 201)", "title": "" }, { "docid": "ea918fdee81370141cafc220b71d9413", "score": "0.67273647", "text": "def new_amenity_for_place(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n\n if amenity in place.amenities:\n return jsonify(amenity.to_dict()), 200\n\n if models.storage_t == 'db':\n place.amenities.append(amenity)\n else:\n place.amenity_ids.append(amenity_id)\n storage.save()\n storage.reload()\n return jsonify(amenity.to_dict()), 201", "title": "" }, { "docid": "c065cf0bf9a5341c0ca5b8ea4f3dbf71", "score": "0.6376634", "text": "def post(self, request, place_id, amenity_id, format=None):\n\n try:\n place = Place.objects.get(id=place_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"Place not found\")\n\n try:\n amenity = Amenity.objects.get(id=amenity_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"Amenity not found\")\n\n if amenity in place.amenities.all():\n return Response(AmenitySerializer(amenity).data, status=status.HTTP_200_OK)\n else:\n place.amenities.add(amenity)\n\n return Response(AmenitySerializer(amenity).data, status=status.HTTP_201_CREATED)", "title": "" }, { "docid": "3c5a59bd2378436829ca0b24d4dd9baa", "score": "0.58877295", "text": "def manage_single_amenity(place_id, amenity_id):\n place = storage.get('Place', place_id)\n amenity = storage.get('Amenity', amenity_id)\n if place is None:\n abort(404)\n if amenity is None:\n 
abort(404)\n if request.method == 'POST':\n if amenity_id in [a.id for a in place.amenities]:\n return jsonify(amenity.to_dict()), 200\n if storage_t == 'db':\n place.amenities.append(amenity)\n else:\n place.amenity_ids.append(amenity_id)\n place.save()\n return jsonify(amenity.to_dict()), 201\n\n if request.method == 'DELETE':\n list_ids = [a.id for a in place.amenities]\n if amenity_id not in list_ids:\n abort(404)\n if storage_t == 'db':\n place.amenities.remove(amenity)\n place.save()\n return jsonify({}), 200\n else:\n place.amenity_ids.remove(amenity_id)\n place.save()\n return jsonify({}), 200", "title": "" }, { "docid": "374d6acca4387576a4fb2cfbcda1bcc3", "score": "0.56397223", "text": "def create_association(Name=None, DocumentVersion=None, InstanceId=None, Parameters=None, Targets=None, ScheduleExpression=None, OutputLocation=None):\n pass", "title": "" }, { "docid": "3c06ea78509f9292f776a2e6aa2979af", "score": "0.56182843", "text": "def add_place(self, place_id, cc, std, adm1, grid):\n alt_adm1, adm2 = self.adjust_admin1(cc, adm1)\n if adm2:\n print(\"CORRECTION:\", cc, alt_adm1, adm2)\n\n obj = self.places[cc].get(place_id, {})\n if not obj:\n self.places[cc][place_id] = obj\n # TODO: detect errors where a place has a standard set already, but the adm1 value conflicts\n obj[std] = adm1\n\n crd = self.coords[cc].get(grid, {})\n if not crd:\n self.coords[cc][grid] = crd\n crd[std] = adm1\n\n if adm2:\n mapping = self.admin2[cc].get(adm2, {})\n mapping[adm2] = adm1", "title": "" }, { "docid": "48411d2993980a9b297eb7f7249d5762", "score": "0.5615134", "text": "def amenities(self, obj):\n if isinstance(obj, Amenity):\n self.amenities.append(obj.id)", "title": "" }, { "docid": "c727d4eb1e544f9f88af5feaefa658a5", "score": "0.5550723", "text": "def ensure_organism(\r\n store: Store,\r\n organism_id: str = \"organism_test\",\r\n name: str = \"organism_name\",\r\n reference_genome: str = \"reference_genome_test\",\r\n ) -> Organism:\r\n organism = StoreHelpers.add_organism(\r\n store, internal_id=organism_id, name=name, reference_genome=reference_genome\r\n )\r\n store.session.add(organism)\r\n store.session.commit()\r\n return organism", "title": "" }, { "docid": "13e50aa04688410df6355ac5d732f2a2", "score": "0.5548587", "text": "def create_amenity(request):\n body_request = request.get_json()\n if (body_request is None):\n abort(400, 'Not a JSON')\n try:\n amenity_name = body_request['name']\n except KeyError:\n abort(400, 'Missing name')\n new_amenity = Amenity(name=amenity_name)\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict())", "title": "" }, { "docid": "4c89fec06a2dca4d8529a83e9384f192", "score": "0.55191284", "text": "def create_amenity():\n amenity_json = request.get_json(silent=True)\n if not amenity_json:\n return jsonify({'error': 'Not a JSON'}), 400\n if 'name' not in amenity_json:\n return jsonify({'error': 'Missing name'}), 400\n amenity = Amenity(**amenity_json)\n amenity.save()\n return jsonify(amenity.to_dict()), 201", "title": "" }, { "docid": "cf4745b599b92fd3f0471d567868ca9d", "score": "0.54876053", "text": "def testCreatePlace(self):\n\t\tlagos = Place.objects.create(\n\t\t\tname = 'Lagos',\n\t\t\tslug = 'lagos',\n\t\t\tdescription='A big city in Nigeria.', \n\t\t\tpoint = Point(3.416061, 6.448706)\n\t\t)\n\t\tlagos.save()", "title": "" }, { "docid": "986069a16759d0f292414e82a5f2d86e", "score": "0.54475546", "text": "def addAssociation(self,assoc):\n \n #INDENT LEVEL\n indent_lvl = 2\n indent_here = self.indent * indent_lvl\n \n \n 
#first we determine which member of the association is this class\n #and which member is the other class\n member = assoc.whichMemberIs(self.cls)\n member_dict = None\n \n other = None\n other_dict = None\n\n \n if member == \"A\":\n member_dict = assoc.A_dict\n other_dict = assoc.B_dict\n other = \"B\"\n \n else:\n member_dict = assoc.B_dict\n other_dict = assoc.A_dict\n other = \"A\"\n \n \n #NEW ATTRIBUTE NAME \n name = None\n \n #using the other table's role if it was named in the association\n if not other_dict[\"role\"] == \"\": #format: \"rolename_othername_association\"\n \n role = other_dict[\"role\"]\n role = role.replace(\" \",\"_\") #precaution in cae white spaces present, replaces with _\n \n name = \"{}_{}_association\".format(role,other_dict[\"class\"].name)\n \n else: #we must manage with format: \"othername_association\"\n name = \"{}_association\".format(other_dict[\"class\"].name)\n \n \n \n #this class is the \"member\" class and it will have attribute referencing the \"other\" class\n #thus the multiplicity of the other class matters\n s = \"\"\n \n if assoc.isSingleMultiplicity(other): #new attribute with no static prefix and no comment\n s = self.attribute_format.format(\"\",other_dict[\"class\"].name,\n name,\"\")\n #s = \"{} {}\".format(other_dict[\"class\"].name, name)\n \n else: #multiple or variable amount of values => vector\n s = self.vector_format.format(other_dict[\"class\"].name, name)\n \n \n \n #adds 2x indent, semicolon and newline\n s = \"{}{}\".format(indent_here,s) \n \n \n #these go under private access modifier byy default\n self.private_attr_string = \"{}{}\".format(self.private_attr_string,s)\n \n \n \n #TODO: potentially add these in a separated string and divide by extra \\n \n # so it looks better in the code", "title": "" }, { "docid": "4b86c23e2e2006f1a6171dbf2668decf", "score": "0.5426637", "text": "def places_id_amenities_id(id, am_id):\n place = storage.get(Place, id)\n if (place):\n if request.method == 'DELETE':\n amenity = storage.get(Amenity, am_id)\n if (amenity):\n if storage_t == 'db':\n if (amenity in place.amenities):\n place.amenities.remove(amenity)\n storage.save()\n return {}, 200\n abort(404)\n elif storage_t == 'fs':\n if (am_id in place.amenity_ids):\n place.amenity_ids.remove(am_id)\n storage.save()\n return {}, 200\n abort(404)\n abort(404)\n elif request.method == 'POST':\n amenity = storage.get(Amenity, am_id)\n place = storage.get(Place, id)\n if (place):\n if (amenity):\n if storage_t == 'db':\n if (amenity not in place.amenities):\n place.amenities.append(amenity)\n storage.save()\n return amenity.to_dict(), 201\n elif storage_t == 'fs':\n if (am_id not in place.amenity_ids):\n place.amenity_ids.append(am_id)\n storage.save()\n return amenity.to_dict(), 200\n abort(404)\n abort(404)\n abort(404)\n abort(404)", "title": "" }, { "docid": "aee3bb05c63cf1b7ef23107e29859641", "score": "0.5401089", "text": "def amenities(self, obj):\n if isinstance(obj, Amenity):\n self.amenity_ids.append(obj.id)", "title": "" }, { "docid": "2703f656be4bc3aa140bdccd33793064", "score": "0.5377812", "text": "def create_amenities():\n new_amenity = request.get_json()\n if not new_amenity:\n abort(400, \"Not a JSON\")\n\n if new_amenity:\n if \"name\" not in new_amenity:\n abort(400, \"Missing name\")\n ameni = Amenity(**new_amenity)\n storage.new(ameni)\n storage.save()\n return make_response(jsonify(ameni.to_dict()), 201)", "title": "" }, { "docid": "51afa1a0386ef44cdcc80676fbc36d38", "score": "0.5373449", "text": "def create_amenity():\n 
new_amenity_dict = request.get_json(silent=True)\n if new_amenity_dict is None:\n return jsonify({\"error\": \"Not a JSON\"}), 400\n if 'name' not in request.json:\n return jsonify({\"error\": \"Missing name\"}), 400\n new_amenity = Amenity(**new_amenity_dict)\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict()), 201", "title": "" }, { "docid": "074fcfc1690e9ec82d2f2ffdad5c15db", "score": "0.53601104", "text": "def amenity_post():\n new_amenity = Amenity()\n\n req = request.get_json()\n if req is None:\n return (\"Not a JSON\", 400)\n if 'name' not in req.keys():\n return (\"Missing name\", 400)\n\n new_amenity.__dict__.update(req)\n new_amenity.save()\n return (jsonify(new_amenity.to_json()), 201)", "title": "" }, { "docid": "95fd9d77b272c6986bc1ba02910edaf7", "score": "0.5359549", "text": "def test_create_place_with_categories(self):\n category1 = sample_category(name='Stadium')\n category2 = sample_category(name='Sport')\n payload = {\n 'name': 'BJK Inonu Stadium',\n 'latitude': 41,\n 'longitude': 28,\n 'notes': 'Football Temple',\n 'external_source': 'http://www.openstreetmap.org/relation/6554433',\n 'categories': [category1.id, category2.id],\n }\n res = self.client.post(PLACES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n place = Place.objects.get(id=res.data['id'])\n cats = place.categories.all()\n self.assertEqual(cats.count(), 2)\n self.assertIn(category1, cats)\n self.assertIn(category2, cats)", "title": "" }, { "docid": "ad7f44b3bd79d1fb45ac6e4e959fd942", "score": "0.5346535", "text": "def amenities(self, value):\n\n if type(value) == Amenity:\n self.amenity_ids.append(value.id)", "title": "" }, { "docid": "baa081e374dec6308665494a2694a222", "score": "0.5314287", "text": "def addAssociation(self,assoc): \n \n #first we determine which member of the association is this class\n #and which member is the other class\n member = assoc.whichMemberIs(self.cls)\n member_dict = None\n \n other = None\n other_dict = None\n\n \n if member == \"A\":\n member_dict = assoc.A_dict\n other_dict = assoc.B_dict\n other = \"B\"\n \n else:\n member_dict = assoc.B_dict\n other_dict = assoc.A_dict\n other = \"A\"\n \n \n #NEW ATTRIBUTE NAME \n name = None\n \n #using the other table's role if it was named in the association\n if not other_dict[\"role\"] == \"\": #format: \"rolename_othername_association\"\n \n role = other_dict[\"role\"]\n role = role.replace(\" \",\"_\") #precaution in cae white spaces present, replaces with _\n \n name = \"{}_{}_association\".format(role,other_dict[\"class\"].name)\n \n else: #we must manage with format: \"othername_association\"\n name = \"{}_association\".format(other_dict[\"class\"].name) \n \n \n #this class is the \"member\" class and it will have attribute referencing the \"other\" class\n #thus the multiplicity of the other class matters\n s = \"\"\n \n if assoc.isSingleMultiplicity(other):\n s = self.attribute_format.format(self.indent * 2,name,\n \"None\",\"\") #value set as None and no comment\n \n else: #multiple or variable amount of values => list\n s = self.list_format.format(self.indent * 2, name)\n \n #adding to the rest\n self.attributes = \"{}{}\".format(self.attributes,s)\n \n return", "title": "" }, { "docid": "683c22b56d2a945a02825a759e81f6c7", "score": "0.5296133", "text": "def test_place(self):\n my_amenity = Amenity()\n my_city = City()\n my_user = User()\n my_place = Place()\n my_place.city_id = my_city.id\n my_place.user_id = my_user.id\n my_place.name = 'Coworking'\n 
my_place.description = 'description'\n my_place.number_rooms = 4\n my_place.number_bathrooms = 2\n my_place.max_guest = 4\n my_place.price_by_night = 200\n my_place.latitude = 25.0342808\n my_place.longitude = -77.3962784\n my_place.amenity_ids = str(my_amenity.id)\n self.assertEqual(my_place.city_id, my_city.id)\n self.assertEqual(my_place.user_id, my_user.id)\n self.assertEqual(my_place.name, 'Coworking')\n self.assertEqual(my_place.description, 'description')\n self.assertEqual(my_place.number_rooms, 4)\n self.assertTrue(type(my_place.number_rooms), int)\n self.assertEqual(my_place.number_bathrooms, 2)\n self.assertTrue(type(my_place.number_bathrooms), int)\n self.assertEqual(my_place.max_guest, 4)\n self.assertTrue(type(my_place.max_guest), int)\n self.assertEqual(my_place.price_by_night, 200)\n self.assertTrue(type(my_place.price_by_night), int)\n self.assertEqual(my_place.latitude, 25.0342808)\n self.assertTrue(type(my_place.latitude), float)\n self.assertEqual(my_place.longitude, -77.3962784)\n self.assertTrue(type(my_place.longitude), float)\n self.assertEqual(my_place.amenity_ids, str(my_amenity.id))\n self.assertTrue(type(my_place.amenity_ids), str)", "title": "" }, { "docid": "93b7afb20ffe4841b77584d4f27fbe7f", "score": "0.529057", "text": "def association (self):", "title": "" }, { "docid": "263bf7ce42bdd3522d1f2ba6470bc6db", "score": "0.5287286", "text": "def add_organism(\r\n store: Store,\r\n internal_id: str = \"organism_test\",\r\n name: str = \"organism_name\",\r\n reference_genome: str = \"reference_genome_test\",\r\n ) -> Organism:\r\n return store.add_organism(\r\n internal_id=internal_id, name=name, reference_genome=reference_genome\r\n )", "title": "" }, { "docid": "572116b5dd3c5600058b60f943a6b813", "score": "0.52847034", "text": "def set_apartment(apt_name, apt_type, apt_zone, apt_price, furnitured='False', electricity='False', _10_month='False'):\n\n global conn, log\n\n cur = conn.cursor()\n try:\n log.info('Apartment: Inserting new apartment: {0}'.format(apt_name))\n sql = \"\"\"INSERT INTO apartment (name, type, zone, price, furnitured, electricity, _10_month) \n VALUES (%s, %s, %s, %s, %s, %s, %s) \n RETURNING nIdapartment\"\"\"\n cur.execute(sql, (apt_name, apt_type, apt_zone, apt_price, furnitured, electricity, _10_month))\n apartment_id = int(cur.fetchone()[0])\n log.info('Apartment: Committing transaction')\n conn.commit()\n cur.close()\n return apartment_id\n except Exception as e:\n conn.rollback()\n log.error('Apartment: Rolling back transaction')\n log.exception(\"Apartment: Couldn't insert successfully\")\n raise DatabaseException(str(e))", "title": "" }, { "docid": "f9ad0a6523a648c40660e0a12d83c547", "score": "0.5257918", "text": "def create_amenity():\n req = request.get_json()\n if req:\n if 'name' in req:\n new_amenity = Amenity(**req)\n storage.new(new_amenity)\n storage.save()\n return jsonify(new_amenity.to_dict()), 201\n else:\n return jsonify(error=\"Missing name\"), 400\n return jsonify(error=\"Not a JSON\"), 400", "title": "" }, { "docid": "f31da7073318e8abf34bd3c2f44a5a27", "score": "0.5242475", "text": "def delete_link_place_amenity(place_id, amenity_id):\n place = models.storage.get(Place, place_id)\n if not place:\n abort(404)\n amenity = models.storage.get(Amenity, amenity_id)\n if not amenity:\n abort(404)\n if models.storage_t == \"db\":\n if amenity not in place.amenities:\n abort(404)\n place.amenities.remove(amenity)\n else:\n if amenity_id not in place.amenity_ids:\n abort(404)\n place.amenity_ids.remove(amenity_id)\n 
models.storage.save()\n return jsonify({}), 200", "title": "" }, { "docid": "46e55c95aaa8af41cf8fd1e5f4d69984", "score": "0.5227496", "text": "def affiliate_org(self, org_id='', affiliate_org_id=''):\n param_objects = self._validate_parameters(org_id=org_id, affiliate_org_id=affiliate_org_id)\n org = param_objects['org']\n affiliate_org = param_objects['affiliate_org']\n\n aid = self.clients.resource_registry.create_association(org, PRED.affiliatedWith, affiliate_org)\n if not aid:\n return False\n\n return True", "title": "" }, { "docid": "739864a1814e2ac6c30b126a31244f28", "score": "0.51761127", "text": "def set_association(\n self,\n object_type: str,\n object_id: str,\n to_object_type: str,\n to_object_id: str,\n association_type: str = None,\n ) -> SimplePublicObjectWithAssociations:\n self._require_authentication()\n if not association_type:\n association_type = f\"{object_type}_to_{to_object_type}\"\n return self.hs.crm.objects.associations_api.create(\n self._validate_object_type(object_type),\n object_id,\n self._validate_object_type(to_object_type),\n to_object_id,\n association_type,\n )", "title": "" }, { "docid": "7502384cd052b0385cab73cc5ef747e1", "score": "0.5173717", "text": "def delete_place_amenity(place_id, amenity_id):\n place = storage.get(\"Place\", place_id)\n if place is None:\n abort(404)\n amenity = storage.get(\"Amenity\", amenity_id)\n if amenity is None:\n abort(404)\n if amenity not in place.amenities:\n abort(404)\n place.amenities.remove(amenity)\n storage.save()\n return {}, 200", "title": "" }, { "docid": "66c6d7f2ef1a9053cd11331bebc857e6", "score": "0.51319116", "text": "def add_place(self, place, person_group: int = 0):\n if place.cell != self.microcell.cell:\n raise AttributeError(\"Place and person are not in the same\\\n cell\")\n self.places.append((place, person_group))\n self.place_types.append(place.place_type)", "title": "" }, { "docid": "6cd80474b97496d440e4f45d27043528", "score": "0.5096659", "text": "def delete_amenity_in_place(place_id, amenity_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n amenity = storage.get(Amenity, amenity_id)\n if amenity is None:\n abort(404)\n\n if amenity not in place.amenities:\n abort(404)\n\n if models.storage_t != 'db':\n place.amenity_ids.remove(amenity)\n else:\n place.amenities.remove(amenity)\n storage.save()\n storage.reload()\n return jsonify({}), 200", "title": "" }, { "docid": "7bc8b876f884dee0b777c561a19a9bfa", "score": "0.5078246", "text": "def create_party(self, name, hqAddress, logoUrl):\n party = {\n \"name\": name,\n \"hqAddress\": hqAddress,\n \"logoUrl\": logoUrl\n }\n self.db.insert('parties', party)\n return party", "title": "" }, { "docid": "5aab95bc67075a5b31bedf4a0cfa0878", "score": "0.50455576", "text": "def populate_db():\n db.session.add(Place(\n name='Descaro del Rey',\n ))\n db.session.add(Place(\n name='Más allá del Zumo',\n ))\n db.session.add(Place(\n name='Veranolandia',\n ))\n db.session.commit()", "title": "" }, { "docid": "0fd0f46c9dc86a60f830fc6697a09f3a", "score": "0.5037135", "text": "def test_amenity_ids(self):\n self.assertTrue(isinstance(Place.amenity_ids, list))\n self.assertTrue(isinstance(place1.amenity_ids, list))", "title": "" }, { "docid": "8e0da15879e3ad8d59d0cacbb8610915", "score": "0.5032317", "text": "def storeAssociation(self, server_url, association):\r\n raise NotImplementedError", "title": "" }, { "docid": "1cbd335df11181ca6a1ec2f637d8e3dc", "score": "0.5014271", "text": "def delete_place_amenity(place_id, amenity_id):\n place = 
storage.get('Place', place_id)\n if place:\n amenity = storage.get('Amenity', amenity_id)\n if amenity:\n if amenity in place.amenities:\n place.amenities.remove(amenity)\n storage.save()\n return jsonify({})\n else:\n abort(404)\n else:\n abort(404)\n else:\n abort(404)", "title": "" }, { "docid": "6b52b80e376019a97bab5660cf63c11b", "score": "0.50140804", "text": "def amenity_post():\n request_data = request.get_json()\n if request_data is None:\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n if \"name\" not in request_data.keys():\n return make_response(jsonify({'error': 'Missing name'}), 400)\n amenity_obj = Amenity(**request_data)\n storage.new(amenity_obj)\n storage.save()\n amenity_dict = amenity_obj.to_dict()\n return jsonify(amenity_dict), 201", "title": "" }, { "docid": "23b7654299fec9c448727e2ebf8524c1", "score": "0.50059265", "text": "def storeAssociation(self, server_url, association):\r\n assoc = Association(url=server_url,\r\n handle=association.handle,\r\n association=association.serialize())\r\n assoc.put()", "title": "" }, { "docid": "22ba3ec21b29934551402f7ca9d245ae", "score": "0.5003838", "text": "def delete_amenity_from(place_id, amenity_id):\n place = storage.get(\"Place\", place_id)\n amenity = storage.get(\"Amenity\", amenity_id)\n if place is None or amenity is None:\n abort(404)\n try:\n place.amenities.remove(amenity)\n place.save()\n return jsonify({}), 200\n except:\n abort(404)", "title": "" }, { "docid": "8d4d20c453abfea62fe0651954280b76", "score": "0.4989336", "text": "def async_create_area(self, data: dict) -> AreaEntry:\n area_id = str(int(time.time()))\n new_area = AreaEntry(**data, area_id=area_id)\n self.areas[area_id] = new_area\n self.async_schedule_save()\n return attr.asdict(new_area)", "title": "" }, { "docid": "79a1db4d8a33b0b247e06eb7adb5bd83", "score": "0.49524412", "text": "def assign_place(cls, line, client, place):\n link = cls.query.filter_by(line_name=line.name, client_username=client.username).first()\n link.place_in_line = place\n db.session.commit()", "title": "" }, { "docid": "4ca22714f757fc1e2a1838b54c19687e", "score": "0.49301368", "text": "def create_facility():\n check(add_document('facilities', facility_schema))", "title": "" }, { "docid": "2e0e32b49f3acb2ad9691fb2c3c5db9f", "score": "0.4925878", "text": "def amenity_put(amenity_id):\n req = request.get_json()\n if req is None:\n return (\"Not a JSON\", 400)\n amenity = storage.get('Amenity', amenity_id)\n if amenity is None:\n abort(404)\n\n skip = ['id', 'created_at', 'updated_at', '_sa_instance_state']\n for i in amenity.__dict__:\n if i not in skip:\n setattr(amenity, i, req[i])\n\n amenity.save()\n return (jsonify(amenity.to_json()))", "title": "" }, { "docid": "19ffe91af22953e5dc7cfcd0fb2be3c7", "score": "0.49251106", "text": "def create_sample_investor_org(org_name='XX Holdings', address='Nigeria'):\n\n return InvestorOrganization.objects.create(\n org_name=org_name,\n address=address\n )", "title": "" }, { "docid": "267031dea673b271aba1e74f60bc4c33", "score": "0.49200922", "text": "def add_animal(self, animal_obj, place):\n try:\n assert 0 <= place <= len(self.ecosystem) - 1\n assert isinstance(self.ecosystem[place], Water)\n except AssertionError:\n print('Ecosystem has fixed size. 
Try another place!')\n return None\n else:\n self.ecosystem[place] = animal_obj", "title": "" }, { "docid": "4945fcc567becc540641a8e95e7f8569", "score": "0.49108347", "text": "def create_placement(self, friendly_name: Optional[str], image_url: Optional[str]) -> TDomainId:\n new_id = uuid.uuid4()\n domain_id = new_id.hex\n\n assert isinstance(domain_id, TDomainId)\n\n new_placement = Placement(domain_id, friendly_name, image_url)\n self._placements.add(new_placement)\n\n self._notify(\n object_id=domain_id,\n event_type=ServiceEventType.added,\n object_dto=build_dto(new_placement)\n )\n\n return domain_id", "title": "" }, { "docid": "58e27cff3139daafac1f13d1a497e205", "score": "0.490369", "text": "def add_place(lat, lng):\n places = Places(lat=lat, lng=lng)\n places.save()\n p_id = Places.objects.get(lat=lat, lng=lng).id\n return p_id", "title": "" }, { "docid": "07d3ce43c4eb7771402dd397a81026e5", "score": "0.48983806", "text": "def add_association():\n\n # On receiving a `GET` request, we request stakeholders\n # and deliverables from the API.\n #\n # We then return `add-association.html`.\n\n if flask.request.method == \"GET\":\n stakeholders = requester.get(SHOW_STAKEHOLDERS)\n deliverables = requester.get(SHOW_DELIVERABLES)\n\n responses = {\n \"stakeholders\": stakeholders.json(),\n \"deliverables\": deliverables.json(),\n }\n\n return flask.render_template(\n \"add-association.html\",\n data=responses\n )\n\n # On receiving a `POST` request, we forward the\n # form data in the body of the request to the API.\n # This contains the end-user's request to add an association.\n #\n # We then redirect the end-user to `/associations`.\n\n else:\n requester.post(\n ADD_ASSOCIATION, \n data=flask.request.form\n )\n\n return flask.redirect(\n flask.url_for(\"show_associations\")\n )", "title": "" }, { "docid": "cccc47b822227f2ca3e1e70e92ed5541", "score": "0.48954198", "text": "def test_create_room_add_room_successfully(self):\n self.amity.create_room([\"Hogwarts\"], \"office\")\n self.assertEqual(\"Hogwarts\", self.amity.rooms[0].room_name)", "title": "" }, { "docid": "72a05f523dd3a932a86a202cd280b841", "score": "0.48927975", "text": "def test_create_access_policy_category(self):\n pass", "title": "" }, { "docid": "3ec3d53069cd05e81afa6b3d3487e4f7", "score": "0.48925155", "text": "def NewAimsFeature( self, coords ): \r\n # init new address object and open form\r\n UiUtility.clearForm(self._controller._queues.tabWidget)\r\n coords = UiUtility.transform(self._iface, coords)\r\n self.setMarker(coords) \r\n addInstance = self.af.get()\r\n self._controller._queues.uEditFeatureTab.setFeature('add', addInstance, coords)\r\n self._controller._queues.tabWidget.setCurrentIndex(0)\r\n UiUtility.setEditability(self._controller._queues.uEditFeatureTab)", "title": "" }, { "docid": "09f6bd8e396bcb64c4d2d35b9934658f", "score": "0.48861784", "text": "def add_person(self, person_name, person_type, wants_accommodation=False):\n\n person_name = person_name.capitalize()\n\n # Check for duplicates\n if person_name in self.people:\n print(\"{} already exists in amity and cannot be re-added\".\n format(person_name))\n return\n\n # Staff should not have accommodation\n if person_type is \"staff\" and wants_accommodation:\n print(\"{} will be added but cannot be accommodated as\"\n \" s/he is a staff member\".format(person_name))\n wants_accommodation = False\n\n # Add the person\n if person_type is \"fellows\":\n self.people[person_name] = Fellow(person_name, wants_accommodation)\n else:\n self.people[person_name] = 
Staff(person_name)\n self.total_no_of_people += 1\n\n # Assign room\n allocation = self.assign_random_room(person_name, wants_accommodation)\n\n if allocation[\"office\"][\"assigned\"]:\n print(\"{} has been added and assigned the office {}\".\n format(person_name, self.people[person_name].office))\n elif not allocation[\"office\"][\"assigned\"]:\n print(\"{} has been added but has not been assigned an office\".\n format(person_name))\n\n if not wants_accommodation:\n self.show_state()\n return\n\n if allocation[\"livingspace\"][\"assigned\"]:\n print(\"{} has been assigned the livingspace {}\".\n format(person_name, self.people[person_name].livingspace))\n elif not allocation[\"livingspace\"][\"assigned\"]:\n print(\"{} has not been assigned a livingspace\".format(person_name))\n\n self.show_state()", "title": "" }, { "docid": "d581933f04ae484a8f951bb78866ef44", "score": "0.48852453", "text": "def amenities(self):\n amenity_instances = []\n objects = storage.all()\n for k, v in objects.items():\n class_name, instance_id = k.split(\".\")\n if class_name == \"Amenity\":\n if (v[\"place_id\"] == self.id and\n instance_id in self.amenity_ids):\n amenity_instances.append(v)\n return amenity_instances", "title": "" }, { "docid": "6fe59013ae2b219bb340f94bc7ed0425", "score": "0.48831922", "text": "def createArea(self):\n self._area_id_max += 1\n area_id = self._area_id_max\n area = AreaModel(self._event_manager, area_id)\n self._areas[area_id] = area\n LOGGER.info(\"Area %i created.\", area_id)\n return area", "title": "" }, { "docid": "30ad4d6c03a4c19ca0607102e05e84ed", "score": "0.48780426", "text": "def test_create_organization(self):\n organization, membership = org_utils.create_organization(name='Test-Org', owner_id=self.owner.id)\n assert organization is not None\n assert membership is not None\n assert organization.name == 'Test-Org'\n assert organization.owner_id == self.owner.id\n assert membership.member_id == self.owner.id\n assert membership.organization_id == organization.id\n assert membership.joined == True\n assert membership.is_owner == True", "title": "" }, { "docid": "b470d8e98c19cab6506cde46bcbb9774", "score": "0.48706022", "text": "def create_location(self, location):", "title": "" }, { "docid": "92018883e196eefd71497bc745c2eb4d", "score": "0.48661837", "text": "def post_amen():\n payload = request.get_json(silent=True)\n\n if payload is None:\n abort(400, 'Not a JSON')\n elif 'name' not in payload:\n abort(400, 'Missing name')\n\n new_amenity = Amenity(**payload)\n new_amenity.save()\n\n return(jsonify(new_amenity.to_dict()), 201)", "title": "" }, { "docid": "3074869bcd0b988e1a44a52c1fabbcbc", "score": "0.4862227", "text": "def add_place(self, place_raw):\n place = str(place_raw).split(',')\n name = place[0]\n country = place[1]\n priority = place[2]\n is_visited = self.to_boolean(place[3])\n self.places.append(Place(name, country, priority, is_visited)) # Create Place object and add to places list", "title": "" }, { "docid": "ecbed49e24487b08dded05f96664a881", "score": "0.48619813", "text": "def test_amenity_ids(self):\n place = Place()\n self.assertTrue(hasattr(place, \"amenity_ids\"))\n self.assertIsInstance(place.amenity_ids, list)", "title": "" }, { "docid": "870a1d640a33ed222a2968e2c4fd9069", "score": "0.48599437", "text": "def sample_place(user, **params):\n defaults = {\n 'name': 'Anywhere buildings',\n 'latitude': 30,\n 'longitude': 60.5,\n 'notes': 'I like here. 
Should be visited again!!',\n 'external_source': 'www.anysourceblablablabla.com',\n }\n defaults.update(params)\n\n return Place.objects.create(user=user, **defaults)", "title": "" }, { "docid": "2e14c1eddb524941703346ab47264a97", "score": "0.48575923", "text": "def amenities_of_place(place_id):\n try:\n place = storage.get('Place', place_id)\n return jsonify([amenity.to_json() for amenity in place.amenities])\n except:\n abort(404)", "title": "" }, { "docid": "89cc3cb6935fff3122fe7b3679b13537", "score": "0.48498052", "text": "def attach_area(self, name):\n area = { 'target': name, 'portals': [ ] }\n if not name in self.asset['nodes']:\n LOG.warning(\"portal:%s:referencing missing node:%s\", name, name)\n self.asset['areas'].append(area)", "title": "" }, { "docid": "6fd62f72db1ffeaeddaf155dd85aedbc", "score": "0.4841303", "text": "def create_address(self, Address: Dict) -> Dict:\n pass", "title": "" }, { "docid": "5129033c6304d54d8d3a81d997d0424d", "score": "0.4839085", "text": "def test_create_an_address_pixel_association(self, assoc):\n assert assoc is not None", "title": "" }, { "docid": "1bd4294b65779bcd33d69ed7b62f57e6", "score": "0.4829423", "text": "def test_create_belongs_to(self):\n\n\t\t# Creates a reader and an access group (required to create an instance of BelongsTo).\n\t\treader = Reader(email=\"peter@example.org\", password=\"abc123\", name=\"Peter\", surname=\"Parker\")\n\t\tdb.session.add(reader)\n\t\tag = AccessGroup(name=\"Basic\")\n\t\tdb.session.add(ag)\n\n\t\t# Create an instance of BelongsTo.\n\t\tbt = BelongsTo(reader=reader, ag=ag, expiration_datetime=get_datetime(years_in_future=1))\n\t\tdb.session.add(bt)\n\t\tdb.session.commit()", "title": "" }, { "docid": "efb0b8d4ca0f41d87bf88e1d9258d455", "score": "0.48090294", "text": "def build_city (self, name, allegiance, x, y):\n #because building cities from command line is easy.\n self.cities.append (City (name, allegiance, x, y))\n self.map.transform_tile (x, y, self.cities[-1])", "title": "" }, { "docid": "5a723551b79c0e9ef1545a876c72cb27", "score": "0.48089257", "text": "def address_add(self, contact, address, **kwargs):\n params = create_fields(contact, **kwargs)\n return create_association(\n ContactAddress, contact=contact,\n address=address, **params)", "title": "" }, { "docid": "9675eaf416dff0e8929f97170ee566f2", "score": "0.48027492", "text": "def postamenity():\n jsoned = request.get_json()\n if jsoned is None:\n abort(400, 'Not a JSON')\n name = jsoned.get('name')\n if name is None:\n abort(400, 'Missing name')\n new_a = Amenity(**jsoned)\n new_a.save()\n return jsonify(new_a.to_dict()), 201", "title": "" }, { "docid": "d1d8cc173851c68f316dc7fed676243e", "score": "0.4793212", "text": "def db_set_assoc(self, server_url, handle, secret, issued, lifetime, assoc_type):\r\n result = self.db_get_assoc(server_url, handle)\r\n rows = self.cur.fetchall()\r\n if len(rows):\r\n # Update the table since this associations already exists.\r\n return self.db_update_assoc(secret, issued, lifetime, assoc_type,\r\n server_url, handle)\r\n else:\r\n # Insert a new record because this association wasn't\r\n # found.\r\n return self.db_new_assoc(server_url, handle, secret, issued,\r\n lifetime, assoc_type)", "title": "" }, { "docid": "e42b0e5990190e7c5f90017c9521fc1e", "score": "0.47849712", "text": "def _make_action(self, a):\n cfg = self._config()\n newa = a + cfg\n self.set_joints(newa)", "title": "" }, { "docid": "572e10a36dc1d0373fc782c482cf6de0", "score": "0.47773814", "text": "def create_food():", "title": "" }, { 
"docid": "a37579092b4377feb3d1cbc653284de0", "score": "0.47729668", "text": "def api_removeAmenityFromPlace(place_id, amenity_id):\n place = storage.get('Place', place_id)\n amenity = storage.get('Amenity', amenity_id)\n if amenity is None or place is None or amenity not in place.amenities:\n flask.abort(404)\n if os.getenv('HBNB_TYPE_STORAGE') == 'db':\n place.amenities.remove(amenity)\n else:\n place.amenity_ids.remove(amenity.id)\n storage.save()\n return flask.jsonify({})", "title": "" }, { "docid": "c3ce25962d602ca9cacdc26a435e65f0", "score": "0.47716823", "text": "def __init__(self, region, namespace, attribute):\n self.region = region\n self.namespace = namespace\n self._relationship_options = {\n ( attribute.property.parent.class_, attribute.property.key ) : self\n }", "title": "" }, { "docid": "c7d815926d36c32c3e337f519df45849", "score": "0.47587743", "text": "def amenities_by_place(place_id):\n list_res = []\n\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n\n amenities = place.amenities\n for amenity in amenities:\n list_res.append(amenity.to_dict())\n return jsonify(list_res)", "title": "" }, { "docid": "55ea08e387a8451dc1cf2bce635668ee", "score": "0.4748742", "text": "def add_person(self, person_type, first_name, surname, wants_accommodation=\"n\"):\n person_type = person_type.lower()\n wants_accommodation = wants_accommodation.lower()\n if person_type == \"fellow\" or person_type == \"staff\":\n if person_type == \"fellow\":\n new_person = Fellow(person_type, first_name,\n surname, wants_accommodation)\n self.all_persons.append(new_person)\n self.fellows.append(new_person)\n print(\"Fellow {0:s} {1:s} has been successfully added\".format(\n first_name, surname))\n\n \"\"\"The following section assigns a random office and an optional random living\n space to the new fellow added above\"\"\"\n if len(self.all_offices) == 0:\n print(\n \"Office not assigned, because no offices are available at this time.\")\n else:\n random_office = random.choice(self.all_offices)\n if len(random_office.occupants) == 6:\n random_office = random.choice(self.all_offices)\n else:\n random_office.occupants.append(new_person)\n self.allocated_an_office.append(new_person)\n print(\"{0:s} {1:s} has been allocated the office {2:s}\".format(\n new_person.first_name, new_person.surname, random_office.room_name))\n\n if new_person.wants_accommodation == \"yes\" or new_person.wants_accommodation == \"y\":\n if len(self.all_living_spaces) == 0:\n print(\n \"Living space not assigned, because no living spaces are available at this time.\")\n else:\n random_living_space = random.choice(\n self.all_living_spaces)\n if len(random_living_space.occupants) == 4:\n random_living_space = random.choice(self.all_living_spaces)\n else:\n random_living_space.occupants.append(new_person)\n self.allocated_a_living_space.append(new_person)\n print(\"{0:s} {1:s} has been allocated the living space {2:s}\".format(\n new_person.first_name, new_person.surname, random_living_space.room_name))\n\n elif person_type == \"staff\":\n new_person = Staff(person_type, first_name,\n surname, wants_accommodation)\n self.all_persons.append(new_person)\n self.staff.append(new_person)\n print(\"Staff {0:s} {1:s} has been successfully added\".format(\n first_name, surname))\n\n if len(self.all_offices) == 0:\n print(\n \"Office not assigned, because no offices are available at this time.\")\n if new_person.wants_accommodation == \"yes\" or new_person.wants_accommodation == \"y\":\n print(\"Staff cannot be assigned a 
living space.\")\n else:\n random_office = random.choice(self.all_offices)\n if len(random_office.occupants) >= 6:\n random_office = random.choice(self.all_offices)\n if new_person.wants_accommodation == \"yes\" or new_person.wants_accommodation == \"y\":\n print(\"Staff cannot be assigned a living space.\")\n else:\n random_office.occupants.append(new_person)\n self.allocated_an_office.append(new_person)\n print(\"{0:s} {1:s} has been allocated the office {2:s}\".format(new_person.first_name,\n new_person.surname, random_office.room_name))\n if new_person.wants_accommodation == \"yes\" or new_person.wants_accommodation == \"y\":\n print(\"Staff cannot be assigned a living space.\")\n\n else:\n print(\"Person can only either be Fellow or Staff.\")", "title": "" }, { "docid": "a41e4c66b31e466949a057fe6139fba1", "score": "0.4736288", "text": "def create_place(city_id):\n if storage.get(City, city_id) is None:\n abort(404)\n my_dict = request.get_json()\n if my_dict is None:\n abort(400, \"Not a JSON\")\n if \"user_id\" not in my_dict:\n abort(400, \"Missing user_id\")\n user = storage.get('User', my_dict[\"user_id\"])\n if user is None:\n abort(404)\n if \"name\" not in my_dict:\n abort(400, \"Missing name\")\n my_dict[\"city_id\"] = city_id\n new_place = Place(**my_dict)\n new_place.save()\n return jsonify(new_place.to_dict()), 201", "title": "" }, { "docid": "2bf107efb68905deae4623487dd289b1", "score": "0.4734877", "text": "def test_insert_into_database(self, db_session, assoc):\n assert db_session.query(db.AddressPixelAssociation).count() == 0\n\n db_session.add(assoc)\n db_session.commit()\n\n assert db_session.query(db.AddressPixelAssociation).count() == 1", "title": "" }, { "docid": "982851650b1c56c285c3a18c3a2c3a99", "score": "0.47282332", "text": "def insert_in_db(\n self, \n nation_name:str = None, \n city_name:str = None, capitol:bool=False, population:int=0,\n EU:bool=False, NATO:bool=False, commonwealth:bool=False):\n # ------------\n # Nation Table\n # ------------\n cur_n = self.db.cursor()\n cur_n.execute(\"\"\"SELECT COUNT(id) FROM nations;\"\"\")\n size_of_n = str(cur_n.fetchall()[0][0] + 1)\n\n cur_n.execute(\n \"\"\"\n INSERT INTO nations(id, nation)\n VALUES(?,?)\"\"\",\n (str(size_of_n), nation_name),\n )\n\n # ------------\n # Cities Table\n # ------------\n cur_c = self.db.cursor()\n cur_c.execute(\"\"\"SELECT COUNT(city) FROM cities;\"\"\")\n size_of_c = str(cur_c.fetchall()[0][0] + 1)\n\n cur_c.execute(\n \"\"\"\n INSERT INTO cities(id, nation_id, city, capitol, population)\n VALUES(?,?,?,?,?)\"\"\",\n (str(size_of_c), size_of_n, city_name, capitol, population), #str(rigth_wrong)\n )\n\n # ------------\n # Nation Table\n # ------------\n cur_o = self.db.cursor()\n cur_o.execute(\"\"\"SELECT COUNT(id) FROM organizations;\"\"\")\n size_of_o = str(cur_o.fetchall()[0][0] + 1)\n\n cur_o.execute(\n \"\"\"\n INSERT INTO organizations(id, nation_id, EU, NATO, commonwealth)\n VALUES(?,?,?,?,?)\"\"\",\n (str(size_of_o), size_of_n, EU, NATO, commonwealth),\n )\n\n self.db.commit()", "title": "" }, { "docid": "515e7f0c3bc82b7175bc7a4789f6778d", "score": "0.47195685", "text": "def create_route(self, route):\n self.verify_route_ok(route)\n if self.get_subtopic('kn_routes').post(route):\n route.populate(self)", "title": "" }, { "docid": "491fd65f10d5c42a97d1dcee6c21ff62", "score": "0.4715727", "text": "def add_place(self, name, capacity=1):\r\n if isinstance(name, int) and name > 0:\r\n if not self.place_exists(name):\r\n idx = len(self.places)\r\n self.places[idx] = name\r\n 
self.marking.append(0)\r\n self.capacity.append(capacity)\r\n else:\r\n raise ValueError('place identifier has to be unique')\r\n else:\r\n raise TypeError('place identifier has to be numeric and > 0')\r\n\r\n return self", "title": "" }, { "docid": "d9a456e859ff5270e37947553ab7527f", "score": "0.4697591", "text": "def test_place(self):\n self.assertEqual(type(self.new_place.name), str)\n self.assertEqual(type(self.new_place.number_bathrooms), int)\n self.assertEqual(type(self.new_place.price_by_night), int)\n self.assertEqual(type(self.new_place.max_guest), int)\n self.assertEqual(type(self.new_place.number_rooms), int)\n self.assertEqual(type(self.new_place.latitude), float)\n self.assertEqual(type(self.new_place.longitude), float)\n self.assertEqual(type(self.new_place.description), str)\n self.assertEqual(type(self.new_place.amenity_ids), list)\n self.assertEqual(type(self.new_place.city_id), str)\n self.assertEqual(type(self.new_place.user_id), str)", "title": "" }, { "docid": "e1c4ae8df5c508f2c800ec1c0e73906f", "score": "0.4691345", "text": "def add_room(self, room_names, room_type):\n\n # Remove duplicates\n room_names = set(room_names)\n\n # go through rooms adding each\n for room in room_names:\n room = room.capitalize()\n if room not in self.rooms.keys():\n if room_type is \"offices\":\n self.rooms[room] = Office(room)\n print(\"{} successfully added to Amity\".format(room))\n self.total_no_of_offices += 1\n else:\n self.rooms[room] = LivingSpace(room)\n print(\"{} successfully added to Amity\".format(room))\n self.total_no_of_livingspaces += 1\n else:\n print(\"{} not added to amity as it already exists in Amity\".\n format(room))\n\n self.total_no_of_rooms = len(self.rooms)\n self.show_state()", "title": "" }, { "docid": "e8850c793f0e6a561105c2540df8a3d7", "score": "0.46803042", "text": "def create_additive_rule(creator_id, creator_display_name, title, description, long_description, level_names,\n shareable, source, link):\n split_terms = [term.strip() for term in level_names.split(',')]\n rule = AdditiveRule(creator_id, creator_display_name, title, description, long_description, shareable, source, link,\n split_terms)\n db.session.add(rule)\n db.session.commit()\n print(\"Successfully added rule ID: {}\".format(rule.id))\n db.session.close()", "title": "" }, { "docid": "ee93c509984c998c478c4d4d86823230", "score": "0.4677019", "text": "def createRelationship(relationshipType, childOid, parentOid, sequence = None):\n # Create the record and generate the insert\n rel = AnaRelationshipDb.AnaRelationshipDbRecord()\n oid = Oids.createNextOid()\n\n rel.setOid(oid)\n rel.setRelationshipType(relationshipType)\n rel.setChildOid(childOid)\n rel.setParentOid(parentOid)\n\n rel.insert()\n\n # add relationship to this module's knowledge.\n _addRelToKnowledge(rel)\n\n # Add relationship to other module's knowledge\n Nodes.connectTheDotsForRelationship(rel)\n\n return rel", "title": "" }, { "docid": "e5585bab9b20c9bb4954849806a47414", "score": "0.46754432", "text": "def save(self, *args, **kwargs):\n self.full_clean()\n super().save(*args, **kwargs)\n\n if self.pk:\n # pylint: disable=no-member\n self.organizations.add(self.organization_main)", "title": "" }, { "docid": "368146693ba3c42a87fd85a9695a45aa", "score": "0.46744952", "text": "def test_place_class_membership_and_attributes(self):\n place = Place()\n self.assertIsNotNone(place.id)\n self.assertIsNotNone(place.created_at)\n self.assertIsNotNone(place.updated_at)\n self.assertIsInstance(place, Place)\n 
self.assertIsNotNone(place.city_id)\n self.assertIsNotNone(place.user_id)\n self.assertIsNotNone(place.name)\n self.assertIsNotNone(place.description)\n self.assertIsNotNone(place.number_rooms)\n self.assertIsNotNone(place.number_bathrooms)\n self.assertIsNotNone(place.max_guest)\n self.assertIsNotNone(place.price_by_night)\n self.assertIsNotNone(place.latitude)\n self.assertIsNotNone(place.longitude)\n self.assertIsNotNone(place.amenity_ids)", "title": "" }, { "docid": "c03cc49fb28eda5d315908686933a54a", "score": "0.46644396", "text": "def set_is_offered(apt_address, time_stamp):\n\n global conn, log\n\n cur = conn.cursor()\n try:\n log.info('Apartment-Offer: Inserting new relationship: {0}-{1}'.format(apt_address, time_stamp))\n sql = \"\"\"INSERT INTO isOffered (nIdApartment, nIdOffer) \n SELECT %s, %s\n WHERE NOT EXISTS (\n SELECT 1 FROM isOffered WHERE nidApartment=%s and nIdOffer=%s\n )\"\"\"\n apt_id = get_apartment_id(apt_address)\n offer_id = get_offer_id(time_stamp)\n if offer_id is None:\n offer_id = set_offer(time_stamp)\n\n cur.execute(sql, (apt_id, offer_id, apt_id, offer_id))\n log.info('Apartment-Offer: Committing transaction')\n conn.commit()\n cur.close()\n except Exception as e:\n conn.rollback()\n log.error('Apartment-Offer: Rolling back transaction')\n log.exception(\"Apartment-Offer: Couldn't insert successfully\")\n print(\"Couldn't insert apartment-offer relation: \" + str(e))\n raise DatabaseException(str(e))", "title": "" }, { "docid": "c4a91a97d7c1767cb1339b41d9fe6a8a", "score": "0.46530142", "text": "def __init__(self,cityName,stateName,population=0,capital=False,location=None):\n self.name = cityName\n self.capital = capital\n self.state = None\n self.setState(stateName)\n self.population = population\n self.north = location[0]\n self.west = location[1]\n self.location = (self.north,self.west)\n City.citySet.add(self)", "title": "" }, { "docid": "47d9938f5731effdbc9279d699fd518c", "score": "0.46487173", "text": "def test_create_new_org(self):\r\n\r\n result = self.client.post('/createorg',\r\n data={'org_name':'DIVAS', \r\n 'cause_id': 1,\r\n 'mission':'To educate, encourage, and empower women to be their best'},\r\n follow_redirects=True)\r\n self.assertIn(b\"Womens Services\", result.data)", "title": "" }, { "docid": "9a3ef36bb55ce6e98ad12cb17451aa94", "score": "0.4647332", "text": "def insert(self, connection):\n\t\t\n\t\ttry:\n\t\t\tcur = connection.cursor()\n\t\t\t\n\t\t\tcur.execute('INSERT INTO organizations VALUES (?,?)', (self.name, self.calendar['id'],))\n\t\t\t\t\n\t\tfinally:\n\t\t\tcur.close()", "title": "" }, { "docid": "847eae2dce374764fece32bf56f5b5be", "score": "0.46466944", "text": "def make_place(user, placedata):\n logging.debug('Making place archives for %s from user %s' %\n (placedata['id'], user.id))\n key = db.Key.from_path('User', str(user.id),\n 'Place', str(placedata['id']))\n place = Place.get(key)\n if not place:\n # Average the bounding box to get a single coordinate point for\n # the place\n bbox = placedata.pop('bounding_box')\n coords = bbox['coordinates'][0]\n num = float(len(coords))\n lat = sum(lat for lon, lat in coords) / num\n lon = sum(lon for lon, lat in coords) / num\n\n # Make sure placedata keys are not unicode\n placedata = dict((str(k), v) for k,v in placedata.iteritems())\n\n # Create the place with the calculated coordinates\n placedata['coordinates'] = db.GeoPt(lat, lon)\n place = Place(key=key, **placedata)\n\n place.tweet_count += 1\n return place", "title": "" }, { "docid": "28bb27ae9f8b37fc29db91cbe0f6a691", 
"score": "0.46444064", "text": "def create_relationship(self, upstream, downstream, relationship_type=None):\r\n json = {\"fromItem\": upstream, \"toItem\": downstream}\r\n if relationship_type:\r\n json[\"relationshipType\"] = relationship_type\r\n resp = self.post(\"/relationships\", json).json()\r\n if resp[\"meta\"][\"status\"] == \"Bad Request\":\r\n raise Exception(f\"create_relationship ERROR: {resp['meta']['message']}\")\r\n return resp", "title": "" }, { "docid": "ce4f3a7ed9dc9856ba9fae1c07c96d81", "score": "0.4640207", "text": "def put_amen(amenity_id):\n payload_amen = request.get_json(silent=True)\n amenities = storage.all(Amenity)\n\n if payload_amen is None:\n abort(400, 'Not a JSON')\n\n for amenity in amenities.values():\n if amenity.id == amenity_id:\n for k, v in payload_amen.items():\n if k != 'created_at' and k != 'updated_at' and k != 'id':\n setattr(amenity, k, v)\n amenity.save()\n return(jsonify(amenity.to_dict()), 200)\n abort(404)", "title": "" }, { "docid": "170f18cdf1ca0542e37093ead44af429", "score": "0.46393117", "text": "def test_add_to_plan(self):\n district = self.district1\n districtid = district.district_id\n\n geounitids = [str(self.geounits[self.geolevel.id][0].id)]\n\n self.plan.add_geounits(districtid, geounitids, self.geolevel.id, self.plan.version)\n district = District.objects.get(plan=self.plan, district_id=districtid, version=1)\n\n self.assertEqual(district.geom.area, self.geounits[self.geolevel.id][0].geom.area, \"Geometry area for added district doesn't match\")\n self.assertEqual(district.geom.extent, self.geounits[self.geolevel.id][0].geom.extent, \"Geometry area for added district doesn't match\")\n self.assertEqual(district.geom.length, self.geounits[self.geolevel.id][0].geom.length, \"Geometry area for added district doesn't match\")", "title": "" }, { "docid": "3c0b5ee489abc366b999c3ca283ef178", "score": "0.4623929", "text": "def _createRelationship(self, dictRel, key, value):\n if key not in dictRel:\n dictRel[key] = list()\n dictRel[key].append(value)", "title": "" }, { "docid": "10f63bc797fa85573deaef5fb58cc3cd", "score": "0.46185815", "text": "def test_allocation_to_rooms(self):\n self.fellow = Person.create(\n 'Jee Gikera', 'fellow', wants_accomodation='Y')\n self.staff = Person.create('Chidi Nnadi', 'staff')\n office_room = Office('valhalla')\n living_room = LivingSpace('blue')\n # store person instances for testing\n fellow_only.append(self.fellow)\n persons.append(self.staff)\n persons.append(self.fellow)\n\n office_results = self.staff.assign_office_space(office_room)\n living_results = self.fellow.assign_living_space(living_room)\n office_room.assign_person(self.staff)\n living_room.assign_person(self.fellow)\n self.assertTrue(self.staff.has_office())\n self.assertTrue(self.fellow.has_living_space())\n self.assertIsInstance(office_results, Office)\n self.assertIsInstance(living_results, LivingSpace)\n self.assertIsNotNone(living_room)\n self.assertIsNotNone(office_room)\n self.assertFalse(living_room.is_occupied())\n self.assertFalse(office_room.is_occupied())\n self.office = Office('GreenHouse')\n self.living = LivingSpace('BlueMoon')\n self.amity = Amity()\n\n ospace = self.amity.allocate_office_space(self.fellow)\n lspace = self.amity.allocate_living_space(self.fellow)\n allocated = self.office.get_occupants()\n self.assertEquals(self.staff.has_living_space(), False)\n self.assertEquals(self.fellow.has_living_space(), True)\n self.assertIsNotNone(allocated)\n self.assertIsNotNone(ospace)\n self.assertIsNotNone(lspace)", 
"title": "" }, { "docid": "8b4a2e2f7ef30278e59afa70d8bbacfc", "score": "0.46169403", "text": "def amenities(self):\n from models import storage\n amenities_ids = []\n all_amenities = storage.all(Amenity)\n for amenity in all_amenities.values():\n if amenity.place_id == self.id:\n amenities_ids.append(amenity.id)\n return amenities_ids", "title": "" } ]
b045baebd3fdeb67f323f4540274d7b2
Call with
    lfile = Path of file with list of image file names
    bsz = Batch size you want this to generate
    csz = Output images will be size csz x csz
    niter = Resume at niterations
    isval = Running on train or val (random crops and shuffling for train)
[ { "docid": "aece2bf01fb86cd50abdd7e024ad11db", "score": "0.5739073", "text": "def __init__(self,lfile,bsz,csz,niter=0,isval=False):\n\n self.bsz = bsz\n self.csz = csz\n self.isrand = not isval\n\n # Setup fetch graph\n self.graph()\n\n # Load file list\n self.files = [l.strip() for l in open(lfile).readlines()]\n self.ndata = len(self.files)\n self.niter = niter*bsz\n \n # Setup shuffling\n if self.isrand:\n self.rand = np.random.RandomState(0)\n idx = self.rand.permutation(self.ndata)\n for i in range(niter // self.ndata):\n idx = self.rand.permutation(self.ndata)\n self.idx = np.int32(idx)\n else:\n self.idx = np.int32(np.arange(self.ndata))", "title": "" } ]
[ { "docid": "c42d77f1f24bb0462c6d7dc01a486be2", "score": "0.67475164", "text": "def generateBatchesForOneEpoch(self):\n\n #printMessageVerb(self.FLAGverbose, '-->> generating batches ...')\n\n\n # Load the files if needed\n if (self.currentEpoch == 0) or ((self.currentEpoch > 0) and self.loadNewFilesEveryEpoch):\n\n # Reset the list of images\n self.currentChannelImages = []\n self.currentGt = []\n self.currentRois = []\n\n\n # Choose the random images that will be sampled in this epoch\n # (*) ToDo -> make sure that new samples are used every epoch.\n self.indexCurrentImages = np.array(self.rndSequence.sample(range(0,self.numFiles), self.numOfCasesLoadedPerEpoch)) #it needs to be a numpy array so we can extract multiple elements with a list (the elements defined by IDsCases, which will be shuffled each epoch)\n self.IDsCases = list(range(0,self.numOfCasesLoadedPerEpoch)) #IDs to the cases in self.indexCurrentImages\n\n\n printMessageVerb(self.FLAGverbose, \"Loading %d images for epoch %d\" % (len(self.indexCurrentImages), self.currentEpoch))\n logging.debug(self.id + \" Loading images number : %s\" % self.indexCurrentImages )\n\n self.batchesPerEpoch = int(np.floor(self.numFiles / self.batchSize)) # number of batches per epoch\n\n #print(self.id + \" Loading images number : %s\" % indexCurrentImages )\n # Load the images for the epoch\n #i = 0\n self.listCurrentFiles = [] # reset the list of files loaded per epoch.\n for realImageIndex in self.indexCurrentImages:\n\n loadedImageChannels = [] #list to store all the channels of the current image.\n printMessageVerb(self.FLAGverbose, '-->> loading case %d'% (realImageIndex))\n self.listCurrentFiles.append(self.allChannelsFilenames[0][realImageIndex]) #List of filenames in the order\n\n # Load ROI if exists\n if ('roiMasksTraining' in self.confTrain):\n roi = nib.load(self.roiFilenames[realImageIndex]).get_data()\n roi = preprocessIntensityData(roi, FLAGresizeImages=self.FLAGresizeImages, imageSize=self.imageSize, FLAGpreserveIntValues = True, arrayMask=[], intensityNormalizationMode=None)\n self.currentRois.append(roi)\n else:\n roi = None\n\n # Imgs channels ----------------------------------------------\n # (*) ToDo --> incorporate the masks in the image normalization stage.!!\n for channel in range(0, self.numChannels):\n # Load the corresponding image for the corresponding channel and append it to the list of channels\n # for the current imageIndex\n # loadedImageChannels.append(nib.load(self.allChannelsFilenames[channel][realImageIndex]).get_data())\n\n # Load, preprocess, and normalize the channel.\n dataIn = nib.load(self.allChannelsFilenames[channel][realImageIndex]).get_data()\n if self.isChannelBinary[channel] == 0: # not binary input\n dataIn = preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages,\n imageSize=self.imageSize,\n FLAGpreserveIntValues = False, # False\n arrayMask=[],\n intensityNormalizationMode=self.intensityNormalizationMode[channel],\n intNormParam1=self.intNormParam1[channel],\n intNormParam2=self.intNormParam2[channel]\n )\n if self.FLAGsetBkgrnd == True:\n dataIn = setOutMaskValue(dataIn, roi, voxelValue = self.bkgrndLabel)\n\n elif self.isChannelBinary[channel] == 1: # binary input --> treat as gt.\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages,\n imageSize=self.imageSize,\n FLAGpreserveIntValues = True,\n arrayMask=[],\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Add the image to the queue of channels\n 
loadedImageChannels.append(dataIn)\n\n # Check that all the channels have the same dimensions\n if channel > 0:\n assert loadedImageChannels[channel].shape == loadedImageChannels[0].shape, self.id + \" Data size incompatibility when loading image channels for volume %s\" % self.allChannelsFilenames[channel][realImageIndex]\n\n # Append all the channels of the image to the list\n self.currentChannelImages.append(loadedImageChannels)\n\n # GT channel ----------------------------------------------\n gt = nib.load(self.gtFilenames[realImageIndex]).get_data()\n gt = preprocessIntensityData(gt, FLAGresizeImages=self.FLAGresizeImages, imageSize=self.imageSize, FLAGpreserveIntValues = True, arrayMask=[], intensityNormalizationMode=None)\n gt = normalizeLabels(gt)\n #assert gt.shape == loadedImageChannels[0].shape, self.id + \" Data size incompatibility when loading GT %s\" % self.gtFilenames[realImageIndex]\n self.currentGt.append(gt)\n\n\n # Initialize the batch and gt variables (so we only need to declare them once)\n if self.FLAGresizeImages ==1:\n self.batch = np.ndarray(shape=(self.batchSize, self.numChannels, self.imageSize[0], self.imageSize[1], self.imageSize[2]), dtype=np.float32)\n #gt = np.ndarray(shape=(1, self.numClasses, self.gtSize[0], self.gtSize[1], self.gtSize[2]), dtype=np.float32)\n self.gt = np.ndarray(shape=(self.batchSize, 1, self.gtSize[0], self.gtSize[1], self.gtSize[2]), dtype=np.float32)\n else:\n dims = self.currentChannelImages[0][0].shape\n self.batch = np.ndarray(shape=(self.batchSize, self.numChannels, dims[0], dims[1], dims[2]), dtype=np.float32)\n #gt = np.ndarray(shape=(1, self.numClasses, dims[0], dims[1], dims[2]), dtype=np.float32)\n self.gt = np.ndarray(shape=(self.batchSize, 1, dims[0], dims[1], dims[2]), dtype=np.float32)\n\n else:\n # reshuffle the IDs of the cases so each epoch, the cases are presented in different order.\n rnd.shuffle(self.IDsCases)\n\n\n\n#\n## #TODO FIX IT Generate only batches for the given validation batch size\n# for batch in range(0, len(self.indexCurrentImages)):\n# #print \"generating batch %d\" % (batch)\n# #logging.debug(self.id + \" Generating batch: %d\" % batch )\n# isoScale_i = self.generateSingleBatch(batch)\n# #self.batch, self.gt, isoScale_i = self.generateSingleBatch(batch)\n#\n# self.queue.put((self.batch,self.gt))\n# self.queueFileNames.put(self.listCurrentFiles[batch])\n# self.queueScalingFactors.put(isoScale_i)\n#\n\n# #TODO FIX IT Generate only batches for the given validation batch size\n for numbatch in range(0, self.batchesPerEpoch):\n #print \"generating batch %d\" % (batch)\n #logging.debug(self.id + \" Generating batch: %d\" % batch )\n IDs_aux_i = self.IDsCases[(numbatch*self.batchSize):((numbatch*self.batchSize)+self.batchSize)] #sample from the shuffled list of IDs (self.IDsCases)\n IDs_i = self.indexCurrentImages[IDs_aux_i] # recover the real IDs of the images to work with.\n\n isoScale_i = self.generateSingleBatchV2(IDs_i)\n #self.batch, self.gt, isoScale_i = self.generateSingleBatch(batch)\n\n self.queue.put((self.batch,self.gt))\n self.queueScalingFactors.put(isoScale_i)\n currentFilesNames = [];\n for IDs_j in IDs_i:\n currentFilesNames.append(self.listCurrentFiles[IDs_j])\n self.queueFileNames.put(currentFilesNames)\n\n\n\n# # Unload the files if we are loading new files every subpeoc\n if self.loadNewFilesEveryEpoch:\n self.unloadFiles()", "title": "" }, { "docid": "e99a944c40de0230dc5e575964cd92d6", "score": "0.663043", "text": "def generateBatchesForOneEpoch(self):\n # Load the files if needed\n # 
=====================================================================\n if (self.currentEpoch == 0) or ((self.currentEpoch > 0) and self.loadNewFilesEveryEpoch):\n\n # Choose the random images that will be sampled in this epoch\n # (*) ToDo -> make sure that new samples are used every epoch.\n self.indexCurrentImages = np.array(self.rndSequence.sample(range(0,self.numFiles), self.numOfCasesLoadedPerEpoch)) #it needs to be a numpy array so we can extract multiple elements with a list (the elements defined by IDsCases, which will be shuffled each epoch)\n self.IDsCases = list(range(0,self.numOfCasesLoadedPerEpoch)) #IDs to the cases in self.indexCurrentImages\n\n printMessageVerb(self.FLAGverbose, \"Loading %d images for epoch %d\" % (len(self.indexCurrentImages), self.currentEpoch))\n logging.debug(self.id + \" Loading images number : %s\" % self.indexCurrentImages )\n\n self.batchesPerEpoch = int(np.floor(self.numFiles / self.batchSize)) # number of batches per epoch\n\n self.currentChannelImages = [] # reset the list of images volumes (actual volumes)\n self.currentGt = [] # reset the list of gt volumes (actual volumes)\n self.currentRois = [] # reset the list of ROIs volumes (actual volumes)\n self.listCurrentFiles = [] # reset the list of files loaded per epoch.\n self.listCurrentIDs = np.array([]) # reset the list of IDs\n\n self.listCurrent2Dplane_1 = [] # reset the list of 2D planes loaded per epoch.\n self.listCurrent2Dplane_1_ID = np.array([]) # reset the list of IDs of 2D planes loaded per epoch.\n self.current_2Dplane_1 = [] # reset the auxiliar 2D planes 1 (actual images)\n\n self.listCurrent2Dplane_2 = [] # reset the list of 2D planes loaded per epoch.\n self.listCurrent2Dplane_2_ID = np.array([]) # reset the list of IDs of 2D planes loaded per epoch.\n self.current_2Dplane_2 = [] # reset the auxiliar 2D planes 2 (actual images)\n\n self.listCurrent2Dplane_3 = [] # reset the list of 2D planes loaded per epoch.\n self.listCurrent2Dplane_3_ID = np.array([]) # reset the list of IDs of 2D planes loaded per epoch.\n self.current_2Dplane_3 = [] # reset the auxiliar 2D planes 3 (actual images)\n\n for realImageIndex in self.indexCurrentImages:\n loadedImageChannels = [] #list to store all the channels of the current image.\n self.listCurrentFiles.append(self.allChannelsFilenames[0][realImageIndex]) # List of filenames in the order\n #self.listCurrentIDs.append(self.allFilenamesIDs[realImageIndex]) # Real patient ID of each case. 
Used to match the image with the auuxiliar 2D planes\n case_ID_i = self.allFilenamesIDs[realImageIndex]\n self.listCurrentIDs = np.append(self.listCurrentIDs, case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->> loading %d - ID %d'% (realImageIndex,case_ID_i))\n\n # Load ROI if exists\n if ('roiMasksTraining' in self.confTrain):\n roi = nib.load(self.roiFilenames[realImageIndex]).get_data()\n roi = preprocessIntensityData(roi, FLAGresizeImages=self.FLAGresizeImages, imageSize=self.imageSize, FLAGpreserveIntValues = True, arrayMask=[], intensityNormalizationMode=None)\n self.currentRois.append(roi)\n else:\n roi = None\n\n # Imgs channels ----------------------------------------------\n # (*) ToDo --> incorporate the masks in the image normalization stage.!!\n for channel in range(0, self.numChannels):\n # Load the corresponding image for the corresponding channel and append it to the list of channels\n # for the current imageIndex\n # loadedImageChannels.append(nib.load(self.allChannelsFilenames[channel][realImageIndex]).get_data())\n\n # Load, preprocess, and normalize the channel.\n dataIn = nib.load(self.allChannelsFilenames[channel][realImageIndex]).get_data()\n if self.isChannelBinary[channel] == 0: # not binary input\n dataIn = preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages,\n imageSize=self.imageSize,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode[channel],\n intNormParam1=self.intNormParam1[channel],\n intNormParam2=self.intNormParam2[channel]\n )\n if self.FLAGsetBkgrnd == True:\n dataIn = setOutMaskValue(dataIn, roi, voxelValue = self.bkgrndLabel)\n\n elif self.isChannelBinary[channel] == 1: # binary input --> treat as gt.\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages,\n imageSize=self.imageSize,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Add the image to the queue of channels\n loadedImageChannels.append(dataIn)\n\n # Check that all the channels have the same dimensions\n if channel > 0:\n assert loadedImageChannels[channel].shape == loadedImageChannels[0].shape, self.id + \" Data size incompatibility when loading image channels for volume %s\" % self.allChannelsFilenames[channel][realImageIndex]\n\n # Append all the channels of the image to the list\n self.currentChannelImages.append(loadedImageChannels)\n\n # GT channel ----------------------------------------------\n gt = nib.load(self.gtFilenames[realImageIndex]).get_data()\n gt = preprocessIntensityData(gt, FLAGresizeImages=self.FLAGresizeImages, imageSize=self.imageSize, FLAGpreserveIntValues = True, arrayMask=[], intensityNormalizationMode=None)\n gt = normalizeLabels(gt)\n #assert gt.shape == loadedImageChannels[0].shape, self.id + \" Data size incompatibility when loading GT %s\" % self.gtFilenames[realImageIndex]\n self.currentGt.append(gt)\n\n\n# # Aux 2D planes ---------------------------------------------\n\n # Additional 2D image 1\n aux_plane_i = 0\n if len(self.allFilenames2Dplane_1_ID)>0:\n pos_ID_i = np.where(self.allFilenames2Dplane_1_ID == case_ID_i)[0]\n assert len(pos_ID_i)>0, self.id + \" 2D plane 1 missing for case ID %d\" % case_ID_i\n if len(pos_ID_i)>0:\n for p_i in pos_ID_i:\n dataIn = nib.load(self.allFilenames2Dplane_1[p_i]).get_data()[:,:,0]\n if self.isChannelBinary_2D[aux_plane_i] == 0: # not binary input\n\n dataIn = 
preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode_2D[aux_plane_i],\n intNormParam1=self.intNormParam1_2D[aux_plane_i],\n intNormParam2=self.intNormParam2_2D[aux_plane_i]\n )\n elif self.isChannelBinary_2D[aux_plane_i] == 1:\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Append all the channels of the image to the list\n self.current_2Dplane_1.append(dataIn)\n self.listCurrent2Dplane_1.append(self.allFilenames2Dplane_1[p_i])\n self.listCurrent2Dplane_1_ID = np.append(self.listCurrent2Dplane_1_ID,case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->>2D plane 1 found: %s'% (self.allFilenames2Dplane_1[p_i]))\n\n # Additional 2D image 2\n aux_plane_i = 1\n if len(self.allFilenames2Dplane_2_ID)>0:\n pos_ID_i = np.where(self.allFilenames2Dplane_2_ID == case_ID_i)[0]\n assert len(pos_ID_i)>0, self.id + \" 2D plane 2 missing for case ID %d\" % case_ID_i\n if len(pos_ID_i)>0:\n for p_i in pos_ID_i:\n dataIn = nib.load(self.allFilenames2Dplane_2[p_i]).get_data()[:,:,0]\n if self.isChannelBinary_2D[aux_plane_i] == 0: # not binary input\n dataIn = preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode_2D[aux_plane_i],\n intNormParam1=self.intNormParam1_2D[aux_plane_i],\n intNormParam2=self.intNormParam2_2D[aux_plane_i]\n )\n elif self.isChannelBinary_2D[aux_plane_i] == 1:\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Append all the channels of the image to the list\n self.current_2Dplane_2.append(dataIn)\n self.listCurrent2Dplane_2.append(self.allFilenames2Dplane_2[p_i])\n self.listCurrent2Dplane_2_ID = np.append(self.listCurrent2Dplane_2_ID,case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->>2D plane 2 found: %s'% (self.allFilenames2Dplane_2[p_i]))\n\n # Additional 2D image 3\n aux_plane_i = 2\n if len(self.allFilenames2Dplane_3_ID)>0:\n pos_ID_i = np.where(self.allFilenames2Dplane_3_ID == case_ID_i)[0]\n assert len(pos_ID_i)>0, self.id + \" 2D plane 2 missing for case ID %d\" % case_ID_i\n if len(pos_ID_i)>0:\n for p_i in pos_ID_i:\n dataIn = nib.load(self.allFilenames2Dplane_3[p_i]).get_data()[:,:,0]\n if self.isChannelBinary_2D[aux_plane_i] == 0: # not binary input\n dataIn = preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode_2D[aux_plane_i],\n intNormParam1=self.intNormParam1_2D[aux_plane_i],\n intNormParam2=self.intNormParam2_2D[aux_plane_i]\n )\n elif self.isChannelBinary_2D[aux_plane_i] == 1:\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Append all the channels of the image to the list\n 
self.current_2Dplane_3.append(dataIn)\n self.listCurrent2Dplane_3.append(self.allFilenames2Dplane_2[p_i])\n self.listCurrent2Dplane_3_ID = np.append(self.listCurrent2Dplane_3_ID,case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->>2D plane 3 found: %s'% (self.allFilenames2Dplane_3[p_i]))\n\n\n\n # Initialize the batch and gt variables (so we only need to declare them once)\n if self.FLAGresizeImages ==1:\n self.batch = np.ndarray(shape=(self.batchSize, self.numChannels, self.imageSize[0], self.imageSize[1], self.imageSize[2]), dtype=np.float32)\n self.batch2D_1 = np.ndarray(shape=(self.batchSize, 1, self.imageSize_2D[0], self.imageSize_2D[1]), dtype=np.float32)\n self.batch2D_2 = np.ndarray(shape=(self.batchSize, 1, self.imageSize_2D[0], self.imageSize_2D[1]), dtype=np.float32)\n self.batch2D_3 = np.ndarray(shape=(self.batchSize, 1, self.imageSize_2D[0], self.imageSize_2D[1]), dtype=np.float32)\n #gt = np.ndarray(shape=(1, self.numClasses, self.gtSize[0], self.gtSize[1], self.gtSize[2]), dtype=np.float32)\n self.gt = np.ndarray(shape=(self.batchSize, 1, self.gtSize[0], self.gtSize[1], self.gtSize[2]), dtype=np.float32)\n else:\n dims = self.currentChannelImages[0][0].shape\n self.batch = np.ndarray(shape=(self.batchSize, self.numChannels, dims[0], dims[1], dims[2]), dtype=np.float32)\n #gt = np.ndarray(shape=(1, self.numClasses, dims[0], dims[1], dims[2]), dtype=np.float32)\n dims_2D = self.current_2Dplane_1[1].shape\n self.batch2D_1 = np.ndarray(shape=(self.batchSize, 1, dims_2D[0], dims_2D[1]), dtype=np.float32)\n self.batch2D_2 = np.ndarray(shape=(self.batchSize, 1, dims_2D[0], dims_2D[1]), dtype=np.float32)\n self.batch2D_3 = np.ndarray(shape=(self.batchSize, 1, dims_2D[0], dims_2D[1]), dtype=np.float32)\n self.gt = np.ndarray(shape=(self.batchSize, 1, dims[0], dims[1], dims[2]), dtype=np.float32)\n else:\n # reshuffle the IDs of the cases so each epoch, the cases are presented in different order.\n rnd.shuffle(self.IDsCases)\n\n\n # Create the batches\n # =====================================================================\n t_1 = time.time()\n for numbatch in range(0, self.batchesPerEpoch):\n #print \"generating batch %d\" % (batch)\n #logging.debug(self.id + \" Generating batch: %d\" % batch )\n #print('generating batch %d' %(numbatch))\n IDs_aux_i = self.IDsCases[(numbatch*self.batchSize):((numbatch*self.batchSize)+self.batchSize)] #sample from the shuffled list of IDs (self.IDsCases)\n IDs_i = self.indexCurrentImages[IDs_aux_i] # recover the real IDs of the images to work with.\n # self.isoScale_i = self.generateSingleBatchV2(IDs_i)\n isoScale_i = self.generateSingleBatch_VolAnd2DPlanes(IDs_i)\n\n# self.aux = IDs_i\n# #self.batch, self.gt, isoScale_i = self.generateSingleBatch(batch)\n\n self.queue.put((self.batch,self.gt, self.batch2D_1, self.batch2D_2, self.batch2D_3))\n self.queueScalingFactors.put(isoScale_i)\n currentFilesNames = [];\n for IDs_j in IDs_i:\n currentFilesNames.append(self.listCurrentFiles[IDs_j])\n self.queueFileNames.put(currentFilesNames)\n\n t_2 = time.time()\n self.timePerEpoch = np.append(self.timePerEpoch,t_2-t_1)\n t_1 = t_2\n# # Unload the files if we are loading new files every subpeoc\n if self.loadNewFilesEveryEpoch:\n self.unloadFiles()", "title": "" }, { "docid": "11e8c39fe82e76497935ea0ef6f50c8f", "score": "0.6605367", "text": "def generateBatchesForOneEpoch(self):\n # Load the files if needed\n # =====================================================================\n if (self.currentEpoch == 0) or ((self.currentEpoch > 0) and 
self.loadNewFilesEveryEpoch):\n\n printMessageVerb(self.FLAGverbose, '-->> Epoch %d: LOADING IMAGES... '% (self.currentEpoch))\n\n # Choose the random images that will be sampled in this epoch\n # (*) ToDo -> make sure that new samples are used every epoch.\n self.indexCurrentImages = np.array(self.rndSequence.sample(range(0,self.numFiles), self.numOfCasesLoadedPerEpoch)) #it needs to be a numpy array so we can extract multiple elements with a list (the elements defined by IDsCases, which will be shuffled each epoch)\n self.IDsCases = list(range(0,self.numOfCasesLoadedPerEpoch)) #IDs to the cases in self.indexCurrentImages\n\n printMessageVerb(self.FLAGverbose, \"Loading %d images for epoch %d\" % (len(self.indexCurrentImages), self.currentEpoch))\n logging.debug(self.id + \" Loading images number : %s\" % self.indexCurrentImages )\n\n self.batchesPerEpoch = int(np.floor(self.numFiles / self.batchSize)) # number of batches per epoch\n\n self.currentChannelImages = [] # reset the list of images volumes (actual volumes)\n self.currentGt = [] # reset the list of gt volumes (actual volumes)\n self.currentRois = [] # reset the list of ROIs volumes (actual volumes)\n self.listCurrentFiles = [] # reset the list of files loaded per epoch.\n self.listCurrentIDs = np.array([]) # reset the list of IDs\n\n self.listCurrent2Dplane_1 = [] # reset the list of 2D planes loaded per epoch.\n self.listCurrent2Dplane_1_ID = np.array([]) # reset the list of IDs of 2D planes loaded per epoch.\n self.current_2Dplane_1 = [] # reset the auxiliar 2D planes 1 (actual images)\n\n self.listCurrent2Dplane_2 = [] # reset the list of 2D planes loaded per epoch.\n self.listCurrent2Dplane_2_ID = np.array([]) # reset the list of IDs of 2D planes loaded per epoch.\n self.current_2Dplane_2 = [] # reset the auxiliar 2D planes 2 (actual images)\n\n self.listCurrent2Dplane_3 = [] # reset the list of 2D planes loaded per epoch.\n self.listCurrent2Dplane_3_ID = np.array([]) # reset the list of IDs of 2D planes loaded per epoch.\n self.current_2Dplane_3 = [] # reset the auxiliar 2D planes 3 (actual images)\n\n for realImageIndex in self.indexCurrentImages:\n loadedImageChannels = [] #list to store all the channels of the current image.\n self.listCurrentFiles.append(self.allChannelsFilenames[0][realImageIndex]) # List of filenames in the order\n #self.listCurrentIDs.append(self.allFilenamesIDs[realImageIndex]) # Real patient ID of each case. 
Used to match the image with the auuxiliar 2D planes\n case_ID_i = self.allFilenamesIDs[realImageIndex]\n self.listCurrentIDs = np.append(self.listCurrentIDs, case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->> loading %d - ID %d'% (realImageIndex,case_ID_i))\n\n # Load ROI if exists\n if ('roiMasksTraining' in self.confTrain):\n roi = nib.load(self.roiFilenames[realImageIndex]).get_data()\n roi = preprocessIntensityData(roi, FLAGresizeImages=self.FLAGresizeImages, imageSize=self.imageSize, FLAGpreserveIntValues = True, arrayMask=[], intensityNormalizationMode=None)\n self.currentRois.append(roi)\n else:\n roi = None\n\n # Imgs channels ----------------------------------------------\n # (*) ToDo --> incorporate the masks in the image normalization stage.!!\n for channel in range(0, self.numChannels):\n # Load the corresponding image for the corresponding channel and append it to the list of channels\n # for the current imageIndex\n # loadedImageChannels.append(nib.load(self.allChannelsFilenames[channel][realImageIndex]).get_data())\n\n # Load, preprocess, and normalize the channel.\n dataIn = nib.load(self.allChannelsFilenames[channel][realImageIndex]).get_data()\n if self.isChannelBinary[channel] == 0: # not binary input\n dataIn = preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages,\n imageSize=self.imageSize,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode[channel],\n intNormParam1=self.intNormParam1[channel],\n intNormParam2=self.intNormParam2[channel]\n )\n if self.FLAGsetBkgrnd == True:\n dataIn = setOutMaskValue(dataIn, roi, voxelValue = self.bkgrndLabel)\n\n elif self.isChannelBinary[channel] == 1: # binary input --> treat as gt.\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages,\n imageSize=self.imageSize,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Add the image to the queue of channels\n loadedImageChannels.append(dataIn)\n\n # Check that all the channels have the same dimensions\n if channel > 0:\n assert loadedImageChannels[channel].shape == loadedImageChannels[0].shape, self.id + \" Data size incompatibility when loading image channels for volume %s\" % self.allChannelsFilenames[channel][realImageIndex]\n\n # Append all the channels of the image to the list\n self.currentChannelImages.append(loadedImageChannels)\n\n # GT channel ----------------------------------------------\n gt = nib.load(self.gtFilenames[realImageIndex]).get_data()\n gt = preprocessIntensityData(gt, FLAGresizeImages=self.FLAGresizeImages, imageSize=self.imageSize, FLAGpreserveIntValues = True, arrayMask=[], intensityNormalizationMode=None)\n gt = normalizeLabels(gt)\n #assert gt.shape == loadedImageChannels[0].shape, self.id + \" Data size incompatibility when loading GT %s\" % self.gtFilenames[realImageIndex]\n self.currentGt.append(gt)\n\n\n# # Aux 2D planes ---------------------------------------------\n\n # Additional 2D image 1\n aux_plane_i = 0\n if len(self.allFilenames2Dplane_1_ID)>0:\n pos_ID_i = np.where(self.allFilenames2Dplane_1_ID == case_ID_i)[0]\n assert len(pos_ID_i)>0, self.id + \" 2D plane 1 missing for case ID %d\" % case_ID_i\n if len(pos_ID_i)>0:\n for p_i in pos_ID_i:\n dataIn = nib.load(self.allFilenames2Dplane_1[p_i]).get_data()[:,:,0]\n if self.isChannelBinary_2D[aux_plane_i] == 0: # not binary input\n\n dataIn = 
preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode_2D[aux_plane_i],\n intNormParam1=self.intNormParam1_2D[aux_plane_i],\n intNormParam2=self.intNormParam2_2D[aux_plane_i]\n )\n elif self.isChannelBinary_2D[aux_plane_i] == 1:\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Append all the channels of the image to the list\n self.current_2Dplane_1.append(dataIn)\n self.listCurrent2Dplane_1.append(self.allFilenames2Dplane_1[p_i])\n self.listCurrent2Dplane_1_ID = np.append(self.listCurrent2Dplane_1_ID,case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->>2D plane 1 found: %s'% (self.allFilenames2Dplane_1[p_i]))\n\n # Additional 2D image 2\n aux_plane_i = 1\n if len(self.allFilenames2Dplane_2_ID)>0:\n pos_ID_i = np.where(self.allFilenames2Dplane_2_ID == case_ID_i)[0]\n assert len(pos_ID_i)>0, self.id + \" 2D plane 2 missing for case ID %d\" % case_ID_i\n if len(pos_ID_i)>0:\n for p_i in pos_ID_i:\n dataIn = nib.load(self.allFilenames2Dplane_2[p_i]).get_data()[:,:,0]\n if self.isChannelBinary_2D[aux_plane_i] == 0: # not binary input\n dataIn = preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode_2D[aux_plane_i],\n intNormParam1=self.intNormParam1_2D[aux_plane_i],\n intNormParam2=self.intNormParam2_2D[aux_plane_i]\n )\n elif self.isChannelBinary_2D[aux_plane_i] == 1:\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Append all the channels of the image to the list\n self.current_2Dplane_2.append(dataIn)\n self.listCurrent2Dplane_2.append(self.allFilenames2Dplane_2[p_i])\n self.listCurrent2Dplane_2_ID = np.append(self.listCurrent2Dplane_2_ID,case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->>2D plane 2 found: %s'% (self.allFilenames2Dplane_2[p_i]))\n\n # Additional 2D image 3\n aux_plane_i = 2\n if len(self.allFilenames2Dplane_3_ID)>0:\n pos_ID_i = np.where(self.allFilenames2Dplane_3_ID == case_ID_i)[0]\n assert len(pos_ID_i)>0, self.id + \" 2D plane 2 missing for case ID %d\" % case_ID_i\n if len(pos_ID_i)>0:\n for p_i in pos_ID_i:\n dataIn = nib.load(self.allFilenames2Dplane_3[p_i]).get_data()[:,:,0]\n if self.isChannelBinary_2D[aux_plane_i] == 0: # not binary input\n dataIn = preprocessIntensityData(dataIn,FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = False,\n arrayMask=None,\n intensityNormalizationMode=self.intensityNormalizationMode_2D[aux_plane_i],\n intNormParam1=self.intNormParam1_2D[aux_plane_i],\n intNormParam2=self.intNormParam2_2D[aux_plane_i]\n )\n elif self.isChannelBinary_2D[aux_plane_i] == 1:\n dataIn = preprocessIntensityData(dataIn, FLAGresizeImages=self.FLAGresizeImages_2D,\n imageSize=self.imageSize_2D,\n FLAGpreserveIntValues = True,\n arrayMask=None,\n intensityNormalizationMode = None)\n dataIn = normalizeLabels(dataIn)\n\n # Append all the channels of the image to the list\n 
self.current_2Dplane_3.append(dataIn)\n self.listCurrent2Dplane_3.append(self.allFilenames2Dplane_2[p_i])\n self.listCurrent2Dplane_3_ID = np.append(self.listCurrent2Dplane_3_ID,case_ID_i)\n printMessageVerb(self.FLAGverbose, '-->>2D plane 3 found: %s'% (self.allFilenames2Dplane_3[p_i]))\n\n\n\n # Initialize the batch and gt variables (so we only need to declare them once)\n if self.FLAGresizeImages ==1:\n self.batch = np.ndarray(shape=(self.batchSize, self.numChannels, self.imageSize[0], self.imageSize[1], self.imageSize[2]), dtype=np.float32)\n self.batch2D_1 = np.ndarray(shape=(self.batchSize, 1, self.imageSize_2D[0], self.imageSize_2D[1]), dtype=np.float32)\n self.batch2D_2 = np.ndarray(shape=(self.batchSize, 1, self.imageSize_2D[0], self.imageSize_2D[1]), dtype=np.float32)\n self.batch2D_3 = np.ndarray(shape=(self.batchSize, 1, self.imageSize_2D[0], self.imageSize_2D[1]), dtype=np.float32)\n #gt = np.ndarray(shape=(1, self.numClasses, self.gtSize[0], self.gtSize[1], self.gtSize[2]), dtype=np.float32)\n self.gt = np.ndarray(shape=(self.batchSize, 1, self.gtSize[0], self.gtSize[1], self.gtSize[2]), dtype=np.float32)\n else:\n dims = self.currentChannelImages[0][0].shape\n self.batch = np.ndarray(shape=(self.batchSize, self.numChannels, dims[0], dims[1], dims[2]), dtype=np.float32)\n #gt = np.ndarray(shape=(1, self.numClasses, dims[0], dims[1], dims[2]), dtype=np.float32)\n dims_2D = self.current_2Dplane_1[1].shape\n self.batch2D_1 = np.ndarray(shape=(self.batchSize, 1, dims_2D[0], dims_2D[1]), dtype=np.float32)\n self.batch2D_2 = np.ndarray(shape=(self.batchSize, 1, dims_2D[0], dims_2D[1]), dtype=np.float32)\n self.batch2D_3 = np.ndarray(shape=(self.batchSize, 1, dims_2D[0], dims_2D[1]), dtype=np.float32)\n self.gt = np.ndarray(shape=(self.batchSize, 1, dims[0], dims[1], dims[2]), dtype=np.float32)\n else:\n # reshuffle the IDs of the cases so each epoch, the cases are presented in different order.\n rnd.shuffle(self.IDsCases)\n\n\n # Create the batches\n # =====================================================================\n printMessageVerb(self.FLAGverbose, '-->> Creating batches .................................')\n t_1 = time.time()\n\n # 1) Split the batches between all the threads\n # i) split the number of batches\n\n batchesPerEpochPerThread = [self.batchesPerEpoch // self.numThreads] * self.numThreads\n batchesPerEpochPerThread[0] += self.batchesPerEpoch % self.numThreads #batches per thread\n self.IDsCases_Threads = [self.IDsCases[sum(batchesPerEpochPerThread[0:x])*self.batchSize:sum(batchesPerEpochPerThread[0:x+1])*self.batchSize] for x in range(1, len(batchesPerEpochPerThread))]\n self.IDsCases_Threads.append(self.IDsCases[0:batchesPerEpochPerThread[0]*self.batchSize]) #list of list with the IDs to use on each epoch\n\n listBatchGenTh = []\n for th_i in range(0,self.numThreads):\n\n batchGenTh_i = Thread(target = self.batchGeneratorThread, args=(th_i, self.IDsCases_Threads[th_i]))\n batchGenTh_i.setDaemon(False)\n batchGenTh_i.start()\n listBatchGenTh.append(batchGenTh_i)\n\n for th_i in range(0,self.numThreads): # Necessary to prevent collapsing\n listBatchGenTh[th_i].join()\n\n\n t_2 = time.time()\n self.timePerEpoch = np.append(self.timePerEpoch,t_2-t_1)\n t_1 = t_2\n\n # Lunch a thread with its corresponding list of IDs IDsCases_Threads[th_i]\n\n #IDs_aux_i = self.IDsCases[(numbatch*self.batchSize):((numbatch*self.batchSize)+self.batchSize)] #sample from the shuffled list of IDs (self.IDsCases)\n #IDs_i = self.indexCurrentImages[IDs_aux_i] # recover the real IDs of 
the images to work with.\n #isoScale_i = self.generateSingleBatch_VolAnd2DPlanes(IDs_i)\n\n\n\n\n # 2) Each thread has to take care of a certain number of batches\n\n # - check the new multi-thread function 'generateSingleBatch_VolAnd2DPlanes' doesn't change or modify common / global resurces.\n # - control simultaneous access to common variables (use a lock...)\n # - put the batch and filenames in the queue\n\n\n# for numbatch in range(0, self.batchesPerEpoch):\n# #print \"generating batch %d\" % (batch)\n# #logging.debug(self.id + \" Generating batch: %d\" % batch )\n# #print('generating batch %d' %(numbatch))\n# IDs_aux_i = self.IDsCases[(numbatch*self.batchSize):((numbatch*self.batchSize)+self.batchSize)] #sample from the shuffled list of IDs (self.IDsCases)\n# IDs_i = self.indexCurrentImages[IDs_aux_i] # recover the real IDs of the images to work with.\n# # self.isoScale_i = self.generateSingleBatchV2(IDs_i)\n# isoScale_i = self.generateSingleBatch_VolAnd2DPlanes(IDs_i)\n#\n## self.aux = IDs_i\n## #self.batch, self.gt, isoScale_i = self.generateSingleBatch(batch)\n#\n# self.queue.put((self.batch,self.gt, self.batch2D_1, self.batch2D_2, self.batch2D_3))\n# self.queueScalingFactors.put(isoScale_i)\n# currentFilesNames = [];\n# for IDs_j in IDs_i:\n# currentFilesNames.append(self.listCurrentFiles[IDs_j])\n# self.queueFileNames.put(currentFilesNames)\n\n# t_2 = time.time()\n# self.timePerEpoch = np.append(self.timePerEpoch,t_2-t_1)\n# t_1 = t_2\n# # Unload the files if we are loading new files every subpeoc\n if self.loadNewFilesEveryEpoch:\n self.unloadFiles()", "title": "" }, { "docid": "7f506f60ac834e2af18806110685bb36", "score": "0.6329911", "text": "def gen_batch_function_nir_ttv(data_folder, image_shape):\n def get_batches_fn_nir_ttv(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n print(\"data_folder: \", data_folder)\n for folder in data_folder:\n image_paths = glob(os.path.join(folder, '*color*.png')) # previously 'data*.png'\n label_paths = {\n re.sub(r'ground_truth', 'color', os.path.basename(path)): path # previously 'ground_truth', 'data'\n for path in glob(os.path.join(folder, '*ground_truth*.png'))}\n background_color = np.array([0, 0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n nir_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n _, filename = os.path.split(image_file)\n fd_id = filename[0]\n img_id = image_file[-8:]\n #print(folder+\"/\"+fd_id+\"_nir_\"+img_id)\n nir = cv2.imread(folder+\"/\"+fd_id+\"_nir_\"+img_id)\n #print(folder+\"/nir_\"+img_id)\n nir = scipy.misc.imresize(nir, image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n images.append(image)\n gt_images.append(gt_image)\n nir_images.append(nir)\n\n yield np.array(images), np.array(gt_images), np.array(nir_images)\n return get_batches_fn_nir_ttv", "title": "" }, { "docid": "e81c90584f5a8cfd5533fc42568f6c5e", "score": "0.6258612", "text": "def get_batches_fn(batch_size):\n image_paths = glob(os.path.join(data_folder, 'CameraRGB', '*.png'))\n label_paths = glob(os.path.join(data_folder, 
'LabeledSeg', '*.png'))\n back_color = np.array([255, 0, 0])\n road_color = np.array([255, 0, 255])\n cars_color = np.array([0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for i,image_file in enumerate(image_paths[batch_i:batch_i+batch_size]):\n gt_image_file = label_paths[i]\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n #image = cv2.imread(image_file)\n #gt_image = cv2.imread(gt_image_file)\n #image, gt_image = random_crop(image, gt_image) #Random crop augmentation\n #image = cv2.resize(image, image_shape)\n contr = random.uniform(0.85, 1.15) # Contrast augmentation\n bright = random.randint(-45, 30) # Brightness augmentation\n image = bc_img(image, contr, bright)\n #gt_image = cv2.resize(gt_image, image_shape)\n\n gt_back = np.all(gt_image == back_color, axis=2)\n gt_road = np.all(gt_image == road_color, axis =2)\n gt_cars = np.all(gt_image == cars_color, axis =2)\n gt_back = gt_back.reshape(*gt_back.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n gt_cars = gt_cars.reshape(*gt_cars.shape, 1)\n gt_image = np.concatenate((gt_back, gt_road, gt_cars), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "title": "" }, { "docid": "b7358897566833bc02d55d1448e38cb6", "score": "0.6246429", "text": "def gen_batch_function(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'CameraRGB', '*.png'))\n label_paths = glob(os.path.join(data_folder, 'LabeledSeg', '*.png'))\n back_color = np.array([255, 0, 0])\n road_color = np.array([255, 0, 255])\n cars_color = np.array([0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for i,image_file in enumerate(image_paths[batch_i:batch_i+batch_size]):\n gt_image_file = label_paths[i]\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n #image = cv2.imread(image_file)\n #gt_image = cv2.imread(gt_image_file)\n #image, gt_image = random_crop(image, gt_image) #Random crop augmentation\n #image = cv2.resize(image, image_shape)\n contr = random.uniform(0.85, 1.15) # Contrast augmentation\n bright = random.randint(-45, 30) # Brightness augmentation\n image = bc_img(image, contr, bright)\n #gt_image = cv2.resize(gt_image, image_shape)\n\n gt_back = np.all(gt_image == back_color, axis=2)\n gt_road = np.all(gt_image == road_color, axis =2)\n gt_cars = np.all(gt_image == cars_color, axis =2)\n gt_back = gt_back.reshape(*gt_back.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n gt_cars = gt_cars.reshape(*gt_cars.shape, 1)\n gt_image = np.concatenate((gt_back, gt_road, gt_cars), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "title": "" }, { "docid": "bd6745d5c7767afb827f9d6759d3eee3", "score": "0.62339", "text": "def next_batch(self):\n #print(self.pcl[self.train_idx[self.train_ptr]])\n new_epoch = 0\n if self.dataset_size <=self.ptr + self.batch_size:\n if self.is_training:\n np.random.shuffle(self.idx)\n self.ptr = 0\n new_epoch = 1\n #print('new epoch: ptr= %d' 
%(self.train_ptr))\n \n img_batch_l, sal_batch_l = [],[]\n for i in range(self.batch_size):\n img_root_fn, sal_root_fn = self.data[self.ptr].split(\" \")\n img_fn = os.path.join(self.data_dir, img_root_fn)\n sal_fn = os.path.join(self.data_dir, sal_root_fn)\n #print('img_fn: %s' %(img_fn))\n if MACHINE==2:\n img = Image.open(img_fn) \n sal = Image.open(sal_fn)\n if self.resize_img:\n img = np.array(img.resize(self.out_size))[:,:,::-1]\n sal = np.array(sal.resize(self.out_size)).astype(np.float32)\n else:\n img = np.array(img)[:,:,::-1]#.astype(np.float32)\n sal = np.array(sal)#.astype(np.float32)\n #print(sal.shape)\n else:\n img = cv2.imread(img_fn)\n sal = cv2.imread(sal_fn, cv2.IMREAD_UNCHANGED)\n if self.resize_img:\n img = cv2.resize(img, self.out_size, interpolation=cv2.INTER_AREA)\n sal = cv2.resize(sal, self.out_size, interpolation=cv2.INTER_AREA)\n\n\n # pre-proc img\n img = img.astype(np.float32) - self.mean\n \n # pre proc saliency\n # scale to [0.5,1]\n sal = sal.astype(np.float32)\n old_min, old_max = np.min(sal), np.max(sal)\n new_min, new_max = 0.5, 1.0\n sal = new_min + (sal - old_min) * (new_max - new_min) / (old_max - old_min)\n # convert into log prob\n sal -= logsumexp(sal) # log(softmax(sal))\n\n sal = np.expand_dims(sal, 2)\n img_batch_l.append(img)\n sal_batch_l.append(sal)\n\n self.ptr += 1\n #print('self.train_ptr: %d' %(self.train_ptr))\n #print(np.array(img_batch_l).shape)\n #print(np.array(sal_batch_l).shape)\n return new_epoch, np.array(img_batch_l), np.array(sal_batch_l)", "title": "" }, { "docid": "4110091c4bd0a06a49ee77b813dd3f4b", "score": "0.6230792", "text": "def create_trainset_RGB(path_groundtruth,path_trainset,noise_std_range,im_size):\n np.random.seed(0)\n torch.manual_seed(1)\n\n path_save = os.path.join(path_trainset)\n if not os.path.exists(path_save):\n os.makedirs(path_save) \n \n # center-crops the test images to match the input size\n print('not cropped')\n transf1 = transforms.Compose([Cropcenter_RGB(im_size),lambda x: torch.from_numpy(x)]) \n\n already_cropped = 'no'\n for j in range(10):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth),transf1=transf1,need_names='yes',blur_name = j,blur_type = 'Gaussian',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n\n\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n if image_name =='.ipynb_checkpoints': continue\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img, 'noise_std':noise}) # degraded image blur and true image \n print('Gaussian blur done') \n # 2 pre-defined uniform blurs \n for j in range(2):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth),transf1=transf1, need_names='yes',blur_name = j+10,blur_type = 'uniform_'+str(j+1),noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = 
minibatch\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img, 'noise_std':noise}) # degraded image and blur kernel\n print('Uniform blur done') \n # 3 random defocus blurs \n for j in range(3):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth),transf1=transf1, need_names='yes',blur_name = j+12,blur_type = 'defocus',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n print(file_name_degraded)\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img, 'noise_std':noise}) # degraded image and blur kernel\n print('Defocus blur done')", "title": "" }, { "docid": "92d49f2e53c51d7fe5ff79df391a0eec", "score": "0.62197626", "text": "def gen_batch_function(image_paths, label_paths, indexes=None, crops=None, downsample=None):\n# print('Indexes',len(indexes))\n if indexes is None:\n indexes = [i for i in range(len(image_paths))]\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: image_batch[:batch_size], label_batch[:batch_size]\n \"\"\"\n# background_color = np.array([255, 0, 0])\n random.shuffle(indexes)\n print(\"get_batches set size:\", len(indexes))\n crop_t, crop_b = None, None\n if crops and len(crops):\n crop_t = crops[0]\n if len(crops) > 1: crop_b = - crops[1]\n for batch_i in range(0, len(indexes), batch_size):\n images = []\n gt_images = []\n for i in indexes[batch_i:batch_i+batch_size]:\n image_file = image_paths[i]\n gt_image_file = label_paths[i]\n\n image = scipy.misc.imread(image_file) #, image_shape\n# if (np.random.rand() < 0.5): \n# image = random_manipulation(image)\n #image = random_manipulation(image,'jpg70')\n gt_image = scipy.misc.imread(gt_image_file) #, image_shape)\n gt_image = gt_image[:,:,0]\n\n gt_road = np.any(np.stack((gt_image == 6, gt_image==7), axis=2), axis=2)\n gt_vehicles = gt_image == 10\n #print(np.sum(gt_vehicles), np.sum(gt_image))\n #tt\n gt_vehicles[496:] = False\n gt_objects = np.stack((gt_road, gt_vehicles), axis=2)\n gt_other = np.logical_not(np.any(gt_objects, axis=2))\n gt = np.stack((gt_road, gt_vehicles, gt_other), axis=2)\n# gt_bg = np.all(gt_image == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n image = image[crop_t:crop_b] \n gt = gt[crop_t:crop_b]\n \n \n \n #scale = 0.0; dims = (240,600)\n if downsample != None:\n 
dims = (0,0) #scale 0.75 (240x600) puca\n \n image = cv2.resize(image,dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA)\n \n gt_road = cv2.resize(gt[:,:,0].astype('uint8'),dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA).astype(bool)\n gt_vehicles = cv2.resize(gt[:,:,1].astype('uint8'),dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA).astype(bool)\n gt_other = cv2.resize(gt[:,:,2].astype('uint8'),dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA).astype(bool)\n \n gt = np.stack((gt_road, gt_vehicles, gt_other), axis=2)\n \n \n \n images.append(image) #64:576\n gt_images.append(gt) #64:576\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "title": "" }, { "docid": "40b74510ba8ff32c0afde079c0d948c1", "score": "0.6173545", "text": "def get_batches_fn_nir_ttv(batch_size):\n print(\"data_folder: \", data_folder)\n for folder in data_folder:\n image_paths = glob(os.path.join(folder, '*color*.png')) # previously 'data*.png'\n label_paths = {\n re.sub(r'ground_truth', 'color', os.path.basename(path)): path # previously 'ground_truth', 'data'\n for path in glob(os.path.join(folder, '*ground_truth*.png'))}\n background_color = np.array([0, 0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n nir_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n _, filename = os.path.split(image_file)\n fd_id = filename[0]\n img_id = image_file[-8:]\n #print(folder+\"/\"+fd_id+\"_nir_\"+img_id)\n nir = cv2.imread(folder+\"/\"+fd_id+\"_nir_\"+img_id)\n #print(folder+\"/nir_\"+img_id)\n nir = scipy.misc.imresize(nir, image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n images.append(image)\n gt_images.append(gt_image)\n nir_images.append(nir)\n\n yield np.array(images), np.array(gt_images), np.array(nir_images)", "title": "" }, { "docid": "5c7432af4c07f1292de852706ec3b08c", "score": "0.61263055", "text": "def build_batch(blur_dir, truth_dir, batch_size, resolution=256, validation=False):\n filenames = os.listdir(blur_dir)\n n = len(filenames)\n print(n)\n print(blur_dir)\n # Allow ourselves to loop infinitely over a dataset\n while True:\n # Shuffle is in-place\n np.random.shuffle(filenames)\n for i in range(0, n, batch_size):\n blur_batch, truth_batch = [], []\n # Python list comp allows going past last iter\n for filename in filenames[i:i+batch_size]:\n with Image.open(os.path.join(blur_dir, filename), 'r') as blurry:\n imagenp = np.array(blurry)\n # (,,3) if color\n if imagenp.shape == (resolution, resolution, 3):\n blur_batch.append(np.array(blurry))\n with Image.open(os.path.join(truth_dir, filename), 'r') as truthy:\n imagenp = np.array(truthy)\n # (,,3) if color\n if imagenp.shape == (resolution, resolution, 3):\n truth_batch.append(np.array(truthy))\n elif validation:\n truth_batch.append(np.array(truthy))\n yield np.array(blur_batch), np.array(truth_batch)", "title": "" }, { "docid": "3cf03de3f2a9050e4955e050d807397f", "score": "0.6113161", "text": "def imagekfold(thisrun, inorder):\n conv_data = thisrun.conv_data\n ms_data = thisrun.ms_data\n inputs = thisrun.inputs\n trimrows = 
thisrun.trimrows\n num_of_images = len(conv_data)\n test_im_num = round((1-thisrun.fracin)*num_of_images)\n images, images_copy = [], []\n conv_train_list, conv_test_list = [], []\n ms_train_list, ms_test_list = [], []\n test_centers_list, train_centers_list = [], []\n out_num_list, im_in_f_list = [], []\n small_train = (thisrun.fracin < .5)\n\n for i in range(0, num_of_images):\n images.append(i)\n images_copy.append(i)\n\n if small_train:\n small_set_num = num_of_images - test_im_num\n else:\n small_set_num = test_im_num\n\n if test_im_num == 0:\n repeatnum = 1\n else:\n repeatnum = math.ceil(num_of_images/small_set_num)\n\n for i in range(0, repeatnum):\n small_set = []\n test_ims = []\n\n # chooses images to remove randomly from a list of slices that haven't\n # been used yet. If all have been used, repeats are allowed, but the\n # same image can't be used twice in the same fold\n if inorder is False:\n for j in range(0, small_set_num):\n if not images:\n images = images_copy\n\n index = random.choice(images)\n while index in small_set:\n index = random.choice(images)\n\n images.remove(index)\n small_set.append(index)\n\n # uses list of slices in order instead\n elif inorder is True:\n for j in range(0, small_set_num):\n if not images:\n images = images_copy\n index = images[0]\n images.remove(index)\n small_set.append(index)\n\n elements = 0\n totalelements = 0\n for s in range(num_of_images):\n shape = conv_data[s].shape\n elements = elements + (shape[0]-trimrows*2) * (shape[1]-trimrows*2)\n totalelements = totalelements + shape[0]*shape[1]\n\n num_el_small_set = 0\n for s in small_set:\n shape = conv_data[s].shape\n num_el_small_set = (num_el_small_set + (shape[0] - trimrows * 2) *\n (shape[1] - trimrows * 2))\n\n num_el_large_set = elements - num_el_small_set\n # Builds vector of all slices excluding specified slices\n if small_train:\n conv_test = np.zeros([num_el_large_set, inputs])\n conv_train = np.zeros([num_el_small_set, inputs])\n ms_test = np.zeros([num_el_large_set, 1])\n ms_train = np.zeros([num_el_small_set, 1])\n else:\n conv_train = np.zeros([num_el_large_set, inputs])\n conv_test = np.zeros([num_el_small_set, inputs])\n ms_train = np.zeros([num_el_large_set, 1])\n ms_test = np.zeros([num_el_small_set, 1])\n\n traincount = 0\n im_count = 0\n\n for k in range(0, num_of_images):\n if small_train:\n if (k in small_set) is False:\n test_ims.append(k)\n for i in range(trimrows, conv_data[k].shape[0]-trimrows):\n for j in range(trimrows, conv_data[k].shape[1]-trimrows):\n if k in small_set:\n conv_train[traincount, :] = get_region_data(\n inputs, conv_data, i, j, k)\n ms_train[traincount, 0] = ms_data[k][i, j]\n traincount = traincount + 1\n\n if (k in small_set) is False:\n conv_test[im_count, :] = get_region_data(\n inputs, conv_data, i, j, k)\n ms_test[im_count, 0] = ms_data[k][i, j]\n im_count = im_count + 1\n else:\n if (k in small_set) is True:\n test_ims.append(k)\n for i in range(trimrows, conv_data[k].shape[0]-trimrows):\n for j in range(trimrows, conv_data[k].shape[1]-trimrows):\n if (k in small_set) is False:\n conv_train[traincount, :] = get_region_data(\n inputs, conv_data, i, j, k)\n print(np.asarray(ms_data).shape)\n ms_train[traincount, 0] = ms_data[k][i, j, k]\n traincount = traincount + 1\n\n if (k in small_set) is True:\n conv_test[im_count, :] = get_region_data(\n inputs, conv_data, i, j, k)\n ms_test[im_count, 0] = ms_data[k][i, j]\n im_count = im_count + 1\n\n if len(small_set) == 0:\n conv_test = conv_train\n ms_test = ms_train\n\n traincenters = 
len(ms_train)\n testcenters = len(ms_test)\n outnum = totalelements - traincenters - testcenters\n\n conv_test_list.append(conv_test)\n conv_train_list.append(conv_train)\n ms_test_list.append(ms_test)\n ms_train_list.append(ms_train)\n train_centers_list.append(traincenters)\n test_centers_list.append(testcenters)\n out_num_list.append(outnum)\n im_in_f_list.append(test_ims)\n\n thisrun.set_train_test_out_nums(np.mean(train_centers_list),\n np.mean(test_centers_list),\n np.mean(out_num_list))\n thisrun.conv_train_list = conv_train_list\n thisrun.ms_train_list = ms_train_list\n thisrun.conv_test_list = conv_test_list\n thisrun.ms_test_list = ms_test_list\n thisrun.im_in_f_list = im_in_f_list\n\n return", "title": "" }, { "docid": "34b0d836b33a45dc08fdbf356023395a", "score": "0.6110382", "text": "def importRandomBatch(num, type, scale = 0):\r\n X_files, y_files = getData(3000, 0, type)\r\n r = np.random.choice(len(X_files), num)\r\n\r\n y1_files = np.array(X_files)\r\n X1_files = np.array(y_files)\r\n \r\n y_files = y1_files[r]\r\n X_files = X1_files[r]\r\n\r\n X_input = []\r\n y_input = []\r\n \r\n filenames = []\r\n z = 0\r\n \r\n for i in range(X_files.shape[0]):\r\n\r\n\r\n\r\n X_file = X_files[i]\r\n filenames.append(X_file[:-16])\r\n X_img = imread(X_file)\r\n\r\n if (scale != 0):\r\n X_new = np.zeros((int(X_img.shape[0] / scale), int(X_img.shape[1] / scale), 3))\r\n k = 0\r\n for x in X_img[::scale]:\r\n X_new[k] = x[::scale]\r\n k += 1\r\n X_img = X_new\r\n X_input.append(X_img)\r\n z = 0\r\n for i in range(y_files.shape[0]):\r\n\r\n y_file = y_files[i]\r\n y_img = imread(y_file)\r\n if (scale != 0):\r\n y_new = np.zeros((int(y_img.shape[0] / scale), int(y_img.shape[1] / scale)))\r\n k = 0\r\n for y in y_img[::scale]:\r\n y_new[k] = y[::scale]\r\n k += 1\r\n y_img = y_new\r\n y_input.append(y_img)\r\n\r\n X = np.array(X_input)\r\n y = np.array(y_input)\r\n if (type == 'val'):\r\n return X, y, filenames\r\n return X, y", "title": "" }, { "docid": "5f16c86e57d1afdea92020f1d5389561", "score": "0.6107485", "text": "def create_trainset(path_groundtruth,path_trainset,noise_std_range,im_size):\n np.random.seed(0)\n torch.manual_seed(1)\n \n\n path_save = os.path.join(path_trainset)\n if not os.path.exists(path_save):\n os.makedirs(path_save) \n \n # center-crops the test images to match the input size\n print('not cropped')\n transf1 = transforms.Compose([Cropcenter(im_size),lambda x: torch.from_numpy(x)]) \n\n already_cropped = 'no'\n # 8 random anisotropic Gaussian blur kernels\n for j in range(8):\n data = MyDataset(folder=os.path.join(path_groundtruth),transf1=transf1,need_names='yes',blur_name = j,blur_type = 'Gaussian',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded] = minibatch\n if image_name =='.ipynb_checkpoints': continue\n img = np.zeros(im_size)\n img_degraded = np.zeros(im_size)\n blur = np.zeros((9,9))\n img[0:im_size[0],0:im_size[1]] = x[0,0:im_size[0],0:im_size[1]]\n img_degraded[0:im_size[0],0:im_size[1]] = x_degraded[0,0:im_size[0],0:im_size[1]]\n blur[0:9,0:9] = h[0,0:9,0:9]\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img}) # degraded image blur and true image \n print('Gaussian blur done') \n # 2 random isotropic Gaussian blur kernel \n for j in range(2):\n data = 
MyDataset(folder=os.path.join(path_groundtruth),transf1=transf1, need_names='yes',blur_name = j+8,blur_type = 'Gaussian_isotropic',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded] = minibatch\n img = np.zeros(im_size)\n img_degraded = np.zeros(im_size)\n blur = np.zeros((9,9))\n img[0:im_size[0],0:im_size[1]] = x[0,0:im_size[0],0:im_size[1]]\n img_degraded[0:im_size[0],0:im_size[1]] = x_degraded[0,0:im_size[0],0:im_size[1]]\n blur[0:9,0:9] = h[0,0:9,0:9]\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img}) # degraded image and blur kernel\n print('Isotropic Gaussian blur done')", "title": "" }, { "docid": "0001b800c6623341edfbc3fbd68e5611", "score": "0.60992324", "text": "def img_parts_generator(parts_filename, data_dir, \n cache=True,batch_size=10, steps=None, target_dim=(299,299), \n bottlenecks=None, unpickled=None, load_image=False, \n load_paths=False, bb_only=False, load_orig_img=False, load_parts=True):\n if load_parts:\n # check if cahced features exist\n if bb_only:\n cache_prefix = 'bb_'\n else:\n cache_prefix = 'parts_'\n cache_path = 'cache/'+cache_prefix+data_dir[:-1]+'.p'\n data_dir = '../'+ data_dir\n if unpickled:\n features = unpickled['features']\n count = unpickled['count'] \n elif cache and isfile(cache_path):\n print('Loaded cached features from '+cache_path)\n unpickled = pickle.load(open(cache_path,'rb'))\n features = unpickled['features']\n count = unpickled['count']\n else:\n # load parts data from file\n with open(parts_filename,'r') as f:\n lines = f.readlines()\n \n count = 0\n print(len(lines))\n features = {} # map processed features to file path\n bar = Bar('Extracting features', max=len(lines)) # show progress bar\n for l in lines:\n #split and strip fields\n fields = l.strip().split('\\t')\n fields = [x.strip() for x in fields]\n if isfile(data_dir + fields[2]):\n path = data_dir + fields[2]\n count += 1\n else:\n # continue if file is not a part of current dir\n # eg: if data_dir is validation but current sample is in train\n continue\n\n row = []\n i = 4 # bounding box and other part info starts from column index 4\n featue_range = 42\n if bb_only:\n feature_range = 8 # bounding boxes are indices 4-7\n else:\n feature_range = 42 # 4 + 19*2 is num of cols in file\n \n while i<feature_range: \n orig_dim = get_dim(path)\n field1 = fields[i]\n field2 = fields[i+1]\n # use -1 if feature does not exist\n if field1.lower() == 'null' or field2.lower() == 'null':\n field1 = -1\n field2 = -1\n else:\n field1 = int(field1)\n field2 = int(field2)\n point = (field1, field2)\n \n # scale the point to target img dim\n if target_dim:\n scaled_feature = scale(point,orig_dim,target_dim=target_dim)\n else:\n scaled_feature = point\n \n row.append(scaled_feature[0])\n row.append(scaled_feature[1])\n i += 2\n features[path] = row\n bar.next()\n bar.finish()\n # pickle this info to do it only once\n pickled = {'features':features, 'count':count}\n pickle.dump(pickled, open(cache_path,'wb'))\n\n # get actual img and parts\n #print('Images found in '+ data_dir +': '+ str(count))\n # get files in data_dir\n filenames = [file for file in glob.glob(data_dir+'*/*', recursive=True)]\n filenames.sort()\n i = 0\n num_files = len(filenames)\n \n batch_count = 0\n while i < num_files:\n \n if 
batch_count == steps:\n break\n \n data_img = []\n data_orig_img = []\n data_parts = []\n data_bottleneck = []\n data_paths = []\n \n for j in range(batch_size):\n if i+j>= num_files:\n break\n img = filenames[i+j]\n if bottlenecks is not None:\n data_bottleneck.append(bottlenecks[i+j])\n if(load_orig_img):\n data_orig_img.append(get_img_array(img, target_dim=None))\n if(load_image):\n data_img.append(get_img_array(img, target_dim=target_dim))\n if load_paths:\n data_paths.append(img)\n if load_parts:\n data_parts.append(features[img])\n i += batch_size\n batch_count += 1\n data_orig_img, data_img, data_parts, data_bottleneck = np.asarray(data_orig_img), np.asarray(data_img), np.asarray(data_parts), np.asarray(data_bottleneck)\n #print(data_img.shape)\n ret_values = []\n if bottlenecks:\n ret_values.append(data_bottleneck)\n if load_image:\n ret_values.append(data_img)\n if load_orig_img:\n ret_values.append(data_orig_img)\n if load_paths:\n ret_values.append(data_paths)\n if load_parts:\n ret_values.append(data_parts)\n\n yield tuple(ret_values)", "title": "" }, { "docid": "a5c529f1d743aefa90c9b3853022b2f4", "score": "0.60985845", "text": "def gen_batch_function(data_folder, image_shape):\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'images', '*.png'))\n label_paths = {\n os.path.basename(path): path\n for path in glob(os.path.join(data_folder, 'groundtruth', '*.png'))}\n background_color = np.array([0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_image = np.expand_dims(gt_image, axis=3)\n\n gt_image = np.all(gt_image == background_color, axis=2)\n gt_image = gt_image.reshape(gt_image.shape + (1,))\n\n gt_image = np.concatenate((gt_image, np.invert(gt_image)), axis=2).astype(np.int8)\n\n images.append(image)\n gt_images.append(gt_image)\n\n images_rot = get_rotated_images(images, [90, 180, 270])\n gt_images_rot = get_rotated_images(gt_images, [90, 180, 270])\n\n images_flipped = horizontal_flip(images)\n images_flipped_v = vertical_flip(images)\n\n images_flipped_gt = horizontal_flip(gt_images)\n images_flipped_v_gt = vertical_flip(gt_images)\n\n final = images + images_flipped + images_flipped_v + images_rot\n final_gt = gt_images + images_flipped_gt + images_flipped_v_gt + gt_images_rot\n\n yield np.array(final), np.array(final_gt)\n\n return get_batches_fn", "title": "" }, { "docid": "f16e41781e6e61bd679831dca5b04317", "score": "0.60678643", "text": "def gen_train_val_folds(data_folder, tst_size=None, seed=None):\n img_files = os.listdir(os.path.join(data_folder, 'CameraRGB'))\n image_paths = [os.path.join(data_folder, 'CameraRGB', i) for i in img_files]\n label_paths = [os.path.join(data_folder, 'CameraSeg', i) for i in img_files]\n #image_paths = glob(os.path.join(data_folder, 'CameraRGB', '*.png'))\n #label_paths = glob(os.path.join(data_folder, 'CameraSeg', '*.png'))\n indices = [i for i in range(len(image_paths))]\n if tst_size != None:\n indices = train_test_split(indices, test_size=tst_size, random_state=seed)\n return image_paths, label_paths, indices", "title": 
"" }, { "docid": "b808488091a53f7ebf8d3e78aeddac64", "score": "0.6035064", "text": "def get_batches_fn_nir(batch_size):\n for folder in data_folder:\n image_paths = glob(os.path.join(folder, 'color*.png')) # previously 'data*.png'\n label_paths = {\n re.sub(r'ground_truth', 'color', os.path.basename(path)): path # previously 'ground_truth', 'data'\n for path in glob(os.path.join(folder, 'ground_truth*.png'))}\n background_color = np.array([0, 0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n img_id = image_file[-8:]\n nir = cv2.imread(folder+\"/nir_\"+img_id)\n #print(folder+\"/nir_\"+img_id)\n nir = scipy.misc.imresize(nir, image_shape)\n overlay = cv2.addWeighted(image,0.5,nir,0.5,0)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(overlay)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "title": "" }, { "docid": "01d23a2981a6b27c6b3535d4be2bf1db", "score": "0.60147274", "text": "def get_batches_fn(batch_size):\n# background_color = np.array([255, 0, 0])\n random.shuffle(indexes)\n print(\"get_batches set size:\", len(indexes))\n crop_t, crop_b = None, None\n if crops and len(crops):\n crop_t = crops[0]\n if len(crops) > 1: crop_b = - crops[1]\n for batch_i in range(0, len(indexes), batch_size):\n images = []\n gt_images = []\n for i in indexes[batch_i:batch_i+batch_size]:\n image_file = image_paths[i]\n gt_image_file = label_paths[i]\n\n image = scipy.misc.imread(image_file) #, image_shape\n# if (np.random.rand() < 0.5): \n# image = random_manipulation(image)\n #image = random_manipulation(image,'jpg70')\n gt_image = scipy.misc.imread(gt_image_file) #, image_shape)\n gt_image = gt_image[:,:,0]\n\n gt_road = np.any(np.stack((gt_image == 6, gt_image==7), axis=2), axis=2)\n gt_vehicles = gt_image == 10\n #print(np.sum(gt_vehicles), np.sum(gt_image))\n #tt\n gt_vehicles[496:] = False\n gt_objects = np.stack((gt_road, gt_vehicles), axis=2)\n gt_other = np.logical_not(np.any(gt_objects, axis=2))\n gt = np.stack((gt_road, gt_vehicles, gt_other), axis=2)\n# gt_bg = np.all(gt_image == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n image = image[crop_t:crop_b] \n gt = gt[crop_t:crop_b]\n \n \n \n #scale = 0.0; dims = (240,600)\n if downsample != None:\n dims = (0,0) #scale 0.75 (240x600) puca\n \n image = cv2.resize(image,dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA)\n \n gt_road = cv2.resize(gt[:,:,0].astype('uint8'),dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA).astype(bool)\n gt_vehicles = cv2.resize(gt[:,:,1].astype('uint8'),dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA).astype(bool)\n gt_other = cv2.resize(gt[:,:,2].astype('uint8'),dims, fx=downsample, fy=downsample, interpolation = cv2.INTER_AREA).astype(bool)\n \n gt = np.stack((gt_road, gt_vehicles, gt_other), axis=2)\n \n \n \n images.append(image) #64:576\n gt_images.append(gt) #64:576\n\n yield np.array(images), np.array(gt_images)", "title": "" }, { "docid": 
"972b587e6ccc1cfcfa3f4c5fca1dfcfe", "score": "0.6010233", "text": "def test_crop_generator(input_path, batch_size=1, mode=\"test\", num_classes =6, epsilon = 0, resize_params = (224, 224), do_shuffle=True):\n data_path = os.path.join(input_path, mode)\n all_images = glob.glob(os.path.join(data_path, \"**/*.jpg\"))\n print(\"Found {} files for {}\".format(len(all_images), mode))\n if do_shuffle:\n shuffle(all_images)\n curr_idx = 0\n while curr_idx < len(all_images):\n # create random batches first\n #batch_paths = np.random.choice(a= all_images, size = batch_size)\n # initialize our batches of images and labels\n #print(all_images[curr_idx])\n imgs = []\n ts = []\n labels = [] \n curr_batch = all_images[curr_idx]\n _, ts, labels = preprocess_test_img(image_path= curr_batch, class_names = [0,1,2,3,4,5], num_classes = num_classes, epsilon = epsilon, resize_width_and_height=resize_params, mode=mode) \n #imgs = np.array(imgs)\n ts = np.array(ts)\n labels = np.array(labels)\n curr_idx += batch_size\n yield (ts, labels, curr_batch)", "title": "" }, { "docid": "65c7ec67f7d22ee2b25f88424ccb2274", "score": "0.5997243", "text": "def crop_generator(input_path, batch_size=32, mode=\"train\", num_classes =6, epsilon = 0, resize_params = (224, 224), do_shuffle=True):\n data_path = os.path.join(input_path, mode)\n all_images = glob.glob(os.path.join(data_path, \"**/*.jpg\"))\n #all_ts = glob.glob(os.path.join(data_path, \"**/*.csv\"))\n print(\"Found {} files for {}\".format(len(all_images), mode))\n if do_shuffle:\n shuffle(all_images)\n curr_idx = 0\n while True:\n # create random batches first\n #batch_paths = np.random.choice(a= all_images, size = batch_size)\n # initialize our batches of images and labels\n imgs = []\n ts = []\n labels = [] \n if curr_idx > len(all_images): # reset if you've parsed all data\n curr_idx = 0\n curr_batch = all_images[curr_idx: (curr_idx + batch_size)]\n _, ts, labels = preprocess_batches(image_batch= curr_batch, class_names = [0,1,2,3,4,5], num_classes = num_classes, epsilon = epsilon, resize_width_and_height=resize_params, mode=mode) \n ts = np.array(ts)\n labels = np.array(labels)\n curr_idx += batch_size \n yield (ts, labels)", "title": "" }, { "docid": "82f274bccb2422035ac75ccaef64e10e", "score": "0.59957653", "text": "def randompixelkfold(thisrun):\n fracin = thisrun.fracin\n inputs = thisrun.inputs\n conv_data = thisrun.conv_data\n ms_data = thisrun.ms_data\n trimrows = thisrun.trimrows\n num_of_images = len(conv_data)\n total_elements = 0\n no_edge_els = 0\n small_train = (fracin < .5)\n\n for s in range(num_of_images):\n shape = conv_data[s].shape\n total_elements = total_elements + shape[0]*shape[1]\n no_edge_els = no_edge_els + (shape[0]-2*trimrows)*(shape[1]-2*trimrows)\n\n if small_train:\n num_small_set = int(fracin*no_edge_els)\n num_large_set = no_edge_els-num_small_set\n num_folds = int(math.ceil(no_edge_els/num_small_set))\n else:\n num_large_set = int(fracin*no_edge_els)\n num_small_set = no_edge_els-num_large_set\n num_folds = int(math.ceil(no_edge_els/num_small_set))\n\n conv_test_list, conv_train_list = [], []\n ms_test_list, ms_train_list = [], []\n test_centers_list, train_centers_list, out_num_list = [], [], []\n rands, rands_copy, rands_in_fold = [], [], []\n folds = 0\n\n for i in range(no_edge_els):\n rands.append(i)\n rands_copy.append(i)\n\n # places pixels\n for n in range(0, num_folds):\n rands_in_fold.append([])\n out = []\n for s in range(len(conv_data)):\n shape = conv_data[s].shape\n out_image = np.zeros([shape[0], shape[1]])\n 
out.append(out_image)\n folds = folds + 1\n\n for m in range(0, num_small_set):\n if not rands:\n rands = rands_copy\n # generates list of random indices that haven't already been used\n rand_index = random.choice(rands)\n while rand_index in rands_in_fold[folds-1]:\n rand_index = random.choice(rands)\n rands_in_fold[folds-1].append(rand_index)\n rands.remove(rand_index)\n randloc = pixel_index_to_loc(rand_index, trimrows, conv_data)\n # pulls out data for the current random pixel in the list. Marks\n # these locations in the out array with 2. Also marks invalid zones\n # in out array with 1. Valid squares are marked with 0.\n out[randloc[2]][randloc[0], randloc[1]] = 2\n\n invalid_array, invalid_count = find_invalid_no_buffer(out, trimrows)\n valid_count = total_elements - invalid_count\n # now assign training and test data\n if small_train:\n conv_test = np.zeros([num_large_set, inputs])\n ms_test = np.zeros([num_large_set, 1])\n conv_train = np.zeros([invalid_count, inputs])\n ms_train = np.zeros([invalid_count, 1])\n else:\n conv_test = np.zeros([num_small_set, inputs])\n ms_test = np.zeros([num_small_set, 1])\n conv_train = np.zeros([valid_count, inputs])\n ms_train = np.zeros([valid_count, 1])\n\n test_count = 0\n train_count = 0\n for k in range(0, num_of_images):\n currslice = invalid_array[k]\n for i in range(0, conv_data[k].shape[0]):\n for j in range(0, conv_data[k].shape[1]):\n if small_train:\n if currslice[i, j] == 0:\n conv_test[test_count, :] = get_region_data(\n inputs, conv_data, i, j, k)\n ms_test[test_count, :] = ms_data[k][i, j]\n test_count = test_count + 1\n elif currslice[i, j] == 2:\n conv_train[train_count, :] = get_region_data(\n inputs, conv_data, i, j, k)\n ms_train[train_count, :] = ms_data[k][i, j]\n train_count = train_count + 1\n else:\n if currslice[i, j] == 2:\n conv_test[test_count, :] = get_region_data(\n inputs, conv_data, i, j, k)\n ms_test[test_count, :] = ms_data[k][i, j]\n test_count = test_count + 1\n elif currslice[i, j] == 0:\n conv_train[train_count, :] = get_region_data(\n inputs, conv_data, i, j, k)\n ms_train[train_count, :] = ms_data[k][i, j]\n train_count = train_count + 1\n\n train_centers = len(ms_train)\n test_centers = len(ms_test)\n outnum = total_elements - no_edge_els\n\n conv_test_list.append(conv_test)\n conv_train_list.append(conv_train)\n ms_test_list.append(ms_test)\n ms_train_list.append(ms_train)\n\n train_centers_list.append(train_centers)\n test_centers_list.append(test_centers)\n out_num_list.append(outnum)\n\n thisrun.set_train_test_out_nums(np.mean(train_centers_list),\n np.mean(test_centers_list),\n np.mean(out_num_list))\n thisrun.conv_train_list = conv_train_list\n thisrun.ms_train_list = ms_train_list\n thisrun.conv_test_list = conv_test_list\n thisrun.ms_test_list = ms_test_list\n\n return", "title": "" }, { "docid": "21020093d6adc38332817ca5a9c1ce7e", "score": "0.59804404", "text": "def gen_unet_batch_v1(img_mask_names, crop_sz=(64,64,64), mask_sz=(24,24,24), batch_sz=32):\n imgs = []\n masks = []\n # read all images and masks into lists \n for i in range(len(img_mask_names)):\n curr_name = img_mask_names[i]\n curr_img, head = nrrd.read(curr_name[0])\n curr_mask, head = nrrd.read(curr_name[1])\n assert curr_img.shape == curr_mask.shape, \"Image and mask size do not match!\"\n \n curr_img = np.float32(curr_img)\n curr_img = (curr_img - curr_img.mean()) / curr_img.std() # normalize image\n imgs.append(curr_img)\n curr_mask = np.float32(curr_mask)\n masks.append(curr_mask)\n\n batch_img = np.zeros((batch_sz, 
crop_sz[0], crop_sz[1], crop_sz[2], 1), dtype='float32')\n batch_mask = np.zeros((batch_sz, mask_sz[0], mask_sz[1], mask_sz[2], 1), dtype='float32') \n \n while True:\n # randomly crop an image from imgs list\n idx = np.random.randint(0, len(imgs))\n img_for_crop = imgs[idx]\n mask_for_crop = masks[idx]\n num_crop = 0\n while num_crop < batch_sz:\n x = np.random.randint(0, img_for_crop.shape[0]-crop_sz[0])\n y = np.random.randint(0, img_for_crop.shape[1]-crop_sz[1])\n z = np.random.randint(0, img_for_crop.shape[2]-crop_sz[2])\n cropped_img = img_for_crop[x:x+crop_sz[0], y:y+crop_sz[1], z:z+crop_sz[2]]\n cropped_mask = mask_for_crop[x:x+crop_sz[0], y:y+crop_sz[1], z:z+crop_sz[2]]\n shrink_sz = (int((crop_sz[0]-mask_sz[0])/2), int((crop_sz[1]-mask_sz[1])/2), int((crop_sz[2]-mask_sz[2])/2))\n cropped_mask = cropped_mask[shrink_sz[0]:crop_sz[0]-shrink_sz[0], shrink_sz[1]:crop_sz[1]-shrink_sz[1], shrink_sz[2]:crop_sz[2]-shrink_sz[2]]\n # if include the random crop in training\n is_include = False\n num_syn_vxl = len(cropped_mask[cropped_mask==1])\n accept_prob = np.random.random()\n if num_syn_vxl > 2000 or accept_prob > 0.95:\n is_include = True\n elif 1000 < num_syn_vxl <= 2000 and accept_prob > 0.5:\n is_include = True\n elif 500 < num_syn_vxl <= 1000 and accept_prob > 0.75:\n is_include = True\n elif 0 < num_syn_vxl <=500 and accept_prob > 0.85:\n is_include = True\n \n # include the crop\n if is_include:\n batch_img[num_crop,:,:,:,0] = cropped_img\n batch_mask[num_crop,:,:,:,0] = cropped_mask\n num_crop += 1\n \n # data augmentation\n x_flip = np.random.randint(2, size=batch_sz)\n z_flip = np.random.randint(2, size=batch_sz)\n rot_angle = np.random.randint(4, size=batch_sz)\n for j in range(batch_sz):\n if x_flip[j]:\n batch_img[j,:,:,:,0] = np.flip(batch_img[j,:,:,:,0], axis=0)\n batch_mask[j,:,:,:,0] = np.flip(batch_mask[j,:,:,:,0], axis=0)\n if z_flip[j]:\n batch_img[j,:,:,:,0] = np.flip(batch_img[j,:,:,:,0], axis=2)\n batch_mask[j,:,:,:,0] = np.flip(batch_mask[j,:,:,:,0], axis=2)\n if rot_angle[j]:\n batch_img[j,:,:,:,0] = np.rot90(batch_img[j,:,:,:,0], rot_angle[j], axes=(0,1))\n batch_mask[j,:,:,:,0] = np.rot90(batch_mask[j,:,:,:,0], rot_angle[j], axes=(0,1))\n\n yield batch_img, batch_mask", "title": "" }, { "docid": "8905eab17d39cc626d94435a3895a4ce", "score": "0.5979535", "text": "def create_sIII_run_filelist(\n glob_file='data/core50_128x128/*/*/*',\n dest_bp='/insert/your/path/sIII_inc/',\n dest_cum_bp='/insert/your/path/sIII_cum/',\n all_sess=range(11),\n all_objs=range(50),\n cumulative=True,\n batch_order=[x for x in range(79)]):\n\n # Here the creations of the units (which obj in which sess)\n # is **independent** by the external seed. This means that the units are\n # static throughout the runs while only their order can change. 
This is\n # the same as for the NI and NC scenarios where the batches are fixed.\n rnd_state = np.random.get_state()\n np.random.seed(0)\n\n filelist_all_sess = load_filelist_per_sess(glob_file)\n train_sess = [0, 1, 3, 4, 5, 7, 8, 10]\n test_sess = [2, 6, 9]\n\n # Selecting the five objs for batch\n first_ten_objs = [i * 5 for i in range(10)]\n objs_after_first_b = []\n for id in all_objs:\n if id not in first_ten_objs:\n objs_after_first_b.append(id)\n\n np.random.shuffle(objs_after_first_b)\n objs_per_batch = np.reshape(objs_after_first_b, (8, 5))\n objs_per_batch = [row for row in objs_per_batch]\n\n # Creating units for classes after first batch\n units = []\n for sess in train_sess:\n for objs_id in objs_per_batch:\n units.append((sess, objs_id))\n\n # Creating for the first 10 classes split in two groups\n for sess in train_sess[1:]:\n units.append((sess, first_ten_objs[:5]))\n units.append((sess, first_ten_objs[5:]))\n\n # Suffling units\n np.random.shuffle(units)\n\n print(\"Number of incremental units: \", len(units))\n print(\"----- Unit details (sess, objs) ------\")\n for unit in units:\n print(unit)\n\n # Creating first batch\n create_filelist(dest_bp + \"train_batch_00\", filelist_all_sess, [0],\n first_ten_objs)\n\n # Creating test\n create_filelist(dest_bp + \"test\", filelist_all_sess, test_sess,\n all_objs)\n\n # Reordering incremental units based on batch order\n new_units = [[]] * 78\n for i, id in enumerate(batch_order[1:]):\n new_units[i] = units[id-1]\n units = new_units\n\n # Creating incremental batches with units\n for batch_id, unit in enumerate(units):\n create_filelist(dest_bp + \"train_batch_\" +\n str(batch_id + 1).zfill(2),\n filelist_all_sess, [unit[0]], unit[1])\n\n # Creating the cumulative version\n if cumulative:\n all_lines = []\n for batch_id in range(len(units) + 1):\n with open(dest_bp + 'train_batch_' +\n str(batch_id).zfill(2) + '_filelist.txt', 'r') as f:\n all_lines += f.readlines()\n with open(dest_cum_bp + 'train_batch_' +\n str(batch_id).zfill(2) + '_filelist.txt', 'w') as f:\n for line in all_lines:\n f.write(line)\n shutil.copy(dest_bp + \"test_filelist.txt\", dest_cum_bp)\n\n # Resetting previous rnd state\n np.random.set_state(rnd_state)", "title": "" }, { "docid": "13f31f4cbb5bbe1de561023934842c34", "score": "0.5962804", "text": "def gen_batch_function_nir(data_folder, image_shape):\n def get_batches_fn_nir(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n for folder in data_folder:\n image_paths = glob(os.path.join(folder, 'color*.png')) # previously 'data*.png'\n label_paths = {\n re.sub(r'ground_truth', 'color', os.path.basename(path)): path # previously 'ground_truth', 'data'\n for path in glob(os.path.join(folder, 'ground_truth*.png'))}\n background_color = np.array([0, 0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n img_id = image_file[-8:]\n nir = cv2.imread(folder+\"/nir_\"+img_id)\n #print(folder+\"/nir_\"+img_id)\n nir = scipy.misc.imresize(nir, image_shape)\n overlay = cv2.addWeighted(image,0.5,nir,0.5,0)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = 
gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(overlay)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn_nir", "title": "" }, { "docid": "12d37df1eb08211e25406cde83f05008", "score": "0.59626096", "text": "def image_generator_flowerData(batch_size, img_dir):\n input_filenames = glob(img_dir + \"/*-in.jpg\")\n counter = 0\n random.shuffle(input_filenames)\n while True:\n small_images = np.zeros((batch_size, 32, 32, 3))\n large_images = np.zeros((batch_size, 256, 256, 3))\n if counter+batch_size >= len(input_filenames):\n counter = 0\n for i in range(batch_size):\n img = input_filenames[counter + i]\n img_low_res = Image.open(img)\n img_high_res = Image.open(img.replace(\"-in.jpg\", \"-out.jpg\"))\n small_images[i] = np.array(img_low_res) / 255.0\n large_images[i] = np.array(img_high_res) / 255.0\n yield (small_images, large_images)\n counter += batch_size", "title": "" }, { "docid": "66063bef0ffbd4045f50e942e8da7e44", "score": "0.5937325", "text": "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if batch_size > (self._num_examples - self._index_in_epoch):\n # Finished epoch\n print 'end epoch'\n self._epochs_completed += 1\n # Shuffle the data\n \"\"\" Shufling all the Images with a single permutation \"\"\"\n random.shuffle(self._images_key)\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n\n images = np.empty((batch_size, self._input_size[0], self._input_size[1],self._input_size[2]))\n if len(self._depth_size)==2:\n self._depth_size = (self._depth_size[0], self._depth_size[1],1)\n depths = np.empty((batch_size, self._depth_size[0], self._depth_size[1],self._depth_size[2]))\n for n in range(batch_size):\n key=self._images_key[start+n]\n rotation=0\n inversion=0\n if self.rotate:\n rotation=key & 3\n key=int(key/4)\n\n if self.invert:\n inversion=key & 1\n key=int(key/2)\n \n if self._is_validation:\n images[n] = readImageFromDB(self._db,'val'+str(key),self._input_size)\n depths[n] = readImageFromDB(self._db,'val'+str(key)+\"depth\",self._depth_size)\n else:\n images[n] = readImageFromDB(self._db,str(key),self._input_size)\n depths[n] = readImageFromDB(self._db,str(key)+\"depth\",self._depth_size)\n\n images[n]=np.rot90(images[n],rotation)\n depths[n]=np.rot90(depths[n],rotation)\n\n if inversion:\n images[n]=np.fliplr(images[n])\n depths[n]=np.fliplr(depths[n])\n return images, depths#, transmission", "title": "" }, { "docid": "e0d960a0e902d965859599d78e22e268", "score": "0.5924801", "text": "def train(self, train_data, img_size, start_size, iterations, lr, save_dir, b1, b2, save_int, **kwargs):\n if not os.path.isdir(save_dir): os.mkdir(save_dir)\n gen_steps, gen_res, disc_steps, disc_res = self.load_saved_models(save_dir)\n\n if gen_steps:\n loop_start_size = gen_res\n fade_on = True\n else:\n loop_start_size = start_size\n fade_on = False\n self.init_models(loop_start_size, True, lr, b1, b2)\n\n # Number of progressive resolution stages\n resolutions = int(np.log2(img_size/loop_start_size)) + 1\n ones = tf.cast(tf.ones((self.batch_size, 1)), tf.float32)\n\n for resolution in range(resolutions):\n if resolution > 0: fade_on = True\n print('Resolution: ', loop_start_size*2**resolution)\n res_iterations = iterations[loop_start_size*2**resolution]\n stage_iterations = max(0, res_iterations - gen_steps) if resolution == 0 else res_iterations\n progress = 
tqdm(train_data.take(stage_iterations))\n for iteration, (imgs, conditioning) in enumerate(progress):\n # imgs, conditioning = sample['image'], sample['label']\n if resolution < resolutions - 1:\n imgs = tf.image.resize_images(imgs, (loop_start_size*2**resolution, loop_start_size*2**resolution),\n align_corners=True)\n if len(conditioning.get_shape().as_list()) < 2:\n conditioning = tf.one_hot(conditioning, self.num_classes, axis=-1)\n fade = min(iteration/(stage_iterations//2.0), 1.0) if (resolution > 0 and fade_on) else 1.0\n fade_tensor = tf.constant(fade, shape=(self.batch_size, 1), dtype=tf.float32)\n self.train_step(imgs=imgs, conditioning=conditioning, fade=fade_tensor, ones=ones)\n progress.set_postfix(best_gen_loss=self.best_gen_loss.numpy(), best_disc_loss=self.best_disc_loss.numpy(),\n gen_loss=self.tot_gen_loss.numpy(), disc_loss=self.tot_disc_loss.numpy())\n\n # Save every n intervals\n if iteration % save_int == 0:\n random_classes = []\n for i in range(self.num_classes):\n random_label = [0] * self.num_classes\n random_label[i] = 1.0\n random_classes.append(random_label)\n conditioning = tf.stack(random_classes)\n self.generate(iteration + 1, save_dir, self.num_classes, conditioning, fade)\n self.save_learning_curve(save_dir, loop_start_size*2**resolution)\n self.save_models(save_dir, loop_start_size*2**resolution, iteration, gen_steps, disc_steps)\n\n if resolution < resolutions - 1:\n print('Updating models to add new layers for next resolution.')\n self.update_models(loop_start_size*2**resolution)", "title": "" }, { "docid": "7bcd0490a34e69eac5bbd6f45fa17294", "score": "0.59223986", "text": "def load_imagenet_random_train(path_img, path_info, args):\n imgnet_info = load_file(path_info)\n name_folders = [p.split('/')[0].strip() for p in imgnet_info]\n name_folders = list(sorted(set(name_folders))) \n random_train_file = list()\n for m in range(args.random_train_num_start, args.random_train_num_end):\n random.seed(m)\n # for i, n in enumerate(name_folders):\n # random_name_file = sorted(random.sample(os.listdir(path_img + n), args.random_train_size))\n # process_folder = [load_img_imagenet(path_img + n + '/' + r, args) for r in random_name_file]\n # process_folder = [p for p in process_folder if len(p.shape) == 4]\n # if len(process_folder) > 0: \n # random_train_file += random_name_file\n # print(m, i)\n\n if os.path.exists('./dataset/%s_%s_random_train_%i.p' % (args.d, args.model, m)): \n print('File exists in your directory')\n else: \n x_random_train, y_random_train = list(), list()\n for i, n in enumerate(name_folders):\n random_name_file = sorted(random.sample(os.listdir(path_img + n), args.random_train_size)) \n process_folder = [load_img_imagenet(path_img + n + '/' + r, args) for r in random_name_file]\n process_folder = [p for p in process_folder if len(p.shape) == 4]\n if len(process_folder) > 0:\n process_folder = np.concatenate(process_folder, axis=0)\n label_folder = get_label_imagenet(name_folder=n, imagnet_info=imgnet_info) \n label_folder = np.array([label_folder for i in range(args.random_train_size)]) \n \n x_random_train.append(process_folder)\n y_random_train.append(label_folder) \n print('Random training %i-th of the folder %i-th which has name %s' % (m, i, n))\n \n x_random_train = np.concatenate(x_random_train, axis=0)\n y_random_train = np.concatenate(y_random_train, axis=0) \n pickle.dump((x_random_train, y_random_train), open('./dataset/%s_%s_random_train_%i.p' % (args.d, args.model, m), 'wb'), protocol=4) \n 
write_file('./dataset/%s_%s_random_train_name_file.txt' % (args.d, args.model), random_train_file)\n print('Now you can load the training dataset')\n exit()", "title": "" }, { "docid": "72d9e6bcdede3d79bbcd11eeef6290b5", "score": "0.59195316", "text": "def get_test_batch():\n ref_image = np.empty([c.TEST_BATCH_SIZE, c.HR_HEIGHT, c.HR_WIDTH, 3],dtype=np.float32)\n multi_plane = np.empty([c.TEST_BATCH_SIZE, c.HR_HEIGHT, c.HR_WIDTH, 3*c.NUM_PLANE],dtype=np.float32) \n gt = np.empty([c.TEST_BATCH_SIZE, c.HR_HEIGHT, c.HR_WIDTH, 3],dtype=np.float32)\n for i in range(c.TEST_BATCH_SIZE):\n #idx = str(np.random.randint(1,c.NUM_TEST_SAMPLE+1)).zfill(5)\n idx = str(i+1).zfill(5)\n lr_path = c.TEST_DIR + '/lr/' + idx + '.png'\n ref_image[i,:,:,:] = imresize(imread(lr_path),(128,128))/255.0\n \n psv_directory = os.path.join(c.TEST_DIR,'psv',idx)\n for j in range(c.NUM_PLANE):\n psv_path = os.path.join(psv_directory,str(j+1).zfill(2)+'.png') \n multi_plane[i,:,:,3*j:3*(j+1)] = imread(psv_path)/255.0\n \n gt_path = c.TEST_DIR + '/gt/' + idx + '.png'\n gt[i,:,:,:] = imread(gt_path)/255.0\n\n res = {'ref_image':ref_image,'multi_plane':multi_plane,'gt':gt}\n\n return res", "title": "" }, { "docid": "f23eee0561fac8f6401f6c5951e60290", "score": "0.59191084", "text": "def get_batches_fn(batch_size):\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n # Shuffle training data\n random.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, len(image_paths), batch_size):\n # Elffer: Loop through part of the images\n # for batch_i in range(0, 5, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n # Augmentation\n if augmentation is True:\n images, gt_images = augment(images, gt_images)\n\n yield np.array(images), np.array(gt_images)", "title": "" }, { "docid": "9b1c881336c42dba405cf3f993d7dfe1", "score": "0.5918303", "text": "def __init__(self,lfile,bsz,imsz,wsz,csz,psz,nstd,niter=0,isval=False):\n\n self.bsz, self.imsz = bsz, imsz\n self.wsz, self.csz, self.psz = wsz, csz, psz\n self.nstd = nstd\n self.isrand = not isval\n\n # Setup fetch graph\n self.graph()\n\n # Load file list\n self.files = [l.strip() for l in open(lfile).readlines()]\n if len(self.files) < bsz: # repeat file list if its size < bsz\n self.files = self.files * int(np.ceil(float(bsz)/len(self.files)))\n self.ndata = len(self.files)\n self.niter = niter*bsz\n \n # Setup shuffling\n if self.isrand:\n self.rand = np.random.RandomState(0)\n idx = self.rand.permutation(self.ndata)\n for i in range(niter // self.ndata):\n idx = self.rand.permutation(self.ndata)\n self.idx = np.int32(idx)\n else:\n self.idx = np.int32(np.arange(self.ndata))", "title": "" }, { "docid": "fd548be60fc0554f58a89d0710e9fb5a", 
"score": "0.58946717", "text": "def gen_test_output(sess, logits, keep_prob, image_pl, data_folder, image_shape):\n batch_size = 5\n num_pixels = image_shape[0] * image_shape[1]\n org_image_shape = (600, 800)\n image_files = glob(os.path.join(data_folder, 'CameraRGB', '*.png'))\n print(len(image_files))\n n_loop = int(len(image_files) / batch_size)\n print(n_loop)\n for i in range(n_loop):\n print(\"Batch\", i)\n start_idx = i * batch_size\n stop_idx = (i+1) * batch_size\n images = []\n image_names = []\n for image_file in image_files[start_idx:stop_idx]:\n print(image_file)\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n images.append(image)\n image_names.append(image_file)\n\n im_softmax = sess.run(\n [tf.nn.softmax(logits)],\n {keep_prob: 1.0, image_pl: images})[0]\n\n for i in range(batch_size):\n print(\"Infer\",i)\n start_idx = i * num_pixels\n stop_idx = (i+1) * num_pixels\n max_class = np.argmax(im_softmax[start_idx:stop_idx], axis=1).reshape(image_shape[0], image_shape[1])\n road = (max_class == 1).reshape(image_shape[0], image_shape[1], 1)\n cars = (max_class == 2).reshape(image_shape[0], image_shape[1], 1)\n road_mask = np.dot(road, np.array([[0, 255, 0, 127]]))\n cars_mask = np.dot(cars, np.array([[255, 0, 0, 127]]))\n road_mask = scipy.misc.toimage(road_mask, mode=\"RGBA\")\n cars_mask = scipy.misc.toimage(cars_mask, mode=\"RGBA\")\n street_im = scipy.misc.toimage(images[i])\n street_im.paste(road_mask, box=None, mask=road_mask)\n street_im.paste(cars_mask, box=None, mask=cars_mask)\n \n res_image = scipy.misc.imresize(street_im, org_image_shape)\n\n yield os.path.basename(image_names[i]), np.array(res_image)", "title": "" }, { "docid": "31a319cb4ad58345963f03cbb2daf208", "score": "0.58885396", "text": "def read_images(img_list, path='', n_threads=10, printable=True):\n imgs = []\n for idx in range(0, len(img_list), n_threads):\n b_imgs_list = img_list[idx : idx + n_threads]\n b_imgs = prepro.threading_data(b_imgs_list, fn=read_image, path=path)\n # print(b_imgs.shape)\n imgs.extend(b_imgs)\n if printable:\n print('read %d from %s' % (len(imgs), path))\n return imgs", "title": "" }, { "docid": "0337d8f0525e10c728fdb1530118658c", "score": "0.58705986", "text": "def get_batches_fn(batch_size):\n image_paths = glob(os.path.join(data_folder, 'images', '*.png'))\n label_paths = {\n os.path.basename(path): path\n for path in glob(os.path.join(data_folder, 'groundtruth', '*.png'))}\n background_color = np.array([0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_image = np.expand_dims(gt_image, axis=3)\n\n gt_image = np.all(gt_image == background_color, axis=2)\n gt_image = gt_image.reshape(gt_image.shape + (1,))\n\n gt_image = np.concatenate((gt_image, np.invert(gt_image)), axis=2).astype(np.int8)\n\n images.append(image)\n gt_images.append(gt_image)\n\n images_rot = get_rotated_images(images, [90, 180, 270])\n gt_images_rot = get_rotated_images(gt_images, [90, 180, 270])\n\n images_flipped = horizontal_flip(images)\n images_flipped_v = vertical_flip(images)\n\n images_flipped_gt = horizontal_flip(gt_images)\n images_flipped_v_gt = vertical_flip(gt_images)\n\n final = images + images_flipped + 
images_flipped_v + images_rot\n final_gt = gt_images + images_flipped_gt + images_flipped_v_gt + gt_images_rot\n\n yield np.array(final), np.array(final_gt)", "title": "" }, { "docid": "2f80f3945ef7f6b6a7dbbe0c03216119", "score": "0.5858783", "text": "def myFastGenerator(length=100000):\n #labels = pd.read_csv(\"/data/uesu/cdiscount/cdiscount-kernel/data/images_128x128.csv\").head(length)\n labels = feahter.read_dataframe(\"/data/uesu/cdiscount/data/meta.feather\").head(length)\n with open(\"/data/uesu/cdiscount/cdiscount-kernel/data/images_128x128.bin\", 'rb') as file_:\n # for class_, offset, size in list(examples)[start:end]:\n for startend in list(zip([start for start in range(1,length+1, 100)],[end for end in range(100,length+100, 100)])):\n examples = zip(\n labels['class'][startend[0]:startend[1]],\n labels['offset'][startend[0]:startend[1]],\n labels['size'][startend[0]:startend[1]]\n )\n tempArr = []\n for class_, offset, size in examples:\n file_.seek(offset)\n tempArr.append(cv2.imdecode(np.frombuffer(file_.read(size), dtype=np.uint8), cv2.IMREAD_COLOR))\n yield (np.concatenate([y[np.newaxis,:,:,:] for y in tempArr], axis=0), labels['class'][startend[0]:startend[1]].values)", "title": "" }, { "docid": "7abac6ba31bbba23cd558c01afca6735", "score": "0.58533347", "text": "def create_testset_RGB(name_set,path_groundtruth,path_testset,name_kernel,noise_std_range,im_size):\n np.random.seed(0)\n torch.manual_seed(1)\n \n path_kernel = os.path.join(path_testset,name_kernel,'kernel.mat')# a matlab file\n path_save = os.path.join(path_testset,name_kernel,name_set)\n if not os.path.exists(path_save):\n os.makedirs(path_save)\n \n if os.path.exists(os.path.join(path_groundtruth,'cropped1_RGB',name_set)):\n print('cropped already')\n transf1 = OpenMat_transf()\n already_cropped = 'yes'\n # 10 random Gaussian blurs\n for j in range(10):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth,'cropped1_RGB',name_set),transf1=transf1, need_names='yes',blur_name = j,blur_type = 'Gaussian',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img,'noise_std':noise}) # degraded image and blur kernel\n print('Gaussian blur done') \n # 2 pre-defined uniform blurs \n for j in range(2):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth,'cropped1_RGB',name_set),transf1=transf1, need_names='yes',blur_name = j+10,blur_type = 'uniform_'+str(j+1),noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = 
x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img,'noise_std':noise}) # degraded image and blur kernel \n print('Uniform blur done') \n # 3 random defocus blurs \n for j in range(3):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth,'cropped1_RGB',name_set),transf1=transf1, need_names='yes',blur_name = j+12,blur_type = 'defocus',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n print(file_name_degraded)\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img,'noise_std':noise}) # degraded image and blur kernel \n print('Defocus blur done') \n else:\n # center-crops the test images to match the input size\n print('not cropped')\n transf1 = transforms.Compose([Cropcenter_RGB(im_size),lambda x: torch.from_numpy(x)]) \n path_save_true = os.path.join(path_groundtruth,'cropped1_RGB',name_set)\n if not os.path.exists(path_save_true):\n os.makedirs(path_save_true)\n already_cropped = 'no'\n for j in range(10):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth,'full1_RGB',name_set),transf1=transf1,need_names='yes',blur_name = j,blur_type = 'Gaussian',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n \n \n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img,'noise_std':noise}) # degraded image and blur\n file_name_true = os.path.join(path_save_true,str(image_name[0])+'.mat')\n sio.savemat(file_name_true,{'image':img})# center-cropped image\n print('Gaussian blur done') \n # 2 pre-defined uniform blurs \n for j in range(2):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth,'full1_RGB',name_set),transf1=transf1, need_names='yes',blur_name = j+10,blur_type = 'uniform_'+str(j+1),noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n img 
= np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img,'noise_std':noise}) # degraded image and blur kernel\n file_name_true = os.path.join(path_save_true,str(image_name[0])+'.mat')\n sio.savemat(file_name_true,{'image':img})# center-cropped image\n print('Uniform blur done') \n # 3 random defocus blurs \n for j in range(3):\n data = MyDataset_RGB(folder=os.path.join(path_groundtruth,'full1_RGB',name_set),transf1=transf1, need_names='yes',blur_name = j+12,blur_type = 'defocus',noise_std_range = noise_std_range)\n loader = DataLoader(data, batch_size=1, shuffle=False)\n # tqdm shows the progress\n for minibatch in tqdm(loader,file=sys.stdout):\n [blur_name,h,image_name,x,x_degraded,noise_std] = minibatch\n img = np.zeros((im_size[0],im_size[1],3))\n img_degraded = np.zeros((im_size[0],im_size[1],3))\n blur = np.zeros((9,9))\n noise = np.zeros((1))\n img[0:im_size[0],0:im_size[1],0:3] = x[0,0:im_size[0],0:im_size[1],0:3]\n img_degraded[0:im_size[0],0:im_size[1],0:3] = x_degraded[0,0:im_size[0],0:im_size[1],0:3]\n blur[0:9,0:9] = h[0,0:9,0:9]\n noise[0] = noise_std\n file_name_degraded = os.path.join(path_save,str(image_name[0])+'_blur'+str(blur_name.numpy())[1:-1]+'.mat')\n print(file_name_degraded)\n sio.savemat(file_name_degraded,{'image':img_degraded, 'h':blur, 'trueimage':img,'noise_std':noise}) # degraded image and blur kernel\n file_name_true = os.path.join(path_save_true,str(image_name[0])+'.mat')\n sio.savemat(file_name_true,{'image':img})# center-cropped image\n print('Defocus blur done')", "title": "" }, { "docid": "ccd8a87166a21b09515f407c781259a7", "score": "0.5844645", "text": "def loadTraining():\n cParams = {'xBuff' : 20, # in each direction (size=*2)\n 'yBuff' : 20, # in each direction\n 'zBuff' : 10, # in each direction\n 'nMax' : 20}\n \n # image_size = 100\n num_channels = 1\n num_labels = 2\n validProp = 0.15\n # Image size\n xSize = cParams['xBuff']*2 # Input size\n ySize = cParams['yBuff']*2 # Input size\n zSize = cParams['zBuff']*2 # Input size, depth\n \n allFiles, trainFiles, testFiles, _, nTrainFiles, _ = \\\n availFiles(paths['PPedSSD'])\n \n nLoad = nTrainFiles\n #nLoad = 100\n loadIdx = np.random.choice(range(0,trainFiles.shape[0]), nLoad, replace=False)\n splitLoad = round(nLoad*(0-validProp))\n trainIdx = loadIdx[0:splitLoad]\n validIdx = loadIdx[splitLoad:nLoad]\n \n trainFilesLoad = trainFiles.iloc[trainIdx] \n trainLabelsLoad = labelsCSV.iloc[trainIdx]\n cTrain, cTrainLabels = loadPPFilesV7(paths,\n trainFilesLoad, cParams, trainLabelsLoad, nodeMode=1)\n \n validFilesLoad = trainFiles.iloc[validIdx] \n validLabelsLoad = labelsCSV.iloc[validIdx]\n cValid, cValidLabels = loadPPFilesV7(paths,\n validFilesLoad, cParams, validLabelsLoad, nodeMode=1)", "title": "" }, { "docid": "54a10d77673fa18056cb38e8276a0344", "score": "0.5839162", "text": "def gen():\n while True:\n r = np.random.randint(0, len(X_train_files) - 255 - 1)\n batch_x_files = X_train_files[r:r+255]\n batch_y_files = Y_train_files[r:r+255]\n\n batch_x = np.array([plt.imread(x).reshape(35, 35, 1) for x in 
batch_x_files])\n batch_y = np.array([plt.imread(x).reshape(35, 35, 1) for x in batch_y_files])\n\n yield (batch_x, batch_y)", "title": "" }, { "docid": "c4c32461abe7b6cbf168840b5efd9a8a", "score": "0.58150095", "text": "def next(self):\n\n # Lock the iterator when the index is changed.\n with self.lock:\n index_array = next(self.index_generator)\n current_batch_size = len(index_array)\n\n # Initialize the arrays according to the size of the output images\n batch_a = np.zeros((current_batch_size,) + self.img_shape_a)\n batch_b = np.zeros((current_batch_size,) + self.img_shape_b[:-1]\n + (self.nch_gdt,))\n\n files = []\n ind = []\n\n if self.b_dir_name is None:\n if self.dataset == 'messidor':\n ### For Messidor\n all_coords = pickle.load(open(os.path.join(self.directory + 'resized_coords.pkl'), 'r'))\n\n elif self.dataset == 'idrid':\n ### For IDRiD\n file_csv_od = os.path.join(self.directory + 'IDRiD_OD_Center_Training_set.csv')\n file_csv_fov = os.path.join(self.directory + 'IDRiD_Fovea_Center_Training_set.csv')\n\n gt_fovea = pd.read_csv(file_csv_fov)\n # get rid of garbage data\n gt_fovea.drop(gt_fovea.columns[3:], axis=1, inplace=True)\n gt_fovea.drop(gt_fovea.index[413:], inplace=True)\n\n gt_od = pd.read_csv(file_csv_od)\n # get rid of garbage data\n gt_od.drop(gt_od.columns[3:], axis=1, inplace=True)\n gt_od.drop(gt_od.index[413:], inplace=True)\n\n # Load images and apply transformations\n for i, j in enumerate(index_array):\n im_id = self.a_fnames[j][:-4]\n\n if self.b_dir_name is not None:\n a_img, b_img = self._load_img_pair(j)\n\n else:\n a_img = img_as_float(io.imread(os.path.join(self.a_dir, self.a_fnames[j])))\n\n if self.dataset == 'messidor':\n ### For Messidor\n a_idx = np.where(np.array(all_coords['Image']) == im_id + '.tif')[0][0]\n coords = [all_coords['fovea'][a_idx], all_coords['od'][a_idx]]\n # get the distance maps\n b_img = self.get_dist_maps(coords)\n\n elif self.dataset == 'idrid':\n ### For IDRiD\n fovea_coords = gt_fovea[gt_fovea['Image No'] == im_id]\n fx, fy = int(fovea_coords['X- Coordinate']), int(fovea_coords['Y - Coordinate'])\n od_coords = gt_od[gt_od['Image No'] == im_id]\n odx, ody = int(od_coords['X- Coordinate']), int(od_coords['Y - Coordinate'])\n coords = [(fx,fy), (odx, ody)]\n b_img = self.get_dist_maps(coords, shp=(2848, 4288))\n\n a_img, b_img = self._random_transform(a_img, b_img)\n if self.zscore is True:\n a_img = (a_img - a_img.mean()) / (a_img.std())\n\n batch_a[i] = a_img\n batch_b[i] = b_img\n\n files.append(self.a_fnames[j])\n\n # when using tanh activation the inputs must be between [-1 1]\n if self.normalize_tanh is True and self.zscore is False:\n batch_a = normalize_for_tanh(batch_a)\n batch_b = normalize_for_tanh(batch_b)\n\n if self.return_mode == 'normal':\n return [batch_a, batch_b]\n\n elif self.return_mode == 'fnames':\n return [batch_a, batch_b, files]", "title": "" }, { "docid": "4db84a217fbf462413ca57d272eb5bb4", "score": "0.581446", "text": "def __init__(self, directory, a_dir_name='A', b_dir_name=None, N=-1,\n batch_size=32, shuffle=True, seed=None, target_size=(512,512),\n cspace='rgb', nch_gdt=1,\n zscore=True, normalize_tanh=False,\n return_mode='normal', decay=5, dataset='idrid',\n rotation_range=0., height_shift_range=0., shear_range=0.,\n width_shift_range=0., zoom_range=0., fill_mode='constant',\n cval=0., horizontal_flip=False, vertical_flip=False):\n self.directory = directory\n\n self.a_dir = os.path.join(directory, a_dir_name)\n self.a_fnames = sorted(os.listdir(self.a_dir))\n\n self.b_dir_name = b_dir_name\n 
if b_dir_name is not None:\n self.b_dir = os.path.join(directory, b_dir_name)\n self.b_fnames = sorted(os.listdir(self.b_dir))\n\n # Use only a subset of the files. Good to easily overfit the model\n if N > 0:\n self.filenames = self.a_fnames[:N]\n self.N = len(self.a_fnames)\n\n self.ch_order = K.image_dim_ordering()\n\n # Preprocess images\n self.cspace = cspace #colorspace\n\n # Image shape\n self.target_size = target_size\n self.nch_gdt = nch_gdt\n\n self.nch = len(self.cspace) # for example if grayscale\n\n self.select_vessels = select_vessels\n\n self.img_shape_a = self._get_img_shape(self.target_size, ch=self.nch)\n self.img_shape_b = self._get_img_shape(self.target_size, ch=self.nch_gdt)\n\n if self.ch_order == 'tf':\n self.channel_index = 3\n self.row_index = 1\n self.col_index = 2\n else:\n self.channel_index = 1\n self.row_index = 2\n self.col_index = 3\n\n #Normalizations\n self.normalize_tanh = normalize_tanh\n self.zscore = zscore\n\n # Transformations\n self.rotation_range = rotation_range\n self.height_shift_range = height_shift_range\n self.width_shift_range = width_shift_range\n self.shear_range = shear_range\n self.fill_mode = fill_mode\n self.cval = cval\n self.horizontal_flip = horizontal_flip\n self.vertical_flip = vertical_flip\n if np.isscalar(zoom_range):\n self.zoom_range = [1 - zoom_range, 1 + zoom_range]\n elif len(zoom_range) == 2:\n self.zoom_range = [zoom_range[0], zoom_range[1]]\n\n\n self.return_mode = return_mode\n\n self.decay=decay\n self.dataset = dataset\n\n super(TwoImageIterator, self).__init__(len(self.a_fnames), batch_size,\n shuffle, seed)", "title": "" }, { "docid": "01c72dbaad15928f42368f460fe8fbfe", "score": "0.5806961", "text": "def main():\r\n \r\n model = 'lrcn'\r\n saved_model = None # None or weights file\r\n class_limit = None # int, can be 1-101 or None\r\n seq_length = 5\r\n load_to_memory = False # pre-load the sequences into memory\r\n batch_size = 8\r\n nb_epoch = 300\r\n\r\n # Chose images or features and image shape based on network.\r\n data_type = 'images'\r\n image_shape = (80, 80, 3)\r\n\r\n train(data_type, seq_length, model, saved_model=saved_model,\r\n class_limit=class_limit, image_shape=image_shape,\r\n load_to_memory=load_to_memory, batch_size=batch_size, nb_epoch=nb_epoch)", "title": "" }, { "docid": "1288d9e40e1e5429021cb7fd12481b9d", "score": "0.5799145", "text": "def train(lpinfo_list, batch_size=100, ishape=(32, 14), nkerns=4, h_out=16, sampletype=1, \\\n cnnparamsfile=None, cnnparamsfile_restore=None):\n usewgt = True\n learning_rate = 0.1\n n_epochs = 100\n \n image_num = len(lpinfo_list)\n image_batch_size = 200\n image_batch_num = image_num / image_batch_size\n \n lpinfo_list_rnd = np.random.permutation(lpinfo_list)\n \n if usewgt:\n print 'training with weighted sample...'\n else:\n print 'training without weighted sample...'\n \n \n print 'learning_rate:',learning_rate,', max_epochs:',n_epochs,', batch_size:',batch_size\n print 'image_batch_size:', image_batch_size, ', image_batch_num:', image_batch_num\n \n \n# cost_train, cost_train_weight, params, x, y, wgt, cost_test = buildWeightCNN(ishape, batch_size, nkerns, h_out)\n# cost_train, cost_train_weight, params, x, y, wgt, cost_test = buildWeightCNN_C2H2(ishape, batch_size)\n cost_train, cost_train_weight, params, x, y, wgt, cost_test = buildWeightCNN_C1H2(ishape, batch_size)\n \n if cnnparamsfile_restore is not None:\n print 'set model from %s....'%(cnnparamsfile_restore)\n params_trained = cPickle.load(open(cnnparamsfile_restore, 'rb'))\n updates = []\n for 
param_i, trained_i in zip(params, params_trained):\n updates.append((param_i, trained_i))\n \n set_model = theano.function([], [], updates=updates)\n set_model()\n \n if usewgt:\n grads = T.grad(cost_train_weight, params)\n else:\n grads = T.grad(cost_train, params)\n \n updates = []\n for param_i, grad_i in zip(params, grads):\n updates.append((param_i, param_i - learning_rate * grad_i))\n \n train_model = None\n train_model_weight = None\n if usewgt:\n train_model_weight = theano.function(inputs=[x, y, wgt], outputs=cost_train_weight, updates=updates)\n else:\n train_model = theano.function(inputs=[x, y], outputs=cost_train, updates=updates)\n \n test_model = theano.function(inputs=[x], outputs=cost_test)\n \n \n ###############\n # TRAIN MODEL #\n ###############\n print '... training'\n # early-stopping parameters\n\n start_time = time.clock()\n \n epoch = 0\n while epoch < n_epochs:\n epoch = epoch + 1\n numall = [0, 0]\n rightnumall = [0, 0]\n test_cost = 0\n for img_batchidx in xrange(image_batch_num):\n \n if img_batchidx == image_batch_num-1:\n lpinfo_list_tmp = lpinfo_list_rnd[img_batchidx*image_batch_size:]\n else:\n lpinfo_list_tmp = lpinfo_list_rnd[img_batchidx*image_batch_size:(img_batchidx+1)*image_batch_size]\n print 'loading batch image set data %d/%d, num:%d...'%(img_batchidx+1, image_batch_num, len(lpinfo_list_tmp))\n numallone, rightnumallone, test_costone = \\\n image_batch_training(lpinfo_list_tmp, ishape, batch_size, \\\n usewgt, train_model_weight, train_model, test_model, params, cnnparamsfile, sampletype)\n numall[0] += numallone[0]\n numall[1] += numallone[1]\n rightnumall[0] += rightnumallone[0]\n rightnumall[1] += rightnumallone[1]\n test_cost += test_costone\n print '++++ img_batch:%d/%d'%(img_batchidx+1, image_batch_num), \\\n '+:%.2f%%(%d/%d) -:%.2f%%(%d/%d)'%(rightnumallone[0] * 100. / numallone[0], rightnumallone[0], numallone[0], rightnumallone[1] * 100. / numallone[1], rightnumallone[1], numallone[1]), \\\n 'test_cost:%.6f'%(test_costone/np.sum(numallone))\n \n print '---------- epoch:%d/%d'%(epoch, n_epochs), \\\n '+:%.2f%%(%d/%d) -:%.2f%%(%d/%d)'%(rightnumall[0] * 100. / numall[0], rightnumall[0], numall[0], rightnumall[1] * 100. 
/ numall[1], rightnumall[1], numall[1]), \\\n 'test_cost:%.6f'%(test_cost/np.sum(numall))\n print\n# print 'epoch:%d/%d'%(epoch, n_epochs), ' test_cost:', np.mean(test_cost)\n \n end_time = time.clock()\n print('Optimization complete.')\n print >> sys.stderr, ('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.))", "title": "" }, { "docid": "6f64e0056e5592322ebe2d98aac5c394", "score": "0.5796341", "text": "def load_imagenet_random_train_ver2(path_img, path_info, args):\n\n imgnet_info = load_file(path_info)\n name_folders = [p.split('/')[0].strip() for p in imgnet_info]\n name_folders = list(sorted(set(name_folders))) \n random_train_file = list()\n random.seed(0)\n for i, n in enumerate(name_folders):\n random_train_file, x_random_train, y_random_train = list(), list(), list()\n\n random_name_file = sorted(random.sample(os.listdir(path_img + n), args.random_train_size))\n process_folder = [load_img_imagenet(path_img + n + '/' + r, args) for r in random_name_file]\n zip_process = [(p, r) for p, r in zip(process_folder, random_name_file) if len(p.shape) == 4]\n process_folder = [p for (p, r) in zip_process]\n random_name_file = [r for (p, r) in zip_process]\n if len(process_folder) > 0:\n process_folder = np.concatenate(process_folder, axis=0)\n label_folder = get_label_imagenet(name_folder=n, imagnet_info=imgnet_info) \n label_folder = np.array([label_folder for i in range(len(process_folder))]) \n\n print(len(random_name_file), process_folder.shape, label_folder.shape)\n print('Random training the folder %i-th which has name %s' % (i, n))\n pickle.dump((random_name_file, process_folder, label_folder), open('./dataset_imagenet/%s_%s_random_train_%i.p' % (args.d, args.model, i), 'wb'), protocol=4)\n print('Now you can load the training dataset')\n exit()", "title": "" }, { "docid": "afe31dfef8c7e87751ee86d1859b8524", "score": "0.5788792", "text": "def get_hdf5(imgDir, imglists_path, output_txt, hdf5_dir, img_size, batch_size, shuffling=False, landmark_num=5):\n # GET Dataset, and shuffling.\n dataset = get_dataset(imgDir, imglists_path, landmark_num=landmark_num)\n print(\"dataSet size: %d\" % (len(dataset)))\n\n # filenames = dataset['filename']\n if shuffling:\n prefix = \"shuffle_\"\n random.shuffle(dataset)\n else:\n prefix = \"\"\n\n bg = batch_generator(dataset, batch_size)\n\n #imgDir = \"E:/work/data/landmark/samples/98/\"\n txt = open(output_txt, 'w')\n \n img_array = np.zeros((batch_size, 3, img_size, img_size)).astype(np.float32)\n label_array = np.zeros((batch_size, landmark_num*2)).astype(np.float32)\n for i,batch in enumerate(bg):\n for j,item in enumerate(batch):\n imgFn = item['filename']\n img = cv2.imread(imgFn)\n if img is None:\n print(\"Warning:cv2.imread {} is None\".format(item['filename']))\n img = np.zeros((3, img_size, img_size)).astype(np.float32)\n label = np.zeros((landmark_num*2)).astype(np.float32)\n else:\n h,w,c = img.shape\n if w!=img_size or h!=img_size:\n img = cv2.resize(img, (img_size, img_size))\n\n img = (np.asarray(img).astype(np.float32) - 127.5)/128.0\n img = np.transpose(img, (2,0,1)) #convert (height, width, 3) to (3, height, width)\n label = np.array(item['landmark'])\n img_array[j, :, :, :] = img\n label_array[j, ...] 
=label\n\n filename = prefix + \"batch_\" + str(i) + \".hdf5\"\n hdf5Fn = hdf5_dir + filename\n with h5py.File(hdf5Fn, 'w') as f:\n f.create_dataset('data', data=np.asarray(img_array).astype(np.float32)) \n f.create_dataset('label', data=np.asarray(label_array).astype(np.float32))\n \n txt.writelines(hdf5Fn+'\\n')\n print(hdf5Fn)\n\n txt.close()", "title": "" }, { "docid": "f40033ff392774985b737be70de39ed5", "score": "0.5787119", "text": "def Initial(batchSize):\r\n numClass = 10\r\n #pad=True #default choice.\r\n model = dict()\r\n \"\"\"\r\n cifar data size: 32*32*3\r\n \"\"\"\r\n image_row = 32\r\n channel = 3\r\n mask_shape = dict()\r\n mask_shape['mask_v'] = (1,image_row,image_row,channel)\r\n patch_row = 3 \r\n #featureNum_in = 3 #number of RGB channels.\r\n image_size = image_row**2 * channel\r\n #featureNum = 1\r\n feature_list = np.array([3,128,160,192])\r\n numOfBlock = 3\r\n numOfLayer = 2\r\n out_in = image_size // 8**2\r\n mask_shape = dict()\r\n row = image_row\r\n for i in range(numOfBlock):\r\n feature_in = feature_list[i]\r\n feature_out = feature_list[i+1]\r\n block_i = dict()\r\n mask_shape['block_'+str(i)] = (1,row,row,feature_out)\r\n row = row // 2 #2*2 max-pooling is apllied after each block.\r\n for j in range(numOfLayer):\r\n layer_j = dict()\r\n sec,slope = init_Grelu()\r\n layer_j['sec'] = sec\r\n layer_j['slope'] = slope\r\n if j==0:\r\n f1 = feature_in\r\n f2 = feature_out\r\n else:\r\n f1 = feature_out\r\n f2 = f1\r\n if j==0:\r\n \"\"\"\r\n inception of 1*1 at the last two layers of each block.\r\n \"\"\"\r\n patch_row = 3\r\n else:\r\n \"\"\"\r\n inception, bottleneck layer, but greatly reduce the number of parameters.\r\n \"\"\"\r\n patch_row = 1\r\n layer_j['w_conv'] = 1. / np.sqrt((f1*patch_row**2)/2.) * (np.random.randn(patch_row**2,f1,f2))\r\n layer_j['b_conv'] = np.zeros([1,f2])\r\n layer_j['beta'] = 1. + 1 * 1e-2 * np.random.randn(1,f2)\r\n layer_j['gamma'] = 1e-2 * np.random.randn(1,f2)\r\n \"\"\"\r\n each block stores layer para\r\n \"\"\"\r\n block_i[str(j)] = layer_j\r\n \"\"\"\r\n model stores block para.\r\n \"\"\"\r\n if i<2:\r\n \"\"\"\r\n keep 1 point per feature map for intermediate layers.\r\n \"\"\"\r\n out_in += feature_out * 4\r\n else:\r\n \"\"\"\r\n keep more info at the last convolved layer.\r\n i.e., only 2*2 avgPool is applied to get \r\n 16 features per feature map.\r\n \"\"\"\r\n out_in += feature_out * 4\r\n model['block_'+str(i)] = block_i\r\n \"\"\"\r\n max-pooling is employed and down-sample \r\n the convolved feature maps \r\n into smaller-size feature maps, typically 2*2.\r\n \"\"\"\r\n \"\"\"\r\n only 2*2 pooled feature from the raw image is used as residual information from lower layers.\r\n \"\"\"\r\n print(model['block_0']['0']['slope'])\r\n softmax = dict()\r\n w_o = 1. / np.sqrt((out_in)/2.) * (np.random.randn(out_in,numClass)) \r\n b_o = 0 * (np.random.randn(1,numClass))\r\n beta_o = 1. + 1 * 1e-2 * np.random.randn(1,numClass)\r\n gamma_o = 1e-2 * np.random.randn(1,numClass)\r\n softmax['w_o'] = w_o\r\n softmax['b_o'] = b_o\r\n softmax['beta'] = beta_o\r\n softmax['gamma'] = gamma_o\r\n mask_shape['mask_res'] = (1,out_in)\r\n model['out'] = softmax\r\n print(model.keys())\r\n L2 = 1e-8 * batchSize #as 400 is batchSize.\r\n momentum = Initial_momentum(model)\r\n second_order = Initial_momentum(model)\r\n \"\"\"\r\n be cautious that copy dict() object is the same object, as they are in the same memory area. 
\r\n \"\"\"\r\n shadow_model = Initial_momentum(model)\r\n gc.collect()\r\n return model,L2,momentum,second_order,mask_shape,shadow_model", "title": "" }, { "docid": "1b11923538475fa181abe33998ef6fe7", "score": "0.5786899", "text": "def train_generator(batch_size):\n while 1:\n with open('data/driving_log.csv') as driving_file:\n driving_log_reader = csv.DictReader(driving_file)\n count = 0\n Images = []\n Steerings = []\n #Data augmentation by flipping the image\n try:\n for row in driving_log_reader:\n #correction factor to steer the vehicle\n steering_offset = 0.2\n\n centerImage = mpimg.imread('data/'+ row['center'].strip())\n flippedCenterImage = np.fliplr(centerImage)\n centerSteering = float(row['steering'])\n\n leftImage = mpimg.imread('data/'+ row['left'].strip())\n flippedLeftImage = np.fliplr(leftImage)\n leftSteering = centerSteering + steering_offset\n\n rightImage = mpimg.imread('data/'+ row['right'].strip())\n flippedRightImage = np.fliplr(rightImage)\n rightSteering = centerSteering - steering_offset\n\n if count == 0:\n Images = np.empty([0, 160, 320, 3], dtype=float)\n Steerings = np.empty([0, ], dtype=float)\n if count < batch_size:\n Images = np.append(Images, np.array([centerImage, flippedCenterImage, leftImage, flippedLeftImage, rightImage, flippedRightImage]), axis=0)\n Steerings = np.append(Steerings, np.array([centerSteering, -centerSteering, leftSteering, -leftSteering, rightSteering, -rightSteering]), axis=0)\n count += 6\n else:\n #use of generators\n yield shuffle(Images, Steerings)\n count = 0\n except StopIteration:\n pass", "title": "" }, { "docid": "537dbea5241d52ea1425a111e22f5871", "score": "0.5785484", "text": "def sample_train_files(self, data_dict, cfg, n_files):\n # Create dicts equal to the number of folds\n tr_fold_list = list(range(1,cfg['folds_cv'] + 1))\n train_dict = dict([(key,pd.DataFrame()) for key in tr_fold_list])\n train_cv_dict = dict([(key,pd.DataFrame()) for key in tr_fold_list])\n\n f_len = 1\n for k,v in data_dict.train_set_dict_.items():\n print (\"Train file:\", v)\n if f_len <= n_files:\n df, cv_fold_dict = pickle.load(open(v, \"rb\"))\n if len(cv_fold_dict.keys()) > 0:\n for j in cv_fold_dict.keys():\n print (\"Fold:\", j)\n fold = cv_fold_dict[j].set_index([cfg['ID_COL'],cfg['CTU_COL']])\n df_tr = df[~df.set_index([cfg['ID_COL'],cfg['CTU_COL']]).index.isin(fold.index)]\n df_cv = df[df.set_index([cfg['ID_COL'],cfg['CTU_COL']]).index.isin(fold.index)]\n df_tr = self.sample_data(df_tr, cfg)\n train_cv_dict[j] = pd.concat([train_cv_dict[j],df_cv], axis=0)\n train_dict[j] = pd.concat([train_dict[j],df_tr], axis=0)\n print (\"\\ttrain shape:\",train_dict[j].shape)\n print (\"\\ttrain cv shape:\", train_cv_dict[j].shape)\n del df_tr\n\n del cv_fold_dict, df\n f_len += 1\n\n cust_count = 0\n positives= 0\n for k,v in train_dict.items():\n cust_count = train_dict[k][cfg['ID_COL']].nunique() + cust_count\n positives = train_dict[k][cfg['TE_TARGET_COL']].sum() + positives\n\n print (\"TRAIN - number of customers:\", cust_count)\n print (\"TRAIN- Total Positives: \", positives)\n\n return train_dict, train_cv_dict", "title": "" }, { "docid": "fc08f091edb30373bf017a2b68c5f9a2", "score": "0.57832634", "text": "def main(arguments):\n src_root = \"../drive/app/IET-SHOPEE/sample_test\"\n dst_root = \"./demo/result\"\n label_map_path = \"/tmp/data/labels.txt\"\n if not os.path.isdir(dst_root):\n os.mkdir(dst_root)\n\n images = os.listdir(src_root)\n output_file = os.path.join(dst_root, \"output_result.txt\")\n result_file = open(output_file, 
\"a\")\n\n label_map_file = open(label_map_path)\n label_map = {}\n for line_number, label in enumerate(label_map_file.readlines()):\n label_map[line_number] = label[:-1]\n line_number += 1\n label_map_file.close()\n\n\n for image in images:\n image_path = os.path.join(src_root, image)\n start = datetime.datetime.now()\n with tf.gfile.FastGFile(image_path, 'rb') as jpeg_file_raw:\n jpeg_file = jpeg_file_raw.read()\n input_0 = decode_image(jpeg_file)\n\n image_height = input_0.shape[0]\n image_width = input_0.shape[1]\n image_height_center = int(image_height/2)\n image_width_center = int(image_width/2)\n\n tl_crop = input_0[0:331, 0:331]\n tr_crop = input_0[0:331, image_width-331:image_width]\n bl_crop = input_0[image_height-331:image_height, 0:331]\n br_crop = input_0[image_height-331:image_height, image_width-331:image_width]\n center_crop = input_0[image_height_center - 165: image_height_center + 166, image_width_center - 165: image_width_center + 166]\n\n input_concat = np.asarray([tl_crop, tr_crop, bl_crop, br_crop, center_crop])\n input_batch = input_concat.reshape(-1, 331, 331, 3)\n\n predictions = diagnose_image(inference_session, input_batch)\n overall_result = np.argmax(np.sum(predictions, axis=0))\n\n result_file.write(image_path + \"\\n\")\n result_file.write(str(overall_result) + \"\\n\")\n\n end = datetime.datetime.now()\n print(image_path)\n print(overall_result, label_map[overall_result])\n print(\"Time cost: \", end - start, \"\\n\")\n\n result_file.close()", "title": "" }, { "docid": "3953cc4356a82f4ad16092cdbca54ed8", "score": "0.5776576", "text": "def load_batch(self, idx=0, img_paths=None, training=True, bicubic=False):\n\n # Starting index to look in\n cur_idx = 0\n if not img_paths:\n cur_idx = idx * self.batch_size\n\n # Scale and pre-process images\n imgs_hr, imgs_lr = [], []\n while True:\n\n # Check if done with batch\n if img_paths is None:\n if cur_idx >= self.total_imgs:\n cur_idx = 0\n if len(imgs_hr) >= self.batch_size:\n break\n if img_paths is not None and len(imgs_hr) == len(img_paths):\n break\n\n try:\n # Load image\n img_hr = None\n if img_paths:\n img_hr = self.load_img(img_paths[cur_idx], training)\n else:\n img_hr = self.load_img(self.img_paths[cur_idx], training)\n\n # Create HR images to go through\n img_crops = []\n if training:\n for i in range(self.crops_per_image):\n # print(idx, cur_idx, \"Loading crop: \", i)\n img_crops.append(self.random_crop(img_hr, (self.height_hr, self.width_hr)))\n else:\n # img_crops = [img_hr]\n img_crops.append(self.random_crop(img_hr, (self.height_hr, self.width_hr)))\n\n # Downscale the HR images and save\n for img_hr in img_crops:\n\n # TODO: Refactor this so it does not occur multiple times\n if img_paths is None:\n if cur_idx >= self.total_imgs:\n cur_idx = 0\n if len(imgs_hr) >= self.batch_size:\n break\n if img_paths is not None and len(imgs_hr) == len(img_paths):\n break\n\n # For LR, do bicubic downsampling\n method = Image.BICUBIC if bicubic else choice(self.options)\n lr_shape = (int(img_hr.shape[1] / self.scale), int(img_hr.shape[0] / self.scale))\n img_lr = Image.fromarray(img_hr.astype(np.uint8))\n img_lr = np.array(img_lr.resize(lr_shape, method))\n # Scale color values\n img_hr = self.scale_imgs(img_hr)\n img_lr = self.scale_imgs(img_lr)\n\n # Store images\n imgs_hr.append(img_hr)\n imgs_lr.append(img_lr)\n\n except Exception as e:\n # print(e)\n pass\n finally:\n cur_idx += 1\n\n # Convert to numpy arrays when we are training\n # Note: all are cropped to same size, which is not the case when not 
training\n if training:\n imgs_hr = np.array(imgs_hr)\n imgs_lr = np.array(imgs_lr)\n\n # Return image batch\n return imgs_lr, imgs_hr", "title": "" }, { "docid": "a028a967a8490bfb8969b3fddb17e59e", "score": "0.57763296", "text": "def get_images(path):\n for filename in glob.glob(path):\n image = skimage.color.rgb2lab(skimage.io.imread(filename))\n Lband = np.zeros((1, 224,224,3))\n Lband[0, :,:,0] = image[0:224,0:224,0]\n #trainTarget = np.zeros((1, 100352))\n #trainTarget[0, 0:50176] = np.array(image[0:224,0:224,1]).flatten() # A band ***May need to flatten() these?\n #trainTarget[0, 50176:] = np.array(image[0:224,0:224,2]).flatten() # B band\n trainTarget = np.zeros((1, 50176))\n trainTarget = (np.array(image[0:224,0:224,1]).flatten()).transpose()\n yield(Lband, trainTarget)", "title": "" }, { "docid": "2a6a4373f909a174fd5401a870f8bd9c", "score": "0.5773362", "text": "def train_UNET(train_step, loss, reconstruction, show_every=100, print_every=5, batch_size=128, num_epoch=10):\n image_dir = '/home/youngwook/Downloads/Carvana'\n folder_names = get_folders(image_dir)\n\n train_folder = folder_names[1]\n target_folder = folder_names[2]\n\n resize = 224\n\n train_data = Pix2Pix_AB_Dataloader(train_folder, target_folder, size=resize, randomcrop=opts.image_shape)\n\n train_loader = DataLoader(train_data, batch_size=opts.batch, shuffle=True, num_workers=12)\n\n step = 0\n recon = []\n true = []\n last_100_loss_dq = deque(maxlen=100)\n last_100_loss = []\n\n for epoch in range(num_epoch):\n # every show often, show a sample result\n for (minibatch, minbatch_y) in train_loader:\n\n # run a batch of data through the network\n # logits= sess.run(logits_real, feed_dict={x:minibatch})\n _, D_loss_curr = session.run([train_step[0], loss[0]], feed_dict={input_image: minibatch, target_image: minbatch_y})\n _, G_loss_curr = session.run([train_step[1], loss[1]], feed_dict={input_image: minibatch, target_image: minbatch_y})\n\n last_100_loss_dq.append(G_loss_curr)\n last_100_loss.append(np.mean(last_100_loss_dq))\n\n step += 1\n\n if step % show_every == 0:\n '''for every show_every step, show reconstructed images from the training iteration'''\n\n recon_name = './img/recon_%s.png' % step\n true_name = './img/true_%s.png' % step\n\n #translate the image\n recon_images = session.run(recon_image, feed_dict={input_image: minibatch})\n\n recon.append(recon_name)\n true.append(true_name)\n\n show_images(recon_images[:opts.batch], opts, recon_name)\n show_images(minibatch[:opts.batch], opts, true_name)\n\n if step % print_every == 0:\n print('Epoch: {}, D: {:.4}'.format(epoch, G_loss_curr))\n raw_score_plotter(last_100_loss)\n\n raw_score_plotter(last_100_loss)\n image_to_gif('', recon, duration=0.5, gifname='recon')\n image_to_gif('', true, duration=0.5, gifname='true')", "title": "" }, { "docid": "d068ba6d412846ac15d3fcf0a0edd9ba", "score": "0.5764694", "text": "def eval_net(net,pkl_file,image_path,n=None,NMS_THRESH = 0.1):\n \n # Set random number generator\n random_number_generator = np.random.RandomState(0)\n \n # Load information about exams\n with open(pkl_file, 'rb') as f:\n data = pickle.load(f)\n\n image_indexes = []\n malignant_pred = []\n malignant_label = [] \n # Iterate over exams in data\n for d in tqdm(data):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(d[v]) == 0:\n continue\n else:\n index = random_number_generator.randint(low=0, high=len(d[v]))\n image_id = d[v][index] \n image_indexes.append(image_id)\n im_path = image_path + '/' + image_id + '.png'\n im = 
preprocess_image(im_path)\n bboxes_and_scores = score_im(net,im,NMS_THRESH)\n scores = bboxes_and_scores[:, -1]\n max_score = np.max(scores)\n malignant_pred.append(max_score)\n \n if v[0] == 'L':\n malignant_label.append(d['cancer_label']['left_malignant'])\n else:\n malignant_label.append(d['cancer_label']['right_malignant'])\n \n # Create pandas dataframe\n df = pd.DataFrame()\n df[\"image_index\"] = image_indexes\n df[\"malignant_pred\"] = malignant_pred\n df[\"malignant_label\"] = malignant_label\n \n\n return df", "title": "" }, { "docid": "054ddb290b88d0fba5de2cd02ac2c954", "score": "0.5738474", "text": "def process_batch(folder: str) -> None:\n\n # define folder path for the batch e.g. /results/1\n folder_path = os.path.join(src_path, folder)\n\n # get the full paths of all the valid files in the source folder\n file_paths = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.endswith(FILE_EXT)]\n\n # generate array of all image arrays\n data = np.array([open_image(fp) for fp in file_paths])\n\n # generate lightened image\n lightened = process_images(copy(data), np.argmax)\n\n # save lightened image to folder\n save_image(lightened, 'Lighten', folder)\n\n # generate darkened image\n darkened = process_images(copy(data), np.argmin)\n\n # save darkened image to folder\n save_image(darkened, 'Darken', folder)", "title": "" }, { "docid": "16e1677170a86b044e16463cc848ba10", "score": "0.573675", "text": "def gen_unet_batch_v2(img_mask_junk_names, crop_sz=(64,64,64), mask_sz=(24,24,24), batch_sz=32):\n imgs = []\n masks = []\n junks = []\n # read all images and masks into lists \n for i in range(len(img_mask_junk_names)):\n curr_name = img_mask_junk_names[i]\n curr_img, head = nrrd.read(curr_name[0])\n curr_mask, head = nrrd.read(curr_name[1])\n curr_junk, head = nrrd.read(curr_name[2])\n assert curr_img.shape == curr_mask.shape, \"Image and training mask size do not match!\"\n assert curr_img.shape == curr_junk.shape, \"Image and junk mask size do not match!\"\n \n curr_img = np.float32(curr_img)\n curr_img = (curr_img - curr_img.mean()) / curr_img.std() # normalize image\n imgs.append(curr_img)\n curr_mask = np.float32(curr_mask)\n masks.append(curr_mask)\n junks.append(curr_junk) \n \n while True:\n batch_img = np.zeros((batch_sz, crop_sz[0], crop_sz[1], crop_sz[2], 1), dtype='float32')\n batch_mask = np.zeros((batch_sz, mask_sz[0], mask_sz[1], mask_sz[2], 1), dtype='float32')\n \n # randomly crop an image from imgs list\n idx = np.random.randint(0, len(imgs))\n img_for_crop = imgs[idx]\n mask_for_crop = masks[idx]\n junk_for_crop = junks[idx] # only used for including enough junk crops\n num_crop = 0\n while num_crop < batch_sz:\n x = np.random.randint(0, img_for_crop.shape[0]-crop_sz[0])\n y = np.random.randint(0, img_for_crop.shape[1]-crop_sz[1])\n z = np.random.randint(0, img_for_crop.shape[2]-crop_sz[2])\n cropped_img = img_for_crop[x:x+crop_sz[0], y:y+crop_sz[1], z:z+crop_sz[2]]\n cropped_mask = mask_for_crop[x:x+crop_sz[0], y:y+crop_sz[1], z:z+crop_sz[2]]\n cropped_junk = junk_for_crop[x:x+crop_sz[0], y:y+crop_sz[1], z:z+crop_sz[2]]\n shrink_sz = (int((crop_sz[0]-mask_sz[0])/2), int((crop_sz[1]-mask_sz[1])/2), int((crop_sz[2]-mask_sz[2])/2))\n cropped_mask = cropped_mask[shrink_sz[0]:crop_sz[0]-shrink_sz[0], shrink_sz[1]:crop_sz[1]-shrink_sz[1], shrink_sz[2]:crop_sz[2]-shrink_sz[2]]\n cropped_junk = cropped_junk[shrink_sz[0]:crop_sz[0]-shrink_sz[0], shrink_sz[1]:crop_sz[1]-shrink_sz[1], shrink_sz[2]:crop_sz[2]-shrink_sz[2]]\n # if include the random crop in 
training\n is_include = False\n num_syn_vxl = len(cropped_mask[cropped_mask==1])\n num_junk_vxl = len(cropped_junk[cropped_junk==255])\n accept_prob = np.random.random()\n if num_syn_vxl > 500 or num_junk_vxl > 100 or accept_prob > 0.98:\n is_include = True\n elif (0 < num_syn_vxl <= 500 or 0 < num_junk_vxl <= 100) and accept_prob > 0.5:\n is_include = True\n \n # include the crop\n if is_include:\n batch_img[num_crop,:,:,:,0] = cropped_img\n batch_mask[num_crop,:,:,:,0] = cropped_mask\n num_crop += 1\n \n # data augmentation\n x_flip = np.random.randint(2, size=batch_sz)\n z_flip = np.random.randint(2, size=batch_sz)\n rot_angle = np.random.randint(4, size=batch_sz)\n for j in range(batch_sz):\n if x_flip[j]:\n batch_img[j,:,:,:,0] = np.flip(batch_img[j,:,:,:,0], axis=0)\n batch_mask[j,:,:,:,0] = np.flip(batch_mask[j,:,:,:,0], axis=0)\n if z_flip[j]:\n batch_img[j,:,:,:,0] = np.flip(batch_img[j,:,:,:,0], axis=2)\n batch_mask[j,:,:,:,0] = np.flip(batch_mask[j,:,:,:,0], axis=2)\n if rot_angle[j]:\n batch_img[j,:,:,:,0] = np.rot90(batch_img[j,:,:,:,0], rot_angle[j], axes=(0,1))\n batch_mask[j,:,:,:,0] = np.rot90(batch_mask[j,:,:,:,0], rot_angle[j], axes=(0,1))\n \n yield batch_img, batch_mask", "title": "" }, { "docid": "5bb48c87e8c427df80573909bc95d45b", "score": "0.5728116", "text": "def gen_test_output_nir_ttv(sess, logits, keep_prob, image_pl, image_input_nir, data_folder, image_shape):\n i = 0\n for folder in data_folder:\n print(folder)\n j = 0\n image_files = glob(os.path.join(folder, '*color*.png'))\n max_iter = len(image_files)\n for image_file in image_files: # previously 'data*.png'\n sys.stdout.write(\"\\rRunning test image %d / %d\"%(j+1, max_iter))\n sys.stdout.flush()\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n _, filename = os.path.split(image_file)\n fd_id = filename[0]\n img_id = image_file[-8:]\n nir = cv2.imread(folder+\"/\"+fd_id+\"_nir_\"+img_id)\n nir = scipy.misc.imresize(nir, image_shape)\n\n im_softmax = sess.run(\n [tf.nn.softmax(logits)],\n {keep_prob: 1.0, image_pl: [image], image_input_nir: [nir]})\n im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 0, 255, 127]]))\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n\n result = np.dot(segmentation, np.array([[0, 0, 255, 255]]))\n result = scipy.misc.toimage(result, mode=\"RGBA\")\n\n street_im = scipy.misc.toimage(image)\n street_im.paste(mask, box=None, mask=mask)\n\n base_name = os.path.basename(image_file)\n base_name = str(i)+\"_\"+base_name\n j += 1\n yield base_name, np.array(street_im), result\n print(\"\")\n i += 1", "title": "" }, { "docid": "f8cd4ce219cbd06c0fc10aef475b4f8d", "score": "0.57191396", "text": "def batch_handle(image_files: list,\n labels: list,\n save_path: str,\n test: float,\n valid: float,\n image_size=None,\n batch_size=None):\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n\n print('=' * 70)\n if batch_size is None:\n print('No Batch, all images will be handled at once.')\n images2npy(image_files, labels,\n save_path, test, valid, image_size)\n else:\n\n if type(batch_size) != int:\n raise TypeError('batch_size must be an integer')\n\n arr_len = len(image_files)\n if arr_len % batch_size == 0:\n total_batch = int(arr_len / batch_size)\n else:\n total_batch = int(arr_len // batch_size + 1)\n\n print('Batch Task, batch size={}, data length={}, get {}'\n ' batches to handle'.format(batch_size, arr_len, 
total_batch))\n batch = 0\n while len(image_files) > 0:\n print('=' * 70)\n print('Batch {}/{}'.format(batch, total_batch - 1))\n batch_files, batch_labels = choose_file(image_files, labels, batch_size)\n batch_path = os.path.join(save_path, 'batch_%d' % batch)\n\n if not os.path.isdir(batch_path):\n os.mkdir(batch_path)\n\n print('Directory: %s' % batch_path.replace('\\\\', '/'))\n print('=' * 70)\n thread = BatchThread(batch_files, batch_labels, batch_path, test, valid, image_size)\n thread.start()\n thread.join()\n batch += 1", "title": "" }, { "docid": "fcccf2bcdee9386ccc7a8ddfe9feaa73", "score": "0.5709244", "text": "def tile_images(image_files,tile_size,tile_overlap, tiledir):\n n_images = len(image_files)\n n_saved = 0\n for f in image_files:\n\n file_results = []\n pil_img = Image.open(f)\n pil_img = np.asarray(pil_img)\n pil_img = copy.deepcopy(pil_img) #necessary step\n #Convert to tensor\n img = torch.from_numpy(pil_img)\n img = img.permute(2,0,1)\n fullimage_size = img.shape[1:]\n\n #Get tiles\n tile_size = (800, 800) #H, W in pixels\n tile_overlap = 100\n tiles = get_tiles(img,tile_size,tile_overlap)\n\n #Now put in a list and make sure the tiles are contiguous (they weren't)\n nrows, ncols = tiles.shape[1],tiles.shape[2]\n tilelist = []\n tilelist = [tiles[0][ir][ic].contiguous() for ir in range(nrows) for ic in range(ncols) ]\n\n #Create a set of tile positions (note: order must match that of tilelist!)\n tile_positions = [{\"trow\":ir,\"tcol\":ic} for ir in range(nrows) for ic in range(ncols)]\n\n #Create a list of dicts (one per image) for Detectron2 input (note: list comprehension zips names and positions)\n ddict = [{\"image_id\":str(f), \"trow\":tile_pos[\"trow\"], \"tcol\":tile_pos[\"tcol\"],\\\n \"image\":tile, \"width\":tile_size[1],\"height\":tile_size[0]} \\\n for tile_pos,tile in zip(tile_positions,tilelist)]\n\n for i in range(len(ddict)):\n row = ddict[i]['trow']\n col = ddict[i]['tcol']\n fname = Path(ddict[i]['image_id']).stem\n file_type = 'jpg'\n tile_id_str = '{}[{}][{}]'.format(fname,row,col) #To match with images, extension is omitted\n new_im = transforms.ToPILImage()(ddict[i]['image']).convert(\"RGB\")\n new_im.save('{}/{}.{}'.format(tiledir, tile_id_str, file_type))\n n_saved += 1\n if n_saved % 50000 == 0:\n print(\"working:\",n_saved,\"tiles created...\")\n print(\"Saved\",n_saved,\"tiles to \",tiledir,\"from \",n_images,\"full-sized images.\")", "title": "" }, { "docid": "69d7eeea3898a9eea5c8ffe5fd8b8d35", "score": "0.568841", "text": "def create_images_for_train(image_objects,\n bw=30,\n train_val_split = 0.1, directory = \"memory/\"):\n sns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\",\n 'figure.facecolor': 'white',\n 'axes.spines.bottom': False,\n 'axes.spines.left': False,\n 'axes.spines.right': False,\n 'axes.spines.top': False,\n\n })\n\n import re\n\n counter = 1\n input_images_dir_name = \"images\"\n label_images_dir_name = \"masks\"\n\n\n images_names_list = [int(re.search(r'\\d+',\n os.path.basename(images[-2])).group()) for images in image_objects]\n\n\n unque_images_length = len(np.unique(images_names_list))\n splitting_imagename = int(unque_images_length * (1- train_val_split))\n\n print(\"splitting_imagename \",splitting_imagename)\n for i, image in enumerate(image_objects):\n\n numbers = int(re.search(r'\\d+', os.path.basename(image[-2])).group())\n\n print(\"number: \",numbers)\n\n image_id = os.path.basename(image[-2])\n\n image_id = image[1]+\"_\" +image[2]+\"_\" +image[3]+\"_\" +image_id\n\n os.chdir(output_path)\n if not 
os.path.isdir(image[2] + \"_\" +image[3]):\n subprocess.call(\"mkdir \"+directory +image[2] + \"_\" +image[3], shell = True )\n\n #image_id = os.path.basename(image[-2])\n\n if not os.path.isdir(image[1]):\n counter = 0\n subprocess.call(\"mkdir \"+directory +image[2] + \"_\" +image[3]\\\n +\"/\"+image[1], shell = True )\n\n if numbers <= splitting_imagename:\n directory_name = \"train\"\n print(\"Image Training\",os.path.basename(image[-2]))\n else:\n directory_name = \"test\"\n print(\"Validation Training\",os.path.basename(image[-2]))\n\n if not os.path.isdir(image[2] + \"_\" +image[3]+\"/\"\\\n +image[1]+\"/\"+directory_name+\"/\"\\\n +input_images_dir_name):\n subprocess.call(\"mkdir \"+directory+image[2] + \"_\" +image[3]+\\\n \"/\"+image[1]+\"/\"+directory_name+\"/\"+input_images_dir_name,\n shell = True )\n\n copyfile(image[-2], image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+\\\n directory_name+\"/\"+input_images_dir_name+\"/\"+image_id)\n\n if not os.path.isdir(image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+\\\n directory_name+\"/\"+label_images_dir_name):\n subprocess.call(\"mkdir \"+directory+image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"\\\n +directory_name+\"/\"+label_images_dir_name,\n shell = True )\n\n\n counter = counter +1\n plt.figure(figsize=(w/my_dpi, h/my_dpi), dpi=my_dpi)\n fig = sns.kdeplot(image[0].iloc[:,2],\n image[0].iloc[:,3],\n kernel = \"gau\",\n bw= 30,\n cmap = plt.cm.gray,\n shade=True,\n n_levels = 100,\n legend= False,\n cumulative= False)\n fig.axes.get_yaxis().set_visible(False)\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.invert_yaxis()\n\n plt.tight_layout()\n # plt.savefig(image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+label_images_dir_name+directory_name +\"/\"+image_id,\n # dpi=my_dpi,\n # transparent='True',\n # bbox_inches='tight',\n # pad_inches = 0)\n\n plt.setp([fig.get_xticklines() +\n fig.get_yticklines() +\n fig.get_xgridlines() +\n fig.get_ygridlines()],antialiased=False)\n\n figure = fig.get_figure()\n figure.tight_layout(pad=0)\n figure.canvas.draw()\n\n\n\n image1 = np.fromstring(figure.canvas.tostring_rgb(),dtype=np.uint8,sep='')\n image1 = image1.reshape(figure.canvas.get_width_height()[::-1] + (3,))\n\n\n misc.imsave(directory+image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+directory_name +\"/\" +label_images_dir_name +\"/\"+image_id, image1[:,:,0])", "title": "" }, { "docid": "6f83f925247ae7878c7010992d06c91c", "score": "0.56812793", "text": "def build_train_val(train_path, val_path, val_size=0.2, seed=1):\n # Rotate and Flip -> 8-fold dataset\n # for i in range(1, 101):\n for i in range(1, 901):\n im = Image.open(os.path.join(train_path, 'images2', 'satImage_%.3d.png'%i))\n ma = Image.open(os.path.join(train_path, 'groundtruth2', 'satImage_%.3d.png'%i))\n\n im_f = im.transpose(Image.FLIP_LEFT_RIGHT)\n io.imsave(os.path.join(train_path, 'images2', 'satImage_%.3d_f.png'%i), np.array(im_f))\n\n ma_f = ma.transpose(Image.FLIP_LEFT_RIGHT)\n io.imsave(os.path.join(train_path, 'groundtruth2', 'satImage_%.3d_f.png'%i), np.array(ma_f))\n \n \n for angle in [90, 180, 270]:\n im_r = im.rotate(angle)\n io.imsave(os.path.join(train_path, 'images2', 'satImage_%.3d_%.3d.png'%(i, angle)), np.array(im_r))\n\n im_f_r = im_f.rotate(angle)\n io.imsave(os.path.join(train_path, 'images2', 'satImage_%.3d_f_%.3d.png'%(i, angle)), np.array(im_f_r))\n\n ma_r = ma.rotate(angle)\n io.imsave(os.path.join(train_path, 'groundtruth2', 'satImage_%.3d_%.3d.png'%(i, angle)), np.array(ma_r))\n\n ma_f_r = ma_f.rotate(angle)\n 
io.imsave(os.path.join(train_path, 'groundtruth2', 'satImage_%.3d_f_%.3d.png'%(i, angle)), np.array(ma_f_r))\n\n # Get all images's name\n train_val_images = os.listdir(os.path.join(train_path, 'images2'))\n\n # Split image into train and validation set\n train_images, val_images = train_test_split(train_val_images, test_size=val_size, random_state=seed)\n\n # Build new folders for validation set\n make_dir(val_path)\n make_dir(os.path.join(val_path, 'images2'))\n make_dir(os.path.join(val_path, 'groundtruth2'))\n \n # Move validation images to new folders\n for im in val_images:\n os.rename(os.path.join(train_path, 'images2', im), os.path.join(val_path, 'images', im))\n os.rename(os.path.join(train_path, 'groundtruth2', im), os.path.join(val_path, 'groundtruth', im))", "title": "" }, { "docid": "b62fff3432b2200c959fdee27584861d", "score": "0.5680873", "text": "def train(name, style, train_dir, image_size, global_step, lr, batch_size,\n val_size, vggfile, model, max_iteration, eval_iteration,\n save_iteration):\n\n style_image = scipy.misc.imread(style)\n style_image = tf.to_float(style_image[np.newaxis,:,:,:])\n # create a tf placeholder for training content images\n input_shape = [batch_size] + image_size + [3]\n content_image = tf.placeholder(tf.float32, input_shape)\n style_net = Network(MODELDESC, name, trainable=True)\n output_image = style_net.image(content_image)\n\n c_loss, s_loss, tv_loss = loss_vgg(\n style_image, content_image, output_image, vggfile)\n loss = c_loss + s_loss + tv_loss\n train_op = tf.train.AdamOptimizer(lr).minimize(loss)\n\n train_images = train_set(train_dir)\n # first val_size of batches are used as the validation set\n val_set = [train_images(batch_size) for k in range(val_size)]\n saver = tf.train.Saver()\n with tf.Session() as sess:\n if global_step == 0:\n sess.run(tf.initialize_all_variables())\n else:\n saver.restore(sess, model+'-{}'.format(global_step))\n train_images.index = global_step % (\n train_images.number - batch_size*val_size) + batch_size*val_size\n print 'Started training with {} trainable tensors:'.format(\n len(tf.trainable_variables()))\n sys.stdout.flush()\n time_start = datetime.now()\n for i in range(max_iteration):\n if train_images.index+batch_size > train_images.number:\n train_images.index = batch_size*val_size\n image = train_images(batch_size)\n sess.run(train_op, {content_image: image})\n global_step += batch_size\n if (i+1) % eval_iteration == 0 or (i+1) == max_iteration:\n time_end = datetime.now()\n if (i+1) % eval_iteration == 0:\n iterations = eval_iteration\n else:\n iterations = (i+1) % eval_iteration\n val_losses = []\n for val_image in val_set:\n val_losses.append(\n sess.run([c_loss, s_loss, tv_loss],\n {content_image: val_image}))\n val_losses = np.stack(val_losses)\n [val_c_loss, val_s_loss, val_tv_loss] = np.mean(val_losses,\n axis=0)/batch_size\n print ('iteration %d: c_loss = %.1f, s_loss = %.1f, tv_loss = '\n '%.1f, ave_time = %s') %(\n i+1, val_c_loss, val_s_loss, val_tv_loss,\n (time_end-time_start)/iterations)\n print 'gradients:'\n tvars = tf.trainable_variables()\n temp = sess.run(tf.gradients(loss, tvars), {content_image: image})\n print [tf.sqrt(tf.reduce_mean(value*value)).eval() for value in temp]\n sys.stdout.flush()\n time_start = datetime.now()\n if (i+1) % save_iteration == 0 or (i+1) == max_iteration:\n saver.save(sess, model, global_step, write_meta_graph=False)\n print 'Session file {}-{} saved.'.format(model, global_step)\n sys.stdout.flush()\n print 'Training finished (global step = 
{}).'.format(global_step)", "title": "" }, { "docid": "090b2acc3b973662638ef715a697979e", "score": "0.5668816", "text": "def main():\n train_data = args[\"path2train\"]\n test_data = args[\"path2test\"]\n n_epochs = args[\"n_epochs\"]\n batch_size = args[\"batch_size\"] \n \n \n \"\"\"\n Create the out directory, if it doesn't already exist \n \"\"\"\n dirName = os.path.join(\"..\", \"out\")\n if not os.path.exists(dirName):\n os.mkdir(dirName)\n \n # print that it has been created\n print(\"Directory \" , dirName , \" Created \")\n else: \n # print that it exists\n print(\"Directory \" , dirName , \" already exists\")\n\n \n \"\"\"\n ==============\n Preprocessing\n ==============\n \"\"\"\n \n print(\"\\n Hi there! Are you ready to start classifying some impressionist paintings? \\n I hope so, it's exciting stuff!\")\n print(\"\\n I'm about to initialize the construction of your LeNet convolutional neural network model...\") \n print(\"\\n We'll start with the pre-processing. This might take a few minutes.\") \n \n \"\"\"\n Labelling the data\n \"\"\"\n # Create the list of label names\n label_names = functions.listdir_nohidden(train_data)\n \n \"\"\"\n Resizing the images \n \"\"\"\n # Find the optimal dimensions to resize the images \n print(\"\\n[INFO] Estimating the optimal image dimensions to resize images...\")\n min_height, min_width = functions.find_image_dimensions(train_data, test_data, label_names)\n print(f\"\\n[INFO] Input images are resized to dimensions of height = {min_height} and width = {min_width}...\")\n \n \n # Training data: Resize and create trainX and trainY\n print(\"\\n[INFO] Resizing training images and creating training data (trainX), and labels (trainY)...\")\n trainX, trainY = functions.create_trainX_trainY(train_data, min_height, min_width, label_names)\n \n # Validation data: Resize and create testX and testY\n print(\"\\n[INFO] Resizing validation images and creating validation data (testX), and labels (testY)...\")\n testX, testY = functions.create_testX_testY(test_data, min_height, min_width, label_names)\n \n \n \"\"\"\n Normalizing and Binarizing \n \"\"\"\n # Normalize the data and binarize the labels\n print(\"\\n[INFO] Normalize training and validation data and binarizing training and validation labels...\")\n trainX, trainY, testX, testY = functions.normalize_binarize(trainX, trainY, testX, testY)\n \n \n \"\"\"\n ===============================\n Building and training the model\n ===============================\n \"\"\"\n \n #We build the model here so that we can see the layers we're building into it\n print(\"\\n[INFO] Preprocessing complete. I'm now defining the LeNet model architecture as follows...\")\n print(\"\\n INPUT => CONV => ReLU => MAXPOOL => CONV => ReLU => MAXPOOL => FC => ReLU => FC\") \n\n \n #Run the model\n model = define_LeNet_model(min_width, min_height)\n \n # Train model\n print(\"\\n[INFO] The model's ready so we'll begin training it...\\n\\n\")\n H = functions.train_LeNet_model(model, trainX, trainY, testX, testY, n_epochs, batch_size)\n \n print(\"\\nTraining complete - thanks for your patience! We'll start to evaluate the model's performance\") \n \n \n \"\"\"\n ====================\n Evaluating the model\n ====================\n \"\"\"\n\n # Plot loss/accuracy history of the model\n functions.plot_history(H, n_epochs)\n \n # Evaluate model\n print(\"\\n[INFO] Below is the classification report. 
This has been copied into the out directory\\n\")\n functions.evaluate_model(model, testX, testY, batch_size, label_names)\n \n # User message\n print(\"\\n That's you all done - woohoo!\\nYou have now defined and trained a CNN on impressionist paintings which is able to classify paintings by their artists.!\")", "title": "" }, { "docid": "29bf087b88c2e3b8d6e158a8b0b03c6f", "score": "0.56611156", "text": "def sample_image(n_row, batches_done,date_string):\n # Sample noise\n z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))\n #print(\"z == \" + str(z))\n # Get labels ranging from 0 to n_classes for n rows\n labels = np.array([num for _ in range(n_row) for num in range(n_row)])\n #print(\"labels == \" + str(labels))\n labels = Variable(LongTensor(labels))\n #print(\"labels == \" + str(labels))\n gen_imgs = generator(z, labels)\n save_image(gen_imgs.data, b + \"/modelimage/full_\" + date_string + \"_%s.png\" % (str(batches_done).zfill(4)), nrow=n_row, normalize=True)\n src = b + '/modelimage/full_' + date_string + '_0001.png'\n dst = '/content/gdrive/My Drive/TFE/dataset/modelimage/full_0001.png'\n copyfile(src,dst)", "title": "" }, { "docid": "0745cae776a57264da16f4f8ef428be4", "score": "0.5650079", "text": "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n #load data\n cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)\n\n #hyperparameters\n eta = FLAGS.learning_rate\n eps = 1e-6 # convergence criterion\n max_steps = FLAGS.max_steps\n b_size = FLAGS.batch_size\n\n\n #load test data\n x_test = cifar10[\"test\"].images\n y_test = cifar10[\"test\"].labels\n\n\n n_inputs = np.size(x_test,0)\n n_classes = np.size(y_test,1)\n v_size = np.size(x_test,1) * np.size(x_test,2) * np.size(x_test,3)\n\n x_test = x_test.reshape((n_inputs, v_size))\n\n #load whole train data ############################################################\n x_train = cifar10[\"train\"].images\n x_train = x_train.reshape((np.size(x_train,0), v_size))\n y_train = cifar10[\"train\"].labels\n\n #initialize the MLP model\n model = MLP(n_inputs = v_size, n_hidden = dnn_hidden_units, n_classes = n_classes)\n get_loss = CrossEntropyModule()\n\n train_loss = []\n test_loss = []\n train_acc = []\n test_acc = []\n\n for step in range(max_steps):\n\n #get batch\n x, y = cifar10['train'].next_batch(b_size)\n\n #stretch input images into vectors\n x = x.reshape(b_size, v_size)\n\n #forward pass\n pred = model.forward(x)\n\n #get loss\n current_train_loss = get_loss.forward(pred,y)\n\n #get loss gradient\n current_loss_grad = get_loss.backward(pred,y)\n\n #backpropagation\n model.backward(current_loss_grad)\n\n\n # #SGD\n for l in model.layers:\n l.params[\"weight\"] -= eta*l.grads[\"weight\"]\n l.params[\"bias\"] -= eta*l.grads[\"bias\"]\n\n if (step % FLAGS.eval_freq) == 0:\n # train_loss.append(current_train_loss)\n # current_train_acc = accuracy(pred, y)\n # train_acc.append(current_train_acc)\n \n train_pred = model.forward(x_train)\n\n current_train_loss = get_loss.forward(train_pred, y_train)\n train_loss.append(current_train_loss)\n current_train_acc = 
accuracy(train_pred, y_train)\n train_acc.append(current_train_acc)\n\n test_pred = model.forward(x_test)\n\n current_test_loss = get_loss.forward(test_pred, y_test)\n test_loss.append(current_test_loss)\n current_test_acc = accuracy(test_pred, y_test)\n test_acc.append(current_test_acc)\n\n print('\\nStep ',step, '\\n------------\\nTraining Loss = ', current_train_loss, ', Train Accuracy = ', current_train_acc, '\\nTest Loss = ', current_test_loss, ', Test Accuracy = ', current_test_acc)\n\n if step > 0 and abs(test_loss[(int(step/100))] - test_loss[int(step/100)-1]) < eps:\n break\n\n plot_graphs(train_loss, 'Training Loss', 'orange',\n test_loss, 'Test Loss', 'blue',\n title='Stochastic gradient descent',\n ylabel='Loss',\n xlabel='Steps')\n\n plot_graphs(train_acc, 'Training Accuracy', 'darkorange',\n test_acc, 'Test Accuracy', 'darkred',\n title='Stochastic gradient descent',\n ylabel='Accuracy',\n xlabel='Steps')\n\n # #save results:\n # path = \"./results/numpy results/\"\n # np.save(path + 'train_loss', train_loss)\n # np.save(path + 'train_acc', train_acc)\n # np.save(path + 'test_loss', test_loss)\n # np.save(path + 'test_acc', test_acc)\n\n\n np.save('train_loss', train_loss)\n np.save('train_acc', train_acc)\n np.save('test_loss', test_loss)\n np.save('test_acc', test_acc)\n\n\n # raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################", "title": "" }, { "docid": "17d5ced8f22a4540b76febcaa16cd64f", "score": "0.564733", "text": "def _process_image_files(name, filenames, labels, medical_dicts, num_shards):\n\t\n\tassert len(filenames) == len(labels)\n\tassert len(filenames) == medical_dicts.shape[0]\t\n\t\n\t# break all images into batches\n\tspacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)\n\tranges = []\n\tfor i in range(len(spacing)-1):\n\t\tranges.append([spacing[i], spacing[i+1]])\n\t\n\t# lauch a thread for each batch\n\tprint('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n\tsys.stdout.flush()\n\t\n\t# create a mechnism for monitoring when all threads are finished\n\tcoord = tf.train.Coordinator()\n\n\t# create a generic Tensorflow-based utility for converting all image codings\n\tcoder = ImageCoder()\t\n\t\n\tthreads = []\n\tfor thread_index in range(len(ranges)):\n\t\targs = (coder, thread_index, ranges, name, filenames, labels, medical_dicts, num_shards)\n\t\tt = threading.Thread(target=_process_image_files_batch, args=args)\n\t\tt.start()\n\t\tthreads.append(t)\n\t\n\t# wait for all the threads to terminate\n\tcoord.join(threads)\n\tprint('%s: Finished writing all %d images in data set.' 
% (datetime.now(), len(filenames)))\n\tsys.stdout.flush()", "title": "" }, { "docid": "5eb56481508de063e9f1c11025c4bf7d", "score": "0.56395304", "text": "def main():\n opt = {}\n opt['n_thread'] = 20\n opt['compression_level'] = 3\n\n # HR images\n opt['input_folder'] = 'datasets/DIV2K/DIV2K_train_HR'\n opt['save_folder'] = 'datasets/DIV2K/DIV2K_train_HR_sub'\n opt['crop_size'] = 480\n opt['step'] = 240\n opt['thresh_size'] = 0\n extract_subimages(opt)\n\n # LRx4 images\n opt['input_folder'] = 'datasets/DIV2K/DIV2K_train_LR_bicubic/X4'\n opt['save_folder'] = 'datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub'\n opt['crop_size'] = 120\n opt['step'] = 60\n opt['thresh_size'] = 0\n extract_subimages(opt)", "title": "" }, { "docid": "f76d06181f47270d2979b91cbd5e26d0", "score": "0.5636655", "text": "def perpare_train_images_from_file(path):\r\n for i in range(0, 12500):\r\n name = 'cat.%d.jpg' %i\r\n img = cv2.imread(path + name)\r\n img = preprocess_image_for_stacking(img)\r\n cv2.imwrite(saved_train_images_path + name, img)\r\n\r\n for i in range(0, 12500):\r\n name = 'dog.%d.jpg' %i\r\n img = cv2.imread(path + name)\r\n img = preprocess_image_for_stacking(img)\r\n cv2.imwrite(saved_train_images_path + name, img)", "title": "" }, { "docid": "cf40904d29e0aad067cea0bcd9b39d43", "score": "0.5636321", "text": "def load_images_from_files_no_parts(\n filenames,\n max_imgs=500,\n do_shuffle=True,\n run_parallel=True,\n shape=(299, 299),\n num_workers=10,\n):\n image_size = shape[0]\n imgs = []\n # First shuffle a copy of the filenames.\n filenames = filenames[:]\n if do_shuffle:\n np.random.shuffle(filenames)\n\n imgs = torch.empty((0, 3, image_size, image_size))\n if run_parallel:\n pool = multiprocessing.Pool(num_workers)\n img_pool = pool.map(\n lambda filename: load_image_from_file_no_parts(filename, shape),\n filenames[:max_imgs],\n )\n # logging.info(img_pool)\n for img in img_pool:\n if img is not None:\n # imgs.append(img)\n img = img.view(1, 3, shape[0], shape[1])\n imgs = torch.cat([imgs, img], dim=0)\n if imgs.shape[0] <= 1:\n raise ValueError(\n \"You must have more than 1 image in each class to run TCAV.\"\n )\n else:\n for filename in filenames:\n img = load_image_from_file_no_parts(filename, shape)\n if img is not None:\n # imgs.append(img)\n img = img.view(1, 3, shape[0], shape[1])\n imgs = torch.cat([imgs, img], dim=0)\n if imgs.shape[0] <= 1:\n raise ValueError(\n \"You must have more than 1 image in each class to run TCAV.\"\n )\n elif imgs.shape[1] >= max_imgs:\n break\n # if len(imgs) <= 1:\n # raise ValueError(\n # \"You must have more than 1 image in each class to run TCAV.\"\n # )\n # elif len(imgs) >= max_imgs:\n # break\n\n return imgs\n # return np.array(imgs)", "title": "" }, { "docid": "f1adfbd7d2d72beaa50c881063e8e852", "score": "0.56335396", "text": "def filesampler(files, testsetsize = 0.1, devsetsize = 0, trainsetsize = 0, outputdir = '', encoding='utf-8'):\r\n\r\n if not isinstance(files, list):\r\n files = list(files)\r\n\r\n total = 0\r\n for filename in files:\r\n f = io.open(filename,'r', encoding=encoding)\r\n count = 0\r\n for line in f:\r\n count += 1\r\n f.close()\r\n if total == 0:\r\n total = count\r\n elif total != count:\r\n raise Exception(\"Size mismatch, when multiple files are specified they must contain the exact same amount of lines!\")\r\n\r\n #support for relative values:\r\n if testsetsize < 1:\r\n testsetsize = int(total * testsetsize)\r\n if devsetsize < 1 and devsetsize > 0:\r\n devsetsize = int(total * devsetsize)\r\n\r\n\r\n if testsetsize >= 
total or devsetsize >= total or testsetsize + devsetsize >= total:\r\n raise Exception(\"Test set and/or development set too large! No samples left for training set!\")\r\n\r\n\r\n trainset = {}\r\n testset = {}\r\n devset = {}\r\n for i in range(1,total+1):\r\n trainset[i] = True\r\n for i in random.sample(trainset.keys(), testsetsize):\r\n testset[i] = True\r\n del trainset[i]\r\n\r\n if devsetsize > 0:\r\n for i in random.sample(trainset.keys(), devsetsize):\r\n devset[i] = True\r\n del trainset[i]\r\n\r\n if trainsetsize > 0:\r\n newtrainset = {}\r\n for i in random.sample(trainset.keys(), trainsetsize):\r\n newtrainset[i] = True\r\n trainset = newtrainset\r\n\r\n for filename in files:\r\n if not outputdir:\r\n ftrain = io.open(filename + '.train','w',encoding=encoding)\r\n else:\r\n ftrain = io.open(outputdir + '/' + os.path.basename(filename) + '.train','w',encoding=encoding)\r\n if not outputdir:\r\n ftest = io.open(filename + '.test','w',encoding=encoding)\r\n else:\r\n ftest = io.open(outputdir + '/' + os.path.basename(filename) + '.test','w',encoding=encoding)\r\n if devsetsize > 0:\r\n if not outputdir:\r\n fdev = io.open(filename + '.dev','w',encoding=encoding)\r\n else:\r\n fdev = io.open(outputdir + '/' + os.path.basename(filename) + '.dev','w',encoding=encoding)\r\n\r\n f = io.open(filename,'r',encoding=encoding)\r\n for linenum, line in enumerate(f):\r\n if linenum+1 in trainset:\r\n ftrain.write(line)\r\n elif linenum+1 in testset:\r\n ftest.write(line)\r\n elif devsetsize > 0 and linenum+1 in devset:\r\n fdev.write(line)\r\n f.close()\r\n\r\n ftrain.close()\r\n ftest.close()\r\n if devsetsize > 0: fdev.close()", "title": "" }, { "docid": "27dcda27d19f31bbf2e6ed649bfc25af", "score": "0.56313527", "text": "def sample_batch(self, batch_type, batch_size):\n\n if batch_type == \"train\":\n folders = self.meta_train_characters\n if batch_type == \"test\":\n folders = self.meta_test_characters\n if batch_type == \"val\":\n folders = self.meta_val_characters\n\n num_batches = len(folders)//batch_size\n folders = folders[:num_batches*batch_size]\n all_image_batches = []\n all_label_batches = []\n\n for batch_idx in range(batch_size):\n sample_classes = random.sample(folders, self.num_classes)\n #sample_classes = folders[batch_idx*self.num_classes : (batch_idx+1)*self.num_classes]\n one_hot_labels = np.identity(self.num_classes)\n\n labels_images = get_images(sample_classes, one_hot_labels, nb_sample=self.num_samples_per_class, shuffle=False)\n train_images = []\n train_labels = [] \n for sample_idx, (labels, images) in enumerate(labels_images):\n train_images.append(image_file_to_array(images, 784))\n train_labels.append(labels)\n\n \n train_images, train_labels = shuffle(train_images, train_labels)\n\n labels = np.vstack(train_labels).reshape((-1, self.num_classes, self.num_classes)) # K, N, N\n images = np.vstack(train_images).reshape((self.num_samples_per_class, self.num_classes, -1)) # K x N x 784\n\n all_label_batches.append(labels)\n all_image_batches.append(images)\n\n all_image_batches = np.stack(all_image_batches).astype(np.float32)\n all_label_batches = np.stack(all_label_batches).astype(np.float32)\n\n return all_label_batches, all_image_batches", "title": "" }, { "docid": "763e7c78fbe1390d6e09cdcdf4efc45a", "score": "0.56270367", "text": "def load_next_batch(file_object, batch_size=20):\r\n while True:\r\n\r\n data = [ [], [], [] ] # [enc_images, enc_texts, labels]\r\n item_count = 0\r\n\r\n for l in file_object:\r\n\r\n data_split = l.split(\";\")\r\n\r\n\r\n image = 
[float(v) for v in data_split[0].split()]\r\n\r\n question = [float(v) for v in data_split[1].split()]\r\n\r\n label = int(data_split[2])\r\n\r\n\r\n #image = data[0][0]\r\n reshape_image = []\r\n for i in range(3):\r\n layer = []\r\n for j in range(224):\r\n line = []\r\n for k in range(224):\r\n line.append(image[k+j*224+i*3])\r\n layer.append(line)\r\n reshape_image.append(layer)\r\n tensor_image = torch.FloatTensor(reshape_image)\r\n\r\n #question = data[1][0]\r\n reshape_question = []\r\n for i in range(16):\r\n layer = []\r\n for j in range(1536):\r\n layer.append(question[j+i*16])\r\n reshape_question.append(layer)\r\n tensor_question = torch.FloatTensor(reshape_question) \r\n\r\n #label = data[2][0]\r\n\r\n\r\n #combine_form = [tensor_image,tensor_question,label]\r\n #print(\"tensor :\",combine_form)\r\n\r\n #data[0] = torch.cat((data[0], tensor_image), 0)\r\n\r\n\r\n data[0].append(tensor_image)\r\n data[1].append(tensor_question)\r\n data[2].append(label)\r\n item_count += 1\r\n\r\n if item_count == batch_size:\r\n\r\n yield data\r\n data = [ [], [], [] ]\r\n item_count = 0\r\n \r\n if item_count != 0:\r\n yield data\r\n \r\n break", "title": "" }, { "docid": "8a3389dbd5d9496b88199e5984db2243", "score": "0.5627003", "text": "def learning_iteration(self):\n # print 'DoCaffeTrainng'\n startTime = time.time()\n # print 'q_imStack.qsize() : ', self.q_imStack.qsize()\n # print 'q_labelStack.qsize() : ', self.q_labelStack.qsize()\n\n\n # if too few items in queue do not proceed with iterations\n if self.q_imStack.qsize() < 16*5:\n return None\n\n\n\n batchsize = self.solver.net.blobs['data'].data.shape[0]\n # print 'batchsize', batchsize\n # print \"self.solver.net.blobs['data'].data\", self.solver.net.blobs['data'].data.shape\n # print \"self.solver.net.blobs['label_x'].data\",self.solver.net.blobs['label_x'].data.shape\n for i in range(batchsize):\n im = self.q_imStack.get() #320x240x3\n y = self.q_labelStack.get()\n\n im_noisy = Noise.noisy( 'gauss', im )\n im_gry = np.mean( im_noisy, axis=2)\n\n\n # cv2.imwrite( str(i)+'__.png', x )\n\n cencusTR = ct.censusTransform( im_gry.astype('uint8') )\n edges_out = cv2.Canny(cv2.blur(im_gry.astype('uint8'),(3,3)),100,200)\n\n\n self.solver.net.blobs['data'].data[i,0,:,:] = self.zNormalized( im_gry.astype('float32') )\n self.solver.net.blobs['data'].data[i,1,:,:] = self.zNormalized( cencusTR.astype('float32') )\n self.solver.net.blobs['data'].data[i,1,:,:] = self.zNormalized( edges_out.astype('float32') )\n self.solver.net.blobs['label_x'].data[i,0] = y[0]\n self.solver.net.blobs['label_y'].data[i,0] = y[1]\n self.solver.net.blobs['label_z'].data[i,0] = y[2]\n self.solver.net.blobs['label_yaw'].data[i,0] = y[3]\n # print y[0], y[1], y[2], y[3]\n\n self.solver.step(1)\n self.caffeTrainingLossX[self.caffeIter] = self.solver.net.blobs['loss_x'].data\n self.caffeTrainingLossY[self.caffeIter] = self.solver.net.blobs['loss_y'].data\n self.caffeTrainingLossZ[self.caffeIter] = self.solver.net.blobs['loss_z'].data\n self.caffeTrainingLossYaw[self.caffeIter] = self.solver.net.blobs['loss_yaw'].data\n if self.caffeIter % 50 == 0 and self.caffeIter>0:\n print 'Writing File : train_loss.npy'\n np.save('train_loss_x.npy', self.caffeTrainingLossX[0:self.caffeIter])\n np.save('train_loss_y.npy', self.caffeTrainingLossY[0:self.caffeIter])\n np.save('train_loss_z.npy', self.caffeTrainingLossZ[0:self.caffeIter])\n np.save('train_loss_yaw.npy', self.caffeTrainingLossYaw[0:self.caffeIter])\n\n #time.sleep(.3)\n print 'my_iter=%d, solver_iter=%d, time=%f, 
loss_x=%f, lossYaw=%f' % (self.caffeIter, self.solver.iter, time.time() - startTime, self.caffeTrainingLossX[self.caffeIter], self.caffeTrainingLossYaw[self.caffeIter])\n self.caffeIter = self.caffeIter + 1", "title": "" }, { "docid": "6d55765aba085fccf56de5ef5af45cef", "score": "0.562468", "text": "def crop_faces():\n model_name = 'mxnet-face-fr50'\n _, arg_params, aux_params = mx.model.load_checkpoint(model_name, 0)\n arg_params, aux_params = chg_ctx(arg_params, aux_params, get_ctx())\n sym = faster_rcnn.faster_rcnn50(num_class=2)\n img_dir = '../CelebA/Img/img_celeba'\n face_id_dir = '../CelebA/Anno/identity_CelebA.txt'\n\n cnt = 0\n num_multi = 0\n num_fail = 0\n # get the map of img_name to face_id\n img_map = {}\n with open(face_id_dir) as file:\n for line in file:\n splited = line.split()\n img_map[splited[0]] = splited[1]\n\n for file_name in os.listdir(img_dir):\n file_path = os.path.join(img_dir, file_name)\n img_np = cv2.imread(file_path)\n # img_np, scale = resize_long(img_np, 600)\n img_nd = np2nd(img_np)\n boxes = detect_face(get_ctx(), sym, arg_params, aux_params, img_nd)\n img_np, crops = stick_boxes(img_np, boxes)\n show_img(crops[0])\n if cnt == 10:\n break\n if len(crops) > 1:\n num_multi += 1\n elif len(crops) == 0:\n num_fail += 1\n if len(crops) != 0:\n crops[0] = resize_pad(crops[0], (120, 100))\n cur_id = img_map[file_name]\n path = '../crops/'+ cur_id\n if not os.path.exists(path):\n os.makedirs(path)\n cv2.imwrite(\"../crops/\"+cur_id+'/'+file_name, crops[0])\n cnt += 1\n if cnt % 500 == 0:\n print('Already processed: %d' % cnt)\n print('Current multis: %d' % num_multi)\n print('Current fails: %d\\n' % num_fail)\n #break\n #if cnt >= 20:\n # break", "title": "" }, { "docid": "e337d4a132aa4473735779598954b9f0", "score": "0.5619468", "text": "def prepro(target,tiles,repeat,canon):\n #time measure starts\n start=time.time()\n #print 'start preprocessing...'\n \n #get target image's dimension\n img=Image.open(target)\n target_dimen=img.size\n\n #check there is at least one tile \n if not len(os.listdir(tiles)) > 0:\n print one_tile_err\n cleanup(canon)\n sys.exit()\n \n #get tile image's format and dimension \n for tile in os.listdir(tiles):\n try:\n tile_img=Image.open(tiles+'/'+tile)\n tile_format=imghdr.what(tiles+'/'+tile)\n except:\n print tile_format_err\n print tiles+'/'+tile\n cleanup(canon)\n sys.exit()\n break\n tile_dimen=tile_img.size\n\n \n if not tile_format in ['jpeg','png','bmp','tiff','gif']:\n print tile_format_err\n print tiles+'/'+tile\n cleanup(canon)\n sys.exit()\n \n #make sure dimension requirements meet\n if target_dimen[0]%tile_dimen[0] or target_dimen[1]%tile_dimen[1]:\n print dimen_error\n cleanup(canon)\n sys.exit()\n\n #check if tiles can construct final mosaic under\n #repeat time restriction\n width_time=target_dimen[0]/tile_dimen[0]\n height_time=target_dimen[1]/tile_dimen[1]\n num_of_tile=width_time*height_time\n num_available=repeat*len(os.listdir(tiles))\n if num_of_tile > num_available:\n print construct_err\n cleanup(canon)\n sys.exit()\n\n L=sorted(os.listdir(tiles),key=lambda x: os.path.splitext(x)[0])\n \n #process every tile \n for tile in L:\n each_format=imghdr.what(tiles+'/'+tile)\n if each_format not in \\\n ['jpeg','png','bmp','tiff','gif']:\n print tile_format_err\n print tiles+'/'+tile\n cleanup(canon)\n sys.exit()\n \n try:\n each_tile=Image.open(tiles+'/'+tile)\n except:\n print 'Error: tile invalid!'\n print tiles+'/'+tile\n cleanup(canon)\n sys.exit()\n\n if not tile_dimen==each_tile.size: \n print tile_dimen_err\n 
print tiles+'/'+tile\n cleanup(canon)\n sys.exit()\n \n if not each_format == tile_format:\n print tile_format_err\n print tiles+'/'+tile\n cleanup(canon)\n sys.exit()\n \n #print \"Total time for preprocessing: \"\\\n # + str(time.time()-start)+ \" seconds.\" \n return (target_dimen,tile_dimen)", "title": "" }, { "docid": "fd340c1c842f23c5d590e31a4c88efe4", "score": "0.5617148", "text": "def get_batches_fn(batch, y_batch, image_shape):\n images = []\n labels = []\n for i, (image_file, label) in enumerate(zip(batch, y_batch)):\n try:\n img = scipy.misc.imread(image_file, mode='RGB')\n if image_file.split('.')[-1] != 'jpeg':\n img = imutils.rotate_bound(img, 90)\n if img is not None:\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n if random.random() < 0.5:\n neares = cv2.resize(img, (image_shape[1],image_shape[0]), interpolation=cv2.INTER_NEAREST) / 255\n images.append(neares)\n\n labels.append(label)\n else:\n cubic = cv2.resize(img, (image_shape[1], image_shape[0]), interpolation=cv2.INTER_CUBIC) / 255\n images.append(cubic)\n\n labels.append(label)\n # print(\"\\r import data {:d}/{:d}\".format(i, len(batch)), end=\"\")\n # sys.stdout.flush()\n\n except Exception as e:\n print(e)\n pass\n return np.array(images), np.array(labels)", "title": "" }, { "docid": "fc74de44785cf0ef8b046e56c8aa6d60", "score": "0.5612763", "text": "def create_learning_directory(count_per_tag=1400, train_count=1000, size=(150, 150, 3)):\n assert size[2] in {1, 3}, \"Wrong number of channels, it must be 1 or 3\"\n tags = raw_data_paths.keys()\n for i, tag in enumerate(tags):\n for filename in os.listdir(raw_data_paths[tag])[:train_count]:\n img = io.imread(raw_data_paths[tag] + filename)\n img = transform.resize(img, size)\n io.imsave('{path}/{tag}/{file}'.format(path=learn_data_paths['Train'], tag=tag, file=filename), img)\n for filename in os.listdir(raw_data_paths[tag])[train_count:count_per_tag]:\n img = io.imread(raw_data_paths[tag] + filename)\n img = transform.resize(img, size)\n io.imsave('{path}/{tag}/{file}'.format(path=learn_data_paths['Validation'], tag=tag, file=filename), img)", "title": "" }, { "docid": "f820f0cc813fb2116ebb83b841589ad4", "score": "0.56074417", "text": "def train10(data, batch_size, data_list):\n return reader_creator_filepath(data, 'data_batch', True,\n batch_size, data_list)", "title": "" }, { "docid": "1d4b4bb11bd02da362978207014ff380", "score": "0.5606145", "text": "def get_batches_fn(batch_size):\n for folder in data_folder:\n image_paths = glob(os.path.join(folder, 'color*.png')) # previously 'data*.png'\n label_paths = {\n re.sub(r'ground_truth', 'color', os.path.basename(path)): path # previously 'ground_truth', 'data'\n for path in glob(os.path.join(folder, 'ground_truth*.png'))}\n background_color = np.array([0, 0, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "title": "" }, { "docid": "d9993d8ec3d39babd88fb4d81210ab35", "score": "0.5587074", "text": 
"def train_CNN_cifar10(input_images, input_labels, train_step, batch_size, num_step):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n for i in range(num_step):\n # Training set\n batch_all = random_batch(data_images=input_images, data_labels=input_labels_labels, batch_size=batch_size)\n train_img = batch_all[0]\n train_lab = batch_all[1]\n \n sess.run(train_step, feed_dict={x: train_img, y: train_lab, pkeep: 0.5})\n \n if i%1000 == 0: \n valid_acu = sess.run(accuracy, {x: valid_img, y: valid_lab, pkeep:1.0})\n print(\"\\rAfter step {0:3d}, validation accuracy {1:0.4f}\".format(i, valid_acu))\n if i%10000 == 0:\n saver = tf.train.Saver()\n saver.save(sess, \"./alex_on_cifar10/\", global_step=i)", "title": "" }, { "docid": "45791e8a34a64d96298ef0b6d4bd4337", "score": "0.55828744", "text": "def get_batches_fn(batch_size):\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)", "title": "" }, { "docid": "f09753431e53c03fe82b2911cec8e4d3", "score": "0.55777544", "text": "def load_imagenet_random_train(path_img, path_info, args):\r\n imgnet_info = load_file(path_info)\r\n name_folders = [p.split('/')[0].strip() for p in imgnet_info]\r\n name_folders = list(sorted(set(name_folders)))\r\n \r\n if args.random_train == True:\r\n for i, n in enumerate(name_folders):\r\n random_name_file = sorted(random.sample(os.listdir(path_img + n), args.random_train_size))\r\n process_folder = numpy_append_advance([load_img_imagenet(path_img + n + '/' + r, args) for r in random_name_file])\r\n label_folder = get_label_imagenet(name_folder=n, imagnet_info=imgnet_info)\r\n label_folder = np.array([label_folder for i in range(args.random_train_size)]) \r\n print('Processing folder %i with have name: %s' % (i, n))\r\n pickle.dump((process_folder, label_folder), open('./dataset/imagenet/%s_%i_%s_.p' % (args.model, i, n), 'wb'))\r\n print('--------------------------------------------------')\r\n print('We finish processing the IMAGENET dataset')\r\n print('--------------------------------------------------')\r\n \r\n path_file = './dataset/imagenet/'\r\n x_random_train, y_random_train = list(), list()\r\n for i, n in enumerate(name_folders):\r\n x, y = pickle.load(open(path_file + args.model + '_' + str(i) + '_' + str(n) + '_.p', 'rb')) \r\n x_random_train.append(x)\r\n y_random_train.append(y) \r\n x_random_train = np.concatenate(x_random_train, axis=0)\r\n y_random_train = np.concatenate(y_random_train, axis=0)\r\n pickle.dump((x_random_train, y_random_train), open('./dataset/imagenet_train_%s.p' % (args.model), 'wb'), protocol=4)\r\n return x_random_train, y_random_train\r\n\r\n else:\r\n path_file = 
'./dataset/imagenet_train_%s.p' % (args.model)\r\n x, y = pickle.load(open(path_file, 'rb'))\r\n return x, y", "title": "" }, { "docid": "1db8f16ac7b957a6de1b1cc546af61a5", "score": "0.55769527", "text": "def make_multi_crop_batch(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n\n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n\n image = coder.decode_jpeg(image_data)\n\n crops = []\n print('Running multi-cropped image')\n h = image.shape[0]\n w = image.shape[1]\n hl = h - RESIZE_FINAL\n wl = w - RESIZE_FINAL\n\n crop = tf.image.resize_images(image, (RESIZE_FINAL, RESIZE_FINAL))\n crops.append(standardize_image(crop))\n crops.append(tf.image.flip_left_right(crop))\n\n corners = [ (0, 0), (0, wl), (hl, 0), (hl, wl), (int(hl/2), int(wl/2))]\n for corner in corners:\n ch, cw = corner\n cropped = tf.image.crop_to_bounding_box(image, ch, cw, RESIZE_FINAL, RESIZE_FINAL)\n crops.append(standardize_image(cropped))\n flipped = tf.image.flip_left_right(cropped)\n crops.append(standardize_image(flipped))\n\n image_batch = tf.stack(crops)\n return image_batch", "title": "" }, { "docid": "03627a774a983255b85aa8c113f2eade", "score": "0.5570246", "text": "def create_images(image_objects,\n bw=30,\n train_val_split = 0.1):\n sns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\",\n 'figure.facecolor': 'white',\n 'axes.spines.bottom': False,\n 'axes.spines.left': False,\n 'axes.spines.right': False,\n 'axes.spines.top': False,\n\n })\n\n counter = 1\n input_images_dir_name = \"input_images\"\n label_images_dir_name = \"masks\"\n\n\n\n train_val_split = 0.1\n\n images_names_list = [int(re.search(r'\\d+', os.path.basename(images[-2])).group()) for images in image_objects]\n unque_images_length = len(np.unique(images_names_list))\n splitting_imagename = int(unque_images_length * (1- train_val_split))\n\n\n for i, image in enumerate(each_images):\n\n\n image_id = os.path.basename(image[-2])\n\n image_id = image[1]+\"_\" +image[2]+\"_\" +image[3]+\"_\" +image_id\n\n os.chdir(output_path)\n if not os.path.isdir(image[2] + \"_\" +image[3]):\n subprocess.call(\"mkdir \" +image[2] + \"_\" +image[3], shell = True )\n\n #image_id = os.path.basename(image[-2])\n\n if not os.path.isdir(image[1]):\n subprocess.call(\"mkdir \" +image[2] + \"_\" +image[3]+\"/\"+image[1], shell = True )\n\n if (counter) % splitter == 0:\n directory_name = \"_validation\"\n else:\n directory_name = \"_training\"\n\n if not os.path.isdir(image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+input_images_dir_name+directory_name):\n subprocess.call(\"mkdir \"+image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+input_images_dir_name+ directory_name,\n shell = True )\n\n copyfile(image[-2], image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+input_images_dir_name+ directory_name+\"/\"+image_id)\n\n if not os.path.isdir(image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+label_images_dir_name+ directory_name):\n subprocess.call(\"mkdir \"+image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+label_images_dir_name+ directory_name ,\n shell = True )\n\n counter = counter +1\n plt.figure(figsize=(w/my_dpi, h/my_dpi), dpi=my_dpi)\n fig = sns.kdeplot(image[0].iloc[:,2],\n image[0].iloc[:,3],\n kernel = \"gau\",\n bw= 30,\n cmap = plt.cm.gray,\n shade=True,\n n_levels = 100,\n legend= False,\n cumulative= False)\n fig.axes.get_yaxis().set_visible(False)\n 
fig.axes.get_xaxis().set_visible(False)\n fig.axes.invert_yaxis()\n\n plt.tight_layout()\n# plt.savefig(image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+label_images_dir_name+directory_name +\"/\"+image_id,\n# dpi=my_dpi,\n# transparent='True',\n# bbox_inches='tight',\n# pad_inches = 0)\n\n plt.setp([fig.get_xticklines() +\n fig.get_yticklines() +\n fig.get_xgridlines() +\n fig.get_ygridlines()],antialiased=False)\n\n figure = fig.get_figure()\n figure.tight_layout(pad=0)\n figure.canvas.draw()\n\n\n\n image1 = np.fromstring(figure.canvas.tostring_rgb(),dtype=np.uint8,sep='')\n image1 = image1.reshape(figure.canvas.get_width_height()[::-1] + (3,))\n\n\n misc.imsave(image[2] + \"_\" +image[3]+\"/\"+image[1]+\"/\"+label_images_dir_name+directory_name +\"/\"+image_id, image1[:,:,0])", "title": "" }, { "docid": "f541dbe69d6eb9b6c6c8d8f9ff45e856", "score": "0.5564803", "text": "def give_CUB200_datasets(opt):\n image_sourcepath = opt.source_path+'/images'\n #Find available data classes.\n image_classes = sorted([x for x in os.listdir(image_sourcepath) if '._' not in x], key=lambda x: int(x.split('.')[0]))\n #Make a index-to-labelname conversion dict.\n conversion = {int(x.split('.')[0]):x.split('.')[-1] for x in image_classes}\n #Generate a list of tuples (class_label, image_path)\n image_list = {int(key.split('.')[0]):sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key) if '._' not in x]) for key in image_classes}\n image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]\n image_list = [x for y in image_list for x in y]\n\n #Image-dict of shape {class_idx:[list of paths to images belong to this class] ...}\n image_dict = {}\n for key, img_path in image_list:\n key = key-1\n if not key in image_dict.keys():\n image_dict[key] = []\n image_dict[key].append(img_path)\n\n keys = sorted(list(image_dict.keys()))\n\n #Following \"Deep Metric Learning via Lifted Structured Feature Embedding\", we use the first half of classes for training.\n train,test = keys[:len(keys)//2], keys[len(keys)//2:]\n train_image_dict, val_image_dict = {key:image_dict[key] for key in train},{key:image_dict[key] for key in test}\n\n\n train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)\n val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)\n eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)\n\n train_dataset.conversion = conversion\n val_dataset.conversion = conversion\n eval_dataset.conversion = conversion\n\n return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset}", "title": "" }, { "docid": "97318b8654051d09db1f04727a618760", "score": "0.5564543", "text": "def read_batch_with_filepath(self, shuffle=True):\n mc = self.mc\n\n if shuffle:\n if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):\n self._shuffle_image_idx()\n batch_idx = self._perm_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]\n self._cur_idx += mc.BATCH_SIZE\n else:\n if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):\n batch_idx = self._image_idx[self._cur_idx:] \\\n + self._image_idx[:self._cur_idx + mc.BATCH_SIZE-len(self._image_idx)]\n self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)\n else:\n batch_idx = self._image_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]\n self._cur_idx += mc.BATCH_SIZE\n\n lidar_per_batch = []\n lidar_mask_per_batch = []\n intensity_per_batch = []\n multiplier_per_batch = []\n delta_per_batch = []\n bin_per_batch = []\n 
filepath_per_batch = []\n record_per_batch = []\n \n for idx in batch_idx:\n # load data\n # loading from npy is 30x faster than loading from pickle\n filepath = os.path.join(self._output_path, idx+'.npy')\n filepath_per_batch.append(filepath)\n record = np.load(self._lidar_2d_path_at(idx, gta = True)).astype(np.float32, copy=False)\n record_per_batch.append(record)\n INPUT_MEAN = mc.INPUT_MEAN_GTAV\n INPUT_STD = mc.INPUT_STD_GTAV\n #comment this part out when saving the data for the first time\n \n lidar = record[:, :, :5] # x, y, z, intensity, depth\n intensity = record[:, :, 3]\n if os.path.exists(self._R_path_at(idx, gta = True)) and os.path.exists(self._multiplier_path_at(idx, gta = True))\\\n and os.path.exists(self._mask_path_at(idx, gta = True)):\n multiplier = np.load(self._multiplier_path_at(idx, gta = True)).astype(np.float32, copy = False)\n lidar_mask = np.load(self._mask_path_at(idx, gta = True)).astype(bool, copy = False)\n R = np.load(self._R_path_at(idx, gta = True)).astype(np.float32, copy = False)\n else:\n depths = record[:,:,4]\n position_vectors = lidar[:,:,[0, 1, 2]]\n normals = self.calculate_normals(position_vectors)\n R, multiplier, dp_mask = self.calculate_RAndMulti(depths, normals, position_vectors, intensity)\n depth_mask = np.reshape(\n (lidar[:, :, 4] > 0),\n [mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL]\n )\n R_mask = np.reshape((R[:,:,] < 1000), [mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL])\n lidar_mask = np.logical_and(dp_mask, np.logical_and(depth_mask, R_mask))\n np.save(self._R_path_at(idx, gta = True), R)\n np.save(self._multiplier_path_at(idx, gta = True), multiplier)\n np.save(self._mask_path_at(idx, gta = True), lidar_mask)\n \n lidar_mask = np.reshape(lidar_mask, [mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL, 1])\n lidar = (lidar - INPUT_MEAN) / INPUT_STD \n lidar = np.delete(lidar, 3, 2)\n bin_label = np.zeros((mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL, mc.NUM_BIN))\n delta_label = np.zeros((mc.ZENITH_LEVEL, mc.AZIMUTH_LEVEL, mc.NUM_BIN))\n for l in range(mc.NUM_BIN):\n bin_mask = np.logical_and(\n R >= mc.MID_VALUES[l] - mc.BIN_INTERVALS[l]/2,\n R < mc.MID_VALUES[l] + mc.BIN_INTERVALS[l]/2)\n bin_label[:, :, l] = 1.0 * bin_mask\n delta_label[:, :, l] = (R - mc.MID_VALUES[l]) * bin_mask\n bin_label = scipy.ndimage.filters.gaussian_filter1d(bin_label, mc.SOFT_LABEL_SIGMA)\n\n # Append all the data\n lidar_per_batch.append(lidar)\n lidar_mask_per_batch.append(lidar_mask)\n bin_per_batch.append(bin_label)\n delta_per_batch.append(delta_label)\n intensity_per_batch.append(intensity)\n multiplier_per_batch.append(multiplier)\n\n assert len(lidar_per_batch) == mc.BATCH_SIZE and \\\n len(lidar_mask_per_batch) == mc.BATCH_SIZE and \\\n len(intensity_per_batch) == mc.BATCH_SIZE and \\\n len(multiplier_per_batch) == mc.BATCH_SIZE and \\\n len(bin_per_batch) == mc.BATCH_SIZE and \\\n len(delta_per_batch) == mc.BATCH_SIZE and \\\n len(record_per_batch) == mc.BATCH_SIZE and \\\n len(filepath_per_batch) == mc.BATCH_SIZE, \\\n 'imdb: data batch size error' \n\n return np.array(lidar_per_batch), np.array(lidar_mask_per_batch), \\\n np.array(intensity_per_batch), np.array(multiplier_per_batch), \\\n np.array(bin_per_batch), np.array(delta_per_batch),\\\n np.array(filepath_per_batch), np.array(record_per_batch)", "title": "" }, { "docid": "fdaf3900e04d125af465a1ec7eb7b1e7", "score": "0.5563647", "text": "def bootstrap(self, currentSize, val_class_acc):\n\n # Read in the original labeled data sample numbers\n DATA_DIR = FLAGS.data_dir or os.environ['ML_DATA']\n# print(\"DATA_DIR \", DATA_DIR)\n indx = 
FLAGS.dataset.index('-')\n name = FLAGS.dataset[:indx] + '-label.json'\n root = os.path.join(DATA_DIR, 'SSL2', name)\n with open(root) as f:\n origSamples = json.load(f)\n classify_op = self.ops.classify_op \n images, labels = self.tmp.cache['train_original']\n\n batch = FLAGS.batch # len(labels)//10\n predicted = []\n count = images.shape[0]\n for x in range(0, count, batch):\n p = self.session.run(\n classify_op,\n feed_dict={\n self.ops.x: images[x:x + batch]\n })\n predicted.append(p)\n del images\n predicted = np.concatenate(predicted, axis=0)\n preds = predicted.argmax(1)\n probs = predicted.max(1)\n top = np.argsort(-probs,axis=0)\n# print(\"preds \", preds.shape, preds)\n# print(\"probs \", probs.shape, probs)\n# unique_train_counts = [0]*self.nclass\n# unique_train_pseudo_labels, unique_train_counts = np.unique(preds, return_counts=True)\n# print(\"Number of training pseudo-labels in each class: \", unique_train_counts,\" for classes: \", unique_train_pseudo_labels)\n numPerClass = currentSize // self.nclass\n sortByClass = np.random.randint(0,high=len(labels), size=(self.nclass, numPerClass), dtype=int)\n indx = np.zeros([self.nclass], dtype=int)\n perClass = numPerClass * np.ones([self.nclass], dtype=int)\n tp = np.zeros([self.nclass], dtype=int)\n fp = np.zeros([self.nclass], dtype=int)\n fn = np.zeros([self.nclass], dtype=int)\n\n matches = []\n labls = preds[top]\n# samples = top\n trainLabelAcc = 0\n# print(\"number of samples \", len(top))\n# print(\"labls\",labls[:100])\n# print(\"labels\",labels[top[:100]])\n# print(\"pseudo-labels\",preds[top[:100]])\n# print(\"probs\",probs[top[:100]])\n# print(\"samples\", top[:100])\n samples = []\n pseudo_labels = []\n for i in origSamples['label']:\n pseudo_labels.append(labels[i])\n samples.append(i)\n matches.append(labels[i])\n tp[labels[i]] +=1\n if preds[i] == labels[i]:\n trainLabelAcc += 1\n indx[labels[i]] += 1\n trainLabelAcc = 100 * trainLabelAcc / len(origSamples['label'])\n if FLAGS.imbalance > 0:\n # mean val class accuracy\n valAcc = statistics.mean(val_class_acc)\n # Norm by FLAGS.imbalance*(currentSize - origSize)/ self.nclass\n norm = FLAGS.imbalance*(currentSize - self.origSize)/ self.nclass\n # for each class: mean - class\n adjust = np.zeros([self.nclass], dtype=float)\n for i in range(self.nclass):\n adjust[i] = valAcc - val_class_acc[i]\n# norm = norm/np.amax(np.absolute(adjust))\n norm = norm/np.amax(-adjust)\n \n # Adjust perClass, and make neg = pos\n sumAdj = 0\n for i in range(self.nclass):\n perClass[i] += np.rint(adjust[i]*norm)\n sumAdj += np.rint(adjust[i]*norm)\n for i in range(int(np.absolute(sumAdj))):\n perClass[i] -= np.sign(sumAdj)\n print(\"Estimated number of pseudo labels in each class \", perClass, \" adjustment sum \",sumAdj)\n\n for i in range(len(top)):\n if top[i] not in samples and indx[labls[i]] < perClass[labls[i]]:\n pseudo_labels.append(labls[i])\n samples.append(top[i])\n# print(i, labls[i], labels[top[i]],indx[labls[i]])\n if labls[i] == labels[top[i]]:\n matches.append(labls[i])\n tp[labels[top[i]]] +=1\n else:\n matches.append(self.nclass+labls[i])\n fn[labels[top[i]]] += 1\n fp[labls[i]] += 1\n indx[labls[i]] += 1\n if len(samples) < currentSize:\n i = len(samples)\n print(\"len(samples) < currentSize: len= \", i)\n while len(samples) < currentSize:\n if top[i] not in samples:\n pseudo_labels.append(labls[i])\n samples.append(top[i])\n if labls[i] == labels[top[i]]:\n tp[labels[top[i]]] +=1\n matches.append(labls[i])\n else:\n matches.append(self.nclass+labls[i])\n 
fn[labels[top[i]]] += 1\n fp[labls[i]] += 1\n i += 1\n# print(\"matches\",matches, \" Pseudo-labeling accuracy \", 100.0*sum(matches)/len(matches))\n sample = frozenset([int(x) for x in samples])\n print(\"Length pseudo labeled samples \",len(sample))\n plAcc = 100*sum(tp)/len(matches)\n precision = []\n recall = []\n f1 = []\n numClass = []\n for i in range(self.nclass):\n# tp = [j for j, x in enumerate(matches) if x == i]\n# fp = [j for j, x in enumerate(matches) if x == (self.nclass+i)]\n numClass.append(tp[i] + fp[i])\n# print(i, \"tp, fp, fn \", tp[i],\", \", fp[i],\", \", fn[i])\n precision.append(tp[i] / (tp[i] + fp[i]))\n recall.append(tp[i] / (tp[i] + fn[i]))\n f1.append(tp[i]/(tp[i]+ (fp[i]+fn[i])/2) )\n print(\" Class precision, recall, f1: \", precision, recall, f1 )\n print(\"Accuracy of the predicted pseudo-labels for \", len(matches), \": \",plAcc, \"Actual number of pseudo-labels \", numClass )\n\n\n # Set up new dataset in a random folder\n datasetName = self.dataset.name[:self.dataset.name.find('.')]\n if not self.boot:\n letters = string.ascii_letters\n subfolder = ''.join(random.choice(letters) for i in range(8))\n FLAGS.data_subfolder = 'temp/' + subfolder\n tf.gfile.MakeDirs(data.DATA_DIR+'/temp/'+subfolder)\n if not tf.gfile.Exists(data.DATA_DIR+'/temp/'+subfolder+'/'+datasetName+'-unlabel.json'):\n infile = data.DATA_DIR+'/SSL2/'+datasetName+'-unlabel.'\n outfile = data.DATA_DIR+'/temp/'+subfolder+'/'+datasetName+'-unlabel.'\n print(\"Copied from \",infile, \"* to \", outfile +'*')\n tf.io.gfile.copy(infile+'json', outfile + 'json')\n tf.io.gfile.copy(infile+'tfrecord', outfile + 'tfrecord')\n\n seedIndx = FLAGS.dataset.find('@')\n seed = int(FLAGS.dataset[seedIndx-1])\n input_file=data.DATA_DIR+'/'+datasetName+'-train.tfrecord'\n target = ('%s/%s/%s.%d@%d' % (data.DATA_DIR, FLAGS.data_subfolder, datasetName, seed, currentSize) )\n print(\"input_file \", input_file,\" target \",target)\n if tf.gfile.Exists(target + '-label.tfrecord'): \n tf.io.gfile.remove(target + '-label.tfrecord')\n tf.io.gfile.remove(target + '-label.json')\n\n matches = []\n tf.gfile.MakeDirs(os.path.dirname(target))\n with tf.python_io.TFRecordWriter(target + '-label.tfrecord') as writer_label:\n pos, loop = 0, trange(count, desc='Writing records')\n #for infile in input_file:\n for record in tf.python_io.tf_record_iterator(input_file):\n if pos in sample:\n pseudo_label = pseudo_labels[samples.index(pos)]\n if pseudo_label == labels[pos]:\n matches.append(1)\n else:\n matches.append(0)\n feat = dict(image=self._bytes_feature(tf.train.Example.FromString(record).features.feature['image'].bytes_list.value[0]),\n label=self._int64_feature(pseudo_label))\n newrecord = tf.train.Example(features=tf.train.Features(feature=feat))\n writer_label.write(newrecord.SerializeToString())\n pos += 1\n loop.update()\n loop.close()\n print(\"2. 
Pseudo-labeling accuracy \", 100.0*sum(matches)/len(matches),\" len(matches)= \",len(matches))\n with tf.gfile.Open(target + '-label.json', 'w') as writer:\n train_stats = np.array(perClass, np.float64)\n train_stats /= train_stats.max()\n writer.write(json.dumps(dict(distribution=train_stats.tolist(), label=sorted(sample)), indent=2, sort_keys=True))\n return", "title": "" }, { "docid": "53234c1b2a39bca78c625696254a9444", "score": "0.556001", "text": "def gen_batch_function(data_folder, image_shape, augmentation=True):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n # Shuffle training data\n random.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, len(image_paths), batch_size):\n # Elffer: Loop through part of the images\n # for batch_i in range(0, 5, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n # Augmentation\n if augmentation is True:\n images, gt_images = augment(images, gt_images)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "title": "" }, { "docid": "b3c31abe9d5e1e3fc9245a12469e99b0", "score": "0.55567014", "text": "def batch_process_images( config_details , file_list ):\n \n # Get output directories from configuraiton file\n out_dir = config_details.get('object_detection', 'batch_detection_folder')\n gs_out_dir = config_details.get('object_detection', 'gs_sub_dir') # grayscale image no object detected\n go_out_dir = config_details.get('object_detection', 'go_sub_dir') # grayscale image object detected\n od_out_dir = config_details.get('object_detection', 'od_sub_dir') # color image object detected\n no_out_dir = config_details.get('object_detection', 'no_sub_dir') # color image no object detected\n \n # Create output directories if they don't exist\n for needed_dir in (gs_out_dir , od_out_dir , no_out_dir , go_out_dir):\n needed_dir = os.path.join(out_dir,needed_dir)\n if not os.path.isdir(needed_dir):\n os.makedirs (needed_dir)\n \n # Load the frozen detection graph and the corresponding class dictionary\n # Processing using only the RGB graph is currently hard coded \n detection_graph = load_frozen_detection_model( \n config_details.get('object_detection' , 'rgb_frozen_model_path' )\n )\n id_to_label = load_tf_dict( \n config_details.get('object_detection' , 'rgb_dict_file' )\n )\n \n # Hard coded not to show results, but just to write them out to disk\n make_plots = False\n \n # (class_id , threshold)\n #plt_classes = [ (1,0.30) , (2,0.20) , (16,0.20) , (17,0.20) , (18,0.40) ]\n \"\"\" The classes of things we actually want to detect (as 
opposed to all\n the classes the frozen TensorFlow graph can detect) are indicated in the \n configuration file along with the detection thresholds. This bit of code\n reverses the dictionary to go from class names (in the configuraiton file)\n to class IDs and then creates a list of tuples pairing each class ID with \n its detection threshold.\"\"\"\n plt_classes = []\n for cls, thresh in zip( config_details.get('object_detection','det_classes').split(',') , \n list(map(float,config_details.get('object_detection','det_thresholds').split(','))) ):\n key = next(key for key, value in list(id_to_label.items()) if value == cls.strip())\n plt_classes.append( (key , thresh) )\n \n # Loop over each image\n with detection_graph.as_default():\n for image_path in file_list:\n \n # Get the file name and extension of the image\n fname = os.path.splitext(os.path.basename(image_path))[0]\n f_ext = os.path.splitext(os.path.basename(image_path))[1]\n \n # Load the image and convert from BGR (CV2) to RGB (numpy and TensorFlow)\n image_np = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)\n \n # Is the image really grayscale?\n is_gs = isgrayscale( image_np )\n \n # Run TensorFlow object detection\n (boxes , scores, classes) = tf_detect(image_np,detection_graph)\n \n # Get the appropriate file prefix (for RGB or grayscale)\n if is_gs:\n res_file_prefix = config_details.get('object_detection','gs_hist_prefix')\n else:\n res_file_prefix = config_details.get('object_detection','rgb_hist_prefix') \n \n # Store the score and class results\n store_results( os.path.join(out_dir,res_file_prefix) , classes , scores)\n \n # Use box drawing function to apply detection threshold criteria\n # and then to draw boxes (if appropriate)\n (object_detected , image_np , top_object) = draw_box2( image_np \n , boxes, scores , classes, id_to_label, plt_classes)\n \n # If an object was detected, save a copy of it (with bounding boxes)\n # in the appropriate folder\n if object_detected:\n if make_plots:\n plt.figure( figsize=(10,7) )\n plt.imshow( image_np )\n plt.show()\n \n # Create the new file by appending _sd to the name, also put it\n # into the correct directory depending on RGB or grayscale\n fname_out = fname + '_sd' + f_ext\n if is_gs:\n out_path = os.path.join(out_dir,go_out_dir,fname_out)\n else:\n out_path = os.path.join(out_dir,od_out_dir,fname_out) \n \n # Write the annotated image to disk\n cv2.imwrite( out_path\n ,cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR) )\n else:\n if make_plots:\n plt.figure( figsize=(10,7) )\n plt.imshow( cv2.putText( image_np \n ,'BELOW_THRESH'\n ,( image_np.shape[1]/2 , image_np.shape[0]/2)\n , cv2.FONT_HERSHEY_SIMPLEX, 2, (255,0,0) ,2 ) )\n plt.show()\n \n # Create symbolic link path based on input file name an whether \n # it's RGB or grayscale.\n fname_out = fname + f_ext\n if is_gs:\n out_path = os.path.join(out_dir,gs_out_dir,fname_out)\n else:\n out_path = os.path.join(out_dir,no_out_dir,fname_out) \n \n # Create the desired symbolic link. Using try/except because\n # there may be duplicate file names. 
Not worth resolving this,\n # so just let the link go to the first file with the offending\n # name.\n try:\n os.symlink(image_path,out_path)\n except:\n print(image_path)", "title": "" }, { "docid": "81c2c265f35cb3f8cfc6195d6f9f15a9", "score": "0.5552921", "text": "def process_batch(self):\r\n for fpath in self.input_files:\r\n # create and set output dir name\r\n orig_fname = os.path.splitext(os.path.basename(fpath))[0]\r\n pip_name = os.path.splitext(os.path.basename(self.pipeline_path))[0]\r\n dir_name = os.path.join(self.out_dir, '_'.join([pip_name,\r\n orig_fname]))\r\n data = [read_image_file(fpath, '', None), None]\r\n self.original_img = data[0]\r\n # process given image with the pipeline\r\n last_cat = None\r\n for cat in self.executed_cats:\r\n cat.process(data)\r\n # reassign results of the prev alg for the next one\r\n data = list(cat.active_algorithm.result.items())\r\n data.sort(key=lambda x: ['img', 'graph'].index(x[0]))\r\n data = [i[1] for i in data]\r\n last_cat = cat\r\n if data[1]:\r\n # draw the graph into the original image\r\n data[0] = _utility.draw_graph(self.original_img, data[1])\r\n # save the results and update the cache if store_image is True\r\n save_fname = self.get_results_fname(fpath, last_cat)\r\n save_path = os.path.join(dir_name, save_fname)\r\n self.save_results(save_path, save_fname, data)", "title": "" }, { "docid": "72eefa6796bb4231200506976af8014d", "score": "0.555241", "text": "def test_all(self):\n if self.phase=='test_all' and self.checkpoint_load():\n print(\" [*] before training, Load SUCCESS \")\n num_sample = 100\n test_files = glob(os.path.join(self.data_dir, 'test', '*'))\n self.attr_names, self.attr_list = attr_extract(self.data_dir)\n if self.n_label>3:\n test_list = []\n test_atr = []\n attr_list = ([self.attr_list[os.path.basename(val)] \n for val in test_files])\n real_atr = preprocess_attr_single(\n self.attr_names, attr_list,self.attr_keys)\n for idx,value in enumerate(real_atr):\n if sum(value[0:3])==1:\n test_list.append(test_files[idx])\n test_atr.append(value)\n print (len(test_list))\n test_list = test_list[:num_sample]\n test_atr = test_atr[:num_sample]\n\n else:\n test_list = test_files[:num_sample]\n attr_list = ([self.attr_list[os.path.basename(val)] \n for val in test_files])\n \n \n # get batch images and labels\n # Only reserve attrs that is listed in attr_keys.\n real_atr = preprocess_attr_single(\n self.attr_names, attr_list, self.attr_keys) \n real_img = preprocess_image(\n test_list, self.image_size, phase='test') # Read images\n \n for idx,img in enumerate(real_img):\n # generate fakeB\n if self.c_method=='Sigmoid':\n num_img = 9\n fake_atr = test_atr[idx]\n fake_atr = np.tile(fake_atr,(num_img,1))\n fake_atr[:3,:3] = np.identity(3) # hair color\n fake_atr[3,3] = 0 if fake_atr[3,3] else 1 #convert gender\n fake_atr[4,4] = 0 if fake_atr[4,4] else 1 #aged\n fake_atr[5,:3] = [1,0,0] #black hair\n fake_atr[5,3] = 0 if fake_atr[5,3] else 1 #convert gender\n fake_atr[6,:3] = [1,0,0] #black hair\n fake_atr[6,4] = 0 if fake_atr[6,4] else 1 #aged hair\n fake_atr[7,3] = 0 if fake_atr[7,3] else 1 #gender + aged\n fake_atr[7,4] = 0 if fake_atr[7,4] else 1\n fake_atr[8,:3] = [1,0,0] #black hair\n fake_atr[8,3] = 0 if fake_atr[8,3] else 1 # gender\n fake_atr[8,4] = 0 if fake_atr[8,4] else 1 # aged\n else:\n fake_atr = np.identity(self.n_label)\n num_img = self.n_label\n\n org_img = img.copy()\n img = np.reshape(img,\n [1,self.image_size,self.image_size,self.image_channel])\n img = np.repeat(img,num_img,axis=0)\n feed = { \n 
self.real_img: img, \n self.real_atr: np.array(real_atr), \n self.fake_atr: np.array(fake_atr) \n }\n fake_img = self.sess.run(self.fake_img_sample, feed_dict = feed)\n fake_img = list(fake_img)\n # save samples\n file_name = os.path.basename(test_list[idx])\n test_file = os.path.join(self.test_dir, file_name)\n img_list = [org_img]\n img_list = img_list+fake_img\n save_images_test(img_list, self.image_size, test_file, \n num=1,col=num_img+1)\n\n else:\n print(\" [!] before training, no need to Load \")", "title": "" }, { "docid": "d72f396988f572af5dce58b225fbf1be", "score": "0.5550265", "text": "def gen_test_output_nir(sess, logits, keep_prob, image_pl, data_folder, image_shape):\n i = 0\n for folder in data_folder:\n print(folder)\n for image_file in glob(os.path.join(folder, '*color*.png')): # previously 'data*.png'\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n _, filename = os.path.split(image_file)\n fd_id = filename[0]\n img_id = image_file[-8:]\n nir = cv2.imread(folder+\"/\"+fd_id+\"_nir_\"+img_id)\n nir = scipy.misc.imresize(nir, image_shape)\n\n im_softmax = sess.run(\n [tf.nn.softmax(logits)],\n {keep_prob: 1.0, image_pl: [image]})\n im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 0, 255, 127]]))\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n\n result = np.dot(segmentation, np.array([[0, 0, 255, 255]]))\n result = scipy.misc.toimage(result, mode=\"RGBA\")\n\n street_im = scipy.misc.toimage(image)\n street_im.paste(mask, box=None, mask=mask)\n\n base_name = os.path.basename(image_file)\n base_name = str(i)+\"_\"+base_name\n yield base_name, np.array(street_im), result\n i += 1", "title": "" }, { "docid": "65f57db8051131d09f35f6933a311b71", "score": "0.554996", "text": "def kfolds(inpath, n_folds):\n # Read in all lines at once and shuffle\n lines = codecs.open(inpath, 'rU', 'utf_8').readlines()\n random.seed(0)\n random.shuffle(lines)\n\n # Open output files\n assert '.' 
in inpath, \"Filenames given need to be in name.extension format\"\n dot_idx = inpath.rfind('.')\n base, ext = inpath[:dot_idx], inpath[dot_idx + 1:]\n train_paths = ['%s_train_%d.%s' % (base, i, ext) for i in range(n_folds)]\n test_paths = ['%s_test_%d.%s' % (base, i, ext) for i in range(n_folds)]\n train_outs = [codecs.open(train_path, 'w', 'utf-8') for train_path in train_paths]\n test_outs = [codecs.open(test_path, 'w', 'utf-8') for test_path in test_paths]\n test_idx = 0\n for line in lines:\n test_idx = (test_idx + 1) % n_folds\n # Write it to test for its index\n print >> test_outs[test_idx], line.rstrip() \n # Write it to train for all other files\n for train_idx, train_file in enumerate(train_outs):\n if train_idx != test_idx:\n print >> train_file, line.rstrip()\n\n # Close up\n for train_file in train_outs:\n train_file.close()\n for test_file in test_outs:\n test_file.close()\n\n return (test_paths, train_paths)", "title": "" }, { "docid": "aa7ad556c9da8938dfe5d424a3b4cbcd", "score": "0.5544818", "text": "def load_data():\n dirname = 'cifar-10-batches-py'\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n path = get_file(\n dirname,\n origin=origin,\n untar=True,\n file_hash=\n '6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce')\n\n num_train_samples = 50000\n\n x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train[(i - 1) * 10000:i * 10000, :, :, :],\n y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n if backend.image_data_format() == 'channels_last':\n x_train = x_train.transpose(0, 2, 3, 1)\n x_test = x_test.transpose(0, 2, 3, 1)\n\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n\n return (x_train, y_train), (x_test, y_test)", "title": "" }, { "docid": "81d2edf85425b778d01139bea6d42c48", "score": "0.5542819", "text": "def read_all_images(path_to_data):\n # path to the binary train file with image data\n DATA_PATH_train = os.path.join(path_to_data,'train_X.bin')\n\n # path to the binary train file with labels\n LABEL_PATH_train = os.path.join(path_to_data,'train_y.bin')\n\n # path to the binary test file with image data\n DATA_PATH_test = os.path.join(path_to_data,'test_X.bin')\n\n # path to the binary test file with labels\n LABEL_PATH_test = os.path.join(path_to_data,'test_y.bin')\n \n with open(LABEL_PATH_train, 'rb') as f:\n rawlabels_train = np.fromfile(f, dtype=np.uint8)\n rawlabels_train = rawlabels_train -1\n labels_train = np.zeros((len(rawlabels_train),10))\n labels_train[np.arange(len(rawlabels_train)),rawlabels_train] = 1\n \n with open(LABEL_PATH_test, 'rb') as f:\n rawlabels_test = np.fromfile(f, dtype=np.uint8)\n rawlabels_test = rawlabels_test -1\n labels_test = np.zeros((len(rawlabels_test),10))\n labels_test[np.arange(len(rawlabels_test)),rawlabels_test] = 1\n \n labels = np.concatenate((labels_train,labels_test))\n raw_labels = np.argmax(labels,axis = 1)\n \n with open(DATA_PATH_train, 'rb') as f:\n # read whole file in uint8 chunks\n everything = np.fromfile(f, dtype=np.uint8)\n images_train = np.reshape(everything, (-1, 3, 96, 96))\n images_train = np.transpose(images_train, (0, 3, 2, 1))\n with open(DATA_PATH_test, 'rb') as f:\n # read 
whole file in uint8 chunks\n everything = np.fromfile(f, dtype=np.uint8)\n images_test = np.reshape(everything, (-1, 3, 96, 96))\n images_test = np.transpose(images_test, (0, 3, 2, 1))\n images = np.concatenate((images_train,images_test))\n images = images/255.0\n \n print(\"shape of images: {}\".format(images.shape))\n print(\"shape of labels: {}\".format(labels.shape))\n print(\"shape of raw labels: {}\".format(raw_labels.shape))\n return(images, labels, raw_labels)", "title": "" }, { "docid": "28b76bdd048392d79bfbb7a61b624cb0", "score": "0.55360675", "text": "def make_folds():\r\n files = np.array([basename(f) for f in glob.glob(\"data/preprocessed/train/ResNet-0.5-400/*.npy\")])\r\n labels = []\r\n classes = np.array([0, 1, 2, 3])\r\n for f in files:\r\n lb = np.array([f.startswith(\"n\"),\r\n f.startswith(\"b\"),\r\n f.startswith(\"is\"),\r\n f.startswith(\"iv\")])\r\n labels.append(classes[np.argmax(lb)])\r\n labels = np.array(labels)\r\n\r\n folds = []\r\n skf = StratifiedKFold(n_splits=10, shuffle=True)\r\n for train_index, test_index in skf.split(files, labels):\r\n f_train, f_test = files[train_index], files[test_index]\r\n y_train, y_test = labels[train_index], labels[test_index]\r\n folds.append({\"train\": {\"x\": f_train, \"y\": y_train}, \"test\": {\"x\": f_test, \"y\": y_test}})\r\n\r\n with open(\"data/folds-10.pkl\", \"wb\") as f:\r\n pickle.dump(folds, f)", "title": "" }, { "docid": "e0f57d5754e8dc8f115d9442c1cc205e", "score": "0.5534", "text": "def generate_images(generator_model, output_dir, iter, latent_dim, nb_row=5, nb_col=5):\n test_image_stack = generator_model.predict(np.random.randn(nb_row * nb_col, latent_dim))\n test_image_stack = (test_image_stack * 127.5) + 127.5\n test_image_stack = np.round(test_image_stack).astype(np.uint8)\n\n arr = test_image_stack\n _, C, H, W = arr.shape\n arr = np.reshape(arr, (nb_row, nb_col, C, H, W)) # rc * C * H * W -> r * c * C * H * W\n arr = arr.transpose(0, 3, 1, 4, 2)\n arr = np.reshape(arr, (nb_row * H, nb_col * W, C))\n\n tiled_output = Image.fromarray(arr, mode='RGB')\n outfile = os.path.join(output_dir, 'iter_%08d.png' % iter)\n tiled_output.save(outfile)", "title": "" }, { "docid": "121f61fed753f77d41495b8484a843fd", "score": "0.5527706", "text": "def _process_image_files(name, img_files, lbl_files, out_folder,\n num_shards, num_threads,\n dltile_from_filename, png_to_jpg, store_as_array):\n assert len(img_files) == len(lbl_files)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(img_files), num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = ImageCoder()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (coder, thread_index, ranges,\n name, img_files, lbl_files, out_folder,\n num_shards,\n dltile_from_filename, png_to_jpg, store_as_array)\n t = threading.Thread(target=_process_image_files_worker, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' 
%\n (datetime.now(), len(img_files)))\n sys.stdout.flush()", "title": "" }, { "docid": "d81eb6f640069ae4016dc8231ae2101b", "score": "0.552713", "text": "def Image_benchmark_BRANCH(model_identite,main_network:str,type_branch:str,ponderation_features:list, nombre_class:int,border:int,filtres:int,filtres_branch:int, nombre_kernel:int,save_rep:str,root_folder:str,BN_init:bool,BN_fin:bool,DOG_init:bool, DOG_fin:bool, sigma_noise_blur:float,FG_sigma_init:int,FG_sigma_puissance:int,benchmark_folder:str, cbcr_sty:bool, w_h,w_v, w_h_s,w_v_s,style,clusters,bins): \n rep_images = os.path.join(benchmark_folder,\"BENCHMARK_VISUEL/Rapports\")\n liste_images = [x for x in os.listdir(rep_images) if x.endswith(\".png\")]\n x_m,y_m,x_max,y_max=300,300,900,900\n R=4\n tf.keras.backend.clear_session()\n\n # Change Input size\n if main_network==\"SR_EDSR\":\n new_model = model_identite\n else:\n new_model = MAIN_network_none(nombre_class=nombre_class,filtres=filtres, ponderation_features=ponderation_features,kernel=nombre_kernel,w_h=w_h,w_v=w_v, BN_init=BN_init, BN_fin=BN_fin, DOG_init=DOG_init, DOG_fin=DOG_fin)\n if type_branch==\"Stycbcr\":\n if cbcr_sty:\n new_model= STy_branch_col_none(model=new_model, main_network=main_network,filtres_branch=filtres_branch,border=border,DOG_init=DOG_init, DOG_fin=DOG_fin, BN_init=BN_init, BN_fin=BN_fin,nombre_kernel=nombre_kernel,w_h_s=w_h_s,w_v_s=w_v_s)\n else:\n new_model= STy_branch_none(model=new_model, main_network=main_network,filtres_branch=filtres_branch,border=border,DOG_init=DOG_init, DOG_fin=DOG_fin, BN_init=BN_init, BN_fin=BN_fin,nombre_kernel=nombre_kernel,w_h_s=w_h_s,w_v_s=w_v_s)\n \n if type_branch==\"Stcol\":\n new_model = STrcol_branch_none(model=new_model,main_network=main_network,filtres_branch=filtres_branch, border=border,DOG_init=DOG_init, DOG_fin=DOG_fin, BN_init=BN_init, BN_fin=BN_fin,nombre_kernel=nombre_kernel,w_h_s=w_h_s,w_v_s=w_v_s)\n \n if type_branch==\"St3\":\n new_model = ST3_branch_none(model=new_model,main_network=main_network,filtres_branch=filtres_branch, border=border,DOG_init=DOG_init, DOG_fin=DOG_fin, BN_init=BN_init, BN_fin=BN_fin,nombre_kernel=nombre_kernel,w_h_s=w_h_s,w_v_s=w_v_s)\n \n new_model =copy_parameters(model=new_model,model_patch=model_identite)\n nombre_image=len(liste_images)\n for img in liste_images :\n print(\"Traitement de l'image: \"+str(img))\n \n # Data Preparation & Input\n img_array_LR = Ouverture_img(os.path.join(os.path.join(os.path.join(os.path.join(rep_images,\"LR\"),img.replace(\".png\",\"_LR.png\")))),1)[0]#.astype(np.float64)\n img_array_LR=img_array_LR[int(x_m/R):int(x_max/R),int(y_m/R):int(y_max/R),:]/256.\n img_array_HR=Ouverture_img(os.path.join(os.path.join(os.path.join(rep_images,img))),1)[0]#.astype(np.float64)\n img_array_HR=img_array_HR[x_m:x_max,y_m:y_max,:]/256.\n \n if main_network==\"SR\":\n INPUT = upsampling(img_array_LR,R) \n elif main_network==\"DENOISING\":\n INPUT = img_array_HR.copy()\n INPUT[:,:,0]+=np.random.normal(0,0.2,(img_array_HR.shape[0],img_array_HR.shape[1]))\n elif main_network==\"BLURRING\":\n INPUT = img_array_HR.copy()\n INPUT_y = gaussian_filter(INPUT[:,:,0], sigma=sigma_noise_blur)\n INPUT[:,:,0]=INPUT_y\n elif main_network==\"SR_EDSR\":\n INPUT=img_array_LR.copy()*255.\n INPUT=np.expand_dims(INPUT,axis=0)\n TRUE=np.expand_dims(img_array_HR,axis=0)\n \n # Extraction des features et Calculs inférence sur l'image\n if type_branch==\"Stycbcr\":\n STm_i_y,STm_i_cb,STm_i_cr, STm_i_yDoG,STm_i_cbDoG,STm_i_crDoG, STm_i_yDoGBn,STm_i_cbDoGBn,STm_i_crDoGBn 
,STm_o_y,STm_o_yDoG,STm_o_yDoGBn,STm_o_yFinal,STm_o_yFinal_residu = extract_layers__STr_y(new_model,BN_init,BN_fin,DOG_init,DOG_fin)\n prediction, STf_i_yDoG,STf_i_cbDoG,STf_i_crDoG,STf_i_yDoGBn,STf_i_cbDoGBn,STf_i_crDoGBn,STf_o_y,STf_o_yDoG,STf_o_yDoGBn,STf_o_yFinal = compute_features_Sty(STm_i_y,STm_i_cb,STm_i_cr,STm_i_yDoG,STm_i_cbDoG,STm_i_crDoG,STm_i_yDoGBn,STm_i_cbDoGBn,STm_i_crDoGBn,STm_o_y,STm_o_yDoG,STm_o_yDoGBn,STm_o_yFinal,DOG_init,DOG_fin,BN_init,BN_fin,new_model,INPUT ) \n \n if type_branch==\"Stcol\":\n COLm_i_y,COLm_i_cb,COLm_i_cr, COLm_i_yDoG,COLm_i_cbDoG,COLm_i_crDoG, COLm_i_yDoGBn,COLm_i_cbDoGBn,COLm_i_crDoGBn ,COLm_o_cb,COLm_o_cbDoG,COLm_o_cbDoGBn,COLm_o_cbFinal,COLm_o_cbFinalresidu,COLm_o_cr,COLm_o_crDoG,COLm_o_crDoGBn,COLm_o_crFinal,COLm_o_crFinalresidu=extract_layers_STr_col(new_model,BN_init,BN_fin,DOG_init,DOG_fin) \n prediction ,COLf_o_cbFinalresidu,COLf_o_crFinalresidu=compute_features_Stcol(COLm_i_y,COLm_i_cb,COLm_i_cr, COLm_i_yDoG,COLm_i_cbDoG,COLm_i_crDoG, COLm_i_yDoGBn,COLm_i_cbDoGBn,COLm_i_crDoGBn ,COLm_o_cb,COLm_o_cbDoG,COLm_o_cbDoGBn,COLm_o_cbFinal,COLm_o_cbFinalresidu,COLm_o_cr,COLm_o_crDoG,COLm_o_crDoGBn,COLm_o_crFinal,COLm_o_crFinalresidu,DOG_init,DOG_fin,BN_init,BN_fin,new_model,INPUT )\n\n if type_branch ==\"St3\":\n ST3m_i_y,ST3m_i_cb,ST3m_i_cr, ST3m_i_yDoG,ST3m_i_cbDoG,ST3m_i_crDoG, ST3m_i_yDoGBn,ST3m_i_cbDoGBn,ST3m_i_crDoGBn ,ST3m_o_y,ST3m_o_cb,ST3m_o_yDoG,ST3m_o_cbDoG,ST3m_o_cbDoGBn,ST3m_o_yDoGBn,ST3m_o_cbFinal,ST3m_o_yFinal,ST3m_o_cbFinalresidu,ST3m_o_yFinalresidu,ST3m_o_cr,ST3m_o_crDoG,ST3m_o_crDoGBn,ST3m_o_crFinal,ST3m_o_crFinalresidu = extract_layers_STr3(new_model,BN_init,BN_fin,DOG_init,DOG_fin) \n prediction ,ST3f_o_yFinal_residu,ST3f_o_cbFinal_residu,ST3f_o_crFinal_residu= compute_features_St3(ST3m_i_y,ST3m_i_cb,ST3m_i_cr, ST3m_i_yDoG,ST3m_i_cbDoG,ST3m_i_crDoG, ST3m_i_yDoGBn,ST3m_i_cbDoGBn,ST3m_i_crDoGBn ,ST3m_o_y,ST3m_o_cb,ST3m_o_yDoG,ST3m_o_cbDoG,ST3m_o_cbDoGBn,ST3m_o_yDoGBn,ST3m_o_cbFinal,ST3m_o_yFinal,ST3m_o_cbFinalresidu,ST3m_o_yFinalresidu,ST3m_o_cr,ST3m_o_crDoG,ST3m_o_crDoGBn,ST3m_o_crFinal,ST3m_o_crFinalresidu,DOG_init,DOG_fin,BN_init,BN_fin,new_model,INPUT ) \n \n y,cb,cr =RGB2Ycbcr_numpy(prediction[0,:,:,0],prediction[0,:,:,1],prediction[0,:,:,2])\n prediction_ycbcr= np.stack([y,cb,cr],axis=-1)\n \n # FOLDER & SAVING\n rep_img_save = os.path.join(os.path.join(os.path.join(save_rep,\"rapports_benchmark\"),str(img)))\n ensure_dir(rep_img_save)\n \n # TENSORS to display\n if main_network==\"SR_EDSR\": # adapting report if main model is already trained\n list_tensor_input,list_tensor_intermediate_features= [TRUE[0,:,:,0],TRUE[0,:,:,:],prediction_ycbcr[:,:,0],prediction_ycbcr,style],[]\n name_tensor_input,name_tensor_intermediate_features=[\"True_y\",\"True\",\"Output_y\",\"Output\",\"style_patch\"],[]\n INPUT=INPUT/255.\n else:\n if type_branch==\"Stycbcr\":\n tbadded=TRUE[0,border:INPUT.shape[1]-border,border:INPUT.shape[2]-border,0]-INPUT[0,border:INPUT.shape[1]-border,border:INPUT.shape[2]-border,0]\n added=prediction_ycbcr[:,:,0].copy()-INPUT[0,border:INPUT.shape[1]-border,border:INPUT.shape[2]-border,0]\n \n \n style_02,style_05,style_08 = INPUT[0,border:INPUT.shape[1]-border,border:INPUT.shape[2]-border,0].copy(),INPUT[0,border:INPUT.shape[1]-border,border:INPUT.shape[2]-border,0].copy(),INPUT[0,border:INPUT.shape[1]-border,border:INPUT.shape[2]-border,0].copy()\n style_02 += 0.2*added.copy()\n style_05 += 0.5*added.copy()\n style_08 += 0.8*added.copy()\n\n list_tensor_input= [INPUT[0,:,:,0],INPUT[0,:,:,:], 
TRUE[0,:,:,0],TRUE[0,:,:,:],y,style_02,style_05,style_08,prediction_ycbcr,style]\n list_tensor_intermediate_features=[STf_o_y[0][:,:,0],STf_o_yDoG[0][:,:,0],tbadded,added]\n \n name_tensor_input=[\"Input_y\",\"Input\",\"True_y\",\"True\",\"Output_y\",\"Output(0.2)\",\"Output(0.5)\",\"Output(0.8)\",\"Output\",'style_patch']\n name_tensor_intermediate_features=[\"Output_BRANCH\",\"Output_BRANCH_DoG\",'To be Added','Added']\n \n if type_branch==\"Stcol\":\n # hist _ pred\n cbcr=np.stack([prediction_ycbcr[:,:,1],prediction_ycbcr[:,:,2]],-1)\n cbcr = np.reshape(cbcr,(cbcr.shape[0]*cbcr.shape[1],2)).astype(np.float32)\n hist_out = histogram_2d(cbcr, clusters , cbcr.shape[0],cbcr.shape[1]) \n Save_Hist_1d_tf(hist_out,os.path.join(rep_img_save,str(img.replace(\".png\",\"\"))+\"__HIST1D_out__.png\"),bins)\n Save_Hist_2d_tf(hist_out,os.path.join(rep_img_save,str(img.replace(\".png\",\"\"))+\"__HIST2D_out__.png\"),bins)\n \n # hist _ true\n cbcr=np.stack([TRUE[:,:,:,1],TRUE[:,:,:,2]],-1)\n cbcr = np.reshape(cbcr,(cbcr.shape[1]*cbcr.shape[2],2)).astype(np.float32)\n hist_out = histogram_2d(cbcr, clusters , cbcr.shape[0],cbcr.shape[1]) \n Save_Hist_1d_tf(hist_out,os.path.join(rep_img_save,str(img.replace(\".png\",\"\"))+\"__HIST1D_true__.png\"),bins)\n Save_Hist_2d_tf(hist_out,os.path.join(rep_img_save,str(img.replace(\".png\",\"\"))+\"__HIST2D_true__.png\"),bins)\n \n \n list_tensor_input= [INPUT[0,:,:,0],INPUT[0,:,:,:], TRUE[0,:,:,0],TRUE[0,:,:,:],prediction_ycbcr,style]\n list_tensor_intermediate_features=[COLf_o_cbFinalresidu[0][0,:,:],COLf_o_crFinalresidu[0][0,:,:]]\n \n name_tensor_input,name_tensor_intermediate_features=[\"Input_y\",\"Input\",\"True_y\",\"True\",\"Output\",'style_patch'],[\"Output_cb\",\"Output_cr\"]\n \n if type_branch==\"St3\":\n list_tensor_input= [INPUT[0,:,:,0],INPUT[0,:,:,:], TRUE[0,:,:,0],TRUE[0,:,:,:],prediction_ycbcr,style]\n list_tensor_intermediate_features=[ST3f_o_yFinal_residu[0,:,:,0],ST3f_o_cbFinal_residu[0,:,:,0],ST3f_o_crFinal_residu[0,:,:,0]]\n \n name_tensor_input,name_tensor_intermediate_features=[\"Input_y\",\"Input\",\"True_y\",\"True\",\"Output\",'style_patch'],[\"Output_y\",\"Output_cb\",\"Output_cr\"]\n \n # PDF Report\n Benchmark_report(200,300,350,list_tensor_input, list_tensor_intermediate_features, name_tensor_input, name_tensor_intermediate_features,\n type_branch=type_branch,save_rep=rep_img_save,taille=TRUE.shape[1],nombre_class=nombre_class,root=root_folder,border=border, nom = img)\n\n # PNG files\n Save_tensor(list_tensor=[INPUT,TRUE,np.expand_dims(prediction_ycbcr,axis=0)],list_name=[\"input\",\"true\",\"prediction\"],rep_save=rep_img_save)\n \n if type_branch==\"Stycbcr\":\n Save_tensor(list_tensor=[STf_o_yFinal],list_name=[\"Output_y\"],rep_save=rep_img_save)\n \n if type_branch==\"Stcol\":\n Save_tensor(list_tensor=[COLf_o_cbFinalresidu,COLf_o_crFinalresidu],list_name=[\"Branch_output_cb_stcol\",\"Branch_output_cr_stcol\"],rep_save=rep_img_save)\n \n if type_branch ==\"St3\":\n Save_tensor(list_tensor=[ST3f_o_yFinal_residu,ST3f_o_cbFinal_residu,ST3f_o_crFinal_residu],list_name=[\"Branch_output_y_st3\",\"Branch_output_cb_st3\",\"Branch_output_cr_st3\"],rep_save=rep_img_save)", "title": "" }, { "docid": "d5cea8596ecb0b5aa60b0462234d371e", "score": "0.5525526", "text": "def run_generative_model(learning_rate=0.1, \n dataset='mnist.pkl.gz',\n n_epochs=5,\n batch_size=20, \n display_step=1000,\n n_visible=28*28, # MNIST Pixels\n n_hidden=500,\n corruption_level=0.3, # DA\n contraction_level=0.1, # CA\n k=5, # RBM\n chains=10, # RBM\n 
output_folder='Generative_plots',\n img_shape=(28,28), # image shape of MNIST for tile_raster_images\n model_name='AutoEncoder',\n \n):\n\n # numpy random generator\n rng = np.random.RandomState(123)\n # create a Theano random generator that gives symbolic random values\n theano_rng = RandomStreams(rng.randint(2 ** 30))\n\n if not os.path.isdir(output_folder):\n os.makedirs(output_folder)\n os.chdir(output_folder)\n\n #############\n # Load Data #\n #############\n datasets = load_data(dataset)\n train_set_x, train_set_y = datasets[0]\n # valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n ###################################\n # Calculate number of Minibatches #\n ###################################\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size\n # n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size\n\n ############################################\n # allocate symbolic variables for the data #\n ############################################\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n\n x = T.matrix('x') # the data is presented as rasterized images\n\n ###############\n # BUILD MODEL #\n ###############\n print('... building the model')\n\n if model_name == 'AutoEncoder':\n model = AutoEncoder(\n numpy_rng=rng,\n theano_rng=theano_rng,\n input=x,\n n_visible=n_visible,\n n_hidden=n_hidden\n )\n elif model_name == 'DA':\n model = DA(\n numpy_rng=rng,\n theano_rng=theano_rng,\n input=x,\n n_visible=n_visible,\n n_hidden=n_hidden\n )\n elif model_name == 'CA':\n model = CA(\n numpy_rng=rng,\n theano_rng=theano_rng,\n input=x,\n n_visible=n_visible,\n n_hidden=n_hidden,\n batch_size=batch_size\n )\n elif model_name == 'RBM':\n model = RBM(\n input=x,\n numpy_rng=rng,\n theano_rng=theano_rng,\n n_visible=n_visible,\n n_hidden=n_hidden\n )\n\n #####################\n # Training Function #\n #####################\n # COST & UPDATES\n\n if model_name == 'AutoEncoder':\n cost, updates = model.get_cost_updates(\n learning_rate=learning_rate\n )\n\n elif model_name == 'DA':\n cost, updates = model.get_cost_updates(\n corruption_level=corruption_level,\n learning_rate=learning_rate\n )\n \n elif model_name == 'CA': \n cost, updates = model.get_cost_updates(\n contraction_level=contraction_level,\n learning_rate=learning_rate\n )\n\n elif model_name == 'RBM':\n # initialize storage for the persistent chain (state = hidden layer of chain)\n persistent_chain = theano.shared(np.zeros(shape=(batch_size, model.n_hidden),\n dtype=theano.config.floatX),\n borrow=True)\n # get the cost and the gradient corresponding to one step of CD-15\n cost, updates = model.get_cost_updates(learning_rate=learning_rate,\n persistent=persistent_chain,\n k=k)\n\n\n # TRAINING FUNCTION\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n \n ###############\n # TRAIN MODEL #\n ###############\n print('... 
training')\n\n plotting_time = 0.\n\n start_time = timeit.default_timer()\n\n # go through training epochs\n for epoch in range(n_epochs):\n minibatch_avg_cost = []\n for minibatch_index in range(n_train_batches):\n\n minibatch_avg_cost.append(train_model(minibatch_index))\n\n # iteration number\n iter = epoch * n_train_batches + minibatch_index\n if iter % display_step == 0:\n print('training @ iter = ', iter)\n\n print('Training epoch %d, cost ' % epoch, np.mean(minibatch_avg_cost, dtype='float64'))\n\n # Plot filters after each training epoch\n plotting_start = timeit.default_timer()\n # Construct image from the weight matrix\n image = Image.fromarray(\n tile_raster_images(X=model.W.get_value(borrow=True).T,\n img_shape=img_shape, tile_shape=(10, 10),\n tile_spacing=(1, 1)))\n\n image.save('filters_at_epoch_%i.png' % epoch)\n plotting_stop = timeit.default_timer()\n plotting_time += (plotting_stop - plotting_start)\n\n end_time = timeit.default_timer()\n\n pretraining_time = (end_time - start_time) - plotting_time\n print ('Training took %f minutes' % (pretraining_time / 60.))\n\n print(('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)\n \n image = Image.fromarray(\n tile_raster_images(X=model.W.get_value(borrow=True).T,\n img_shape=img_shape, tile_shape=(10, 10),\n tile_spacing=(1, 1)))\n \n image.save('trained_filters.png')\n\n #################################\n # Sampling from the Model #\n #################################\n #if model_name == 'RBM':\n # sample_RBM(model=model, test_set_x=test_set_x, chains=20)\n\n \n ####################\n # Change Directory #\n ####################\n os.chdir('../')", "title": "" } ]
3e91058010205107203c86ac299054e1
Remove small Poly series coefficients.
[ { "docid": "b3b43851ecc1c56368879bc4395bc2ab", "score": "0.0", "text": "def trimseq(seq):\n if len(seq) == 0 or seq[-1] != 0:\n return seq\n else:\n for i in range(len(seq) - 1, -1, -1):\n if seq[i] != 0:\n break\n return seq[:i+1]", "title": "" } ]
[ { "docid": "8b9ee84b793ab334e701ba720865622b", "score": "0.6996968", "text": "def removepiston(self):\n\t\tZ = self.__coefficients__\n\t\tZ[0] = 0\n\t\treturn Z", "title": "" }, { "docid": "9e68ca731d7a246e64c0cc1333dab693", "score": "0.6676961", "text": "def removecoma(self):\n\t\tcoma = [7,8,16,17,29,30]\n\t\tZ = self.__coefficients__\n\t\tfor i in coma:\n\t\t\tZ[i-1] = 0\n\t\treturn Z", "title": "" }, { "docid": "62a2dd9ab9bb4a49b058095a621b49b0", "score": "0.5866291", "text": "def testnocoeffs(self):", "title": "" }, { "docid": "1799ef761d3dc210d5d495096a0c1ec7", "score": "0.5852681", "text": "def clean_coeff(self):\n for cur_axis in range(self.coeff.ndim):\n change = True\n while change:\n change = False\n if self.coeff.shape[cur_axis] == 1:\n continue\n slices = list()\n for i,degree in enumerate(self.coeff.shape):\n if cur_axis == i:\n s = slice(degree-1,degree)\n else:\n s = slice(0,degree)\n slices.append(s)\n if np.sum(abs(self.coeff[tuple(slices)])) == 0:\n self.coeff = np.delete(self.coeff,-1,axis=cur_axis)\n change = True", "title": "" }, { "docid": "d8f8ba4040e882306c270474f85d1053", "score": "0.58194953", "text": "def new_points(poly):\n return unique([pt for pt in concat_map(contiguous, poly) if pt not in poly])", "title": "" }, { "docid": "86af28596f3e9bb9d9ea1ff31f337f80", "score": "0.575655", "text": "def __sub__(self, p):\n\n new_poly = list\n\tif len(self.coeff) <= len(p.coeff):\n\t\tnew_poly = p.coeff[:]\n\t\tfor i in range(len(self.coeff)):\n\t\t\tnew_poly[i] = p.coeff[i] - self.coeff[i]\n\telse:\n\t\tnew_poly = self.coeff[:]\n\t\tfor i in range(len(p.coeff)):\n\t\t\tnew_poly[i] = self.coeff[i] - p.coeff[i]", "title": "" }, { "docid": "cb47c64a19b3fd4b88483aceb509ff68", "score": "0.57272583", "text": "def new_points(poly):\n ipdb.set_trace()\n return unique([pt for pt in concat_map(contiguous, poly) if pt not in poly])", "title": "" }, { "docid": "c4fa3f5f4650570c614d5214d9e03fa0", "score": "0.56002486", "text": "def poly(cs, x):\n pass", "title": "" }, { "docid": "e4a407249b6bda152b62a74898e72051", "score": "0.5564687", "text": "def truncate(self):\n sortedindex = np.argsort(np.abs(self.coeffs))[::-1]\n Ncoeff = self.coeffs.shape[-1]\n cutoff = np.int(np.round(Ncoeff*self.threshold/100.))\n \n print \"Keeping %2.0f %% (N=%s) of the biggest coefficients\"%(self.threshold,cutoff)\n\n self.coeffs_trunc = self.coeffs.copy() # copy of all coeff\n self.coeffs_trunc[sortedindex[cutoff:]] = 0 # put coeff below threshold to 0", "title": "" }, { "docid": "e01c36a15d924bfdd16e8c7f51a9b774", "score": "0.5554427", "text": "def _remove_intercept_patsy(terms):\n from patsy.desc import INTERCEPT\n if INTERCEPT in terms:\n terms.remove(INTERCEPT)\n return terms", "title": "" }, { "docid": "2e01bcd1fc80dedc3dcd66e3c1c05b47", "score": "0.55157685", "text": "def remove_linear(x, xp):\n if x.shape != xp.shape:\n raise ValueError(\"inputs are not same dimension\")\n\n if x.ndim == 1:\n A = np.vstack([xp, np.ones_like(xp)]).T # Data matrix for least-squares\n slope_intercept = la.lstsq(A, x)[0] # lstsq returns other stuff too\n newxp = np.dot(A, slope_intercept)\n return newxp\n else:\n return np.hstack(map(remove_linear, x, xp)).reshape(xp.shape)", "title": "" }, { "docid": "48c74799da702b4bdeb9e6838debe29f", "score": "0.5515094", "text": "def remove_spikes(poly,threshold=0.01):\r\n line_ring = poly.exterior_ring\r\n spike_indecies = spike_ring_indecies(line_ring,threshold=threshold)\r\n if( spike_indecies ):\r\n for i,org_index in enumerate(spike_indecies):\r\n if(org_index==0): # 
special case, must remove first and last point, and add end point that overlaps new first point\r\n # get the list of points\r\n pnts = list(line_ring.coords)\r\n # remove the first point\r\n pnts.remove(pnts[0])\r\n # remove the last point\r\n pnts.remove(pnts[-1])\r\n # append a copy of the new first point (old second point) onto the end so it makes a closed ring\r\n pnts.append(pnts[0])\r\n # replace the old line ring\r\n line_ring = LinearRing(pnts)\r\n else:\r\n line_ring.remove(line_ring.coords[org_index])\r\n poly.exterior_ring = line_ring\r\n return poly", "title": "" }, { "docid": "84f409ec92c4856e51fa8c67502ebe1c", "score": "0.549981", "text": "def remove_small_components(self, minlen):\n for cc in filter(lambda c: sum([self.segment(sn).length for sn in c]) \\\n < minlen, self.connected_components()):\n for s in cc:\n self.rm(s)", "title": "" }, { "docid": "4bac8f54a229849be354e482f64aa14f", "score": "0.54815555", "text": "def Simplify(poly, epsilon=0.5):\n simplified = Polygon(poly).simplify(epsilon)\n line_ring = list(simplified.array_interface_base['data'])\n points = zip(line_ring[::2], line_ring[1::2])\n return list([int(p), int(q)] for p,q in points)", "title": "" }, { "docid": "cc8fab269eab98d8f284cf1ebd2e7003", "score": "0.54428977", "text": "def removeMin(self):", "title": "" }, { "docid": "2151162aeb578368fbcd90821b8bfc13", "score": "0.5386837", "text": "def SetFixRemovePCurveMode(self, *args):\n return _ShapeFix.ShapeFix_Wire_SetFixRemovePCurveMode(self, *args)", "title": "" }, { "docid": "b24048efb2f5c98d9b32491b6b520c4e", "score": "0.5357831", "text": "def removeextra(scale):\n if len(scale) == 8:\n del scale[7]\n return scale", "title": "" }, { "docid": "b782485e094fe74445afcc1f9487e720", "score": "0.5351715", "text": "def stanley_symm_poly_weight(self,w):\n # The algorithm=\"delete\" is a workaround when the set of\n # vertices is empty, in which case subgraph tries another\n # method which turns out to currently fail with Dynkin diagrams\n return DiGraph(DynkinDiagram(w.parent().cartan_type())).subgraph(set(w.reduced_word()), algorithm=\"delete\").connected_components_number()", "title": "" }, { "docid": "a1e5fd069faabd1a2dda5c9449fde769", "score": "0.53355896", "text": "def SymPoly_lead_term_eliminate(mf, Xv, Lm, Pv, Sv):\n # Initialization of the size parameter\n sz=Integer(len(Xv))\n # Initialization of the vector of unknowns\n Y=var_list('y',1+len(Lm))\n #print('Y=',Y)\n # Initialization of the total degree\n td=sum(mf.degree(v) for v in Xv)\n # Initialization of the list of sum of powers\n Lp=[1]+[sum(Xv[i]^j for i in rg(sz)) for j in rg(1,1+td)]\n # Initialization of the polynomial\n Fn=expand(sum(Y[i]*prod(Lp[Lm[i].degree(Xv[u])] for u in rg(sz)) for i in rg(len(Lm)))+Y[len(Lm)]*prod(Lp[mf.degree(Xv[u])] for u in rg(sz)))\n #print('Fn=',Fn)\n G=sum(Y[i]*prod(Pv[Lm[i].degree(Xv[u])] for u in rg(sz)) for i in rg(len(Lm)))+Y[len(Lm)]*prod(Pv[mf.degree(Xv[u])] for u in rg(sz))\n # Obtaining the system of linear constraints\n CnstrLst=[Fn.coefficient(mnm) for mnm in Lm]+[Fn.coefficient(mf)-1]\n #print('CnstrLst=',CnstrLst)\n # Initialization of the total degree\n [A,b]=ConstraintFormatorIVHM(CnstrLst,Y)\n #A.printHM()\n Sln=linear_solverHM(A, b, HM(A.n(1),1,Y), HM(A.n(1),1,Y))\n # Performing the reduction in degrees\n for d in rg(td):\n if (td-d)>sz:\n G=fast_reduce_no_expand(G,[Pv[td-d]],[-sum((-1)^k*Sv[k]*Pv[(td-d)-k] for k in rg(1,sz+1))])\n # Initialization of the list derived from Girard's identities\n GiL=[Pv[1]==Sv[1]]\n for bnd in rg(2,sz+1):\n eL=[l for 
l in List_of_Integers([1+floor(bnd/i) for i in rg(1,bnd+1)]) if bnd==sum(l[i]*(i+1) for i in rg(bnd))]\n GiL.append( Pv[bnd]==bnd*(-1)^bnd*sum( ( factorial(sum(l)-1)/prod(factorial(l[j]) for j in rg(len(l)))) * prod( (-Sv[i])^l[i-1] for i in rg(1,bnd+1) ) for l in eL ) )\n return [G]+[Sln]+[GiL,[Sv[i]==sum(prod(s) for s in Set(Xv).subsets(i)) for i in rg(1,sz+1)]]", "title": "" }, { "docid": "8a02e158ddf3938f1b619bd2b17b7823", "score": "0.53281605", "text": "def GetFixRemovePCurveMode(self):\n return _ShapeFix.ShapeFix_Wire_GetFixRemovePCurveMode(self)", "title": "" }, { "docid": "8695d3fd721d23ef91590e2079c39aa1", "score": "0.5306278", "text": "def free_polynomial(varlist):\n X = poly_over_varlist(varlist)\n return sum([v * X**i for i, v in enumerate(varlist)])", "title": "" }, { "docid": "648c07baf308447d41000d46c4ec83f6", "score": "0.52772105", "text": "def test_remove_small_polygons_min0(self):\n\n this_local_max_dict = echo_top_tracking._remove_small_polygons(\n local_max_dict=copy.deepcopy(LOCAL_MAX_DICT_WITH_SMALL),\n min_grid_cells_in_polygon=0)\n\n self.assertTrue(_compare_maxima_with_sans_small_polygons(\n this_local_max_dict, LOCAL_MAX_DICT_WITH_SMALL))", "title": "" }, { "docid": "4f8402ab81abec7038d95bddfc4fc1b2", "score": "0.52414465", "text": "def _reduce_conic(self):\n \n # start with removing fractions\n coeff = [self.coefficients()[0], self.coefficients()[3],\n self.coefficients()[5]]\n coeff = lcm(lcm(coeff[0].denominator(), coeff[1].denominator()),\n coeff[2].denominator()) * vector(coeff)\n # go to base ring of fraction field\n coeff = [self.base().base()(x) for x in coeff]\n coeff = vector(coeff) / gcd(coeff)\n # remove common divisors\n labda = mu = nu = 1\n g1 = g2 = g3 = 0\n ca, cb, cc = coeff\n while g1 != 1 or g2 != 1 or g3 != 1:\n g1 = gcd(ca,cb); ca = ca/g1; cb = cb/g1; cc = cc*g1; nu = g1*nu\n g2 = gcd(ca,cc); ca = ca/g2; cc = cc/g2; cb = cb*g2; mu = g2*mu\n g3 = gcd(cb,cc); cb = cb/g3; cc = cc/g3; ca = ca*g3;\n labda = g3*labda\n coeff = [ca, cb, cc]\n multipliers = [labda, mu, nu]\n \n # remove squares\n for i, x in enumerate(coeff):\n if is_FractionField(x.parent()):\n # go to base ring of fraction field\n x = self.base().base()(x)\n \n try:\n decom = x.squarefree_decomposition()\n except (NotImplementedError, AttributeError):\n decom = x.factor()\n x = decom.unit()\n x2 = 1\n for factor in decom:\n if factor[1] > 1:\n if factor[1] % 2 == 0:\n x2 *= factor[0] ** (factor[1] // 2)\n else:\n x *= factor[0]\n x2 *= factor[0] ** ((factor[1] - 1) // 2)\n else:\n x *= factor[0]\n for j, y in enumerate(multipliers):\n if j != i:\n multipliers[j] = y * x2\n coeff[i] = self.base_ring().base().coerce(x);\n \n return (coeff, multipliers)", "title": "" }, { "docid": "8377392a178e7e795b169a6f66fe2488", "score": "0.52282286", "text": "def trimcoef(c, tol=0):\n if tol < 0:\n raise ValueError(\"tol must be non-negative\")\n\n [c] = as_series([c])\n [ind] = np.nonzero(np.abs(c) > tol)\n if len(ind) == 0:\n return c[:1]*0\n else:\n return c[:ind[-1] + 1].copy()", "title": "" }, { "docid": "9e442da6bc0cd305502de3da6f848f2b", "score": "0.52181053", "text": "def strip_extra_coordinates(p: ndarray) -> ndarray:\n return p", "title": "" }, { "docid": "7858dc5cf83526cfbf784b73553ec8b7", "score": "0.5204712", "text": "def test_poly_coeffs_immutable(self):\n p = np.poly1d([1, 2, 3])\n\n try:\n # despite throwing an exception, this used to change state\n p.coeffs += 1\n except Exception:\n pass\n assert_equal(p.coeffs, [1, 2, 3])\n\n p.coeffs[2] += 10\n assert_equal(p.coeffs, [1, 2, 
3])", "title": "" }, { "docid": "a4dd7d0d1280dca5d62819b20e7b7051", "score": "0.51968664", "text": "def __rmul__(self, c):\n\n c = int(round(c))\n\tpoly = list\n\tpoly = self.coeff[:]\n\tfor i in range(len(self.coeff)):\n\t\tpoly[i] = self.coeff[i]*c\n\n\treturn poly", "title": "" }, { "docid": "aedaa52d6249e9fbb01902c2c9644ffc", "score": "0.51786494", "text": "def remove_spike(x):\n x_c = signal.savgol_filter(x, 3, 0)\n res = x - x_c\n std = np.std(res, ddof=1)\n cur = np.where(res > 3.5 * std)\n cur = [range(i - 1, i + 2) for i in cur[0]]\n x_x = np.array(range(len(x)))\n x_m = np.delete(x_x, cur)\n x_n = np.delete(x, cur)\n return np.interp(x_x, x_m, x_n)", "title": "" }, { "docid": "a66d9946b1c432a8c9f39e0e68300279", "score": "0.51731765", "text": "def polyfromroots(roots) :\n if len(roots) == 0 :\n return np.ones(1)\n else :\n [roots] = pu.as_series([roots], trim=False)\n prd = np.zeros(len(roots) + 1, dtype=roots.dtype)\n prd[-1] = 1\n for i in range(len(roots)) :\n prd[-(i+2):-1] -= roots[i]*prd[-(i+1):]\n return prd", "title": "" }, { "docid": "4c3ec72c85cd796b0f47717c4c45f57c", "score": "0.51674074", "text": "def remove_items_with_coefficient_zero(self):\n\n # Process left items.\n for idx in range(len(self.__left_items) - 1, -1, -1):\n if self.__left_items[idx].get_coefficient().simplify().is_zero:\n self.__left_items.pop(idx)\n\n # Process right items.\n for idx in range(len(self.__right_items) - 1, -1, -1):\n if self.__right_items[idx].get_coefficient().simplify().is_zero:\n self.__right_items.pop(idx)", "title": "" }, { "docid": "5907a1a1306af0a687cf53ba3ddb8afa", "score": "0.5158736", "text": "def polyreduce(a, root):\n c, p = [], 0\n a.reverse()\n for coef in a:\n p = p * root + coef\n c.append(p)\n a.reverse()\n c.reverse()\n return c[1:]", "title": "" }, { "docid": "9d9b76c596be6a837d62e45f73296534", "score": "0.51554614", "text": "def stanley_symm_poly_weight(self,w):\n ct = w.parent().cartan_type()\n support = set(w.reduced_word())\n if 1 in support or 0 in support:\n support_complement = set(ct.index_set()).difference(support).difference(set([0,1]))\n else:\n support_complement = set(ct.index_set()).difference(support).difference(set([0]))\n return DiGraph(DynkinDiagram(ct)).subgraph(support_complement, algorithm=\"delete\").connected_components_number() - 1", "title": "" }, { "docid": "a62007c99fe75f3c9a327f0f491d5425", "score": "0.51490796", "text": "def delete_unneccesary_characters(self, list_of_lines):\n for line_number in range(len(list_of_lines)):\n list_of_lines[line_number] = list_of_lines[line_number].strip()\n list_of_lines[line_number] = list_of_lines[line_number].replace('.','0')\n list_of_lines[line_number] = list_of_lines[line_number].translate(None, ' -+|')\n list_of_lines = [line for line in list_of_lines if line]\n return list_of_lines", "title": "" }, { "docid": "ac4b821273808761fca4efae6cb5ceb9", "score": "0.5138789", "text": "def SymPoly_lead_term_eliminateII(mf, Xv, Lm, Pv, Sv):\n # Initialization of the size parameter\n sz=Integer(len(Xv))\n # Initialization of the vector of unknowns\n Y=var_list('y',1+len(Lm))\n #print('Y=',Y)\n # Initialization of the total degree\n td=sum(mf.degree(v) for v in Xv)\n # Initialization of the list of sum of powers\n Lp=[1]+[sum(Xv[i]^j for i in rg(sz)) for j in rg(1,1+td)]\n # Initialization of the polynomial\n Fn=expand(sum(Y[i]*prod(Lp[Lm[i].degree(Xv[u])] for u in rg(sz)) for i in rg(len(Lm)))+Y[len(Lm)]*prod(Lp[mf.degree(Xv[u])] for u in rg(sz)))\n #print('Fn=',Fn)\n G=sum(Y[i]*prod(Pv[Lm[i].degree(Xv[u])] for 
u in rg(sz)) for i in rg(len(Lm)))+Y[len(Lm)]*prod(Pv[mf.degree(Xv[u])] for u in rg(sz))\n # Obtaining the system of linear constraints\n CnstrLst=[Fn.coefficient(mnm) for mnm in Lm]+[Fn.coefficient(mf)-1]\n #print('CnstrLst=',CnstrLst)\n # Initialization of the total degree\n [A,b]=ConstraintFormatorIVHM(CnstrLst, Y)\n #A.printHM()\n Sln=linear_solverHM(A, b, HM(A.n(1),1,Y), HM(A.n(1),1,Y))\n # Performing the reduction in degrees\n for d in rg(td):\n if (td-d)>sz:\n G=fast_reduce_no_expand(G, [Pv[td-d]],[-sum((-1)^k*Sv[k]*Pv[(td-d)-k] for k in rg(1,sz+1))])\n # Initialization of the list derived from Newton's identities\n NwL=[Sv[1]==Pv[1]]\n for bnd in rg(2,sz+1):\n eL=[l for l in List_of_Integers([1+floor(bnd/i) for i in rg(1,bnd+1)]) if bnd==sum(l[i]*(i+1) for i in rg(bnd))]\n NwL.append( Sv[bnd]==(-1)^bnd*sum( prod( (-Pv[i])^l[i-1]/(factorial(l[i-1])*i^l[i-1]) for i in rg(1,bnd+1) ) for l in eL ) )\n return [G]+[Sln]+[NwL,[Pv[i]==sum(Xv[j]^i for j in rg(sz)) for i in rg(1,1+sz)]]", "title": "" }, { "docid": "93d51cf8205abb2acfca0151515c9928", "score": "0.51270115", "text": "def min_coeff(self):\n poly_list = self.tuplelist()\n poly_list.sort(key=self._key_func_0)\n\n r_list = []\n # minimum coefficient\n min_value = poly_list[0][0]\n for term in poly_list:\n if term[0] == min_value:\n r_list.append(term)\n else:\n break\n r_list.sort(key=self._key_func_1, reverse=True)\n return r_list", "title": "" }, { "docid": "eec887b9a95c8b7cb6f8e189bc071ac4", "score": "0.51221454", "text": "def PolyCoefficients(x, coeffs):\r\n\r\n o = len(coeffs)\r\n\r\n z = 0\r\n for i in range(o):\r\n z += coeffs[i] * x ** i\r\n return z", "title": "" }, { "docid": "5ec5d048d7cab4079fd0015c9399abf7", "score": "0.51181054", "text": "def diff_polynomial(_a_):\n\treturn polynomial([________ for i in range(________)])", "title": "" }, { "docid": "3558798641fa65b55b165702f5fee4ad", "score": "0.51148707", "text": "def polysub(c1, c2):\n # c1, c2 are trimmed copies\n [c1, c2] = pu.as_series([c1, c2])\n if len(c1) > len(c2) :\n c1[:c2.size] -= c2\n ret = c1\n else :\n c2 = -c2\n c2[:c1.size] += c1\n ret = c2\n return pu.trimseq(ret)", "title": "" }, { "docid": "82cb42aa87a6d4e06d2167136736fc4e", "score": "0.50927436", "text": "def removeClosePoints( self, verbose = 0 ) :\n\n ne, badXIndicies, messages = endl2dmathmisc.check2dData( self, printErrors = False, printWarning = False )\n badXIndicies.reverse( )\n for i in badXIndicies :\n if( verbose ) : fudgemisc.printWarning( \" removing point at index %d with x value = %e\" % ( i, self.data[i][0] ) )\n del self.data[i]", "title": "" }, { "docid": "694e6f5c0180fb8b0917ea64f3df07f1", "score": "0.5059091", "text": "def _new_constant_poly(self, x, P):\n return MPolynomial_polydict(P, {P._zero_tuple:x})", "title": "" }, { "docid": "2c0e1f418fa47ebe299b1b04a2928090", "score": "0.5055097", "text": "def char_poly_coeffs(self):\n char_poly = self.char_poly()\n return char_poly.all_coeffs()", "title": "" }, { "docid": "d240c65159142367693c242bb68d8c77", "score": "0.5050158", "text": "def simplify(polynom):\n polynom = Poly(polynom)\n new_polynom = 0\n variables = list(polynom.free_symbols)\n\n for var_i in variables:\n coefficient_i = polynom.as_expr().coeff(var_i)/2\n coefficient_i += polynom.as_expr().coeff(var_i ** 2)\n new_polynom += coefficient_i.as_coefficients_dict()[1] * var_i\n for var_j in variables:\n if var_j != var_i:\n coefficient_j = coefficient_i.coeff(var_j)\n new_polynom += coefficient_j.as_coefficients_dict()[1] *\\\n var_i * var_j\n return new_polynom + 
polynom.as_expr().as_coefficients_dict()[1]", "title": "" }, { "docid": "700782c8bbed5b2aba911383ba8eba42", "score": "0.50355285", "text": "def unfit(self):\n pass", "title": "" }, { "docid": "1f806634a2cb35217f6a08f89f539b6f", "score": "0.50329757", "text": "def updatePolynomials(self):", "title": "" }, { "docid": "2b145e1f728bfe6b209f94716d4c3034", "score": "0.5014835", "text": "def remove_zeros(self):\n pass", "title": "" }, { "docid": "c5e6fd8f60dbeefc8ea11db4e17f016e", "score": "0.5009098", "text": "def poly(c,x):\n y = c[-1]\n for i in c[:-1][::-1]:\n y = y*x + i\n return y", "title": "" }, { "docid": "359c2ffc9a610ba44ebea8767758a7f8", "score": "0.5000519", "text": "def remove_fit(self, event):\n initiate_decomposer(self)\n # Add the best-fitting solution and useful parameters to a dictionary\n self.modeldict=get_model_info(self)\n self.modeldict['method']='remove'\n # recreate the model\n self.mod,self.res,self.totmod=recreate_model_manual(self)\n # update the plot with the manually-fitted solution\n plot_model(self, self.specx, self.totmod, update=True, plottoupdate=self.plot_tot)\n plot_model(self,self.specx, self.res,plottoupdate=self.plot_res,update=True,ls='-')\n\n if np.size(self.plot_model)!=0:\n for i in range(len(self.plot_model)):\n self.plot_model[i].pop(0).remove()\n\n # plot a legend\n self.spectrum_legend.remove()\n self.spectrum_legend = self.spectrum_window.legend(loc=2,frameon=False,fontsize=8)\n self.spectrum_window_lookup_artist, self.spectrum_window_lookup_handle = self.build_legend_lookups(self.spectrum_legend)\n self.setup_legend_connections(self.spectrum_legend, self.spectrum_window_lookup_artist, self.spectrum_window_lookup_handle)\n self.update_legend(self.spectrum_window_lookup_artist, self.spectrum_window_lookup_handle)\n\n # update plot\n self.fig.canvas.draw()", "title": "" }, { "docid": "bd5de5f5fa212f6457fae2baba5ee9b1", "score": "0.49995354", "text": "def removetilt(self):\n\t\ttilt = [2,3]\n\t\tZ = self.__coefficients__\n\t\tfor i in tilt:\n\t\t\tZ[i-1] = 0\n\t\treturn Z", "title": "" }, { "docid": "58040dfd16cc6c5ba4f6e8b0bf484fdb", "score": "0.4989113", "text": "def reject(self, var):\n return Poly(*[x for x in self._terms if not x.is_function_of(var)])", "title": "" }, { "docid": "0d70d667a0f8079245771a9bf966d5ee", "score": "0.49781522", "text": "def poincare_polynomial(self):\n charpoly = self.characteristic_polynomial()\n R = charpoly.parent()\n x = R.gen(0)\n poincare = (-x)**self.dimension() * charpoly(-QQ(1)/x)\n return R(poincare)", "title": "" }, { "docid": "1cbf6f2a402ed3672e4c6f16ce603b84", "score": "0.4968041", "text": "def removeClef(offset, part):\n elems = part.getElementsByOffset(offset)\n for elem in elems:\n # Only clef objects have an octaveChange field\n if hasattr(elem, \"octaveChange\"):\n if offset != 0.0:\n part.remove(elem)\n return [offset, offset]\n return [offset, offset]", "title": "" }, { "docid": "fb6417d4abe1e29d4cfc9f5f57ebf5ea", "score": "0.4956149", "text": "def __sub__(self, v):\n v = self._free_module(v)\n return Polyomino([p-v for p in self], color=self._color)", "title": "" }, { "docid": "8ba4d8959cc54c1e26da46314a43094d", "score": "0.49447602", "text": "def SymPoly_lead_term_eliminateG(mf, Xv, Lm, Pv, Sv, RtL):\n # Initialization of the size parameter\n sz=Integer(len(Xv))\n # Initialization of the vector of unknowns\n Y=var_list('y',1+len(Lm))\n #print('Y=',Y)\n # Initialization of the total degree\n td=sum(mf.degree(v) for v in Xv)\n # Initialization of the list of sum of powers\n Lp=[1]+[sum(Xv[i]^j - 
RtL[i]^j for i in rg(sz)) for j in rg(1,1+td)]\n # Initialization of the polynomial\n Fn=expand(sum(Y[i]*prod(Lp[Lm[i].degree(Xv[u])] for u in rg(sz)) for i in rg(len(Lm)))+Y[len(Lm)]*prod(Lp[mf.degree(Xv[u])] for u in rg(sz)))\n #print('Fn=',Fn)\n G=sum(Y[i]*prod(Pv[Lm[i].degree(Xv[u])] for u in rg(sz)) for i in rg(len(Lm)))+Y[len(Lm)]*prod(Pv[mf.degree(Xv[u])] for u in rg(sz))\n # Obtaining the system of linear constraints\n CnstrLst=[Fn.coefficient(mnm) for mnm in Lm]+[Fn.coefficient(mf)-1]\n #print('CnstrLst=',CnstrLst)\n # Initialization of the total degree\n [A,b]=ConstraintFormatorIVHM(CnstrLst, Y)\n #A.printHM()\n Sln=linear_solverHM(A, b, HM(A.n(1),1,Y), HM(A.n(1),1,Y))\n # Performing the reduction in degrees\n for d in rg(td):\n if (td-d)>sz:\n #G=fast_reduce_no_expand(G, [Pv[td-d]],[-sum((-1)^k*Sv[k]*Pv[(td-d)-k] for k in rg(1,sz+1))])\n G=fast_reduce_no_expand(G, [Pv[td-d]],[-sum(RtL[u]^(td-d) for u in rg(sz))-sum((-1)^k*(Sv[k]+sum(prod(s) for s in Set(RtL).subsets(k)))*(Pv[(td-d)-k]+sum(RtL[u]^((td-d)-k) for u in rg(sz))) for k in rg(1,sz+1))])\n # Initialization of the list derived from Girard's identities\n GiL=[Pv[1]==Sv[1]]\n for bnd in rg(2,sz+1):\n eL=[l for l in List_of_Integers([1+floor(bnd/i) for i in rg(1,bnd+1)]) if bnd==sum(l[i]*(i+1) for i in rg(bnd))]\n GiL.append( Pv[bnd]==bnd*(-1)^bnd*sum( ( factorial(sum(l)-1)/prod(factorial(l[j]) for j in rg(len(l)))) * prod( (-Sv[i]-sum(prod(s) for s in Set(RtL).subsets(i)))^l[i-1] for i in rg(1,bnd+1) ) for l in eL ) - sum(RtL[u]^bnd for u in rg(sz)))\n return [G]+[Sln]+[GiL,[Sv[i]==sum(prod(s) for s in Set(Xv).subsets(i)) - sum(prod(s) for s in Set(RtL).subsets(i)) for i in rg(1,sz+1)]]", "title": "" }, { "docid": "11d4b40b780e051a3a2f4a6552471c0c", "score": "0.4944332", "text": "def remove_poor_quality_obs(self):\n quality_good = self.obs.apply(\n lambda row: row.analysis_flag[0] == '.',\n axis=1)\n self.obs = self.obs[quality_good]", "title": "" }, { "docid": "bb69f8bc621dee53ab8eff6bed3641b2", "score": "0.4936579", "text": "def clearSlopes(self) -> None:\n self._db.slopeLimits = []\n self._dlg.slopeBrowser.setText('[0, 9999]')\n self._dlg.slopeBand.clear()", "title": "" }, { "docid": "5f3376f25f34a4c009c5dd0b08433606", "score": "0.49360788", "text": "def __sub__(self, p):\n if(isinstance(p, int)):\n new_c = self.coefficients()\n new_c[0] -= p\n return Polynomial(new_c)\n\n original = self.coefficients()\n other = p.coefficients()\n new_c = []\n if (len(original) < len(other)):\n for i in range(0,len(original)):\n new_c.append(original[i] - other[i])\n for i in range(len(original),len(other)):\n new_c.append(-other[i])\n else:\n for i in range(0,len(other)):\n new_c.append(original[i]-other[i])\n for i in range(len(other),len(original)):\n new_c.append(original[i])\n\n return Polynomial(new_c)\n raise ArithmeticError", "title": "" }, { "docid": "def0244b700b371fa6257f1457cc49db", "score": "0.4932065", "text": "def monic_free_polynomial(varlist):\n X = poly_over_varlist(varlist)\n deg = len(varlist)\n poly = X**deg\n for i, v in enumerate(varlist):\n poly += v * X**i\n return poly", "title": "" }, { "docid": "9e58f15cb35dee31d59bcd3d2b236b8e", "score": "0.4923149", "text": "def stanley_symm_poly_weight(self,w):\n return 0", "title": "" }, { "docid": "b461ab25213e95833e526a240c2cb93d", "score": "0.4920302", "text": "def poly_model(x: np.ndarray, coeff: DArray, flip_coeff: bool = False) -> np.ndarray:\n if flip_coeff:\n flipped_coeff = np.flip(coeff)\n else:\n flipped_coeff = coeff\n\n pol_order = len(coeff)\n x_matrix = 
np.array([x ** i for i in range(pol_order)]).transpose()\n y_true = np.matmul(x_matrix, flipped_coeff)\n return y_true", "title": "" }, { "docid": "e8efea2ca4adb6d0901485c2e4a2abad", "score": "0.49180907", "text": "def clear(self, components, spaxel):\n d = copy.deepcopy(self.source)\n if len(components) > 0:\n for i in components:\n d -= np.tensordot(self.eigen_spectra[i], self.tomograms[i], axes=0)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.wavelength, self.source[:, spaxel[1], spaxel[0]])\n ax.plot(self.wavelength, d[:, spaxel[1], spaxel[0]])\n plt.show()", "title": "" }, { "docid": "66a8dba6624267ce2f790f3549360d37", "score": "0.4915686", "text": "def _RemoveAxes12():\n return tl.Fn('RemoveAxes12', lambda x: jnp.squeeze(x, (1, 2)))", "title": "" }, { "docid": "3904528ca03353321ebfb337d205bf6b", "score": "0.49148208", "text": "def _slow_characteristic_polynomial(self):\n from sage.rings.polynomial.polynomial_ring import polygen\n x = polygen(QQ, 'x')\n P = self.intersection_poset()\n n = self.dimension()\n return sum([P.moebius_function(0, p) * x**(n - P.rank(p)) for p in P])", "title": "" }, { "docid": "b3d6589c6fee624bb4168fc9b531741a", "score": "0.49137038", "text": "def polylysis(polymer):\n pm = hydrolysis(polymer)\n ph = polyheadlysis(pm.pop(0))\n return [ph] + [monolysis(pb) for pb in pm]", "title": "" }, { "docid": "2577896733bb67132c9abc94ec36332a", "score": "0.49056444", "text": "def _strip_stray_atoms(self):\n components = self.bond_graph.connected_components()\n major_component = max(components, key=len)\n for atom in list(self.particles()):\n if atom not in major_component:\n self.remove(atom)", "title": "" }, { "docid": "e119a2e1ff9bddd3c11b5a6cd3e4b095", "score": "0.4897034", "text": "def _minimize_cell(self, cell_lines):\n while (cell_lines and cell_lines[0] in ('\\n', '.sp\\n')):\n del cell_lines[0]\n while (cell_lines and cell_lines[-1] in ('\\n', '.sp\\n')):\n del cell_lines[-1]", "title": "" }, { "docid": "965858f71a78aebfe52a1ee9a5d0e67f", "score": "0.48953667", "text": "def set_negative_coefficients_to_0(coefficients: ArrayLike) -> np.ndarray:\n coeff = np.copy(np.asarray(coefficients))\n if not np.any(coeff < 0):\n return coeff\n\n idx = np.argmax(coeff < 0)\n coeff[idx:] = 0\n return coeff", "title": "" }, { "docid": "08fa894018087dc8e326252da0a8c7cb", "score": "0.48926735", "text": "def testRemoveFunctions(self):\n sp, atom = makeScatterer()\n c = makeCrystal(sp, atom)\n\n # You can add scatterers with the same name. That should be a no-no.\n sp2, atom2 = makeScatterer()\n c.AddScatterer(atom2)\n c.AddScatteringPower(sp2)\n\n # These act according to the library. 
You can try to remove an object\n # that is not in the crystal, and it will gladly do nothing for you.\n\n # Remove the scatterers\n c.RemoveScatterer(atom)\n c.RemoveScatteringPower(sp)\n # Remove again\n c.RemoveScatterer(atom)\n c.RemoveScatteringPower(sp)\n\n # Try to remove scatterers that are not in the crystal\n c.RemoveScatterer(atom2)\n c.RemoveScatteringPower(sp2)\n return", "title": "" }, { "docid": "4bb5d450c30f4a0618f9cbf2e6a0d764", "score": "0.48892218", "text": "def test_chop(self):\n eps = 1e-10\n op = SparsePauliOp(\n [\"XYZ\", \"ZII\", \"ZII\", \"YZY\"], coeffs=[eps + 1j * eps, 1 + 1j * eps, eps + 1j, 1 + 1j]\n )\n simplified = op.chop(tol=eps)\n expected_coeffs = [1, 1j, 1 + 1j]\n expected_paulis = [\"ZII\", \"ZII\", \"YZY\"]\n self.assertListEqual(simplified.coeffs.tolist(), expected_coeffs)\n self.assertListEqual(simplified.paulis.to_labels(), expected_paulis)", "title": "" }, { "docid": "a4f547ac62400f3f9bf5d8a7ffe388ae", "score": "0.4887771", "text": "def char_poly(self):\n I = sp.eye(self.n)\n char_poly = (self.s * I - self.A).det()\n return sp.poly(char_poly, self.s)", "title": "" }, { "docid": "e63deb51072b101fb5598b41ffabd585", "score": "0.488613", "text": "def _sparse_coefficients(self, feature_library, rows, cols, ind, coef, bounds_perc, N, num_levels, sa_time_ind):\n c = coef.flatten()\n # big_ind = np.abs(c) >= threshold\n big_ind = self._sensitivity_analysis(feature_library, rows, cols, ind, coef, bounds_perc, N, num_levels, sa_time_ind)\n c[~big_ind] = 0\n return c.reshape((rows, cols)), big_ind.reshape((rows, cols))", "title": "" }, { "docid": "a65e5a5804b528f08f82e8297d4fe3eb", "score": "0.48521823", "text": "def polytrans(features,features_test,features_oos,poly): \n \n features['FEMA_21'] = poly.fit_transform(np.nan_to_num(features.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features['FEMA_8'] = poly.fit_transform(np.nan_to_num(features.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features['FADRLo'] = poly.fit_transform(np.nan_to_num(features.FADRLo.astype(np.float32)).reshape(-1, 1))\n features['FADRHi'] = poly.fit_transform(np.nan_to_num(features.FADRHi.astype(np.float32)).reshape(-1, 1))\n features['FRVI40'] = poly.fit_transform(np.nan_to_num(features.FRVI40.astype(np.float32)).reshape(-1, 1))\n features['FRVI60'] = poly.fit_transform(np.nan_to_num(features.FRVI60.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n 
features['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FSMA200'] = poly.fit_transform(np.nan_to_num(features.FSMA200.astype(np.float32)).reshape(-1, 1))\n features['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features['FPP'] = poly.fit_transform(np.nan_to_num(features.FPP.astype(np.float32)).reshape(-1, 1))\n features['FS38'] = poly.fit_transform(np.nan_to_num(features.FS38.astype(np.float32)).reshape(-1, 1))\n features['FS62'] = poly.fit_transform(np.nan_to_num(features.FS62.astype(np.float32)).reshape(-1, 1))\n features['FS100'] = poly.fit_transform(np.nan_to_num(features.FS100.astype(np.float32)).reshape(-1, 1))\n features['FS138'] = poly.fit_transform(np.nan_to_num(features.FS138.astype(np.float32)).reshape(-1, 1))\n features['FR162'] = poly.fit_transform(np.nan_to_num(features.FS162.astype(np.float32)).reshape(-1, 1))\n features['FS200'] = poly.fit_transform(np.nan_to_num(features.FS200.astype(np.float32)).reshape(-1, 1))\n features['FR38'] = poly.fit_transform(np.nan_to_num(features.FR38.astype(np.float32)).reshape(-1, 1))\n features['FR62'] = poly.fit_transform(np.nan_to_num(features.FR62.astype(np.float32)).reshape(-1, 1))\n features['FR100'] = poly.fit_transform(np.nan_to_num(features.FR100.astype(np.float32)).reshape(-1, 1))\n features['FR138'] = poly.fit_transform(np.nan_to_num(features.FR138.astype(np.float32)).reshape(-1, 1))\n features['FR162'] = poly.fit_transform(np.nan_to_num(features.FR162.astype(np.float32)).reshape(-1, 1))\n features['FR200'] = poly.fit_transform(np.nan_to_num(features.FR200.astype(np.float32)).reshape(-1, 1))\n features['SBATR'] = poly.fit_transform(np.nan_to_num(features.SBATR.astype(np.float32)).reshape(-1, 1))\n \n features_test['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_test['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_test['FADRLo'] = poly.fit_transform(np.nan_to_num(features_test.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_test['FADRHi'] = poly.fit_transform(np.nan_to_num(features_test.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI40'] = poly.fit_transform(np.nan_to_num(features_test.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI60'] = poly.fit_transform(np.nan_to_num(features_test.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA5'] = 
poly.fit_transform(np.nan_to_num(features_test.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features_test['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_test.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FSMA200'] = poly.fit_transform(np.nan_to_num(features_test.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_test['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features_test.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_test['FPP'] = poly.fit_transform(np.nan_to_num(features_test.FPP.astype(np.float32)).reshape(-1, 1))\n features_test['FS38'] = poly.fit_transform(np.nan_to_num(features_test.FS38.astype(np.float32)).reshape(-1, 1))\n features_test['FS62'] = poly.fit_transform(np.nan_to_num(features_test.FS62.astype(np.float32)).reshape(-1, 1))\n features_test['FS100'] = poly.fit_transform(np.nan_to_num(features_test.FS100.astype(np.float32)).reshape(-1, 1))\n features_test['FS138'] = poly.fit_transform(np.nan_to_num(features_test.FS138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FS162.astype(np.float32)).reshape(-1, 1))\n features_test['FS200'] = poly.fit_transform(np.nan_to_num(features_test.FS200.astype(np.float32)).reshape(-1, 1))\n features_test['FR38'] = poly.fit_transform(np.nan_to_num(features_test.FR38.astype(np.float32)).reshape(-1, 1))\n features_test['FR62'] = poly.fit_transform(np.nan_to_num(features_test.FR62.astype(np.float32)).reshape(-1, 1))\n features_test['FR100'] = poly.fit_transform(np.nan_to_num(features_test.FR100.astype(np.float32)).reshape(-1, 1))\n features_test['FR138'] = 
poly.fit_transform(np.nan_to_num(features_test.FR138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FR162.astype(np.float32)).reshape(-1, 1))\n features_test['FR200'] = poly.fit_transform(np.nan_to_num(features_test.FR200.astype(np.float32)).reshape(-1, 1))\n features_test['SBATR'] = poly.fit_transform(np.nan_to_num(features_test.SBATR.astype(np.float32)).reshape(-1, 1))\n\n features_oos['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_oos['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRLo'] = poly.fit_transform(np.nan_to_num(features_oos.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRHi'] = poly.fit_transform(np.nan_to_num(features_oos.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI40'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI60'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features_oos['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_oos.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FSMA200'] = poly.fit_transform(np.nan_to_num(features_oos.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_oos['FBOLUP20'] = 
poly.fit_transform(np.nan_to_num(features_oos.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_oos['FPP'] = poly.fit_transform(np.nan_to_num(features_oos.FPP.astype(np.float32)).reshape(-1, 1))\n features_oos['FS38'] = poly.fit_transform(np.nan_to_num(features_oos.FS38.astype(np.float32)).reshape(-1, 1))\n features_oos['FS62'] = poly.fit_transform(np.nan_to_num(features_oos.FS62.astype(np.float32)).reshape(-1, 1))\n features_oos['FS100'] = poly.fit_transform(np.nan_to_num(features_oos.FS100.astype(np.float32)).reshape(-1, 1))\n features_oos['FS138'] = poly.fit_transform(np.nan_to_num(features_oos.FS138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = poly.fit_transform(np.nan_to_num(features_oos.FS162.astype(np.float32)).reshape(-1, 1))\n features_oos['FS200'] = poly.fit_transform(np.nan_to_num(features_oos.FS200.astype(np.float32)).reshape(-1, 1))\n features_oos['FR38'] = poly.fit_transform(np.nan_to_num(features_oos.FR38.astype(np.float32)).reshape(-1, 1))\n features_oos['FR62'] = poly.fit_transform(np.nan_to_num(features_oos.FR62.astype(np.float32)).reshape(-1, 1))\n features_oos['FR100'] = poly.fit_transform(np.nan_to_num(features_oos.FR100.astype(np.float32)).reshape(-1, 1))\n features_oos['FR138'] = poly.fit_transform(np.nan_to_num(features_oos.FR138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = poly.fit_transform(np.nan_to_num(features_oos.FR162.astype(np.float32)).reshape(-1, 1))\n features_oos['FR200'] = poly.fit_transform(np.nan_to_num(features_oos.FR200.astype(np.float32)).reshape(-1, 1))\n features_oos['SBATR'] = poly.fit_transform(np.nan_to_num(features_oos.SBATR.astype(np.float32)).reshape(-1, 1))\n\n return(features,features_test,features_oos)", "title": "" }, { "docid": "d1625d428e31902f9b14adc5cd2f6e93", "score": "0.4850039", "text": "def remove_all_combinations(self):\n self.combinations = []", "title": "" }, { "docid": "e73f084c926071e69c60ce539be4e6ae", "score": "0.4849316", "text": "def _clean_polyline(links):\n # Extract geometries\n geometries = list(filter(None, [l.get('Geometry') for l in links]))\n if not geometries:\n return None\n\n # Extracts all points\n pos = list(map(\n lambda x : (float(x[1]), float(x[0])), # lat lng\n re.findall('(\\d+\\.\\d+) (\\d+\\.\\d+)', ''.join(geometries))\n ))\n\n # Encode polyline\n return polyline.encode(pos)", "title": "" }, { "docid": "c76b446ee73a46a435f6f75135163b4a", "score": "0.48489", "text": "def test_poly(self):\n conv = ToPointsAndSegments()\n conv.add_polygon(\n [[(0, 0), (9, 0), (11, -.1), (11.1, 0), (22, 0), (14, 10), (2, 8), (0, 5), (0, 0)]])\n skel = calc_skel(conv, pause=PAUSE, output=OUTPUT)\n assert len(skel.segments()) == (8 + 13)\n assert len(skel.sk_nodes) == 14\n assert not segments_intersecting(skel.segments())", "title": "" }, { "docid": "d86eaeb8073926946813511d1293428a", "score": "0.48359793", "text": "def test_remove_small_polygons_min5(self):\n\n this_local_max_dict = echo_top_tracking._remove_small_polygons(\n local_max_dict=copy.deepcopy(LOCAL_MAX_DICT_WITH_SMALL),\n min_grid_cells_in_polygon=MIN_GRID_CELLS_IN_POLYGON)\n\n self.assertTrue(_compare_maxima_with_sans_small_polygons(\n this_local_max_dict, LOCAL_MAX_DICT_WITHOUT_SMALL))", "title": "" }, { "docid": "bfebbe41c3022839e2634f77343653e7", "score": "0.4834354", "text": "def negate(self):\n self.coefficient *= -1", "title": "" }, { "docid": "8648005b7257a95a36e6baaafe43d93c", "score": "0.48323956", "text": "def _disable_numpy_complex_warning():\n import warnings\n warnings.filterwarnings(\"ignore\", 
module=numpy_support.__name__)", "title": "" }, { "docid": "4d2b898b24ebc55c623e5d7ed8f1267d", "score": "0.4829248", "text": "def del_constant_start_stop(x):\r\n del x[2:6]\r\n return x", "title": "" }, { "docid": "e4f3253f8f6c964f208367dfcd9cf1bc", "score": "0.4829005", "text": "def remove_small_vectors(U, S):\n columns = [U.T[i] for i, w in enumerate(S) if not util.is_small(w)]\n U_reduced = np.vstack(columns).T\n S_reduced = [w for w in S if not util.is_small(w)]\n return U_reduced, S_reduced", "title": "" }, { "docid": "f1b67f7790947d6546e097b36ac8c69d", "score": "0.48213962", "text": "def _bell_incomplete_poly(n, k, symbols):\n if (n == 0) and (k == 0):\n return mp.one\n elif (n == 0) or (k == 0):\n return mp.zero\n s = mp.zero\n a = mp.one\n for m in range(1, n - k + 2):\n s += a * _bell_incomplete_poly(n - m, k - 1, symbols) * symbols[m - 1]\n a = a * (n - m) / m\n return s", "title": "" }, { "docid": "32927e1b857d30618843d46d87a7d5ac", "score": "0.48133186", "text": "def remove_relaxation(self):\n del self._cuts\n self._cuts = None\n del self._original_constraint\n self._original_constraint = None\n del self._nonlinear\n self._nonlinear = None", "title": "" }, { "docid": "d49b2be5e4dda579d8f1c8ea0851290b", "score": "0.48110703", "text": "def remove_adj_markers(self):\r\n if self.track_adjustment is not None:\r\n self.track_adjustment.remove_markers()", "title": "" }, { "docid": "eb4ab836d7f3c26b3f0495bdc964c218", "score": "0.4810638", "text": "def ensure_non_quotient(x):\n if is_MPolynomial(x):\n return x\n elif is_Polynomial(x):\n return x\n elif hasattr(x, 'lift'): # duck typing ;-)\n # print(\"debug have to lift {0}\".format(x))\n return x.lift()\n else:\n return x", "title": "" }, { "docid": "a53f69f56f4927ba1f49a71ea78f692c", "score": "0.48014352", "text": "def remove_spines(*spines_to_remove):\n ax = get_axis()\n return ax.remove_spines(*spines_to_remove)", "title": "" }, { "docid": "c1e614319bdc7a9f11fb2aae06fd6239", "score": "0.47957414", "text": "def _remove(shape, solution):\n for point in shape:\n solution.remove(point)", "title": "" }, { "docid": "8e4469bf17e65614898ca35ccb0584ce", "score": "0.47828594", "text": "def general_poly (L):\n #YOUR CODE HERE \n def func(x):\n L_copy = L[:]\n L_copy.reverse()\n result = 0\n for i in range(len(L_copy)):\n result += L_copy[i]*(x**i)\n return result\n return func", "title": "" }, { "docid": "21045437f50d41829fcd86badb3d7337", "score": "0.4779764", "text": "def ThirdOrdercharpoly(A, c):\n # Initialization of the list of powers\n L=[]; Ls=[]; i=0\n while len(L) < 1+prod(A.dimensions()):\n L=L+HypermatrixCayleyHamiltonList(A, 2*i+1)\n Ls=Ls+HypermatrixCayleyHamiltonStringList(2*i+1, c)\n i=i+1\n # Initialization of the boolean variables which assert the status of the search.\n Fnd=False\n n=1+prod(A.dimensions())\n while (not Fnd) and n>1:\n # Initializing the index variables\n Indx = Set(range(len(L))).subsets(n)\n # Looping through all the subset of the appropriate size.\n for index in Indx:\n M=Matrix(SR, [L[i] for i in index]).transpose()\n #print 'Indexing set =', index\n if M.rank()==n-1:\n Fnd=True\n break\n # Initialization the result\n if M.rank()==n-1:\n b=Matrix(SR, M.nrows(), 1, HM(M.nrows(), 1, 'zero').list())\n x=Matrix(SR, M.ncols(), 1, [var('x'+str(i)) for i in range(M.ncols())])\n return [linear_solver(M, b, x, x) ,\" + \".join([str(x[i,0])+'*'+Ls[index[i]] for i in range(M.ncols())])]\n n=n-1\n if Fnd==False:\n return []", "title": "" }, { "docid": "4070c986f87612c24ae55b68e0c9b0e7", "score": "0.4776955", "text": 
"def remove_identicals(self):\n # FIXME: in some cases a series of many points close together \n # is removed, even if they form together a valid shape.\n from spira import settings\n pts = self.points\n if len(pts) > 1:\n identicals = np.prod(abs(pts - np.roll(self.points, -1, 0)) < 0.5 / settings.get_grids_per_unit(), 1)\n if not self.is_closed:\n identicals[-1] = False\n self.points = np.delete(pts, identicals.nonzero()[0], 0)\n return self", "title": "" }, { "docid": "79920d749455bac4fc2546e7eed9e11a", "score": "0.47718883", "text": "def fit(self):\n if len(self.x) > 0 and len(self.y) > 0:\n self.poly = np.poly1d(np.polyfit(self.y, self.x, deg=1))\n else:\n self.poly = None \n return self.poly", "title": "" }, { "docid": "ada73cc06b14cba2a5316ffd7f034f0c", "score": "0.47710088", "text": "def polynomial(x, c):\r\n \"*** YOUR CODE HERE ***\"", "title": "" }, { "docid": "362d5135663062c08669907a6b0d4ed8", "score": "0.47667962", "text": "def GetFixRemoveCurve3dMode(self):\n return _ShapeFix.ShapeFix_Wire_GetFixRemoveCurve3dMode(self)", "title": "" }, { "docid": "48b62b00bb3573e036dc45b34c6e4ed4", "score": "0.47636452", "text": "def poly_clear_constants(polynomial_fraction):\n num_new = polynomial_fraction.numerator()\n den_new = polynomial_fraction.denominator()\n num_content = principal_content(num_new)\n den_content = principal_content(den_new)\n # print(\"debug num_content = {0}, den_content = {1}\".format(num_content, den_content))\n # this gcd works also for rational expressions.\n g = gcd(num_content, den_content)\n # print(\"debug gcd = {0}, parent = {1}\".format(g, g.parent()))\n num_new = num_new / g\n den_new = den_new / g\n # print(\"debug num = {0}, den = {1}\".format(num_new, den_new))\n return num_new / den_new", "title": "" }, { "docid": "a0851c49e8e7eb0b7f8f7ec500882ecb", "score": "0.4762892", "text": "def dup_neg(f, K):\n return [ -coeff for coeff in f ]", "title": "" }, { "docid": "abb4c802245dd33a677fab660b0c0fbb", "score": "0.4762519", "text": "def remove_constrained_suppliers(data: xr.DataArray) -> xr.DataArray:\n\n # we set CHP suppliers to zero\n # as electricity production is not a\n # determining product for CHPs\n tech_to_ignore = [\"CHP\", \"biomethane\", \"biogas\"]\n data.loc[\n dict(\n variables=[\n v for v in data.variables.values if any(x in v for x in tech_to_ignore)\n ],\n )\n ] = 0\n\n return data", "title": "" }, { "docid": "a33eff442f03a2ca45fa4c3f4882e319", "score": "0.47614908", "text": "def remove_selective_dynamics(basis):\n if \"selective_dynamics\" in basis.arrays.keys():\n for ind in range(len(basis)):\n basis.selective_dynamics[ind] = [True, True, True]\n return basis", "title": "" }, { "docid": "1f4726372463d5640b8dcb8e792ea5bc", "score": "0.47590613", "text": "def nonzero(x: PolyLike, **kwargs: Any) -> Tuple[numpy.ndarray, ...]:\n x = numpoly.aspolynomial(x)\n return numpy.nonzero(numpy.any(numpy.asarray(x.coefficients), axis=0))", "title": "" }, { "docid": "161a4f1f59f5e8eec615be8937911bb2", "score": "0.4755566", "text": "def _unComplexifyCoef(self):\n if len(self.axis) > 0 and not self.complex:\n for key in self.axis:\n self.rot_x[key].coef = self.rot_x[key].coef.real.astype(\"d\")\n self.rot_y[key].coef = self.rot_y[key].coef.real.astype(\"d\")\n self.rot_z[key].coef = self.rot_z[key].coef.real.astype(\"d\")\n self.rot_theta[key].coef = self.rot_theta[key].coef.real.astype(\"d\")\n\n self.scale[key].coef = self.scale[key].coef.real.astype(\"d\")\n self.scale_x[key].coef = self.scale_x[key].coef.real.astype(\"d\")\n self.scale_y[key].coef = 
self.scale_y[key].coef.real.astype(\"d\")\n self.scale_z[key].coef = self.scale_z[key].coef.real.astype(\"d\")\n\n for i in range(self.refAxis.nCurve):\n self.refAxis.curves[i].coef = self.refAxis.curves[i].coef.real.astype(\"d\")\n\n self.coef = self.coef.real.astype(\"d\")", "title": "" }, { "docid": "b29694379301c038095e7dfa1a9b4156", "score": "0.47487417", "text": "def zpoly(field, roots):\n polysOver = polynomials_over(field).factory\n root = [field(1)]\n for x in roots:\n root.insert(0, field(0))\n for j in range(len(root) - 1):\n root[j] -= root[j + 1] * x\n print(\"root\")\n print(root)\n return polysOver(root)", "title": "" }, { "docid": "b3242ef2f0d3ff42907b072e77cc8a3b", "score": "0.4746169", "text": "def trim_coeffs(coeffs, abs_approx_tol, rel_approx_tol, inf_norms, errors):\n # Assume we start with good approximations\n good_approx = True\n for num, coeff in enumerate(coeffs):\n # Get the error inherent in the approximation\n error = errors[num]\n\n # Try to zero out everything below the lower-reverse-hyperdiagonal\n # that's a fancy way of saying monomials that are more than the specified degree\n dim = coeff.ndim\n deg = np.sum(coeff.shape) - dim - 1\n initial_mons = []\n for deg0 in range(coeff.shape[0], deg+1):\n initial_mons += mon_combos_limited_wrap(deg0, dim, coeff.shape)\n mons = np.array(initial_mons).T\n slices = tuple(mons[:dim])\n slice_error = np.sum(np.abs(coeff[slices]))\n # increment error\n error += slice_error\n if error > abs_approx_tol+rel_approx_tol*inf_norms[num]:\n # FREAK OUT if we can't zero out everything below the lower-reverse-hyperdiagonal\n good_approx = False\n else:\n # try to increment the degree down\n coeff[slices] = 0\n deg = coeff.shape[0]-1\n # stop when it gets linear...\n while deg > 1:\n # try to cut off another hyperdiagonal from the coefficient matrix\n mons = mon_combos_limited_wrap(deg, dim, coeff.shape)\n mons = np.array(mons).T\n slices = tuple(mons[:dim])\n slice_error = np.sum(np.abs(coeff[slices]))\n # if that introduces too much error, backtrack\n if slice_error + error > abs_approx_tol+rel_approx_tol*inf_norms[num]:\n if deg < coeff.shape[0]-1:\n slices = tuple([slice(0, deg+1)]*dim)\n coeff = coeff[slices]\n break\n # otherwise, increment the error\n else:\n error += slice_error\n coeff[slices] = 0\n deg-=1\n if deg == 1:\n slices = tuple([slice(0, 2)]*dim)\n coeff = coeff[slices]\n break\n coeffs[num] = coeff\n errors[num] = error\n\n return coeffs, good_approx, errors", "title": "" }, { "docid": "7a93f9ff67c0ad09ec792641ce4e2380", "score": "0.47438225", "text": "def polyFit(i0,Imat,order=3, removeOrders=[]): \n Imatf = Imat.reshape((len(Imat),-1))\n pol = np.vander(i0,order+1) \n removeOrders = iterfy(removeOrders) \n removeOrders = np.sort(removeOrders)[-1::-1] \n for remo in removeOrders: \n pol = np.delete(pol,-(remo+1),axis=1) \n lhs = copy.copy(pol) \n scale = np.sqrt((lhs*lhs).sum(axis=0)) \n lhs /= scale \n comps,resid,rnk,singv = linalg.lstsq(lhs,Imatf) \n comps = (comps.T/scale).T \n \n for remo in removeOrders: \n comps = np.insert(comps,order-remo,0,axis=0) \n return comps.reshape((order+1,)+np.shape(Imat)[1:])", "title": "" }, { "docid": "a54a7c9b6f03a258b086ad69d42fd541", "score": "0.47417796", "text": "def test_poly_int_overflow(self):\n v = np.arange(1, 21)\n assert_almost_equal(np.poly(v), np.poly(np.diag(v)))", "title": "" } ]
efbb09e08a98276527ed32ce2c4be1f9
Set wifi Infrastructure Parameters. Have to use models for loss, latency, bw..
[ { "docid": "bc2380ef1fb03f6cf8cb6a6d1049ad99", "score": "0.688999", "text": "def setInfraParameters(self, sta, ap, distance, wlan):\n if wlan != '':\n self.parameters(sta, ap, distance, wlan)\n else:\n for wlan in range(0, sta.nWlans):\n self.parameters(sta, ap, distance, wlan)", "title": "" } ]
[ { "docid": "382c49d23c62cfc09cc339a28e81a2f8", "score": "0.7367894", "text": "def set_wifi_parameters(self, *args, **kwargs):\n self._enable_warn()\n return", "title": "" }, { "docid": "603d08cbe987a95961a828e529e32973", "score": "0.65696967", "text": "def apply_wireless_settings(self):\n\n # set channels for 2G and 5G bands\n self.ssh.run(\"uci set wireless.radio1.channel='%s'\" % self.channel_2g)\n self.ssh.run(\"uci set wireless.radio0.channel='%s'\" % self.channel_5g)\n\n # disable default OpenWrt SSID\n self.ssh.run(\"uci set wireless.default_radio1.disabled='%s'\" %\n DISABLE_RADIO)\n self.ssh.run(\"uci set wireless.default_radio0.disabled='%s'\" %\n DISABLE_RADIO)\n\n # Enable radios\n self.ssh.run(\"uci set wireless.radio1.disabled='%s'\" % ENABLE_RADIO)\n self.ssh.run(\"uci set wireless.radio0.disabled='%s'\" % ENABLE_RADIO)\n\n for config in self.wireless_configs:\n\n # configure open network\n if config.security == OPEN_SECURITY:\n if config.band == hostapd_constants.BAND_2G:\n self.ssh.run(\"uci set wireless.default_radio1.ssid='%s'\" %\n config.ssid)\n self.ssh.run(\"uci set wireless.default_radio1.disabled='%s'\" %\n ENABLE_RADIO)\n if config.hidden:\n self.ssh.run(\"uci set wireless.default_radio1.hidden='%s'\" %\n ENABLE_HIDDEN)\n elif config.band == hostapd_constants.BAND_5G:\n self.ssh.run(\"uci set wireless.default_radio0.ssid='%s'\" %\n config.ssid)\n self.ssh.run(\"uci set wireless.default_radio0.disabled='%s'\" %\n ENABLE_RADIO)\n if config.hidden:\n self.ssh.run(\"uci set wireless.default_radio0.hidden='%s'\" %\n ENABLE_HIDDEN)\n continue\n\n self.ssh.run(\"uci set wireless.%s='wifi-iface'\" % config.name)\n if config.band == hostapd_constants.BAND_2G:\n self.ssh.run(\"uci set wireless.%s.device='radio1'\" % config.name)\n else:\n self.ssh.run(\"uci set wireless.%s.device='radio0'\" % config.name)\n self.ssh.run(\"uci set wireless.%s.network='%s'\" %\n (config.name, config.iface))\n self.ssh.run(\"uci set wireless.%s.mode='ap'\" % config.name)\n self.ssh.run(\"uci set wireless.%s.ssid='%s'\" %\n (config.name, config.ssid))\n self.ssh.run(\"uci set wireless.%s.encryption='%s'\" %\n (config.name, config.security))\n if config.security == PSK_SECURITY or config.security == SAE_SECURITY:\n self.ssh.run(\"uci set wireless.%s.key='%s'\" %\n (config.name, config.password))\n elif config.security == WEP_SECURITY:\n self.ssh.run(\"uci set wireless.%s.key%s='%s'\" %\n (config.name, config.wep_key_num, config.wep_key))\n self.ssh.run(\"uci set wireless.%s.key='%s'\" %\n (config.name, config.wep_key_num))\n elif config.security == ENT_SECURITY:\n self.ssh.run(\"uci set wireless.%s.auth_secret='%s'\" %\n (config.name, config.radius_server_secret))\n self.ssh.run(\"uci set wireless.%s.auth_server='%s'\" %\n (config.name, config.radius_server_ip))\n self.ssh.run(\"uci set wireless.%s.auth_port='%s'\" %\n (config.name, config.radius_server_port))\n if config.ieee80211w:\n self.ssh.run(\"uci set wireless.%s.ieee80211w='%s'\" %\n (config.name, config.ieee80211w))\n if config.hidden:\n self.ssh.run(\"uci set wireless.%s.hidden='%s'\" %\n (config.name, ENABLE_HIDDEN))\n\n self.ssh.run(\"uci commit wireless\")\n self.ssh.run(\"cp %s %s.tmp\" % (LEASE_FILE, LEASE_FILE))", "title": "" }, { "docid": "75083a12872424fed75490a7f58500f7", "score": "0.64731205", "text": "async def wifiConfig(request, response, args):\n\tglobal currentNetworks, currentConfig, currentNetwork\n\n\tpatternDns = 
rb\"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$\"\n\n\tif currentConfig == None:\n\t\tcurrentConfig = wifi.Station.config\n\t\tcurrentNetwork = wifi.Station.network\n\t\tcurrentNetworks = currentNetwork.listKnown()\n\n\tconfig = currentConfig\n\tnetwork = currentNetwork\n\n\taction = request.params.get(b\"action\",b\"none\")\n\tforced = request.params.get(b\"forced\",b\"none\")\n\tcurrent = request.params.get(b\"current\",b\"\")\n\tdynamic = request.params.get(b\"dynamic\",b\"1\")\n\n\tdisabled = False\n\t# If default page\n\tif action == b\"none\" and len(request.params) == 0:\n\t\tdisabled = True\n\t# If the dynamic ip switch changed\n\telif action == b\"none\":\n\t\tif dynamic == b\"1\":\n\t\t\tforced = b\"1\"\n\t\telse:\n\t\t\tforced = b\"0\"\n\t\taction = b\"change\"\n\t# If the modify button clicked\n\telif action == b\"modify\":\n\t\tpass\n\t# If the save button clicked\n\telif action == b\"save\":\n\t\tnetwork.update(request.params)\n\t\tnetwork.save()\n\t\tnetwork = selectNetwork(0, True)\n\t\tconfig.update(request.params)\n\t\tconfig.default = network.ssid\n\t\tconfig.save()\n\t\tforced = b\"none\"\n\t\tdisabled = True\n\telif action == b\"previous\":\n\t\tnetwork = selectNetwork(-1)\n\t\tforced = b\"none\"\n\telif action == b\"next\":\n\t\tnetwork = selectNetwork(1)\n\t\tforced = b\"none\"\n\telif action == b\"forget\":\n\t\tforced = b\"none\"\n\t\tnetwork.forget()\n\t\tnetwork = selectNetwork(-1,True)\n\telif action == b\"default\":\n\t\tconfig.default = useful.tostrings(network.ssid)\n\t\tconfig.save()\n\tif forced == b\"none\":\n\t\tdynamic = network.dynamic\n\telif forced == b\"1\":\n\t\tdynamic = True\n\telse:\n\t\tdynamic = False\n\t\n\tssids = []\n\t\n\tif action in [b\"previous\",b\"next\",b\"change\",b\"forget\",b\"modify\",b\"default\"]:\n\t\tsubmit = \\\n\t\t\tSubmit (text=lang.save , name=b\"action\", value=b\"save\" , style=b\"margin-right:0.5em\"), \\\n\t\t\tSubmit (text=lang.lt , name=b\"action\", value=b\"previous\", style=b\"margin-right:0.5em\"), \\\n\t\t\tSubmit (text=lang.gt , name=b\"action\", value=b\"next\" , style=b\"margin-right:0.5em\"), \\\n\t\t\tSubmit (text=lang.forget, name=b\"action\", value=b\"forget\" , style=b\"margin-right:0.5em\"), \\\n\t\t\tSubmit (text=lang.set_default,name=b\"action\", value=b\"default\" , style=b\"margin-right:0.5em\"), \\\n\t\t\tInput ( name=b\"forced\", value=forced , type=b\"hidden\"), \\\n\t\t\tInput ( name=b\"current\", value=current , type=b\"hidden\")\n\n\t\tif wifi.Station.isActive():\n\t\t\tnetworks = wifi.Station.scan()\n\t\t\tfor net in networks:\n\t\t\t\tssids.append(Option(value=net[0]))\n\telse:\n\t\tsubmit = Submit(text=lang.modify, name=b\"action\", value=b\"modify\")\n\n\tpage = mainFrame(request, response, args, lang.wifi_configuration,\n\t\tSwitch(text=lang.activated, name=b\"activated\", checked=config.activated, disabled=disabled),Br(),\n\t\tEdit(text=lang.hostname , name=b\"hostname\", placeholder=lang.hostname_not_available, pattern=patternDns, value=config.hostname, disabled=disabled), \n\t\tSwitch(text=lang.fallback_to_the, name=b\"fallback\", checked=config.fallback, disabled=disabled),Br(),\n\t\tCard(\n\t\t\t[\n\t\t\t\tCardHeader(text= lang.wifi if useful.tostrings(network.ssid) != useful.tostrings(config.default) else lang.wifi_default),\n\t\t\t\tCardBody([\n\t\t\t\t\tComboBox(ssids, text=lang.ssid, placeholder=lang.enter_ssid, name=b\"ssid\", value=network.ssid, disabled=disabled),\n\t\t\t\t\tEdit(text=lang.password, 
name=b\"wifipassword\", placeholder=lang.enter_password, type=b\"password\",value=network.wifipassword, disabled=disabled),\n\t\t\t\t])\n\t\t\t]),Br(),\n\t\tCard(\n\t\t\t[\n\t\t\t\tCardHeader([\\\n\t\t\t\t\tSwitch(text=lang.dynamic_ip, checked=dynamic, name=b\"dynamic\", onchange=b\"this.form.submit()\", disabled=disabled)]),\n\t\t\t\tCardBody([\\\n\t\t\t\t\tNone if dynamic else staticIpHtml(network, disabled)])\n\t\t\t]),\n\t\tBr(),\n\t\tsubmit)\n\tawait response.sendPage(page)", "title": "" }, { "docid": "eea6beb66f0538393f765e2ff8df65e1", "score": "0.6423391", "text": "async def wifiConfig(request, response, args):\n\tconfig = wifi.StationConfig()\n\tconfig.load()\n\n\taction = request.params.get(b\"modify\",b\"none\")\n\tif action == b\"none\" : disabled = True\n\telif action == b\"modify\": disabled = False \n\telif action == b\"save\" : \n\t\tdisabled = True\n\t\tdel request.params[b\"modify\"]\n\t\tconfig.update(request.params)\n\t\tconfig.save()\n\t\tif wifi.Station:\n\t\t\twifi.Station.config = config\n\telif action == b\"modify\":\n\t\tdisabled = False\n\n\tpage = configureWifi(title=args[\"title\"], config=config, accessPoint=False, disabled=disabled, active=args[\"index\"], request=request, response=response)\n\tawait response.sendPage(page)", "title": "" }, { "docid": "f6e48faa47fd845431ad6e0f9a8fabbd", "score": "0.6387347", "text": "def setAdhocParameters(self, sta, iface):\n latency = 2\n #delay = 5 * distance\n bandwidth = wifiParameters.set_bw(sta.mode)\n sta.pexec(\"tc qdisc replace dev %s-wlan%s \\\n root handle 3: netem rate %smbit \\\n latency %sms\" % (sta, iface, bandwidth, latency)) \n #Reordering packets \n sta.pexec('tc qdisc add dev %s-wlan%s parent 3:1 pfifo limit 1000' % (sta, iface))", "title": "" }, { "docid": "431456816cc8d02823195952b69f17e0", "score": "0.6036319", "text": "def set_wifi():\n print 'Please set up your userid and password.(Press enter to end input)'\n\n userid = raw_input('Please input your userid:')\n password = raw_input('Please input your password:')\n\n if login_gateway_test_wifi(userid, password) > 1:\n print 'Settings succeeded'\n list_t = [userid, password]\n text_save(list_t, 'gateway.setup', mode='w')\n if login_gateway_test_wifi(userid, password) < 1:\n print 'Failed to login,please check your input!'\n print ' '\n set_wifi()\n return 0", "title": "" }, { "docid": "7a9c7b06fcbcfa35774a63258ca0aa88", "score": "0.59682983", "text": "def setBw(self, ap, wlan): \n iface = str(ap.virtualWlan) + str(wlan)\n \n if ap.equipmentModel == None:\n bw = wifiParameters.set_bw(ap.mode)\n else: \n r = deviceDataRate(ap, None, wlan)\n bw = r.rate\n \n os.system(\"tc qdisc replace dev %s \\\n root handle 2: netem rate %.2fmbit \\\n latency 1ms \\\n delay 0.1ms\" % (iface, bw)) \n #Reordering packets \n os.system('tc qdisc add dev %s parent 2:1 pfifo limit 10000' % iface)\n #os.system(\"tc qdisc add dev %s root tbf rate %smbit latency 2ms burst 15k\" % \\\n #(ap.virtualWlan, bandwidth))", "title": "" }, { "docid": "bc66f5b7d53b5f1ae1c05ce73dccbfc8", "score": "0.5831395", "text": "def turnWifiOn():\n\tsystem('sudo ifup wlan0')\n\tutils.log(log_path, 'Turn on wifi for send data.')\n\tsleep(5)", "title": "" }, { "docid": "44b3fc26f1407cf5346cef63f7212292", "score": "0.582104", "text": "def configureWifi(title=b\"\",config=None,accessPoint=False,disabled=False,active=0,request=None,response=None):\n\tif accessPoint:\n\t\tauthmodes = []\n\n\t\tfor key, value in wifi.AUTHMODE.items():\n\t\t\tif 
value==config.authmode:\n\t\t\t\tauthmodes.append(Option(text=value, selected=b\"selected\", value=value))\n\t\t\telse:\n\t\t\t\tauthmodes.append(Option(text=value, value=value))\n\t\tauthentication = Select( authmodes,text=b\"Authentication mode\",name=b\"authmode\", disabled=disabled)\n\t\tssid = Edit(text=b\"SSID\", placeholder=b\"Enter SSID\", name=b\"ssid\", value=config.ssid, disabled=disabled)\n\telse:\n\t\tauthentication = None\n\t\tssids = []\n\t\tif disabled == False:\n\t\t\tif wifi.Station.isActive():\n\t\t\t\tnetworks = wifi.Station.scan()\n\t\t\t\tfor network in networks:\n\t\t\t\t\tssids.append(Option(value=network[0]))\n\t\tssid = ComboBox(ssids, text=b\"SSID\", placeholder=b\"Enter SSID\", name=b\"ssid\", value=config.ssid, disabled=disabled)\n\tpatternIp = b\"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\"\n\tif disabled:\n\t\tsubmit = Submit(text=b\"Modify\")\n\t\tvalue = b'modify'\n\telse:\n\t\tsubmit = Submit(text=b\"Save\")\n\t\tvalue = b'save'\n\tpage = mainPage(\n\t\tcontent=[Br(),Container([\\\n\t\t\t\t\tCard([\\\n\t\t\t\t\t\tForm([\\\n\t\t\t\t\t\t\tBr(),\n\t\t\t\t\t\t\tSwitch(text=b\"Activated\", name=b\"activated\", checked=config.activated, disabled=disabled),\n\t\t\t\t\t\t\tssid,\n\t\t\t\t\t\t\tEdit(text=b\"Password\", name=b\"wifipassword\",type=b\"password\", placeholder=b\"Enter password\", value=config.wifipassword, disabled=disabled),\n\t\t\t\t\t\t\tauthentication,\n\t\t\t\t\t\t\tEdit(text=b\"Ip address\", name=b\"ipaddress\", pattern=patternIp, placeholder=b\"Enter ip address or leave blank\", value=config.ipaddress, disabled=disabled),\n\t\t\t\t\t\t\tEdit(text=b\"Netmask\", name=b\"netmask\", pattern=patternIp, placeholder=b\"Enter netmask or leave blank\", value=config.netmask, disabled=disabled),\n\t\t\t\t\t\t\tEdit(text=b\"Gateway\", name=b\"gateway\", pattern=patternIp, placeholder=b\"Enter gateway or leave blank\", value=config.gateway, disabled=disabled),\n\t\t\t\t\t\t\tEdit(text=b\"DNS\", name=b\"dns\", pattern=patternIp, placeholder=b\"Enter DNS or leave blank\", value=config.dns, disabled=disabled),\n\t\t\t\t\t\t\tInput(text=b\"modify\", name=b\"modify\", type=b\"hidden\", value=value),\n\t\t\t\t\t\t\tsubmit,\n\t\t\t\t\t\t])\n\t\t\t\t\t])\n\t\t\t\t])\n\t\t\t], title=title, active=active, request=request, response=response)\n\treturn page", "title": "" }, { "docid": "b88324be59f25028d1f1315cb1681b67", "score": "0.5716199", "text": "def Set_WLAN_Standard(self, sStd):\n #WLAN-->Frame Blocks-->TxMode\n sStd.upper()\n if sStd == 'B':\n self.write(f':SOUR:BB:WLNN:FBL1:PMOD LEG') #Set Physical Mode\n self.write(f':SOUR:BB:WLNN:FBL1:TMOD CCK')\n elif sStd == 'G' or sStd == 'A':\n self.write(f':SOUR:BB:WLNN:FBL1:PMOD LEG') #Set Physical Mode\n self.write(f':SOUR:BB:WLNN:FBL1:TMOD L{self.WLAN_ChBW}')\n elif sStd == 'N':\n self.write(f':SOUR:BB:WLNN:FBL1:PMOD MIX') #Set Physical Mode\n self.write(f':SOUR:BB:WLNN:FBL1:TMOD HT{self.WLAN_ChBW}')\n elif sStd == 'AC':\n self.write(f':SOUR:BB:WLNN:FBL1:PMOD MIX') #Set Physical Mode\n self.write(f':SOUR:BB:WLNN:FBL1:TMOD V{self.WLAN_ChBW}')\n elif sStd == 'AX':\n self.write(f':SOUR:BB:WLNN:FBL1:PMOD MIX') #Set Physical Mode\n self.write(f':SOUR:BB:WLNN:FBL1:TMOD HE{self.WLAN_ChBW}')\n else:\n print(f'Set_WLAN_Standard: {sStd} not supported')", "title": "" }, { "docid": "31689ea1fd6104f52d64941e1e5c658a", "score": "0.5702221", "text": "def wifi_network(network_info, net_index, short_net_name, 
ns3_mode, nodes, \n get_node_from_ns3node, node_member, terminal_members): \n max_distance = get_max_distance_in_network(nodes, node_member, terminal_members)\n \n logging.info(\"Network '%s': AP-node = '%s', STA-nodes = %s\" % \n (short_net_name, node_member, terminal_members))\n logging.info(\"Network '%s': ns-3 mode: %s, max_distance: %d meters\" %\n (short_net_name, ns3_mode, max_distance))\n \n # Wifi channel\n channel = ns3.YansWifiChannelHelper.Default()\n phy = ns3.YansWifiPhyHelper.Default() \n channel = ns3.YansWifiChannelHelper.Default()\n channel.SetPropagationDelay(\"ns3::ConstantSpeedPropagationDelayModel\")\n channel.AddPropagationLoss(\"ns3::FixedRssLossModel\", \"Rss\", ns3.DoubleValue(0))\n phy.SetChannel(channel.Create())\n\n address_helper = ns3.Ipv4AddressHelper()\n netaddr = \"10.1.%d.0\" % net_index\n address_helper.SetBase(ns3.Ipv4Address(netaddr), ns3.Ipv4Mask(\"255.255.255.0\")) \n \n def configure_node(wifi_helper, name):\n ns3_node = nodes[name].ns3_node\n sta_device = wifi_helper.Install(phy, mac, ns3_node)\n node = get_node_from_ns3node(ns3_node)\n add_device_to_node(node, short_net_name, network_info, sta_device.Get(0), \n helper=wifi_helper, phy_helper=phy)\n set_wifi_timeouts(sta_device.Get(0), max_distance)\n sta_interface = address_helper.Assign(sta_device)\n address = sta_interface.GetAddress(0)\n add_interface_to_device_node(node, short_net_name, network_info, address)\n \n # STA devices & and interfaces \n wifi_helper = ns3.WifiHelper.Default()\n wifi_helper.SetRemoteStationManager (\"ns3::ConstantRateWifiManager\",\n \"DataMode\", ns3.StringValue(ns3_mode),\n \"RtsCtsThreshold\", ns3.StringValue(\"2200\"))\n \n mac = ns3.NqosWifiMacHelper.Default() \n ssid = ns3.Ssid(\"%s%d\" % (short_net_name[:5], net_index))\n mac.SetType(\"ns3::QstaWifiMac\", \n \"Ssid\", ns3.SsidValue(ssid),\n \"ActiveProbing\", ns3.BooleanValue(False))\n \n for terminal_member in terminal_members:\n configure_node(wifi_helper, terminal_member)\n \n # AP devices & interfaces\n wifi_helper = ns3.WifiHelper.Default()\n mac = ns3.NqosWifiMacHelper.Default()\n mac.SetType (\"ns3::QapWifiMac\", \n \"Ssid\", ns3.SsidValue(ssid),\n \"BeaconGeneration\", ns3.BooleanValue(True),\n \"BeaconInterval\", ns3.TimeValue(ns3.Seconds(2.5))) \n configure_node(wifi_helper, node_member)", "title": "" }, { "docid": "dd4be183d436ba1d867b12e6d0e883cf", "score": "0.56114984", "text": "def configure_network_params():\n PropertyFile('/etc/sysctl.conf', ' = ').override({\n 'net.ipv4.conf.all.accept_source_route': '0',\n 'net.ipv4.conf.default.accept_source_route': '0',\n 'net.ipv4.conf.all.accept_redirects': '0',\n 'net.ipv4.conf.default.accept_redirects': '0',\n 'net.ipv4.conf.all.secure_redirects': '0',\n 'net.ipv4.conf.default.secure_redirects': '0',\n 'net.ipv4.conf.all.log_martians': '1',\n 'net.ipv4.conf.default.log_martians': '1',\n 'net.ipv4.icmp_echo_ignore_broadcasts': '1',\n 'net.ipv4.icmp_ignore_bogus_error_responses': '1',\n 'net.ipv4.conf.all.rp_filter': '1',\n 'net.ipv4.conf.default.rp_filter': '1',\n 'net.ipv4.tcp_syncookies': '1'\n }).write()", "title": "" }, { "docid": "6d6fce9b3c972bd7107e9cb670e70013", "score": "0.5598844", "text": "def set_parameters(self, params, mode=\"wbx\"):\n with torch.no_grad():\n k = 0\n if 'w' in mode:\n for i, w in enumerate(self.weights):\n w.copy_(params[k][i])\n k += 1\n\n if 'b' in mode:\n for i, b in enumerate(self.biases):\n b.copy_(params[k][i])\n k += 1\n\n if 'x' in mode and len(self.extra_params) > 0:\n for i, ep in enumerate(self.extra_params):\n if 
ep.requires_grad:\n ep.copy_(params[k][i])\n k += 1", "title": "" }, { "docid": "e6520c64ec1a3bcd577427e5d57b01ea", "score": "0.55846745", "text": "def set_ssid_settings(network_id, wireless_name, wireless_password):\n # MISSION TODO\n response = requests.put(\n # TODO: Add the API endpoint path to set SSID settings\n # (don't forget to add the network ID)\n meraki_dashboard_api_base_url + \"MISSION\",\n headers={\n \"X-Cisco-Meraki-API-Key\": env_user.MERAKI_API_KEY,\n \"Content-Type\": \"application/json\"\n },\n json={\n \"number\": 0,\n \"name\": wireless_name,\n \"enabled\": True,\n \"splashPage\": \"Click-through splash page\",\n \"ssidAdminAccessible\": False,\n \"authMode\": \"psk\",\n \"psk\": wireless_password,\n \"encryptionMode\": \"wpa\",\n \"wpaEncryptionMode\": \"WPA2 only\",\n \"ipAssignmentMode\": \"Bridge mode\",\n \"useVlanTagging\": False,\n \"walledGardenEnabled\": True,\n \"walledGardenRanges\": \"*.ngrok.io\",\n \"minBitrate\": 11,\n \"bandSelection\": \"5 GHz band only\",\n \"perClientBandwidthLimitUp\": 0,\n \"perClientBandwidthLimitDown\": 0\n },\n )\n # END MISSION SECTION\n response.raise_for_status()", "title": "" }, { "docid": "5283908deb2734189dc5da0576371e00", "score": "0.55800754", "text": "def start(self, ap, country_code=None, auth_algs=None, wpa=None, iface=None,\n wpa_key_mgmt=None, rsn_pairwise=None, wpa_passphrase=None, encrypt=None, \n wep_key0=None, **params):\n self.exists = True\n iface = str(ap.virtualWlan) + str(iface)\n self.cmd = (\"echo \\'\")\n \"\"\"General Configurations\"\"\" \n self.cmd = self.cmd + (\"interface=%s\" % iface) # the interface used by the AP\n \"\"\"Not using at the moment\"\"\"\n self.cmd = self.cmd + (\"\\ndriver=nl80211\")\n self.cmd = self.cmd + (\"\\nssid=%s\" % ap.ssid) # the name of the AP\n \n if ap.mode == 'n' or ap.mode == 'ac'or ap.mode == 'a':\n self.cmd = self.cmd + (\"\\nhw_mode=g\") \n else:\n self.cmd = self.cmd + (\"\\nhw_mode=%s\" % ap.mode) \n self.cmd = self.cmd + (\"\\nchannel=%s\" % ap.channel) # the channel to use \n #if(ap.mode==\"ac\" or ap.mode=='a'):\n # self.cmd = self.cmd + (\"\\nieee80211ac=1\")\n self.cmd = self.cmd + (\"\\nwme_enabled=1\") \n self.cmd = self.cmd + (\"\\nwmm_enabled=1\") \n #if(ap.mode==\"n\" or ap.mode==\"a\"):\n # self.cmd = self.cmd + (\"\\nieee80211n=1\")\n #if(ap.mode==\"n\"):\n # self.cmd = self.cmd + (\"\\nht_capab=[HT40+][SHORT-GI-40][DSSS_CCK-40]\")\n \n if encrypt == 'wpa':\n module.wpa_supplicantIsRunning = True\n self.cmd = self.cmd + (\"\\nauth_algs=%s\" % auth_algs)\n self.cmd = self.cmd + (\"\\nwpa=%s\" % wpa)\n self.cmd = self.cmd + (\"\\nwpa_key_mgmt=%s\" % wpa_key_mgmt ) \n self.cmd = self.cmd + (\"\\nwpa_passphrase=%s\" % wpa_passphrase) \n elif encrypt == 'wpa2':\n module.wpa_supplicantIsRunning = True\n self.cmd = self.cmd + (\"\\nauth_algs=%s\" % auth_algs)\n self.cmd = self.cmd + (\"\\nwpa=%s\" % wpa)\n self.cmd = self.cmd + (\"\\nwpa_key_mgmt=%s\" % wpa_key_mgmt ) \n self.cmd = self.cmd + (\"\\nrsn_pairwise=%s\" % rsn_pairwise) \n self.cmd = self.cmd + (\"\\nwpa_passphrase=%s\" % wpa_passphrase) \n elif encrypt == 'wep':\n self.cmd = self.cmd + (\"\\nauth_algs=%s\" % auth_algs)\n self.cmd = self.cmd + (\"\\nwep_default_key=%s\" % 0) \n self.cmd = self.cmd + (\"\\nwep_key0=%s\" % wep_key0) \n \n #Not used yet!\n if(country_code!=None):\n self.cmd = self.cmd + (\"\\ncountry_code=%s\" % country_code) # the country code\n \n #elif(len(self.baseStationName)>self.countAP and len(self.baseStationName) != 1):\n # \"\"\"From AP2\"\"\"\n # self.cmd = 
self.apcommand\n #self.cmd = self.cmd + \"\\n\"\n # self.cmd = self.cmd + (\"\\nbss=%s\" % self.newapif[self.nextIface]) # the interface used by the AP\n # if(self.ssid!=None):\n # self.cmd = self.cmd + (\"\\nssid=%s\" % self.ssid ) # the name of the AP\n #self.cmd = self.cmd + (\"\\nssid=%s\" % self.ssid) # the name of the AP\n # if(self.auth_algs!=None):\n # self.cmd = self.cmd + (\"\\nauth_algs=%s\" % self.auth_algs) # 1=wpa, 2=wep, 3=both\n # if(self.wpa!=None):\n # self.cmd = self.cmd + (\"\\nwpa=%s\" % self.wpa) # WPA2 only\n # if(self.wpa_key_mgmt!=None):\n # self.cmd = self.cmd + (\"\\nwpa_key_mgmt=%s\" % self.wpa_key_mgmt ) \n # if(self.rsn_pairwise!=None):\n # self.cmd = self.cmd + (\"\\nrsn_pairwise=%s\" % self.rsn_pairwise) \n # if(self.wpa_passphrase!=None):\n # self.cmd = self.cmd + (\"\\nwpa_passphrase=%s\" % self.wpa_passphrase) \n # self.countAP = len(self.baseStationName)\n # self.apcommand = \"\" \n return self.cmd", "title": "" }, { "docid": "9711d925824a46b7982848142b91d897", "score": "0.55783075", "text": "def setParameters(self, **kwargs):\n\t\tGlowwormSwarmOptimization.setParameters(self, **kwargs)", "title": "" }, { "docid": "f55e9527dea9bf3ad85db4fd2e16afe5", "score": "0.5543956", "text": "def setparams(self):\n \n # kinematic\n self.l1 = 3 \n self.lc1 = 2\n \n # dynamic\n self.m1 = 10\n self.I1 = 10\n self.gravity = 9.81\n self.d1 = 50", "title": "" }, { "docid": "9933bfb35edda6b01d82abc302bc4e1e", "score": "0.554261", "text": "def turnWifiOff():\n\tsystem('sudo ifdown wlan0')\n\tutils.log(log_path,'Turn off wifi after send data.')", "title": "" }, { "docid": "a9567f134d1f7318b63d31b802e75204", "score": "0.55382806", "text": "def set_params(self, params):\n # Reset to default value\n self.action_new()\n\n # Set parameters from config file back to interface components\n self.comboBox_basic_optimizer.setCurrentText(params['optimizer'])\n self.comboBox_basic_loss.setCurrentText(params['loss'])\n self.comboBox_basic_activation.setCurrentText(params['activation'])\n self.spinBox_basic_epochs.setValue(params['epochs'])\n self.spinBox_basic_batch.setValue(params['batch'])\n\n # Layer(s) parameters\n self.spinBox_compute_num.setValue(len(params['layers']))\n for idx, config in enumerate(params['layers']):\n self.layers[idx].activation = config['activation']\n self.layers[idx].dropout = config['dropout']", "title": "" }, { "docid": "2cb964a45e9132a88c4735488b2326f3", "score": "0.54809767", "text": "def set_parameters(self, kern1, bias1, kern2, bias2, kern3, bias3, fc_weight, fc_bias):\r\n \r\n self.l1.weight= nn.Parameter(kern1)\r\n self.l1.bias=nn.Parameter(bias1)\r\n #self.l2.weight = nn.Parameter(kern3)\r\n #self.l2.bias= nn.parameter(bias3)\r\n self.l3.weight= nn.Parameter(kern2)\r\n self.l3.bias = nn.Parameter(bias2)\r\n self.l4.weight = nn.Parameter(fc_weight)\r\n self.l4.bias = nn.Parameter(fc_bias)\r\n pass", "title": "" }, { "docid": "9b938324081a0e5d6d8491a57a2747bf", "score": "0.5447471", "text": "def default_params():\n return dict(\n # Models:\n model='wae_gan',\n # Model parameters.\n latent_dim=20,\n dense_nodes=75,\n aa_embedding_dim=21,\n v_gene_embedding_dim=30,\n j_gene_embedding_dim=13,\n beta=0.75,\n # Input data parameters.\n max_cdr3_len=30,\n n_aas=len(conversion.AA_LIST),\n n_v_genes=len(conversion.TCRB_V_GENE_LIST),\n n_j_genes=len(conversion.TCRB_J_GENE_LIST),\n # Training parameters.\n stopping_monitor='val_loss',\n batch_size=100,\n pretrains=10,\n warmup_period=20,\n epochs=200,\n patience=20)", "title": "" }, { "docid": 
"0b7629a38ffecc9f0a6cd9f48d56c50d", "score": "0.5447357", "text": "def set_parameters(self, params):\n self.kp = params.gains.kp\n self.ki = params.gains.ki\n self.kd = params.gains.kd", "title": "" }, { "docid": "9c0ea01e00f0370a86d49990341d7a3a", "score": "0.54403496", "text": "def setNetSetting(self, k, v):\n cfg.get(\"networks\", cfg.net.Name)[k] = v", "title": "" }, { "docid": "ed0ef8a4241d7628efb92c9721a034b5", "score": "0.54355776", "text": "def open_wifi(self, mode):\n self.open_application(\"com.android.settings/.Settings\")\n self.scroll_to_find(text='WLAN')\n self.click_text(\"WLAN\")\n if mode == 'on':\n if self.d(resourceIdMatches='.*switch_widget').checked:\n return True\n self.click_ui(resourceIdMatches='.*switch_widget')\n return self.d(resourceIdMatches='.*switch_widget', checked=True).wait.exists(timeout=3000)\n elif mode == 'off':\n if not self.d(resourceIdMatches='.*switch_widget').checked:\n return True\n self.click_ui(resourceIdMatches='.*switch_widget')\n if self.wait_for_ui_exists(200, textMatches='WLAN disconnected.*'):\n self.click_ui(resourceId=\"android:id/button1\")\n return self.d(resourceIdMatches='.*switch_widget', checked=False).wait.exists(timeout=3000)\n else:\n print \"please confirm you set parameter on or off\"\n return False", "title": "" }, { "docid": "21bfca36a3889628c701c9a34f064365", "score": "0.5413956", "text": "def intialize(self,w,learning_rate,stopping_crieteria):\n self.w=theta \n self.learning_rate=learning_rate\n self.stopping_crieteria=stopping_crieteria", "title": "" }, { "docid": "2cf278cdef3646d762b719ef7ec99f89", "score": "0.5410018", "text": "def _set_params(self, knob):\r\n \r\n instance_id = self.db_info['instance_id']\r\n\r\n data = dict()\r\n data[\"instanceid\"] = instance_id\r\n data[\"operator\"] = \"cdbtune\"\r\n para_list = []\r\n for kv in knob.items():\r\n para_list.append({\"name\": str(kv[0]), \"value\": str(kv[1])})\r\n data[\"para_list\"] = para_list\r\n data = json.dumps(data)\r\n data = \"data=\" + data\r\n \r\n response = parse_json(CONST.URL_SET_PARAM % self.host, data)\r\n \r\n err = response['errno']\r\n if err != 0:\r\n raise Exception(\"SET UP FAILED: {}\".format(err))\r\n\r\n # if restarting isn't needed, workid should be ''\r\n workid = response.get('workid', '')\r\n\r\n return workid", "title": "" }, { "docid": "c107b8b7c081e61419ed20af6ad59f63", "score": "0.5409867", "text": "def set_parameters(self, params):\n # Get all definition from the init method\n cell = self.cell\n outlet = self.outlet\n soil = cell.layers[0]\n gw = cell.layers[1]\n \n # EVT1 must be adjusted to cell size\n ETV1 = params[\"ETV1\"]\n ETV1 = (ETV1 / 1000) * cell.area\n \n # V0 must be adjusted to cell size as well\n V0_soil = params[\"V0_soil\"]\n V0_soil = (V0_soil / 1000) * cell.area\n \n # Adjustment of the ET\n cell.set_uptakestress(cmf.VolumeStress(\n ETV1,\n ETV1 * params[\"fETV0\"]))\n\n # Flux from soil to outlet\n cmf.kinematic_wave(soil,\n outlet,\n params[\"tr_soil_out\"] / V0_soil,\n V0=V0_soil,\n exponent=params[\"beta_soil_out\"])\n\n # Flux from soil to groundwater\n cmf.kinematic_wave(soil, gw,\n params[\"tr_soil_gw\"] / V0_soil,\n V0=V0_soil,\n exponent=params[\"beta_soil_gw\"])\n\n # Flux from the groundwater to the outlet (baseflow)\n cmf.kinematic_wave(gw, outlet, params[\"tr_gw_out\"])\n\n # Split the rainfall in interception and throughfall\n cmf.Rainfall(cell.canopy, cell, False, True)\n cmf.Rainfall(cell.surfacewater, cell, True, False)\n\n # Make an overflow for the interception storage\n 
cmf.RutterInterception(cell.canopy, cell.surfacewater, cell)\n\n # Transpiration from the plants is added\n cmf.CanopyStorageEvaporation(cell.canopy, cell.evaporation, cell)\n\n # Sets the paramaters for interception\n cell.vegetation.LAI = params[\"LAI\"]\n\n # Defines how much throughfall there is (in %)\n cell.vegetation.CanopyClosure = params[\"CanopyClosure\"]\n\n # # Set parameters of the snow calculations\n cmf.Weather.set_snow_threshold(params[\"snow_melt_temp\"])\n cmf.SimpleTindexSnowMelt(cell.snow, soil, cell,\n rate=params[\"meltrate\"])", "title": "" }, { "docid": "3a42545d1d9c4a1b7fe79c5ea2d9fa0f", "score": "0.5403006", "text": "def SetNetworkParams(opts, args):\n # TODO: add \"network\": opts.network,\n all_changes = {\n \"gateway\": opts.gateway,\n \"add_reserved_ips\": _HandleReservedIPs(opts.add_reserved_ips),\n \"remove_reserved_ips\": _HandleReservedIPs(opts.remove_reserved_ips),\n \"mac_prefix\": opts.mac_prefix,\n \"gateway6\": opts.gateway6,\n \"network6\": opts.network6,\n }\n\n if list(all_changes.values()).count(None) == len(all_changes):\n ToStderr(\"Please give at least one of the parameters.\")\n return 1\n\n op = opcodes.OpNetworkSetParams(network_name=args[0], **all_changes)\n\n # TODO: add feedback to user, e.g. list the modifications\n SubmitOrSend(op, opts)", "title": "" }, { "docid": "602a1869de96ea8b8161c77cb89aacfd", "score": "0.53961974", "text": "def le_set_wifi_password_broadcast(self, adv_data, enc_params):\n\n # Change filter/mode TODO\n # LE Set Advertising Data ->\n AD_TOT_LEN = 0x1f\n AD_LENGHT_FLAG = 0x02\n AD_TYPE_FLAG = 0x01\n AD_DATA_FLAG = 12\n adv_header_flags = struct.pack(\">BBBB\", AD_TOT_LEN, AD_LENGHT_FLAG, AD_TYPE_FLAG, AD_DATA_FLAG)\n AD_DATA_LEN = 27\n cmd_pkt = struct.pack(\">BB\", AD_DATA_LEN, ADV_TYPE_MANUFACTURER_SPECIFIC_DATA)\n cmd_pkt += struct.pack(\"<H\", COMPANY_ID)\n cmd_pkt += struct.pack(\">H\", BEACON_TYPE_CODE)\n # Custom values begins here (after BEAC identifier)\n cmd_data_payload = adv_data[\"wifipassword\"]\n wifipassword = cmd_data_payload\n cmd_data_payload_enc = self.encrypt_payload(cmd_data_payload, enc_params[\"aes_key\"], enc_params[\"aes_iv\"])\n cmd_pkt += cmd_data_payload_enc\n cmd_pkt += struct.pack(\">H\", adv_data.get(\"user_id\")) # Note: if is an ack, this 2 bytes are user_id, otherwise 0xFFFF (all user)\n cmd_pkt += struct.pack(\">H\", adv_data[\"obj_id\"])\n cmd_pkt += struct.pack(\">bB\", ADV_RSSI_VALUE, 0x00) # Last byte is manufacturer reserved\n cmd_pkt = adv_header_flags + cmd_pkt\n print(\"***** Wifipassword PACKEt *****\")\n print(cmd_pkt.hex()) # TODELETE\n if bluez.hci_send_cmd(self.hci_sock, OGF_LE_CTL, OCF_LE_SET_ADVERTISING_DATA, cmd_pkt) == 0x00:\n return wifipassword\n #TOCHECK!!! 
#TODO", "title": "" }, { "docid": "c3af153f5b2f6593fb365da4d731de3d", "score": "0.5381821", "text": "def set_model_params(self, theta):\n trainable_params = self.unpack_theta(theta)\n for trainable_param, layer in zip(trainable_params, self.model.layers):\n layer.set_weights(trainable_param)", "title": "" }, { "docid": "f8bb09e91a163ef3c2a528878d9b0fb6", "score": "0.53653306", "text": "def update_fw_params(self, rtr_id=-1, fw_type=-1):\n if rtr_id != -1:\n self.router_id = rtr_id\n if fw_type != -1:\n self.fw_type = fw_type", "title": "" }, { "docid": "3793bdbd5b06c6fbaff3928296c0c1d8", "score": "0.53583735", "text": "def assign_iface(self, nodes):\n log_filename = '/tmp/mininetwifi-wwan_hwsim.log'\n self.logging_to_file(log_filename)\n debug(\"\\n*** Configuring interfaces with appropriated network\"\n \"-namespaces...\\n\")\n wwan_list = self.get_virtual_wwan()\n for node in nodes:\n for wlan in range(0, len(node.params['wwan'])):\n sh('ip link set {} netns {}'.format(wwan_list[0], node.pid))\n node.cmd('ip link set {} down'.format(wwan_list[0]))\n node.cmd('ip link set {} name {}'.format(wwan_list[0], node.params['wwan'][wlan]))\n wwan_list.pop(0)", "title": "" }, { "docid": "27fd26c6ed9e5773e856754344b7870e", "score": "0.5355203", "text": "def GetDefaultParameters():\r\n global data_path\r\n params = {\"Phase\": \"TuningPhase\", # \"TuningPhase\" # \"ModelTrainPhase\" #\"Assignment\"\r\n \"Fold\": {\"1\": np.arange(21, 31), \"2\": np.arange(41, 51)}, \"3\": [],\r\n \"getData\": {\"img_size\": (64, 64), \"report_file_path\": \"Results\"},\r\n \"Split\": 0.5,\r\n \"Tuning\": {\"fold\": 5,\r\n \"parameters\": [{'kernel': ['linear'],\r\n 'C': [0.01, 0.1, 1, 10, 100]},\r\n {'kernel': ['poly'],\r\n 'C': [0.01, 0.1, 1, 10, 100],\r\n 'degree': [2, 3, 4, 5, 6]}, {'kernel': ['rbf'],\r\n 'C': np.logspace(-2, 10, 13),\r\n 'gamma': np.logspace(-9, 3, 13)}]},\r\n \"Train\": {\"C\": 0.1, \"degree\": 3, \"kernel\": 'poly'},\r\n \"Prepare\": {\r\n \"Hog\": {\"orientations\": 10, \"pixels_per_cell\": (8, 8), \"cells_per_block\": (4, 4),\r\n \"feature_vector\": False, \"multichannel\": False}},\r\n \"Summary\": {},\r\n \"Report\": {}\r\n }\r\n\r\n return params", "title": "" }, { "docid": "f8c93a25b730fa498232d474903c23ca", "score": "0.5347874", "text": "def setup_wpa_conf():\n wpa_conf_default = \"\"\"country=US\\nctrl_interface=DIR=/var/run/wpa_supplicant\\nupdate_config=1\"\"\"\n \n with open(WPA_CONF_PATH, \"w\") as f:\n f.write(wpa_conf_default)", "title": "" }, { "docid": "d6c2648000ce2acafb3e45cde152aec7", "score": "0.53454775", "text": "def setlinkparams(self) -> None:\n with self.iface_lock:\n for iface in self.iface_to_pos:\n options = LinkOptions(\n bandwidth=self.bw,\n delay=self.delay,\n loss=self.loss,\n jitter=self.jitter,\n )\n iface.options.update(options)\n iface.set_config()", "title": "" }, { "docid": "c36fa615f6c08b39aea56ad468733e8d", "score": "0.53385574", "text": "def __init__(self,\n net: nx.NxNet,\n ip3_sensitivity,\n sic_amplitude,\n sic_window,\n srVThMant,\n srCurrentDecay,\n srVoltageDecay,\n srActivityImpulse,\n srActivityTimeConstant,\n srMinActivity,\n srMaxActivity,\n srHomeostasisGain,\n srEnableHomeostasis,\n ip3VThMant,\n ip3CurrentDecay,\n ip3VoltageDecay,\n sicCurrentDecay,\n sicVoltageDecay,\n sgVThMant,\n sgCurrentDecay,\n sgVoltageDecay,\n sr2ip3Weight,\n ip32sicWeight,\n DEBUG=False):\n # Loihi net\n self.net = net\n\n # Astrocyte Core Properties\n # ---------------------------------------------------\n # Spike Receiver Properties\n self.srVThMant = srVThMant\n 
self.srCurrentDecay = srCurrentDecay\n self.srVoltageDecay = srVoltageDecay\n self.srActivityImpulse = srActivityImpulse\n self.srActivityTimeConstant = srActivityTimeConstant\n self.srMinActivity = srMinActivity\n self.srMaxActivity = srMaxActivity\n self.srHomeostasisGain = srHomeostasisGain\n self.srEnableHomeostasis = srEnableHomeostasis\n # IP3 unit Properties\n self.ip3VThMant = ip3VThMant\n self.ip3CurrentDecay = ip3CurrentDecay\n self.ip3VoltageDecay = ip3VoltageDecay\n # SIC Properties\n self.sicCurrentDecay = sicCurrentDecay\n self.sicVoltageDecay = sicVoltageDecay\n # Spike Generator Properties\n self.sgVThMant = sgVThMant\n self.sgCurrentDecay = sgCurrentDecay\n self.sgVoltageDecay = sgVoltageDecay\n # Spike Receiver to IP3 unit connection weight\n self.sr2ip3Weight = sr2ip3Weight\n self.ip32sicWeight = ip32sicWeight\n # ---------------------------------------------------\n\n # Smart Setup Properties\n # ---------------------------------------------------\n if sic_window is not None and sic_amplitude is not None:\n if DEBUG:\n print(\"DEBUG: Configuring based on provided window size and maximum firing rate\")\n self._validate_sic_window(sic_window)\n self._validate_sic_firing_rate(sic_amplitude)\n self.ip32sicWeight, self.sicCurrentDecay = AstrocytePrototypeBase._calculate_sic_props(sic_amplitude,\n sic_window)\n self.sicCurrentDecay = int(self.sicCurrentDecay * 2 ** 12)\n self._sicWindow = sic_window\n self._sicAmplitude = sic_amplitude\n\n if ip3_sensitivity is not None:\n if DEBUG:\n print(\"DEBUG: Configuring based on provided IP3 Sensitivity level\")\n self.ip3Sensitivity = ip3_sensitivity", "title": "" }, { "docid": "b1880305d2293d99ca227e1375cddbd7", "score": "0.53253996", "text": "def update_network_params(self, tau=None):\r\n if tau is None:\r\n tau = self.tau\r\n # Update target actor weights\r\n weights = []\r\n target_weights = self.actor_target.weights\r\n for i, weight in enumerate(self.actor.weights):\r\n weights.append(weight * tau + target_weights[i]*(1-tau))\r\n self.actor_target.set_weights(weights)\r\n\r\n # Update target critic weights\r\n weights = []\r\n target_weights = self.critic_target.weights\r\n for i, weight in enumerate(self.critic.weights):\r\n weights.append(weight * tau + target_weights[i]*(1-tau))\r\n self.critic_target.set_weights(weights)", "title": "" }, { "docid": "d5f7d8617035ec4e8adebc755429f71f", "score": "0.53237915", "text": "def _customize_tune(self) -> None:\n ...", "title": "" }, { "docid": "bd6f2c025b8c86fd0e773b4dc337702a", "score": "0.5313044", "text": "def set_freq_watt(self, params=None):\n agent = dnp3_agent.AgentClient(self.ipaddr, self.ipport)\n agent.connect(self.ipaddr, self.ipport)\n freq_watt_pts = {}\n point_name = []\n pt_value = []\n\n for key, value in list(params.items()):\n point_name.append(key)\n pt_value.append(value)\n\n for x in range(0, len(point_name)):\n if point_name[x] == 'fw_dbof':\n dbof_val = pt_value[x]\n dbof_pt = {'ao': {'62': dbof_val, '63': dbof_val}}\n\n for x in range(0, len(point_name)):\n if point_name[x] == 'fw_dbuf':\n dbuf_val = pt_value[x]\n dbuf_pt = {'ao': {'66': dbuf_val, '67': dbuf_val}}\n\n for x in range(0, len(point_name)):\n if point_name[x] == 'fw_kof':\n kof_val = pt_value[x]\n kof_pt = {'ao': {'64': kof_val, '65': kof_val}}\n\n for x in range(0, len(point_name)):\n if point_name[x] == 'fw_kuf':\n kuf_val = pt_value[x]\n kuf_pt = {'ao': {'68': kuf_val, '69': kuf_val}}\n\n for x in range(0, len(point_name)):\n if point_name[x] == 'fw_open_loop_time':\n time_val = pt_value[x]\n 
time_pt = {'ao': {'72': time_val, '73': time_val}}\n\n for x in range(0, len(point_name)):\n if point_name[x] == 'fw_enable':\n enable_val = pt_value[x]\n enable_pt = {'bo': {'26': enable_val}}\n\n dbof_w = agent.write_outstation(self.oid, self.rid, dbof_pt)\n dbuf_w = agent.write_outstation(self.oid, self.rid, dbuf_pt)\n kof_w = agent.write_outstation(self.oid, self.rid, kof_pt)\n kuf_w = agent.write_outstation(self.oid, self.rid, kuf_pt)\n time_w = agent.write_outstation(self.oid, self.rid, time_pt)\n enable_w = agent.write_outstation(self.oid, self.rid, enable_pt)\n\n res1 = eval(dbof_w[1:-1])\n res2 = eval(dbuf_w[1:-1])\n res3 = eval(kof_w[1:-1])\n res4 = eval(kuf_w[1:-1])\n res5 = eval(time_w[1:-1])\n res6 = eval(enable_w[1:-1])\n\n res = {'params': {'points': {'ao': {}, 'bo': {}}}}\n res['params']['points']['ao']['62'] = res1['params']['points']['ao']['62']\n res['params']['points']['ao']['62'] = res1['params']['points']['ao']['63']\n res['params']['points']['ao']['66'] = res2['params']['points']['ao']['66']\n res['params']['points']['ao']['67'] = res2['params']['points']['ao']['67']\n res['params']['points']['ao']['64'] = res3['params']['points']['ao']['64']\n res['params']['points']['ao']['65'] = res3['params']['points']['ao']['65']\n res['params']['points']['ao']['68'] = res4['params']['points']['ao']['68']\n res['params']['points']['ao']['69'] = res4['params']['points']['ao']['69']\n res['params']['points']['ao']['72'] = res5['params']['points']['ao']['72']\n res['params']['points']['ao']['73'] = res5['params']['points']['ao']['73']\n res['params']['points']['bo']['26'] = res6['params']['points']['bo']['26']\n if 'params' in list(res.keys()):\n resp = res['params']['points']\n if 'ao' in list(resp.keys()):\n if '62' in resp['ao']:\n freq_watt_pts['fw_dbof'] = resp['ao']['62']\n else:\n freq_watt_pts['fw_dbof'] = {'status': 'Not Written'}\n if '66' in resp['ao']:\n freq_watt_pts['fw_dbuf'] = resp['ao']['66']\n else:\n freq_watt_pts['fw_dbuf'] = {'status': 'Not Written'}\n if '64' in resp['ao']:\n freq_watt_pts['fw_kof'] = resp['ao']['64']\n else:\n freq_watt_pts['fw_kof'] = {'status': 'Not Written'}\n if '68' in resp['ao']:\n freq_watt_pts['fw_kuf'] = resp['ao']['68']\n else:\n freq_watt_pts['fw_kuf'] = {'status': 'Not Written'}\n if '72' in resp['ao']:\n freq_watt_pts['fw_open_loop_time'] = resp['ao']['72']\n else:\n freq_watt_pts['fw_open_loop_time'] = {'status': 'Not Written'}\n\n res['params']['points'] = freq_watt_pts\n\n return res", "title": "" }, { "docid": "0f62b9311d9115fcd4bb0b04b6cfa58f", "score": "0.53098506", "text": "def set_tune_params(self, params, n_params, mode, keys):\n return super().set_tune_params(params, n_params, mode, keys)", "title": "" }, { "docid": "7c0292081815d798bbfbbd944e3c59b3", "score": "0.5305922", "text": "def init_params(self):\n \n self.MODEL = 'linear'\n\n self.DATA_CLASS = 'jena'\n\n self.N_HISTORY_DATA = 18\n\n self.N_PREDICT_DATA = 6\n\n self.LABEL_COLUMNS = [0, 1]\n\n self.MAX_EPOCHS = 10\n\n self.PATIENCE = 3\n\n self.EXPORT_MODE = 'csv'\n\n self.wandb = False\n\n # Linear config\n self.L1_REGULARIZE = 0.01\n\n # MLP config\n self.LAYER_1_UNITS = 8\n \n self.LAYER_2_UNITS = 8\n\n # LSTM config\n self.LSTM_UNITS = '8'", "title": "" }, { "docid": "f6a83e371d395f2c252b91e44fadc53e", "score": "0.52993625", "text": "def update_config(self, config: dict[str, str]) -> None:\n self.range = get_config_int(self.range, config, \"range\")\n if self.range is None:\n self.range = 0\n logger.debug(\"wlan %s set range to %s\", self.wlan.name, 
self.range)\n self.bw = get_config_int(self.bw, config, \"bandwidth\")\n self.delay = get_config_int(self.delay, config, \"delay\")\n self.loss = get_config_float(self.loss, config, \"error\")\n self.jitter = get_config_int(self.jitter, config, \"jitter\")\n promiscuous = config.get(\"promiscuous\", \"0\") == \"1\"\n if self.promiscuous and not promiscuous:\n self.wlan.net_client.set_mac_learning(self.wlan.brname, LEARNING_ENABLED)\n elif not self.promiscuous and promiscuous:\n self.wlan.net_client.set_mac_learning(self.wlan.brname, LEARNING_DISABLED)\n self.promiscuous = promiscuous\n self.setlinkparams()", "title": "" }, { "docid": "336cde7f1be0bf5bda9e1c5f34115fb9", "score": "0.5259305", "text": "def _set_Param(self, param):\n for (key, value) in param.items():\n if key == 'task':\n _check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),\n c_str(key), c_str(value)))\n elif key == 'metric':\n _check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),\n c_str(key), c_str(value)))\n elif key == 'opt':\n _check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),\n c_str(key), c_str(value)))\n elif key == 'log':\n _check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),\n c_str(key), c_str(value)))\n elif key == 'lr':\n _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),\n c_str(key), ctypes.c_float(value)))\n elif key == 'k':\n _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),\n c_str(key), ctypes.c_uint(value)))\n elif key == 'lambda':\n _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),\n c_str(key), ctypes.c_float(value)))\n elif key == 'init':\n _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),\n c_str(key), ctypes.c_float(value)))\n elif key == 'epoch':\n _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),\n c_str(key), ctypes.c_uint(value)))\n elif key == 'fold':\n _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),\n c_str(key), ctypes.c_uint(value)))\n elif key == 'alpha':\n _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),\n c_str(key), ctypes.c_float(value)))\n elif key == 'beta':\n _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),\n c_str(key), ctypes.c_float(value)))\n elif key == 'lambda_1':\n _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),\n c_str(key), ctypes.c_float(value)))\n elif key == 'lambda_2':\n _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),\n c_str(key), ctypes.c_float(value)))\n elif key == 'nthread':\n _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),\n c_str(key), ctypes.c_uint(value)))\n elif key == 'block_size':\n _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),\n c_str(key), ctypes.c_uint(value)))\n elif key == 'stop_window':\n _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),\n c_str(key), ctypes.c_uint(value)))\n elif key == 'seed':\n _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),\n c_str(key), ctypes.c_uint(value)))\n else:\n raise Exception(\"Invalid key!\", key)", "title": "" }, { "docid": "8b34590f850346f37c48c05b45ccdd1c", "score": "0.5258595", "text": "def test_wpas_ctrl_global(dev):\n wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')\n wpas.interface_add(\"wlan5\")\n\n if \"PONG\" not in wpas.global_request(\"PING\"):\n raise Exception(\"PING failed\")\n if \"wlan5\" not in wpas.global_request(\"INTERFACES\"):\n raise Exception(\"Interface not found\")\n if \"UNKNOWN COMMAND\" not in wpas.global_request(\"FOO\"):\n raise Exception(\"Unexpected response to unknown command\")\n if \"PONG\" not in wpas.global_request(\"IFNAME=wlan5 PING\"):\n 
raise Exception(\"Per-interface PING failed\")\n if \"FAIL-NO-IFNAME-MATCH\" not in wpas.global_request(\"IFNAME=notfound PING\"):\n raise Exception(\"Unknown interface not reported correctly\")\n if \"FAIL\" not in wpas.global_request(\"SAVE_CONFIG\"):\n raise Exception(\"SAVE_CONFIG succeeded unexpectedly\")\n if \"OK\" not in wpas.global_request(\"SET wifi_display 0\"):\n raise Exception(\"SET failed\")\n if \"wifi_display=0\" not in wpas.global_request(\"STATUS\"):\n raise Exception(\"wifi_display not disabled\")\n if \"OK\" not in wpas.global_request(\"SET wifi_display 1\"):\n raise Exception(\"SET failed\")\n if \"wifi_display=1\" not in wpas.global_request(\"STATUS\"):\n raise Exception(\"wifi_display not enabled\")\n if \"FAIL\" not in wpas.global_request(\"SET foo 1\"):\n raise Exception(\"SET succeeded unexpectedly\")\n\n if \"p2p_state=IDLE\" not in wpas.global_request(\"STATUS\"):\n raise Exception(\"P2P was disabled\")\n wpas.global_request(\"P2P_SET disabled 1\")\n if \"p2p_state=DISABLED\" not in wpas.global_request(\"STATUS\"):\n raise Exception(\"P2P was not disabled\")\n wpas.global_request(\"P2P_SET disabled 0\")\n if \"p2p_state=IDLE\" not in wpas.global_request(\"STATUS\"):\n raise Exception(\"P2P was not enabled\")\n\n # driver_nl80211.c does not support interface list, so do not fail because\n # of that\n logger.debug(wpas.global_request(\"INTERFACE_LIST\"))\n\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD \"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\tconf\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\tconf\tdriver\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\tconf\tdriver\tctrliface\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\tconf\tdriver\tctrliface\tdriverparam\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\tconf\tdriver\tctrliface\tdriverparam\tbridge\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\tconf\tdriver\tctrliface\tdriverparam\tbridge\tfoo\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\t\t\t\t\t\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")\n if \"FAIL\" not in wpas.global_request(\"INTERFACE_ADD FOO\tconf\tdriver\tctrliface\tdriverparam\tbridge\tcreate\tabcd\"):\n raise Exception(\"INTERFACE_ADD succeeded unexpectedly\")", "title": "" }, { "docid": "57c17a8c36a781bc3cc960cceff35818", "score": "0.5256316", "text": "def set_tunable_parameters(self, **point):\n raise NotImplementedError", "title": "" }, { "docid": "fe574cea34991a2a955e5495fcea669e", "score": "0.5253798", "text": "def update_parameters(self, **kwargs):\n for k, v in kwargs.items():\n if k == \"feval\":\n self.feval = get_feval(v)\n elif k == \"device\":\n self.to(v)\n elif hasattr(self, k):\n setattr(self, k, v)\n else:\n raise KeyError(\"Cannot set parameter\", k, \"for trainer\", self.__class__)", "title": "" }, { "docid": "801bf991ac7a70a12652b8bcd39e2831", "score": "0.52461934", "text": "def 
Set_WLAN_Modulation(self, sMod):\n #WLAN-->Frame Blocks-->PPDU Conf..--> MCS Config\n self.write(f':SOUR:BB:WLNN:FBL1:MOD1 {sMod}')", "title": "" }, { "docid": "2a2e95b7bb3411c177c59ab1a1f81992", "score": "0.52450347", "text": "def _generate_wireless_settings_data(settings):\n settings.ensure_valid()\n\n data = {'ssid1': settings.ssid, 'channel': settings.channel, 'Save': 'Save'}\n\n if settings.is_enabled:\n data['ap'] = 1\n\n if settings.is_broadcasting_ssid:\n data['broadcast'] = 2\n\n # preserve some of the params we don't handle\n for k in ('region', 'mode', 'chanWidth', 'rate'):\n data[k] = settings.get_internal_param(k)\n\n # WDS (WLAN bridging) related settings\n # we'll clear them all by default\n merge_with = {'brlssid': '', 'brlbssid': '', 'keytype': 1, 'wepindex'\n 'authtype': 1, 'keytext': ''}\n data = dict(data, **merge_with)\n\n return data", "title": "" }, { "docid": "5e894082467baa558b3b5f2533a9112e", "score": "0.524189", "text": "def reset_parameters(self):\n torch.nn.init.kaiming_normal_(self.ip_layer.weight.data, a=self.leak, mode='fan_in')\n for hl in self.hls:\n torch.nn.init.kaiming_normal_(hl.weight.data, a=self.leak, mode='fan_in')\n torch.nn.init.uniform_(self.op_layer.weight.data, -3e-3, 3e-3)", "title": "" }, { "docid": "89b91768af3fff791c02011e0dcf2cb3", "score": "0.5194334", "text": "def init_params(self):\n # PARAMETERS: initialize with default utilsvalues\n # (values are overwritten if user specifies them)\n # Attacker configuration\n most_used_ip_address = self.statistics.get_most_used_ip_address()\n self.add_param_value(atkParam.Parameter.IP_SOURCE, most_used_ip_address)\n self.add_param_value(atkParam.Parameter.MAC_SOURCE, self.statistics.get_mac_address(most_used_ip_address))\n\n # Victim configuration\n random_ip_address = self.statistics.get_random_ip_address()\n self.add_param_value(atkParam.Parameter.IP_DESTINATION, random_ip_address)\n destination_mac = self.statistics.get_mac_address(random_ip_address)\n if isinstance(destination_mac, list) and len(destination_mac) == 0:\n destination_mac = self.generate_random_mac_address()\n self.add_param_value(atkParam.Parameter.MAC_DESTINATION, destination_mac)\n self.add_param_value(atkParam.Parameter.PORT_DESTINATION, self.http_port)\n # self.add_param_value(atkParam.Parameter.TARGET_URI, '/')\n self.add_param_value(atkParam.Parameter.TARGET_HOST, \"www.hackme.com\")\n\n # Attack configuration\n self.add_param_value(atkParam.Parameter.INJECT_AFTER_PACKET, rnd.randint(0, self.statistics.get_packet_count()))\n self.add_param_value(atkParam.Parameter.PACKETS_PER_SECOND,\n (self.statistics.get_pps_sent(most_used_ip_address) +\n self.statistics.get_pps_received(most_used_ip_address)) / 2)", "title": "" }, { "docid": "3b964d1f623486e27bc4a1b9b8600a29", "score": "0.5189885", "text": "def set_param():\n par.BEAM_CURRENT_DENSITY = 0.001\n par.BEAM_CURRENT = 1e-9\n par.FWHM = 100\n par.BEAM_CENTER = 0\n par.SCAN_WIDTH = 1000\n par.ERF_BEAM_WIDTH = 1000", "title": "" }, { "docid": "0fd27bbb61af4e6c1867ace45fd0b129", "score": "0.5187049", "text": "def _update_satellite_params(self):\n for key, value in self.param_dict.items():\n if key in self.central_occupation_model.param_dict:\n self.central_occupation_model.param_dict[key] = value\n\n log_halo_mass_threshold_h1p0 = self.central_occupation_model.mean_log_halo_mass(\n log_stellar_mass_h1p0=self.threshold\n )\n log_halo_mass_threshold_h0p72 = log_halo_mass_threshold_h1p0 - L11_LGH\n knee_threshold_h0p72 = 10.0**log_halo_mass_threshold_h0p72\n\n # 1e12 is the numerical 
value used in Leauthaud+11 and so assumes h=0.72\n knee_mass_h0p72 = 1.0e12\n\n self._msat = (\n knee_mass_h0p72\n * self.param_dict[\"bsat\"]\n * (knee_threshold_h0p72 / knee_mass_h0p72) ** self.param_dict[\"betasat\"]\n )\n\n self._mcut = (\n knee_mass_h0p72\n * self.param_dict[\"bcut\"]\n * (knee_threshold_h0p72 / knee_mass_h0p72) ** self.param_dict[\"betacut\"]\n )", "title": "" }, { "docid": "71f2a658c9424c163924891efa5d73b1", "score": "0.5186868", "text": "def reset_parameters(self):\n for nl in self.nls:\n torch.nn.init.kaiming_normal_(nl.weight.data, a=self.leak, mode='fan_in')\n torch.nn.init.uniform_(self.op_layer.weight.data, -3e-3, 3e-3)", "title": "" }, { "docid": "cbf0cb467cf5d6df3205236e60e950e7", "score": "0.517626", "text": "def __init__(self, **stn_dict):\n self.ws_name = \"HP1000\"\n\n loginf('HP1000 Starting')\n\n self.internal_test_mode = False\n self.startup_count = 5\n\n try:\n self.ip_address_mask = stn_dict['ip_address_mask']\n loginf(\"Using user-defined broadcast mask - %s\" % self.ip_address_mask)\n except KeyError as e:\n try:\n import netifaces\n gateway_interface = netifaces.gateways()['default'][netifaces.AF_INET][1]\n self.ip_address_mask = netifaces.ifaddresses(\n gateway_interface)[netifaces.AF_INET][0]['broadcast']\n loginf('Using \"netifaces\" to determine broadcast mask')\n except ImportError:\n self.ip_address_mask = None\n\n if self.ip_address_mask is None:\n raise Exception(\n \"Required parameter 'ip_address_mask' has not been specified or could not be determined\")\n\n # Save the configuration parameters\n self.retry_count = int(stn_dict.get('retry_count', 5))\n self.socket_timeout = float(stn_dict.get('socket_timeout', 5))\n self.loop_delay = float(stn_dict.get('loop_delay', None))\n self.retry_wait = int(stn_dict.get('retry_wait', 5))\n self.max_retry = int(stn_dict.get('max_retry', 3))\n\n self.last_rain_value = None\n self.last_rain_time = None\n\n loginf('Address Mask = %s' % self.ip_address_mask)\n loginf('Retry count = %f' % self.retry_count)\n loginf('Socket timeout = %f' % self.socket_timeout)\n if self.loop_delay is None:\n loginf('No loop delay')\n else:\n loginf('Loop delay = %f' % self.loop_delay)\n loginf('Retry Wait = %f' % self.retry_wait)\n loginf('Max Retry = %f' % self.max_retry)\n\n # Show that we are not connected to a weather station\n self.ws_socket = None", "title": "" }, { "docid": "03a5b1394358fd36b66fffd19d61b530", "score": "0.5169252", "text": "def set_params(self, \r\n memory_turns=3,\r\n n_output=2,\r\n learning = 0.1,\r\n discount = 0.1,\r\n units_per_hidden = 128,\r\n epsilon=0.2,\r\n target_sync_freq = 10):\r\n self.learning_rate = learning\r\n self.discount_rate = discount\r\n self.units_per_hidden = units_per_hidden\r\n self.epsilon = epsilon\r\n self.target_sync_freq = target_sync_freq\r\n self.sync_counter = 0 #hard coded to 0\r\n \r\n self.memory_length = memory_turns\r\n self.n_input_features = 4*memory_turns\r\n self.n_outputs = n_output", "title": "" }, { "docid": "a1f6b33239fa4c782354d7d0d1b5f486", "score": "0.51625353", "text": "def hci_set_advertising_parameters(self):\n\n advertising_interval_min = 0x00A0 # Minimum advertising interval for undirected and low duty cycle directed advertising. 
\n advertising_interval_max = 0x00A8 # Maximum advertising interval, Range: 0x0020 to 0x4000|Default: N = 0x0800 (1.28 s)|Time = N * 0.625 ms|Time Range: 20 ms to 10.24 s\n advertising_type = ADV_NONCONN_IND # Advertising Type([un]Connactable/[un]directed/...)\n own_address_type, peer_address_type = 0x00, 0x00 # 0x00 public, 0x01 random\n channels_map = 0x07\n filter_policy = 0x00\n cmd_pkt = struct.pack(\"<HHBBB\", advertising_interval_min, advertising_interval_max, advertising_type, own_address_type, peer_address_type)\n cmd_pkt += struct.pack(\"<6B\", 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) # Peer_addr =00000\n cmd_pkt += struct.pack(\"<BB\", channels_map, filter_policy) # All channels\n res = bluez.hci_send_cmd(self.hci_sock, OGF_LE_CTL, OCF_LE_SET_ADVERTISING_PARAMETERS, cmd_pkt)\n return res\n # Response?return status: 0x00LE_Set_Scan_Parameters command succeeded.\n # Note: If the advertising interval range provided by the Host (Advertising_Interval_Min, Advertising_Interval_Max) is outside the advertising interval range supported by the Controller, then the Controller shall return the Unsupported Feature or Parameter Value (0x11) error code.", "title": "" }, { "docid": "0bc16f969c760c849c6c725a51cb2641", "score": "0.5158611", "text": "def test_wpas_ctrl_set_wps_params(dev):\n try:\n _test_wpas_ctrl_set_wps_params(dev)\n finally:\n dev[2].request(\"SET config_methods \")", "title": "" }, { "docid": "221e59f162afaad1e07b589b3b067065", "score": "0.5155933", "text": "def __init__(self):\n # Initial groundwater storage (mm)\n self.gwstorage = 200 \n \n # Baseflow coefficient (per day)\n self.bfcoeff = 0.04\n\n # Deep seepage coefficient (per day) \n self.dscoeff = 0 \n \n # Watershed groundwater baseflow threshold area (ha)\n self.bfthreshold = 1", "title": "" }, { "docid": "0689b11daa5857c0eeedf620cdb31fcc", "score": "0.5152512", "text": "def update_parameters(context, hyperparams):\n for i in range(1, len(context)):\n context[i]['W'] = context[i]['W'] - hyperparams[HYPERPARAM_LEARNING_RATE] * context[i]['dW']\n context[i]['b'] = context[i]['b'] - hyperparams[HYPERPARAM_LEARNING_RATE] * context[i]['db']", "title": "" }, { "docid": "36946c76f3fdb3903c5410618444f0bf", "score": "0.5143536", "text": "def init_wifi(apname, password, timeout=3000):\n wifi = network.WLAN(network.STA_IF)\n wifi.active(True)\n wifi.connect(apname, password)\n if timeout > 0:\n time.sleep_ms(1000)\n now = time.ticks_ms()\n while True:\n if wifi.ifconfig()[0] != '0.0.0.0':\n print(\"Connected, IP: {}\".format(wifi.ifconfig()[0]))\n break\n if time.ticks_ms() - now > timeout:\n break\n return wifi", "title": "" }, { "docid": "3ace2d6a92c55f90c3de16c9f03ecb22", "score": "0.5142895", "text": "def set_bw(self, bw):\n self.set_modem_config_1(bw=bw)", "title": "" }, { "docid": "969b3911f64b2670345f37ac32b9036a", "score": "0.514153", "text": "def wifi():\n\n #check if the wifi docker is already running\n if w4sp.c('wifi'):\n #if it check if the cleartext hostapd is running\n if psef('hostapd_clear'):\n return 'wifi already running', 404\n\n #if hostapd isn't running lets start it\n else:\n w4sp.c('wifi').dexec('hostapd /hostapd_clear.conf')\n return 'ok1'\n\n #count of interfaces discovered and var for nic name\n count = 0\n phy = False\n\n #our regex to find phy%d\n match = re.compile('phy\\d')\n\n #get iw output\n iwo = subprocess.check_output(['iw', 'list'])\n\n for line in iwo.split():\n #find they phy interface number\n if match.search(line):\n count += 1\n phy = line.strip()\n\n\n #check that we got one and only one 
phy\n if count >= 2:\n return 'got more than one phy interface, remove one wireless device', 500\n\n if not phy:\n return 'didn''t find a valid phy, please check wifi device connection', 500\n\n #we get here we should have a valid phy name\n #we are going to spin up the wireless container\n NSROOT.register_ns('wifi', 'w4sp/labs:wireless')\n #connect wifi container to sw2\n w4sp.c('wifi').connect(w4sp.c('sw2'))\n\n #no we need to move our wifi nic into the container\n cmd = 'iw phy %s set netns %s' % (phy, w4sp.c('wifi').pid)\n\n try:\n subprocess.call(cmd.split(' '))\n #ugh, delaying so setup_wifi.py can catch the new interface :/\n time.sleep(0.01)\n w4sp.c('wifi').dexec('hostapd /hostapd_clear.conf')\n return 'ok'\n\n except:\n return 'error moving wireless device to container', 500", "title": "" }, { "docid": "64c268e7c411d56720e537d31a15f9f2", "score": "0.51378304", "text": "def getKernelSetupParams():\n defaultParams = \"ks=file:/user.ks.cfg hpsa_monitoring_agent=1 ip=dhcp noipv6\"\n localServer = ThisLocalServer()\n mac = localServer.getMACAddress().lower()\n\n defaultParams += \" ksdevice=%s\" % mac\n return defaultParams", "title": "" }, { "docid": "12da8cb640dce1498c83134105b362b8", "score": "0.51320475", "text": "def default_params(cls):\n return dict(\n # Models:\n model='basic',\n # Model parameters.\n latent_dim=20,\n dense_nodes=75,\n aa_embedding_dim=21,\n v_gene_embedding_dim=30,\n j_gene_embedding_dim=13,\n beta=0.75,\n # Input data parameters.\n max_cdr3_len=30,\n n_aas=len(conversion.AA_LIST),\n n_v_genes=len(conversion.TCRB_V_GENE_LIST),\n n_j_genes=len(conversion.TCRB_J_GENE_LIST),\n # Training parameters.\n stopping_monitor='val_loss',\n batch_size=100,\n pretrains=10,\n warmup_period=20,\n epochs=500,\n patience=20)", "title": "" }, { "docid": "4c3829b9e3a8b084f06921848ba41d6f", "score": "0.5130808", "text": "def set_device_parameters(request):\n\n def fin():\n request.cls.device.close()\n\n request.addfinalizer(fin)\n\n request.cls.driver = iosxr_netconf.IOSXRNETCONFDriver\n request.cls.patched_driver = PatchedIOSXRNETCONFDriver\n request.cls.vendor = \"iosxr_netconf\"\n parent_conftest.set_device_parameters(request)", "title": "" }, { "docid": "d639ef96d4297d62f49810999854b2e0", "score": "0.51242477", "text": "def update_params(self):\n pass\n #self.workdirpath = os.path.dirname(self.netfilepath)\n #bn = os.path.basename(self.netfilepath).split('.')\n # if len(bn)>0:\n # self.rootname = bn[0]", "title": "" }, { "docid": "e8f2472f03415254833620a18b24caef", "score": "0.5112453", "text": "def set_parameters(self, params):\n self.mu, self.theta, self.C, self.c, self.gamma, self.eta = params\n # set endo and viral\n self.endo = self.get_endo()\n self.viral = self.mu * self.endo", "title": "" }, { "docid": "772546b6de9a9cfc5685e6ecf0b8ad5d", "score": "0.5110538", "text": "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))", "title": "" }, { "docid": "772546b6de9a9cfc5685e6ecf0b8ad5d", "score": "0.5110538", "text": "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))", "title": "" }, { "docid": "93442fe63c66edbfb5a729cc324225ae", "score": "0.5101625", "text": "def setParameters(self, bw_min=1, bw_max=2, **kwargs):\n\t\tself.bw_min, self.bw_max = bw_min, bw_max\n\t\tHarmonySearch.setParameters(self, **kwargs)", "title": "" }, { "docid": "f56b4e09e2874f324de977b9701a5f3f", "score": "0.50956035", "text": "def 
configure_host_network_params():\n PropertyFile('/etc/sysctl.conf', ' = ').override({\n 'net.ipv4.ip_forward': '0',\n 'net.ipv4.conf.all.send_redirects': '0',\n 'net.ipv4.conf.default.send_redirects': '0',\n }).write()", "title": "" }, { "docid": "31606db9ee16a01f903e4bbcc08e8600", "score": "0.5091704", "text": "def update_parameters(self):\n #model_type = self.model.__class__.__name__.replace('Model', '')\n\n params = self.suggestion.assignments\n # if model_type == 'CNN':\n # self.model.num_filt_1 = int(params['num_filt_1'])\n # self.model.kernel_size = int(params['kernel_size'])\n # self.model.num_fc_1 = int(params['num_fc_1'])\n # elif model_type == 'RNN':\n # self.model.n_hidden = int(params['n_hidden'])\n # self.model.num_fc_1 = int(params['num_fc_1'])\n # self.model.n_layers = int(params['n_layers'])\n\n #self.model.dropout_rate = params['dropout_rate']\n self.model.learning_rate = params['learning_rate']\n self.model.beta1 = params['beta1']\n self.model.beta2 = params['beta2']\n self.model.epsilon = params['epsilon']", "title": "" }, { "docid": "ff57bfdedd1245a585388a7886aaf7ef", "score": "0.5087145", "text": "def ParamsVanillaNet101(cls) -> InstantiableParams:\n return cls.Params().Set(blocks=[3, 4, 23, 3])", "title": "" }, { "docid": "692c9d14f0c77996ac76be9e599bee8e", "score": "0.50862765", "text": "def __init__(self, wlan_adapter, bluetooth_adapter):\n self._wlan_adapter = wlan_adapter\n self._bluetooth_adapter = bluetooth_adapter\n self._gatt_service = None\n self._current_ssid = None\n self._is_joining_wifi = False", "title": "" }, { "docid": "e384f435683fa84a099d08142483ea59", "score": "0.5079305", "text": "def set_up_params(self):", "title": "" }, { "docid": "9359988de5e9f7ba620684aa9c0ecec5", "score": "0.5077118", "text": "def _set_parameters(self):\n\n #############################################\n # Set algorithm parameters for OpenCV models.\n #############################################\n\n if self.classifier_info['classifier'] in ['CART', 'cvrf', 'CVEX_RF']:\n\n self.model.setMaxDepth(self.classifier_info['max_depth'])\n self.model.setMinSampleCount(self.classifier_info['min_samps'])\n self.model.setCalculateVarImportance(self.classifier_info['calc_var_importance'])\n self.model.setActiveVarCount(self.classifier_info['rand_vars'])\n self.model.setTermCriteria(self.classifier_info['term_crit'])\n\n if self.classifier_info['priors'].min() < 1:\n self.model.setPriors(self.classifier_info['priors'])\n \n self.model.setTruncatePrunedTree(self.classifier_info['truncate'])\n\n elif self.classifier_info['classifier'] == 'cvmlp':\n\n n_steps = 1000\n max_err = .0001\n step_size = .3\n momentum = .2\n\n # cv2.TERM_CRITERIA_EPS\n self.parameters = dict(term_crit=(cv2.TERM_CRITERIA_COUNT, n_steps, max_err),\n train_method=cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,\n bp_dw_scale=step_size,\n bp_moment_scale=momentum)\n\n elif self.classifier_info['classifier'] == 'cvsvm':\n\n self.model.setC(self.classifier_info_svm['C'])\n self.model.setGamma(self.classifier_info_svm['gamma'])\n self.model.setKernel(cv2.ml.SVM_RBF)\n self.model.setType(cv2.ml.SVM_C_SVC)\n\n # self.parameters = dict(kernel_type=cv2.ml.SVM_RBF,\n # svm_type=cv2.ml.SVM_C_SVC,\n # C=self.classifier_info_svm['C'],\n # gamma=self.classifier_info_svm['gamma'])\n\n elif self.classifier_info['classifier'] == 'cvsvma':\n\n # SVM, parameters optimized\n self.parameters = dict(kernel_type=cv2.ml.SVM_RBF,\n svm_type=cv2.ml.SVM_C_SVC)\n\n elif self.classifier_info['classifier'] == 'CVSVMR':\n\n # SVM regression\n self.parameters = 
dict(kernel_type=cv2.ml.SVM_RBF,\n svm_type=cv2.ml.SVM_NU_SVR,\n C=self.classifier_info_svm['C'],\n gamma=self.classifier_info_svm['gamma'],\n nu=self.classifier_info_svm['nu'],\n p=self.classifier_info_svm['p'])\n\n elif self.classifier_info['classifier'] == 'CVSVMRA':\n\n # SVM regression, parameters optimized\n self.parameters = dict(kernel_type=cv2.ml.SVM_RBF,\n svm_type=cv2.ml.SVM_NU_SVR,\n nu=self.classifier_info['nu'])\n\n else:\n\n self.parameters = None", "title": "" }, { "docid": "b39d03e09e0b549eb28fc2ee2aa5f1fa", "score": "0.50726587", "text": "def _train_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "edd1a4d8757830a5425f925f5d0fbf8c", "score": "0.50652134", "text": "def initialize_parameters():\n\n tf.set_random_seed(1)\n \n W1 = tf.get_variable(\"W1\", [25, 12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\n b1 = tf.get_variable(\"b1\", [25, 1], initializer = tf.zeros_initializer())\n W2 = tf.get_variable(\"W2\", [12, 25], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\n b2 = tf.get_variable(\"b2\", [12, 1], initializer = tf.zeros_initializer())\n W3 = tf.get_variable(\"W3\", [6, 12], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\n b3 = tf.get_variable(\"b3\", [6, 1], initializer = tf.zeros_initializer())\n parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3}\n\n return parameters", "title": "" }, { "docid": "e6977c68792c8234d848158a7f532b57", "score": "0.5063419", "text": "def hci_le_set_scan_parameters(self):\n\n # old_filter = hci_sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14) # when restore the filter?\n le_scan_type = 0x00 # Passive Scanning. No scanning PDUs shall be sent (default)\n le_scan_interval = 0x0010 # Range: 0x0004 to 0x4000 Default: 0x0010 (10 ms), Time = N * 0.625 ms\n le_scan_window = 0x0010 # Duration of the LE scan. 
LE_Scan_Window <= LE_Scan_Interval\n own_address_type = 0x01 # 0x01 - Random Device Address, 0x00 - Public Device Address (default)\n scanning_filter_policy = 0x00 # Accept all adv packets except directed adv packets not addressed to this device (default)\n cmd_pkt = struct.pack(\"<BHHBB\", le_scan_type, le_scan_interval, le_scan_window, own_address_type, scanning_filter_policy)\n res = bluez.hci_send_cmd(self.hci_sock, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS, cmd_pkt)\n return res\n # Response?return status: 0x00LE_Set_Scan_Parameters command succeeded.\n # Note: when the user needs to receive the data as fast as possible, make sure that scanning window is more than the advertising interval + 10ms to guarantee discovery.", "title": "" }, { "docid": "7bb693f9ac962809920df06594b2de9a", "score": "0.5061823", "text": "def set_net_prop(self, option, value):\n wireless.SetWirelessProperty(self.networkID, option, value)", "title": "" }, { "docid": "9eee0cd3b81f6aa309da5a912b441a93", "score": "0.5059762", "text": "def build_network_manager_connection_settings(\n ssid=None, key_mgnt='wpa-psk', passwd=None,\n settings={}):\n\n connection = settings.get('connection', {})\n wireless = settings.get('802-11-wireless', {})\n wireless_security = settings.get('802-11-wireless-security', {})\n enterprise = settings.get('802-1x', {})\n ipv4 = settings.get('ipv4', {})\n ipv6 = settings.get('ipv6', {})\n\n\n if not ssid:\n ssid = wireless['ssid']\n\n connection_defaults = {\n 'type': '802-11-wireless',\n 'uuid': str(uuid()),\n 'id': '%s-from-barcode' % ssid,\n }\n\n wireless_defaults = {\n 'ssid': dbus.ByteArray(ssid),\n #'security': '802-11-wireless'})\n #'security': '802-11-wireless-security',\n }\n\n if key_mgnt != 'none' and not passwd:\n passwd = wireless_security['psk']\n\n wireless_security_defaults = {\n 'key-mgmt': key_mgnt,\n 'psk': passwd,\n }\n\n s_8021x = { #dbus.Dictionary({\n #'eap': [],\n #'identity': 'wtf',\n #'client-cert': path_to_value(\"/some/place/client.pem\"),\n #'ca-cert': path_to_value(\"/some/place/ca-cert.pem\"),\n #'private-key': path_to_value(\"/some/place/privkey.pem\"),\n # 'private-key-password': \"12345testing\"})\n }\n\n ipv4_defaults = {'method': 'auto'}\n ipv6_defaults = {'method': 'ignore'}\n\n\n defaults = {\n 'connection': connection_defaults,\n '802-11-wireless': wireless_defaults,\n '802-11-wireless-security': wireless_security_defaults,\n #'802-1x': s_8021x,\n 'ipv4': ipv4_defaults,\n 'ipv6': ipv6_defaults,\n }\n \n\n merged_settings = dict(defaults)\n merged_settings['connection'].update(connection)\n merged_settings['802-11-wireless'].update(wireless)\n merged_settings['802-11-wireless-security'].update(wireless_security)\n if key_mgnt == \"eap\":\n merged_settings['802-1x'].update(enterprise)\n merged_settings['ipv4'].update(ipv4)\n merged_settings['ipv6'].update(ipv6)\n \n return dict_to_dbus(merged_settings)", "title": "" }, { "docid": "fa9cacfc04dea85b6e8e7fe8f0fcf31c", "score": "0.50560915", "text": "def set_bw(self, mode):\n self.bandwidth = 0\n if (mode=='a'):\n self.bandwidth = 20 # 54\n elif(mode=='b'):\n self.bandwidth = 6 #11\n elif(mode=='g'):\n self.bandwidth = 20 #54\n elif(mode=='n'):\n self.bandwidth = 48 # 600\n elif(mode=='ac'):\n self.bandwidth = 90 #6777\n \n return self.bandwidth", "title": "" }, { "docid": "d2b5b12da7bae22d4cca20b18f177e18", "score": "0.50510895", "text": "def __init__(self, train_parameters):\n self.update(train_parameters)", "title": "" }, { "docid": "6835ee2fa4ac069e3dd94fe87c582dac", "score": "0.5043178", "text": "def 
reset_parameters(self) -> None:\n super().reset_parameters()\n self.set_weights(self.weight, self.bias)", "title": "" }, { "docid": "2b2fe479c5e7ac5b7a27e3dfc5753114", "score": "0.5033523", "text": "def set_weights(params, new_params):\n for param, new_param in zip(params, new_params):\n param.data.copy_(new_param.data)", "title": "" }, { "docid": "2b2fe479c5e7ac5b7a27e3dfc5753114", "score": "0.5033523", "text": "def set_weights(params, new_params):\n for param, new_param in zip(params, new_params):\n param.data.copy_(new_param.data)", "title": "" }, { "docid": "b975239b7b639f2ab42f0d923ebb851c", "score": "0.5032254", "text": "def setBodyWaterParameters(controller: agxModel.WindAndWaterController, body: agx.RigidBody,\n lift: float = 0.01, viscous_drag: float = 0.1, pressure_drag: float = 0.6):\n agxModel.WindAndWaterParameters.setHydrodynamicCoefficient(controller, body,\n agxModel.WindAndWaterParameters.VISCOUS_DRAG,\n viscous_drag)\n agxModel.WindAndWaterParameters.setHydrodynamicCoefficient(controller, body,\n agxModel.WindAndWaterParameters.LIFT,\n lift)\n agxModel.WindAndWaterParameters.setHydrodynamicCoefficient(controller, body,\n agxModel.WindAndWaterParameters.PRESSURE_DRAG,\n pressure_drag)", "title": "" }, { "docid": "57f43a058ea63e83ee42219e074602bb", "score": "0.5030174", "text": "def setup_parameters():\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-08\n level_params['dt'] = 1.0\n level_params['nsweeps'] = [1]\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n sweeper_params['Q1'] = ['LU']\n sweeper_params['Q2'] = ['LU']\n sweeper_params['QI'] = ['LU']\n sweeper_params['initial_guess'] = 'zero'\n\n # initialize problem parameters\n problem_params = dict()\n problem_params['Du'] = 1.0\n problem_params['Dv'] = 0.01\n problem_params['A'] = 0.09\n problem_params['B'] = 0.086\n problem_params['nvars'] = [(128, 128)]\n problem_params['nlsol_tol'] = 1e-10\n problem_params['nlsol_maxiter'] = 100\n problem_params['lsol_tol'] = 1e-10\n problem_params['lsol_maxiter'] = 100\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n # space_transfer_params = dict()\n # space_transfer_params['finter'] = True\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = None # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = None # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n # description['space_transfer_class'] = mesh_to_mesh_petsc_dmda # pass spatial transfer class\n # description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n return description, controller_params", "title": "" }, { "docid": "50a2db93cc0605828e9684fd4e9de7f8", "score": "0.5027018", "text": "def set_configuration(self, robot,desiredConfig):\n pass", "title": "" }, { "docid": "03feee1bb7f02ee989e574e91a313ef0", "score": "0.50266635", "text": "def wifiAP():\n ssid = 'MicroPython-AP'\n password = '123456789'\n\n ap = network.WLAN(network.AP_IF)\n 
ap.active(True)\n ap.config(essid=ssid, password=password)\n\n while ap.active() == False:\n pass\n\n print('Connection successful')\n print(ap.ifconfig())\n return ap.isconnected()", "title": "" }, { "docid": "1542426feee4536dcb2ef874df71e747", "score": "0.5014723", "text": "def set_hotspot(value):\n if value == 'start':\n hostapd.start_ap()\n elif value == 'stop':\n hostapd.stop_ap()\n else:\n abort(404)\n return redirect(url_for('settings'))", "title": "" }, { "docid": "2adbb580e8e1417c7e3639cded15be75", "score": "0.5011694", "text": "def set_model_setting(self, debug=False, model_id=None, data_id=None):\n if model_id in ['RankMSE', 'RankNet', 'ListNet', 'ListMLE', 'RankCosine']:\n # the 1st type with model_id, where ModelParameter is sufficient\n self.model_parameter = ModelParameter(model_id=model_id)\n elif model_id in ['LambdaRank', 'ApproxNDCG', 'DirectOpt', 'MarginLambdaLoss']:\n # the 2nd type, where the information of the type of relevance label is required.\n data_meta = get_data_meta(data_id=data_id) # add meta-information\n if data_meta['multi_level_rele']:\n self.model_parameter = globals()[model_id + \"Parameter\"](debug=debug, std_rele_is_permutation=False)\n else: # the case like MSLETOR_LIST\n self.model_parameter = globals()[model_id + \"Parameter\"](debug=debug, std_rele_is_permutation=True)\n else:\n # the 3rd type, where debug-mode enables quick test\n self.model_parameter = globals()[model_id + \"Parameter\"](debug=debug)", "title": "" }, { "docid": "b3e9664b15be8f95e7d6ceac7fd7a2db", "score": "0.5011371", "text": "def parameter_initialization(self):\n \n # 'k' or the filter\n self.cnn_params['k'] = np.random.randn(self.filter_size, self.filter_size, self.filters) \\\n * np.sqrt(2 / (self.input_dim * self.input_dim))\n \n # 'W'\n self.cnn_params['W'] = np.random.rand(self.classes, (self.input_dim - self.filter_size + 1), \n (self.input_dim - self.filter_size + 1), self.filters) \\\n * np.sqrt(2 / (self.input_dim * self.input_dim))\n \n # 'b'\n self.cnn_params['b'] = np.zeros(self.classes)", "title": "" }, { "docid": "1e3b28056f57370190f829f3e3108505", "score": "0.5006413", "text": "def save_wireless_settings(self, networkid, entry, netent):\n if entry.chkbox_encryption.get_active():\n print \"setting encryption info...\"\n encryption_info = entry.encryption_info\n encrypt_methods = misc.LoadEncryptionMethods()\n entry.set_net_prop(\"enctype\",\n encrypt_methods[entry.combo_encryption.\n get_active()][1])\n for x in encryption_info:\n if encryption_info[x].get_text() == \"\":\n error(self.window, language['encrypt_info_missing'])\n return False\n entry.set_net_prop(x, noneToString(encryption_info[x].\n get_text()))\n elif not entry.chkbox_encryption.get_active() and \\\n wireless.GetWirelessProperty(networkid, \"encryption\"):\n error(self.window, language['enable_encryption'])\n return False\n else:\n print 'encryption is ' + str(wireless.GetWirelessProperty(networkid, \n \"encryption\"))\n print \"no encryption specified...\"\n entry.set_net_prop(\"enctype\", \"None\")\n entry.set_net_prop(\"automatic\",\n noneToString(netent.chkbox_autoconnect.get_active()))\n if entry.chkbox_static_ip.get_active():\n entry.set_net_prop(\"ip\", noneToString(entry.txt_ip.get_text()))\n entry.set_net_prop(\"netmask\",\n noneToString(entry.txt_netmask.get_text()))\n entry.set_net_prop(\"gateway\",\n noneToString(entry.txt_gateway.get_text()))\n else:\n entry.set_net_prop(\"ip\", '')\n entry.set_net_prop(\"netmask\", '')\n entry.set_net_prop(\"gateway\", '')\n if 
entry.chkbox_static_dns.get_active() and \\\n not entry.chkbox_global_dns.get_active():\n entry.set_net_prop('use_static_dns', True)\n entry.set_net_prop('use_global_dns', False)\n entry.set_net_prop('dns1', noneToString(entry.txt_dns_1.get_text()))\n entry.set_net_prop('dns2', noneToString(entry.txt_dns_2.get_text()))\n entry.set_net_prop('dns3', noneToString(entry.txt_dns_3.get_text()))\n elif entry.chkbox_static_dns.get_active() and \\\n entry.chkbox_global_dns.get_active():\n entry.set_net_prop('use_static_dns', True)\n entry.set_net_prop('use_global_dns', True)\n else:\n entry.set_net_prop('use_static_dns', False) \n entry.set_net_prop('use_global_dns', False)\n entry.set_net_prop('dns1', '')\n entry.set_net_prop('dns2', '')\n entry.set_net_prop('dns3', '')\n if entry.chkbox_global_settings.get_active():\n entry.set_net_prop('use_settings_globally', True)\n else:\n entry.set_net_prop('use_settings_globally', False)\n config.RemoveGlobalEssidEntry(networkid)\n config.SaveWirelessNetworkProfile(networkid)\n return True", "title": "" }, { "docid": "ccc0459bc0bec289cbf394c613263075", "score": "0.5001026", "text": "def __init__(__self__, *,\n infrastructure_vpn_configuration: 'outputs.VpnConfigurationPropertiesResponse',\n workload_vpn_configuration: 'outputs.VpnConfigurationPropertiesResponse'):\n pulumi.set(__self__, \"infrastructure_vpn_configuration\", infrastructure_vpn_configuration)\n pulumi.set(__self__, \"workload_vpn_configuration\", workload_vpn_configuration)", "title": "" }, { "docid": "ea7e6a00db9b1eb5dd464921c0edefb8", "score": "0.500073", "text": "def set_weights(self, W):\n return", "title": "" }, { "docid": "ad93451209cb912a631bf7bce7f80601", "score": "0.49935898", "text": "def update(self, params):\n for k,v in params.items():\n if 'weights' in k:\n self.weights = v\n else:\n self.bias = v", "title": "" }, { "docid": "ad93451209cb912a631bf7bce7f80601", "score": "0.49935898", "text": "def update(self, params):\n for k,v in params.items():\n if 'weights' in k:\n self.weights = v\n else:\n self.bias = v", "title": "" }, { "docid": "f5d78ad85b676502e32ca5127e876846", "score": "0.49882716", "text": "def setDefaultParameters(self, parameterNode):\n if not parameterNode.GetParameter(\"URL\"):\n parameterNode.SetParameter(\"URL\", \"http://localhost:5000\")\n if not parameterNode.GetParameter(\"Local\"):\n parameterNode.SetParameter(\"Local\", \"true\")\n if not parameterNode.GetParameter(\"HeartModelPath\"):\n Path = RepoRoot + '/Models/Segmentation/Models_Saved/Heart_Localization'\n if os.path.exists(Path):\n parameterNode.SetParameter(\"HeartModelPath\", Path)\n if not parameterNode.GetParameter(\"CalModelPath\"):\n Path = RepoRoot + '/Models/Segmentation/Models_Saved/CAC'\n if os.path.exists(Path):\n parameterNode.SetParameter(\"CalModelPath\", Path)\n if not parameterNode.GetParameter(\"UseProcesses\"):\n parameterNode.SetParameter(\"UseProcesses\", \"true\")\n if not parameterNode.GetParameter(\"DeepCal\"):\n parameterNode.SetParameter(\"DeepCal\", \"true\")", "title": "" }, { "docid": "ed13467216f00172824edc1a9bc882c9", "score": "0.49874383", "text": "def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ls = getattr(params, 'ls', 3.7)\n self.params.alpha = getattr(params, 'alpha', 1.85)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.kernel = getattr(params, 'kernel', kern_exp_quad)", "title": "" } ]
8580523f96004df044b4a84e4ff477c8
Should be called at the beginning of each training epoch to prepare the compression method to continue training the model in the `next_epoch`.
[ { "docid": "36ca0df77856774e73fc2d607a13f7e5", "score": "0.0", "text": "def epoch_step(self, next_epoch: Optional[int] = None) -> None:", "title": "" } ]
[ { "docid": "db01a079c55e78fc097df6ee0d63770f", "score": "0.68628335", "text": "def __call__(self, epoch):\n\n if not self.weights:\n self.initialize_weights()\n \n self.train(epoch)", "title": "" }, { "docid": "218f3fba5d19c7cb821fed89201ba63e", "score": "0.6787783", "text": "def prepare_training(self):\n\n # set random seed\n set_seed(self.training_config.seed)\n\n # set autoencoder optimizer and scheduler\n self.set_autoencoder_optimizer()\n self.set_autoencoder_scheduler()\n\n # set discriminator optimizer and scheduler\n self.set_discriminator_optimizer()\n self.set_discriminator_scheduler()\n\n # create foder for saving\n self._set_output_dir()\n\n # set callbacks\n self._setup_callbacks()", "title": "" }, { "docid": "d7599d3e32f8d5aef315b6d186bea2ac", "score": "0.6717053", "text": "def _pre_epoch_hook(self, epoch: int):\n super(DifferentiableSearchMemoryNetwork, self)._pre_epoch_hook(epoch)\n if epoch >= self.num_epochs_delay and epoch % self.num_epochs_per_encoding == 0:\n # First we encode the corpus and (re-)build an LSH.\n self._initialize_lsh()\n\n # Then we update both self.training_dataset and self.validation_dataset with new\n # background information, taken from a nearest neighbor search over the corpus.\n logger.info(\"Updating the training data background\")\n self.training_dataset = self._update_background_dataset(self.training_dataset)\n indexed_dataset = self.training_dataset.to_indexed_dataset(self.data_indexer)\n self.training_arrays = self.create_data_arrays(indexed_dataset)\n if self.validation_dataset:\n logger.info(\"Updating the validation data background\")\n self.validation_dataset = self._update_background_dataset(self.validation_dataset)\n indexed_dataset = self.validation_dataset.to_indexed_dataset(self.data_indexer)\n self.validation_arrays = self.create_data_arrays(indexed_dataset)", "title": "" }, { "docid": "5e068dcbf08202f1d429490ae02ad3d3", "score": "0.6710621", "text": "def _do_train(self, iteration, batch):\n pass", "title": "" }, { "docid": "6b56d31dfa1e07a0dc5750f052465e03", "score": "0.670745", "text": "def on_train_begin(self):\n pass", "title": "" }, { "docid": "6b56d31dfa1e07a0dc5750f052465e03", "score": "0.670745", "text": "def on_train_begin(self):\n pass", "title": "" }, { "docid": "29e3b3af11bf997a1aa49f336a54d8a9", "score": "0.658795", "text": "def train_epoch(self) -> None:\n self.encoder.set_train()\n self.decoder.set_train()\n\n for batch_idx, (data, label) in enumerate(self.train_loader):\n # prep data\n data_noise = self.get_noise(data)\n data = data.to(self.device)\n label = label.to(self.device)\n data_noise = data_noise.to(self.device)\n\n self.optimizer.zero_grad()\n\n output = self.encoder.forward(data_noise)\n output = self.decoder.forward(output)\n\n loss = self.criterion(output, data)\n loss.backward()\n self.optimizer.step()\n\n # display\n if (batch_idx > 0) and (batch_idx % self.print_every) == 0:\n print('[TRAIN] : Epoch iteration Loss')\n print(' [%3d/%3d] [%6d/%6d] %.6f' %\\\n (self.cur_epoch+1, self.num_epochs, batch_idx, len(self.train_loader), loss.item()))\n\n if self.tb_writer is not None:\n self.tb_writer.add_scalar('loss/train', loss.item(), self.loss_iter)\n\n self.loss_history[self.loss_iter] = loss.item()\n self.loss_iter += 1\n\n # save checkpoints\n if self.save_every > 0 and (self.loss_iter % self.save_every) == 0:\n ck_name = self.checkpoint_dir + '/' + self.checkpoint_name +\\\n '_epoch_' + str(self.cur_epoch) + '_iter_' + str(self.loss_iter) + '.pkl'\n if self.verbose:\n print('\\t Saving checkpoint to 
file [%s] ' % str(ck_name))\n self.save_checkpoint(ck_name)\n\n # perform any scheduling\n if self.lr_scheduler is not None:\n self.apply_lr_schedule()\n\n if self.mtm_scheduler is not None:\n self.apply_mtm_schedule()\n\n # Render generated/denoised image\n if self.tb_writer is not None:\n X, _ = next(iter(self.train_loader))\n X = torch.mul(X + self.noise_bias, self.noise_factor + torch.randn(*X.shape))\n X = X.to(self.device)\n\n output = self.encoder.forward(X)\n output = self.decoder.forward(output)\n output = output.detach()\n outout = output.to('cpu')\n grid = torchvision.utils.make_grid(output)\n self.tb_writer.add_image('dae/denoised', grid, self.cur_epoch)", "title": "" }, { "docid": "bb6092361a62d046c60ea5bc58ae852f", "score": "0.65677756", "text": "def train_start(self):\n self.image_encoder.train()\n self.text_encoder.train()", "title": "" }, { "docid": "13ac53ff48da1bb7ec2ed1361735ecbc", "score": "0.65286666", "text": "def _generate_train_batch(self):", "title": "" }, { "docid": "827f144252ff19fcf2766fec630aae81", "score": "0.6512675", "text": "def after_train(self):\r\n pass", "title": "" }, { "docid": "cd0b673e31888f0ffe8e145f10c83b0e", "score": "0.6504085", "text": "def step_pre_training_epoch(self, *args, **kwargs):\r\n raise NotImplementedError", "title": "" }, { "docid": "443dafe822d20eab3a80098355878057", "score": "0.64858305", "text": "def after_train(self):\n pass", "title": "" }, { "docid": "443dafe822d20eab3a80098355878057", "score": "0.64858305", "text": "def after_train(self):\n pass", "title": "" }, { "docid": "443dafe822d20eab3a80098355878057", "score": "0.64858305", "text": "def after_train(self):\n pass", "title": "" }, { "docid": "45a0ae60720803e340f35a7f37d4f5fd", "score": "0.6465367", "text": "def _on_training_start(self) -> None:\n pass", "title": "" }, { "docid": "be6e70543b199756dd629416eb16761d", "score": "0.6460337", "text": "def train_start(self):\n self.img_enc.train()\n self.txt_enc.train()", "title": "" }, { "docid": "be6e70543b199756dd629416eb16761d", "score": "0.6460337", "text": "def train_start(self):\n self.img_enc.train()\n self.txt_enc.train()", "title": "" }, { "docid": "34fd5b959828c920f9a4ae0641f97d02", "score": "0.6438409", "text": "def _init_train(self):\n pass", "title": "" }, { "docid": "cf648e57c8f63aa9db16d03ae08cf6ee", "score": "0.6434189", "text": "def train_start(self):\n self.question_encoder.train()\n self.sample.train()\n self.decoder.train()\n self.hiddenz.train()", "title": "" }, { "docid": "d45a798b3c372f8b6754a48f235f147e", "score": "0.640666", "text": "def _train_epoch(self, epoch, total_epochs):\n raise NotImplementedError", "title": "" }, { "docid": "a38d72c5bfe0e16a459fbbca43d70bee", "score": "0.6386545", "text": "def _prepare_for_epoch(self, stage: str, epoch: int) -> None:\n pass", "title": "" }, { "docid": "9018b33eead34b031f4772eaf150fc42", "score": "0.6383203", "text": "def on_train_begin(self, model):\n pass", "title": "" }, { "docid": "dc7f782e72cf9029daf86557df96fec4", "score": "0.63703626", "text": "def before_train(self):\r\n pass", "title": "" }, { "docid": "dd917fe43834e3388b271c9914c06e5a", "score": "0.63645065", "text": "def before_train(self):\n pass", "title": "" }, { "docid": "dd917fe43834e3388b271c9914c06e5a", "score": "0.63645065", "text": "def before_train(self):\n pass", "title": "" }, { "docid": "dd917fe43834e3388b271c9914c06e5a", "score": "0.63645065", "text": "def before_train(self):\n pass", "title": "" }, { "docid": "2e5f5ef39a16d79ecae5a5825465b909", "score": "0.63522196", "text": "def 
on_train_begin(self, **kwargs: Any) -> None:\n pass", "title": "" }, { "docid": "e959aee49537ba480e73eefe08a8086b", "score": "0.63480496", "text": "def pretrain_generator(self, epochs):\n for epoch in range(epochs):\n self.sig.update()\n if self.sig.pre_sig:\n for i in range(cfg.k_label):\n pre_loss = self.train_gen_epoch(self.gen_list[i], self.oracle_data_list[i].loader,\n self.mle_criterion, self.gen_opt_list[i])\n\n # ===Test===\n if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:\n if i == cfg.k_label - 1:\n self.log.info('[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (\n epoch, pre_loss, self.comb_metrics(fmt_str=True)))\n if cfg.if_save and not cfg.if_test:\n self._save('MLE', epoch)\n else:\n self.log.info('>>> Stop by pre signal, skip to adversarial training...')\n break", "title": "" }, { "docid": "2476ebb24123074d3e51fe97da4d032a", "score": "0.6304441", "text": "def prepare(self, is_training, input_size):\n raise NotImplementedError", "title": "" }, { "docid": "3e7def3bbba7c081791249b62296ea17", "score": "0.6274894", "text": "def _perform_train_step(self):\n pass", "title": "" }, { "docid": "682577bc003444c3d2b4d26f7e2508e4", "score": "0.62594664", "text": "def __preprocess(self):\n print('\\nProcessing data ... ')\n\n load_model_params = Model.checkpoint_params['load_model']\n if load_model_params:\n tokenizer_path = create_dir_in_root('runtime', 'tokenizer',\n load_model_params[0], load_model_params[1], 'tokenizer.pkl')\n self.__src_tokenizer = self.__tar_tokenizer = read_cache(tokenizer_path)\n\n self.__train_src_encode, self.__train_tar_encode, _, _ = utils.pipeline(\n Model.encode_pipeline,\n self.__train_src,\n self.__train_tar, {\n **Model.data_params,\n 'tokenizer': self.__src_tokenizer,\n 'src_tokenizer': self.__src_tokenizer,\n 'tar_tokenizer': self.__tar_tokenizer,\n })\n\n else:\n self.__train_src_encode, self.__train_tar_encode, self.__src_tokenizer, self.__tar_tokenizer = utils.pipeline(\n Model.preprocess_pipeline,\n self.__train_src,\n self.__train_tar,\n Model.data_params,\n )\n\n params = {\n **Model.data_params,\n 'tokenizer': self.__src_tokenizer,\n 'src_tokenizer': self.__src_tokenizer,\n 'tar_tokenizer': self.__tar_tokenizer,\n }\n\n self.__test_src_encode, self.__test_tar_encode, _, _ = utils.pipeline(Model.encode_pipeline,\n self.__test_src, self.__test_tar, params)\n\n # get vocabulary size\n self.__src_vocab_size = self.__src_tokenizer.vocab_size\n self.__tar_vocab_size = self.__tar_tokenizer.vocab_size\n\n print('\\nFinish preprocessing ')", "title": "" }, { "docid": "0689a261f4a5db8c2875d4026aa5db73", "score": "0.6249769", "text": "def train_step(self, epoch: int):\n self.callback_handler.on_train_step_begin(\n training_config=self.training_config,\n train_loader=self.train_loader,\n epoch=epoch,\n rank=self.rank,\n )\n\n # set model in train model\n self.model.train()\n\n epoch_autoencoder_loss = 0\n epoch_discriminator_loss = 0\n epoch_loss = 0\n\n for inputs in self.train_loader:\n\n inputs = self._set_inputs_to_device(inputs)\n\n model_output = self.model(\n inputs,\n epoch=epoch,\n dataset_size=len(self.train_loader.dataset),\n uses_ddp=self.distributed,\n )\n\n self._optimizers_step(model_output)\n\n autoencoder_loss = model_output.autoencoder_loss\n discriminator_loss = model_output.discriminator_loss\n\n loss = autoencoder_loss + discriminator_loss\n\n epoch_autoencoder_loss += autoencoder_loss.item()\n epoch_discriminator_loss += discriminator_loss.item()\n epoch_loss += loss.item()\n\n self.callback_handler.on_train_step_end(\n 
training_config=self.training_config\n )\n\n # Allows model updates if needed\n if self.distributed:\n self.model.module.update()\n else:\n self.model.update()\n\n epoch_autoencoder_loss /= len(self.train_loader)\n epoch_discriminator_loss /= len(self.train_loader)\n epoch_loss /= len(self.train_loader)\n\n return epoch_loss, epoch_autoencoder_loss, epoch_discriminator_loss", "title": "" }, { "docid": "c6b1b2ce70120e5c979f86c27152739f", "score": "0.6242871", "text": "def step_pre_training_batch(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "26146ae70dd45991623fdcbfc6679747", "score": "0.62162715", "text": "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n self.model.zero_grad()\n acc_loss, acc_output, acc_target = 0.0, None, None\n for batch_idx, (data, token_ids, attn_mask, target) in enumerate(self.data_loader):\n data, token_ids, attn_mask, target = data.to(self.device), token_ids.to(self.device), attn_mask.to(self.device), target.to(self.device)\n\n\n output = self.model(data, token_ids, attn_mask)\n\n if self.config.config.get('pos_neg_ratio'):\n loss = self.criterion(output, target, self.config['pos_neg_ratio'])\n else:\n loss = self.criterion(output, target)\n\n\n\n if self.config['trainer']['accumulation_steps'] > 1:\n loss = loss / self.config['trainer']['accumulation_steps']\n loss.backward()\n\n acc_loss += loss.item()\n if isinstance(acc_output, torch.Tensor) and isinstance(acc_target, torch.Tensor):\n acc_output = torch.cat([acc_output, output], dim=0)\n acc_target = torch.cat([acc_target, target], dim=0)\n else:\n acc_output = output\n acc_target = target\n\n if (batch_idx + 1) % self.config['trainer']['accumulation_steps'] == 0:\n\n global_batch_index = int(batch_idx / self.config['trainer']['accumulation_steps'])\n\n if self.config['clip_grad']:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config['clip_grad'])\n self.optimizer.step()\n\n if self.lr_scheduler is not None:\n if self.config['lr_scheduler']['step_every_batch']:\n if type(self.lr_scheduler) is torch.optim.lr_scheduler.ReduceLROnPlateau:\n self.lr_scheduler.step(val_log['loss'])\n else:\n self.lr_scheduler.step()\n\n output = acc_output.sigmoid()\n\n self.model.zero_grad()\n\n\n self.writer.set_step((epoch - 1) * self.len_epoch + global_batch_index)\n self.train_metrics.update('loss', acc_loss)\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, acc_target))\n\n\n\n if global_batch_index % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(global_batch_index),\n acc_loss))\n #self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n acc_loss, acc_output, acc_target = 0.0, None, None\n\n if global_batch_index == self.len_epoch:\n break\n log = self.train_metrics.result()\n log_total = self.train_metrics.total()\n\n precision, recall, f1 = calculate_prec_rec_f1(log_total)\n\n if self.do_validation:\n val_log, val_log_total = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n val_precision, val_recall, val_f1 = calculate_prec_rec_f1(val_log_total)\n\n if self.lr_scheduler is not None:\n if not self.config['lr_scheduler']['step_every_batch']:\n if type(self.lr_scheduler) is torch.optim.lr_scheduler.ReduceLROnPlateau:\n self.lr_scheduler.step(val_log['loss'])\n else:\n self.lr_scheduler.step()\n\n\n if self.do_validation:\n additional_log = {\"tp\": log_total['tp'], \"fp\": log_total['fp'], \"tn\": 
log_total['tn'],\n \"fn\": log_total['fn'], \"precision\": precision, \"recall\": recall, \"f1\": f1,\n \"val_tp\": val_log_total['tp'], \"val_fp\": val_log_total['fp'],\n \"val_tn\": val_log_total['tn'], \"val_fn\": val_log_total['fn'],\n \"val_precision\": val_precision, \"val_recall\": val_recall, \"val_f1\": val_f1}\n else:\n additional_log = {\"tp\": log_total['tp'], \"fp\": log_total['fp'], \"tn\": log_total['tn'],\n \"fn\": log_total['fn'], \"precision\": precision, \"recall\": recall, \"f1\": f1}\n log.update(additional_log)\n return log", "title": "" }, { "docid": "c065c4d26ef14bdf2aaaabb3f719a1d3", "score": "0.6215406", "text": "def _run_train_iter(self):", "title": "" }, { "docid": "c2de1dfe5fb500adddaa4aeb0c952eb9", "score": "0.62053514", "text": "def train(self):\n model_net_path = os.path.join(self.model_path, '%s-%d-%.4f-%d-%.4f.pkl' %(self.model_type,self.num_epochs,self.lr,self.num_epochs_decay,self.augmentation_prob))\n print(\"-------> started Training <------\")\n if os.path.isfile(model_net_path):\n # Load the pretrained Encoder\n self.model_net.load_state_dict(torch.load(model_net_path))\n print('%s is Successfully Loaded from %s'%(self.model_type,model_net_path))\n else:\n # Train for Encoder\n lr = self.lr\n best_model_net_score = 0.\n \n for epoch in range(self.num_epochs):\n\n self.model_net.train(True)\n epoch_loss = 0\n acc_train = 0\n \n \n if self.objective =='classification':\n for i, (images, labels) in enumerate(self.train_loader):\n images = images.to(self.device)\n labels = labels.to(self.device)\n pred_output = self.model_net(images)\n loss = self.criterion(pred_output,labels)\n \n epoch_loss += loss.item()\n \n # Backprop + optimize\n self.reset_grad()\n loss.backward()\n self.optimizer.step()\n pred_probs = F.softmax(pred_output,dim=1)\n _, preds = torch.max(pred_probs.data, 1)\n print(preds,labels)\n acc_train += torch.sum(preds == labels.data)\n \n\n # Print the log info\n print('Epoch [%d/%d], Loss: %.4f, \\n[Training] Acc: %.4f' % (\n epoch+1, self.num_epochs, \\\n epoch_loss,\\\n acc_train.item()/len(self.train_loader.dataset)))\n\n if (epoch+1)%10==0:\n print(\"saving model\")\n model_net_path_epoch = os.path.join(self.model_path, 'epoch-%d-%s-%d-%.4f-%d-%.4f.pkl' %(epoch,self.model_type,self.num_epochs,self.lr,self.num_epochs_decay,self.augmentation_prob))\n saved_epoch = self.model_net.state_dict()\n torch.save(saved_epoch,model_net_path_epoch)\n print(\"saving at---> \", model_net_path_epoch)\n\n # Decay learning rate\n if (epoch+1) > (self.num_epochs - self.num_epochs_decay):\n lr -= (self.lr / float(self.num_epochs_decay))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n print ('Decay learning rate to lr: {}.'.format(lr))", "title": "" }, { "docid": "4c8f4432d68980939652397d75c8ad84", "score": "0.62025905", "text": "def start_training(self):\n pass", "title": "" }, { "docid": "c5ae6c51c0da461367df5a81db0e8761", "score": "0.6199925", "text": "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data_cpu, target_cpu) in enumerate(self.data_loader):\n data = data_cpu.to(self.device)\n target = target_cpu.to(self.device, non_blocking=True)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n \n # TODO: Remove explicit specification of loss and regularization functions\n loss = self.criterionVGG(output, target) + self.criterion(output, target)\n #loss = self.criterion(output, target)\n loss.backward()\n self.optimizer.step()\n\n self.train_metrics.update('loss', 
loss.item(), write=False)\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target), write=False)\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n\n if batch_idx == self.len_epoch:\n break\n\n # Only visualize the final sample for brevity\n self._visualize_input(data_cpu)\n self._visualize_prediction(output.cpu())\n self._visualize_target(target_cpu)\n\n self.writer.set_step(epoch - 1)\n log = self.train_metrics.result(write=True)\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "title": "" }, { "docid": "648c19289105ace1573b0c4bf1f2a8a2", "score": "0.6192941", "text": "def on_epoch_begin(self) -> None:\n self.state_dict[\"epoch_batches\"] = 0\n self(\"on_epoch_begin\")", "title": "" }, { "docid": "0f1b7de040176a72477d3f477ffa560f", "score": "0.6187087", "text": "def train_loop_begin(self):\n pass", "title": "" }, { "docid": "b0c6b73deff04ab4054d85605add1832", "score": "0.6181174", "text": "def _train_epoch(self, epoch):\n if self.lr_scheduler is not None:\n print(f'lr: {self.lr_scheduler.get_lr()}')\n self.model.train()\n self.evaluator.reset()\n total_loss = 0\n total_metrics = np.zeros(len(self.evaluator))\n tbar = tqdm(self.data_loader, ascii=True)\n for batch_idx, sample in enumerate(tbar):\n data, target = sample['image'], sample['label']\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n # print(f'output shape: {output.shape}, target shape: {target.shape}')\n # print(torch.unique(target), target.shape)\n # exit()\n loss = self.loss(output, target)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.writer.add_scalar('loss', loss.item())\n total_loss += loss.item()\n\n tbar.set_description('Train loss: %.3f' % (total_loss / (batch_idx + 1)))\n\n output = torch.sigmoid(output)\n output = output.data.cpu().numpy()\n target = target.cpu().numpy()\n # output = np.argmax(output, axis=1)\n \n self.evaluator.add_batch(target, output)\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n self.writer.add_image('input', make_grid(data.cpu()[:4], nrow=2, normalize=True))\n grid = make_grid(decode_seg_map_sequence(np.argmax(output, axis=1)[:4], dataset=self.dataset), nrow=2, normalize=False)\n self.writer.add_image('pred', grid)\n # grid = make_grid(decode_seg_map_sequence(target[:4], dataset=self.dataset), nrow=2, normalize=False)\n grid = make_grid(decode_seg_map_sequence(np.argmax(target[:4], axis=1), dataset=self.dataset), nrow=2, normalize=False)\n self.writer.add_image('label', grid)\n\n if batch_idx == self.len_epoch:\n break\n\n print('[Epoch: %d, numImages: %5d]' % (epoch, batch_idx * self.data_loader.batch_size + data.data.shape[0]))\n print('Loss: %.5f' % total_loss)\n self.writer.add_scalar_with_tag('loss_epoch/train', total_loss, epoch)\n for i, metric in enumerate(self.evaluator):\n mtr_val = metric()\n total_metrics[i] = mtr_val\n self.writer.add_scalar_with_tag(f'train/{metric.__name__}', mtr_val, epoch)\n\n log = {\n 'loss': total_loss / self.len_epoch,\n # 'metrics': (total_metrics / self.len_epoch).tolist()\n 'metrics': total_metrics.tolist()\n }\n\n 
if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(val_log)\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return log", "title": "" }, { "docid": "f2af63fb5244168b5708e570509e507e", "score": "0.6145347", "text": "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "title": "" }, { "docid": "a798296943d11e444cf26bc1355954c1", "score": "0.61292064", "text": "def on_epoch_begin(self, epoch: int):\n pass", "title": "" }, { "docid": "6ad1921488294cef7a418ce8e7952675", "score": "0.61205006", "text": "def prepare_data(self, batch_size, epoch=0, shuffle=True):\n raise NotImplementedError", "title": "" }, { "docid": "39c68be482f8a2b376cb91d2ef942897", "score": "0.6089393", "text": "def train(self):\n\n # Switch to train mode\n self.encoder.train()\n self.decoder.train()\n\n # Keep track of train loss\n total_loss = 0\n\n # Start time for every 100 steps\n start_train_time = time.time()\n i_step = 0\n \n # Obtain the batch\n pbar = tqdm(self.train_loader)\n pbar.set_description('training epoch {}'.format(self.epoch))\n for batch in pbar:\n i_step += 1\n images, captions, lengths = batch[0], batch[1], batch[2]\n \n # Move to GPU if CUDA is available\n if torch.cuda.is_available():\n images = images.cuda()\n captions = captions.cuda()\n lengths = lengths.cuda()\n\n # Pass the inputs through the CNN-RNN model\n features = self.encoder(images)\n outputs = self.decoder(features, captions, lengths)\n\n # Calculate the batch loss\n # Flatten batch dimension\n outputs = outputs.view(-1, vocab_size)\n captions = captions.view(-1)\n\n loss = self.criterion(outputs, captions)\n\n # Zero the gradients. 
Since the backward() function accumulates \n # gradients, and we don’t want to mix up gradients between minibatches,\n # we have to zero them out at the start of a new minibatch\n self.optimizer.zero_grad()\n # Backward pass to calculate the weight gradients\n loss.backward()\n # Update the parameters in the optimizer\n self.optimizer.step()\n\n total_loss += loss.item()\n\n pbar.set_postfix(last=loss.item(), avg=total_loss/i_step)\n \n self.epoch += 1\n self.save()\n\n return total_loss / i_step", "title": "" }, { "docid": "e4c84d2f22fde159e374124b2510f355", "score": "0.6088309", "text": "def _train_epoch(self, epoch):\n self.model.train()\n\n total_loss = 0\n total_metrics = np.zeros(3) # precious, recall, hmean\n pbar = tqdm.tqdm(self.data_loader, 'Epoch ' + str(epoch), ncols=120)\n for batch_idx, gt in enumerate(pbar):\n imagePaths, img, score_map, geo_map, training_mask, transcripts, boxes, mapping = gt\n img, score_map, geo_map, training_mask = self._to_tensor(img, score_map, geo_map, training_mask)\n\n # import cv2\n # for i in range(img.shape[0]):\n # image = img[i]\n # for tt, bb in zip(transcripts[i], boxes[i]):\n # show_box(image.permute(1, 2, 0).detach().cpu().numpy()[:,:, ::-1].astype(np.uint8).copy(), bb, tt)\n\n self.optimizer.zero_grad()\n pred_score_map, pred_geo_map, pred_recog, pred_boxes, pred_mapping, indices = self.model.forward(img, boxes, mapping)\n\n transcripts = transcripts[indices]\n pred_boxes = pred_boxes[indices]\n pred_mapping = pred_mapping[indices]\n labels, label_lengths = self.labelConverter.encode(transcripts.tolist())\n recog = (labels, label_lengths)\n\n det_loss, reg_loss = self.loss(score_map, pred_score_map, geo_map, pred_geo_map, recog, pred_recog, training_mask)\n loss = det_loss + reg_loss\n loss.backward()\n self.optimizer.step()\n\n total_loss += loss.item()\n pred_transcripts = []\n if len(pred_mapping) > 0:\n pred_mapping = pred_mapping[indices]\n pred_boxes = pred_boxes[indices]\n pred_fns = [imagePaths[i] for i in pred_mapping]\n\n pred, lengths = pred_recog\n _, pred = pred.max(2)\n for i in range(lengths.numel()):\n l = lengths[i]\n p = pred[:l, i]\n t = self.labelConverter.decode(p, l)\n pred_transcripts.append(t)\n pred_transcripts = np.array(pred_transcripts)\n\n gt_fns = [imagePaths[i] for i in mapping]\n total_metrics += fots_metrics((pred_boxes, ['' for _ in pred_fns], pred_fns),\n (boxes, ['' for _ in gt_fns], gt_fns))\n\n pbar.set_postfix_str(f'Loss: {loss.item():.4f}, Detection loss: {det_loss.item():.4f}, '\n f'Recognition loss: {reg_loss.item():.4f}', refresh=False)\n\n log = {\n 'loss': total_loss / len(self.data_loader),\n 'precious': total_metrics[0] / len(self.data_loader),\n 'recall': total_metrics[1] / len(self.data_loader),\n 'hmean': total_metrics[2] / len(self.data_loader)\n }\n if self.valid and 5 < epoch: # skip first epochs as they generate too many proposals\n val_log = self._valid_epoch()\n log = {**log, **val_log}\n for key, value in log.items():\n self.logger.info(' {:15s}: {}'.format(str(key), value))\n\n return log", "title": "" }, { "docid": "9e847b539877e650d0ebdb92ee4a3800", "score": "0.6084742", "text": "def _train_epoch(self, epoch):\n self.model.train()\n self.real_model._hook_before_iter()\n self.train_metrics.reset()\n\n if hasattr(self.criterion, \"_hook_before_epoch\"):\n self.criterion._hook_before_epoch(epoch)\n\n for batch_idx, data in enumerate(self.data_loader):\n\n start_time = time.time()\n data, target = data\n data, target = data.to(f'npu:{NPU_CALCULATE_DEVICE}'), 
target.to(f'npu:{NPU_CALCULATE_DEVICE}')\n\n self.optimizer.zero_grad()\n\n with autocast():\n if self.real_model.requires_target:\n output = self.model(data, target=target) \n\n output, loss = output \n else:\n extra_info = {}\n output = self.model(data)\n\n if self.add_extra_info:\n if isinstance(output, dict):\n logits = output[\"logits\"]\n extra_info.update({\n \"logits\": logits.transpose(0, 1)\n })\n else:\n extra_info.update({\n \"logits\": self.real_model.backbone.logits\n })\n\n if isinstance(output, dict):\n output = output[\"output\"]\n\n if self.add_extra_info:\n loss = self.criterion(output_logits=output, target=target, extra_info=extra_info)\n else:\n loss = self.criterion(output_logits=output, target=target) \n if not use_fp16:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n #loss.backward()\n self.optimizer.step()\n else:\n self.scaler.scale(loss).backward()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target, return_length=True))\n \n step_time = time.time() - start_time\n FPS = self.data_loader.batch_size / step_time\n if batch_idx < 2:\n print(\"step_time = %.4f\" % (step_time), flush=True)\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f} max group LR: {:.4f} min group LR: {:.4f}, time/step(s):{:.4f}, FPS:{:.3f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item(),\n max([param_group['lr'] for param_group in self.optimizer.param_groups]),\n min([param_group['lr'] for param_group in self.optimizer.param_groups]),step_time,FPS))\n self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "title": "" }, { "docid": "b4fb97cd67fa5dd2fb1e025230fd864b", "score": "0.6072336", "text": "def on_epoch_begin(self, epoch, logs=None):\n if epoch > 0:\n self.model.classifier.reset_covariance_matrix()", "title": "" }, { "docid": "11da38fdb9bd69bc705a1c5aa39339ba", "score": "0.60652155", "text": "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0 and self.opt.save_model:\n self.save_model()", "title": "" }, { "docid": "73d90177982ae2db4c27332f4747200c", "score": "0.6052523", "text": "def train(self):\n # collect initial episodes\n self.collect_initial_episodes()\n # main training cycle\n if self._summary_writer is not None:\n with self._summary_writer.as_default():\n self._train()\n else:\n self._train()", "title": "" }, { "docid": "5192642b28646e30fe82b8ccbab4ce26", "score": "0.60305226", "text": "def train_epoch(self):\n self.model.train()\n self.training = True\n torch.set_grad_enabled(self.training)\n self.train_loss.reset()\n self.batch_time.reset()\n time_stamp = time.time()\n \n for batch_idx, (data, target) in enumerate(self.train_loader):\n \n data, target = data.to(self.device),target.to(self.device)\n target = list(chunks(target, self.nclasses)) \n self.optimizer.zero_grad()\n output = self.model(data)\n \n loss_tensor = 
torch.tensor(0.).to(self.device)\n loss_values = []\n loss_coeffs = []\n for loss_idx in range(len(self.nclasses)):\n current_target = target[loss_idx].squeeze(1).long() if self.loss_names[loss_idx]=='cce' else target[loss_idx]\n #current_loss,current_loss_coef = self.loss[loss_idx](output[loss_idx], current_target),1\n current_loss, current_loss_coef = self.calc_loss(output[loss_idx], current_target, loss_idx)\n loss_tensor += current_loss * self.loss_weights[loss_idx]\n loss_values.append(current_loss.item())\n loss_coeffs.append(current_loss_coef)\n loss_tensor.backward() \n self.optimizer.step()\n self.train_loss.update(np.array(loss_values),np.array(loss_coeffs))\n \n self.batch_time.update(time.time() - time_stamp)\n time_stamp = time.time()\n \n self.log_batch(batch_idx)\n if self.opt.debug and (batch_idx==10):\n print('Debugging done!')\n break;", "title": "" }, { "docid": "f86d0ca0d52426e53027766d96fe28d6", "score": "0.6024157", "text": "def before_fit(self, model: Model, iteration: int = 0):\n pass", "title": "" }, { "docid": "0c3448b55dcd8b711e10549c0998ac3b", "score": "0.60207045", "text": "def new_epoch(self):\n n, d = self.train_data.shape\n data_perm = rnd.permutation(n)\n self.train_data = deepcopy(self.train_data[data_perm])\n\n if self.train_miss_mask is not None:\n # noise samples are grouped in nu-sized chunks that are missing the same features\n # we need to keep this grouping when we shuffle the data\n new_samples = deepcopy(self.noise_samples)\n new_samples = new_samples.reshape(n, self.nu, d)\n self.noise_samples = new_samples[data_perm].reshape(self.noise_samples.shape)\n else:\n noise_perm = rnd.permutation(len(self.noise_samples))\n self.noise_samples = deepcopy(self.noise_samples[noise_perm])\n\n print('epoch {}: J1 = {}'.format(self.current_epoch, self.current_loss))\n self.current_epoch += 1", "title": "" }, { "docid": "fc3eb712dfda0359d71b2d52c381ffd2", "score": "0.60203284", "text": "def _train_epoch(self, epoch):\n\n assert self.generator['config']['arch']['args']['output_size'] == \\\n self.discriminator['config']['arch']['args']['input_size']\n\n total_gen_loss = 0\n total_dis_loss = 0\n\n for batch_idx, (data, _) in enumerate(self.data_loader):\n data = data.to(self.device)\n\n # Sample noise as generator input\n z = Variable(torch.randn(data.size(0), self.z_size)).to(self.device)\n\n dis_loss = self._train_discriminator(epoch, batch_idx, data, z)\n\n total_dis_loss += dis_loss\n\n if batch_idx % self.log_step == 0:\n log_msg = ('Train Epoch: {} {} \\n\\t'\n + 'discriminator loss\\t: {:.6f}\\n').format(\n epoch,\n self._progress(batch_idx),\n dis_loss)\n self.discriminator['writer'].add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n self.generator['writer'].add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n # Train the generator every n_critic iterations\n if batch_idx % self.n_critic == 0:\n gen_loss = self._train_generator(epoch, batch_idx, z)\n\n total_gen_loss += gen_loss\n\n log_msg += ('\\tgenerator loss\\t: {:.6f}').format(gen_loss)\n self.generator['writer'].add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(log_msg)\n\n if batch_idx == self.len_epoch:\n break\n\n log = {\n 'generator': {\n 'loss': total_gen_loss / self.len_epoch,\n },\n 'discriminator': {\n 'loss': total_dis_loss / self.len_epoch,\n }\n }\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log['discriminator'].update(val_log['discriminator'])\n 
log['generator'].update(val_log['generator'])\n\n if self.generator['lr_scheduler'] is not None:\n self.generator['lr_scheduler'].step()\n\n if self.discriminator['lr_scheduler'] is not None:\n self.discriminator['lr_scheduler'].step()\n\n return log", "title": "" }, { "docid": "50c6da2f8b7370d0c471b92a9a4e1e5f", "score": "0.60147643", "text": "def on_train_end(self):\n pass", "title": "" }, { "docid": "2dac9316ff43a8a8c51e45fe05e73b0f", "score": "0.59980685", "text": "def before_train(self, epoch, logs=None):\n # Use zero valid_freq to supress default valid step\n self.trainer.auto_save_ckpt = False\n self.trainer.auto_save_perf = False\n self.trainer.valid_freq = 0\n cudnn.benchmark = True\n cudnn.enabled = True\n self.search_alg = SearchAlgorithm(SearchSpace())\n self.set_algorithm_model(self.trainer.model)\n # setup alphas\n n_individual = self.alg_policy.num_individual\n self.alphas = torch.cat([self.trainer.model.random_sample_path().unsqueeze(0)\n for i in range(n_individual)], dim=0)\n self.trainer.train_loader = self.trainer._init_dataloader(mode='train')\n self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')", "title": "" }, { "docid": "8f9fb3a5d429329d5a749b130621a82c", "score": "0.5995909", "text": "def _train_epoch(self, epoch):\n self.model.train()\n epoch_start, batch_start = time.time(), time.time()\n train_loss = 0.0\n lr = self.optimizer.param_groups[0]['lr']\n for i, (img, gt_score, gt_geo, ignored_map, gt_embedding) in enumerate(self.train_loader):\n if i >= self.train_loader_len:\n break\n self.global_step += 1\n lr = self.optimizer.param_groups[0]['lr']\n\n cur_batch_size = img.size()[0]\n img, gt_score, gt_geo, ignored_map, gt_embedding = img.to(self.device), gt_score.to(self.device), \\\n gt_geo.to(self.device), ignored_map.to(self.device), \\\n gt_embedding.to(self.device)\n\n (predict_score, predict_geo), predict_embedding = self.model(img)\n loss_all, loss_cls, loss_ang, loss_diou, loss_embed = self.criterion(\n gt_score, predict_score, gt_geo, predict_geo, gt_embedding, predict_embedding, ignored_map)\n\n # backward\n self.optimizer.zero_grad()\n loss_all.backward()\n self.optimizer.step()\n\n loss_all = loss_all.item()\n loss_cls, loss_ang, loss_diou = loss_cls.item(), loss_ang.item(), loss_diou.item()\n loss_embed = loss_embed.item()\n train_loss += loss_all\n\n if i % self.display_interval == 0 or i == self.train_loader_len - 1:\n batch_time = time.time() - batch_start\n self.logger.info('[{}/{}], [{}/{}], g_step: {}, Spe: {:.1f} sam/sec, l_all: {:.4f}, l_cls: {:.4f}, '\n 'l_ang: {:.4f}, l_diou: {:.4f}, l_embed: {:.4f}, lr: {:.6}, T: {:.2f}'\n .format(str(epoch).zfill(3), self.epochs, str(i + 1).zfill(3), self.train_loader_len,\n self.global_step, self.display_interval * cur_batch_size / batch_time,\n loss_all, loss_cls, loss_ang, loss_diou, loss_embed, lr, batch_time))\n batch_start = time.time()\n\n if self.tensorboard_enable:\n self.writer.add_scalar('TRAIN/LOSS/loss_all', loss_all, self.global_step)\n self.writer.add_scalar('TRAIN/LOSS/loss_cls', loss_cls, self.global_step)\n self.writer.add_scalar('TRAIN/LOSS/loss_ang', loss_ang, self.global_step)\n self.writer.add_scalar('TRAIN/LOSS/loss_diou', loss_diou, self.global_step)\n self.writer.add_scalar('TRAIN/LOSS/loss_embed', loss_embed, self.global_step)\n self.writer.add_scalar('TRAIN/lr', lr, self.global_step)\n\n return {'train_loss': train_loss / self.train_loader_len, 'lr': lr, 'time': time.time() - epoch_start, 'epoch': epoch}", "title": "" }, { "docid": 
"d98bf69e94e0bdc3e0db9a41b2e0ba38", "score": "0.59924054", "text": "def train_epoch(model, optimizer, x_loader, z_loader, beta, lamb, tau=0, rho=0,\n nu_e=0, nu_d=0, z_loss_fun=None, x_constraint_loss_fun=None, device=device):\n model.train()\n if z_loss_fun is None:\n z_loss_fun = lambda z, z_tilde: sliced_wd(z, z_tilde, TRAIN_NUM_SLICES, P)\n for batch_idx, (x, z) in enumerate(zip(x_loader, z_loader)):\n x, z = x.to(device), z.to(device)\n optimizer.zero_grad()\n z_tilde = model.encode(x)\n z_loss = z_loss_fun(z, z_tilde) if lamb > 0 else 0\n if beta > 0:\n x_tilde = model.decode(z_tilde) # full \"autoencoding chian\": x -> z_tilde -> x_tilde\n x_loss = data_loss(x, x_tilde, P)\n else:\n x_loss = 0.\n encoder_anchor_loss = anchor_loss(z_tilde, x) if nu_e > 0 else 0\n decoder_anchor_loss = 0\n if tau > 0 or rho > 0 or nu_d > 0:\n model_x = model.decode(z)\n# alt_x_loss = sliced_wd(x, model_x, num_slices, P) if tau > 0 else 0\n alt_x_loss = z_loss_fun(x, model_x) if tau > 0 else 0\n x_constraint_loss = x_constraint_loss_fun(model_x) if rho > 0 else 0 # x_constraint_loss directly takes neural net output\n # x_constraint_loss = 0\n # if rho > 0:\n # # First convert model output to \"raw observations\"\n # model_x = (model_x * x_train_std_torch) + x_train_mean_torch\n # x_constraint_loss = x_constraint_loss_fun(model_x)\n decoder_anchor_loss = anchor_loss(z, model_x) if nu_d > 0 else 0\n else:\n alt_x_loss = 0\n x_constraint_loss = 0\n\n loss = beta * x_loss + lamb * z_loss + tau * alt_x_loss + rho * x_constraint_loss + nu_e * encoder_anchor_loss + nu_d * decoder_anchor_loss\n loss.backward()\n optimizer.step()\n losses = dict(loss=loss, x_loss=x_loss, z_loss=z_loss, alt_x_loss=alt_x_loss, x_constraint_loss=x_constraint_loss,\n encoder_anchor_loss=encoder_anchor_loss, decoder_anchor_loss=decoder_anchor_loss)\n return losses", "title": "" }, { "docid": "4b8787c50ad17cac715273ab2ce03320", "score": "0.59698033", "text": "def _run_training(self) -> None:", "title": "" }, { "docid": "c714efe274f0b68a3625ac9522a40b84", "score": "0.5967808", "text": "def before_train(self, runner) -> None:\n runner.message_hub.update_info('loop_stage', 'train')\n runner.message_hub.update_info('epoch', runner.epoch)\n runner.message_hub.update_info('iter', runner.iter)\n runner.message_hub.update_info('max_epochs', runner.max_epochs)\n runner.message_hub.update_info('max_iters', runner.max_iters)\n if hasattr(runner.train_dataloader.dataset, 'metainfo'):\n runner.message_hub.update_info(\n 'dataset_meta', runner.train_dataloader.dataset.metainfo)", "title": "" }, { "docid": "0b538f6809e8dd4c789f99710656b40d", "score": "0.5964886", "text": "def initialize(self):\n self.rng = RandomState(30948348)\n self.data_batches = []\n self.y_batches = []\n self.input_time_length = get_input_time_length(self.cnt_model)\n self.n_sample_preds = get_n_sample_preds(self.cnt_model)\n self.n_classes = self.cnt_model.output_shape[1]\n # create train function\n log.info(\"Compile train function...\")\n self._create_train_function()\n log.info(\"Done compiling train function.\")", "title": "" }, { "docid": "0a72fd044ae5a72802b042c72ba0fbd0", "score": "0.5941748", "text": "def on_train_begin(self):\n self.training_losses = []\n self.no_improvement_count = 0\n self._done = False\n self.minimum_loss = float('inf')", "title": "" }, { "docid": "7878c046c4dafabda9c4d966e88b14ab", "score": "0.5927713", "text": "def pretrain(self, optimizer, lr, run_optimizer=True):\n pass", "title": "" }, { "docid": "7ed0ca65e5513685fb7465d384edb9c3", "score": 
"0.5927533", "text": "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n \n for batch_idx, (data, target, mask) in enumerate(tqdm(self.data_loader)):\n data, target = [item.to(self.device) for item in data], target.to(self.device)\n mask = [item.to(self.device) for item in mask]\n self.optimizer.zero_grad()\n output,rec_mesh,img_probs,mesh = self.model(data)\n loss_img = 0\n loss_mask = 0\n loss_lap = 0\n loss_edge = 0\n loss_flat = 0\n for i in range(VIEW_NUMS):\n img = output[i].permute(0,3,1,2)\n # colored image L1 loss\n # loss_img += L1(img, data[i])\n # 轮廓mask IOU L1/L2\n # loss += L1(torch.where(img > 0,torch.ones_like(img) ,torch.zeros_like(img)) , torch.where(data[i] > 0,torch.ones_like(img) ,torch.zeros_like(img)) )\n loss_mask += L1(img_probs[i],mask[i])\n # Lap平滑损失\n # loss_lap += 0.0001*Lap_Loss(self.model.adj,rec_mesh)\n # 边长损失\n # loss_edge += Edge_regularization(rec_mesh,mesh.faces.long())\n # 法向损失\n loss_flat += 0.0001*Loss_flat(rec_mesh,mesh)\n \n loss = loss_img+loss_mask+loss_lap+loss_edge+loss_flat\n loss/=VIEW_NUMS\n loss.backward()\n self.optimizer.step()\n # log\n if batch_idx % self.log_step == 0:\n # 写入当前step\n step = (epoch - 1) * self.len_epoch + batch_idx\n self.writer.set_step(step)\n # 写入损失曲线\n # self.train_metrics.update('loss_img', loss_img.item())\n self.train_metrics.update('loss_mask', loss_mask.item())\n # self.train_metrics.update('loss_lap', loss_lap.item())\n # self.train_metrics.update('loss_edge', loss_edge.item())\n self.train_metrics.update('loss_flat', loss_flat.item())\n self.train_metrics.update('loss', loss.item())\n # 合成两张图像\n shape = data[0].shape\n input_img = torch.zeros([6,shape[1],shape[2],shape[3]])\n output_img = torch.zeros([6,shape[1],shape[2],shape[3]])\n # tb显示图像\n for i in range(6):\n input_img[i] = data[i][0].cpu()\n output_img[i] = output[i][0].permute(2,0,1).cpu().detach()\n self.writer.add_image('input', make_grid(input_img, nrow=6, normalize=False))\n self.writer.add_image('output', make_grid(output_img, nrow=6, normalize=False))\n # 控制台log\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n # 保存为三维模型, point写入obj文件, face固定的, uv坐标值\n save_mesh(rec_mesh[0].cpu().detach(),mesh.faces.long().cpu().detach(),os.path.join(self.config.log_dir,'{}_{}_{}.stl'.format(epoch,batch_idx,step)))\n \n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n self.do_validation = False\n if self.do_validation and epoch%self.config['trainer']['save_period'] == 0:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "title": "" }, { "docid": "842a70f765ce23f0adf9c5242765b634", "score": "0.5920795", "text": "def train(config):\n epochs = config[\"epochs\"]\n loss = config[\"loss\"]\n architecture = config[\"architecture\"]\n row_size = config[\"row_size\"]\n col_size = config[\"col_size\"]\n channels = config[\"channels\"]\n num_classes = config[\"num_classes\"]\n snapshot_dir = config[\"snapshot_dir\"]\n log_dir = config[\"log_dir\"]\n hyper_optimization = config[\"hyper_optimization\"]\n random_labels = config[\"random_labels\"]\n initial_epoch = config[\"initial_epoch\"]\n starting_checkpoint = config[\"starting_checkpoint\"]\n\n # make directories if they don't already exist\n os.makedirs(snapshot_dir, exist_ok=True)\n os.makedirs(log_dir, exist_ok=True)\n\n if hyper_optimization:\n learning_rate = 
{{uniform(0.1,0.00001)}}\n batch_size = {{choice([64,128,256])}}\n optimizer = {{choice(['adam', 'sgd'])}}\n weight_decay = {{uniform(0.,0.0001)}}\n dropout = {{uniform(0.,0.7)}}\n spatialdropout = {{uniform(0.,0.4)}}\n first_block = {{choice([32,64])}}\n else:\n learning_rate = config[\"learning_rate\"]\n batch_size = config[\"batch_size\"]\n optimizer = config[\"optimizer\"]\n weight_decay = config[\"weight_decay\"]\n dropout = config[\"dropout\"]\n spatialdropout = config[\"spatialdropout\"]\n first_block = config[\"first_block\"]\n\n second_block = 2*first_block\n\n # initialize optimizer function\n if optimizer == 'Adam':\n opt = optimizers.Adam(lr=learning_rate)\n else:\n opt = optimizers.SGD(lr=learning_rate, momentum=0.9)\n\n # compile model with given parameters\n img_shape = row_size,col_size,channels\n model = build_arch(architecture, img_shape, num_classes,weight_decay, dropout,\n spatialdropout, first_block, second_block)\n model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])\n\n # get fashion-mnist data, training is in generator format to use continuous data augmentation\n train_gen, val_data, test_data = get_fashion_dataset(batch_size, random_labels)\n x_test, y_test = test_data\n\n # add callback for early stopping, saving best models and learning rate scheduling\n callbacks = []\n basename = \"%s_lr%0.4f_bs%d_%s_wd%0.5f_do%0.2f_sdo%0.2f_fb%d.csv\" % \\\n (architecture,learning_rate, batch_size, optimizer, weight_decay, dropout, spatialdropout, first_block)\n callbacks.append(EarlyStopping(monitor='val_acc',patience=75, restore_best_weights=True, verbose=1))\n callbacks.append(CSVLogger(os.path.join(log_dir,basename)))\n model_checkpoint_path = os.path.join(snapshot_dir,\"%s_{epoch:02d}-{val_loss:.2f}.h5\"%(basename))\n callbacks.append(ModelCheckpoint(model_checkpoint_path, monitor='val_acc', save_best_only=True, verbose=1,period=1))\n callbacks.append(ReduceLROnPlateau(monitor='val_loss',patience=50, factor=0.2, min_lr=0.0000001, verbose=1))\n callbacks.append(ReduceLROnPlateau(monitor='loss',patience=15, factor=0.2, min_lr=0.0001, verbose=1))\n\n # for warm start\n if starting_checkpoint:\n model.load_weights(starting_checkpoint)\n\n # train\n model.fit_generator(train_gen,\n epochs=epochs,\n steps_per_epoch=1000, # as asked in problem statement\n validation_data=val_data,\n callbacks=callbacks,\n workers=1,\n initial_epoch=initial_epoch,\n verbose=2)\n\n # get results on test data\n result = model.evaluate(x_test,y_test)\n print('test loss: %0.4f, test accuracy: %0.4f'%(result[0],result[1]))\n\n # return model and loss value for hyperparameter optimization\n return {'loss': -result[1], 'status': STATUS_OK, 'model': model}", "title": "" }, { "docid": "c523c631d9aff649bae014ed3ff3c29b", "score": "0.5916061", "text": "def train_epoch(self):\n raise NotImplementedError", "title": "" }, { "docid": "870f44eec1c356b8ae4e9c548209f1e6", "score": "0.59150356", "text": "def initialize_training(self):\n if self.cfg.is_finetuning:\n # Load scaler from pre-trained model.\n self._scaler = load_scaler(self.cfg.base_run_dir)\n\n # Initialize dataset before the model is loaded.\n ds = self._get_dataset()\n if len(ds) == 0:\n raise ValueError(\"Dataset contains no samples.\")\n self.loader = self._get_data_loader(ds=ds)\n\n self.model = self._get_model().to(self.device)\n if self.cfg.checkpoint_path is not None:\n LOGGER.info(f\"Starting training from Checkpoint {self.cfg.checkpoint_path}\")\n self.model.load_state_dict(torch.load(str(self.cfg.checkpoint_path), 
map_location=self.device))\n elif self.cfg.checkpoint_path is None and self.cfg.is_finetuning:\n # the default for finetuning is the last model state\n checkpoint_path = [x for x in sorted(list(self.cfg.base_run_dir.glob('model_epoch*.pt')))][-1]\n LOGGER.info(f\"Starting training from checkpoint {checkpoint_path}\")\n self.model.load_state_dict(torch.load(str(checkpoint_path), map_location=self.device))\n\n # Freeze model parts from pre-trained model.\n if self.cfg.is_finetuning:\n self._freeze_model_parts()\n\n self.optimizer = self._get_optimizer()\n self.loss_obj = self._get_loss_obj().to(self.device)\n\n # Add possible regularization terms to the loss function.\n self._set_regularization()\n\n # restore optimizer and model state if training is continued\n if self.cfg.is_continue_training:\n self._restore_training_state()\n\n self.experiment_logger = Logger(cfg=self.cfg)\n if self.cfg.log_tensorboard:\n self.experiment_logger.start_tb()\n\n if self.cfg.is_continue_training:\n # set epoch and iteration step counter to continue from the selected checkpoint\n self.experiment_logger.epoch = self._epoch\n self.experiment_logger.update = len(self.loader) * self._epoch\n\n if self.cfg.validate_every is not None:\n if self.cfg.validate_n_random_basins < 1:\n warn_msg = [\n f\"Validation set to validate every {self.cfg.validate_every} epoch(s), but \",\n \"'validate_n_random_basins' not set or set to zero. Will validate on the entire validation set.\"\n ]\n LOGGER.warning(\"\".join(warn_msg))\n self.cfg.validate_n_random_basins = self.cfg.number_of_basins\n self.validator = self._get_tester()\n\n if self.cfg.target_noise_std is not None:\n self.noise_sampler_y = torch.distributions.Normal(loc=0, scale=self.cfg.target_noise_std)\n self._target_mean = torch.from_numpy(\n ds.scaler[\"xarray_feature_center\"][self.cfg.target_variables].to_array().values).to(self.device)\n self._target_std = torch.from_numpy(\n ds.scaler[\"xarray_feature_scale\"][self.cfg.target_variables].to_array().values).to(self.device)", "title": "" }, { "docid": "75ea817a14a052dd1742431ec1039a4e", "score": "0.5909151", "text": "def init_training(args):\n datasets = Cifar10Dataset.get_datasets_from_scratch(args.data_path)\n for phase in ['train', 'test']:\n print('{} dataset len: {}'.format(phase, len(datasets[phase])))\n\n # define loaders\n data_loaders = {\n 'train': DataLoader(datasets['train'], batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers),\n 'test': DataLoader(datasets['test'], batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)\n }\n\n # check CUDA availability and set device\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print('Use GPU: {}'.format(str(device) != 'cpu'))\n\n # set up models\n generator = Generator(args.gen_norm).to(device)\n discriminator = Discriminator(args.disc_norm).to(device)\n\n # initialize weights\n if args.apply_weight_init:\n generator.apply(weights_init_normal)\n discriminator.apply(weights_init_normal)\n\n # adam optimizer with reduced momentum\n optimizers = {\n 'gen': torch.optim.Adam(generator.parameters(), lr=args.base_lr_gen, betas=(0.5, 0.999)),\n 'disc': torch.optim.Adam(discriminator.parameters(), lr=args.base_lr_disc, betas=(0.5, 0.999))\n }\n\n # losses\n losses = {\n 'l1': torch.nn.L1Loss(reduction='mean'),\n 'disc': torch.nn.BCELoss(reduction='mean')\n }\n\n # make save dir, if it does not exists\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n # load weights if the training is 
not starting from the beginning\n global_step = args.start_epoch * len(data_loaders['train']) if args.start_epoch > 0 else 0\n if args.start_epoch > 0:\n\n generator.load_state_dict(torch.load(\n os.path.join(args.save_path, 'checkpoint_ep{}_gen.pt'.format(args.start_epoch - 1)),\n map_location=device\n ))\n discriminator.load_state_dict(torch.load(\n os.path.join(args.save_path, 'checkpoint_ep{}_disc.pt'.format(args.start_epoch - 1)),\n map_location=device\n ))\n\n return global_step, device, data_loaders, generator, discriminator, optimizers, losses", "title": "" }, { "docid": "da77082e8661e5b0114d26131bd453bc", "score": "0.5908308", "text": "def __init__(self, cnn_model, data_dir, dst_dir, model_name, mark, target_size=(224, 224)):\n self.cnn_model = cnn_model\n self.model_name = model_name\n self.mark = mark\n self.data_dir = data_dir\n self.save_dir = os.path.join(dst_dir, self.mark)\n self.composed_model = None\n self.transfer_learned = False\n self.train_dir = os.path.join(self.data_dir, \"train\")\n self.test_dir = os.path.join(self.data_dir, \"test\")\n self.val_dir = os.path.join(self.data_dir, \"val\")\n self.train_imgs = list(paths.list_images(self.train_dir))\n self.test_imgs = list(paths.list_images(self.test_dir))\n self.val_imgs = list(paths.list_images(self.val_dir))\n self.hists = []\n self.target_size = target_size\n self.scores = []\n self.times = []\n self.nb = len(list(os.listdir(self.train_dir)))\n self.epoches = []\n self.lrs = []\n self.freezing_layer_index = None\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n print(\"create dir \", self.save_dir)\n print(\"load model\", model_name,\n \"target_size\", target_size,\n \"datadir\", data_dir,\n \"save_dir\", self.save_dir)\n self.set_composed_model()", "title": "" }, { "docid": "bef05a60f1ce0e33bf11683bd0703879", "score": "0.5906557", "text": "def on_epoch_begin(self, model):\n pass", "title": "" }, { "docid": "ba1feb03af82946b6b99ee8cc3d11525", "score": "0.5901343", "text": "def on_train_epoch_start(self, trainer, pl_module):\n trainer.train_dataloader.resample_labels()\n super().on_train_epoch_start(trainer, pl_module)", "title": "" }, { "docid": "1ab154bfe4c53a34d185f7d05dd4b248", "score": "0.5897976", "text": "def modelarts_pre_process():\n def unzip(zip_file, save_dir):\n import zipfile\n s_time = time.time()\n if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):\n zip_isexist = zipfile.is_zipfile(zip_file)\n if zip_isexist:\n fz = zipfile.ZipFile(zip_file, 'r')\n data_num = len(fz.namelist())\n print(\"Extract Start...\")\n print(\"unzip file num: {}\".format(data_num))\n data_print = int(data_num / 100) if data_num > 100 else 1\n i = 0\n for file in fz.namelist():\n if i % data_print == 0:\n print(\"unzip percent: {}%\".format(int(i * 100 / data_num)), flush=True)\n i += 1\n fz.extract(file, save_dir)\n print(\"cost time: {}min:{}s.\".format(int((time.time() - s_time) / 60),\n int(int(time.time() - s_time) % 60)))\n print(\"Extract Done.\")\n else:\n print(\"This is not zip.\")\n else:\n print(\"Zip has been extracted.\")\n\n if config.need_modelarts_dataset_unzip:\n zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + \".zip\")\n save_dir_1 = os.path.join(config.data_path)\n\n sync_lock = \"/tmp/unzip_sync.lock\"\n\n # Each server contains 8 devices as most.\n if config.device_target == \"GPU\":\n init()\n device_id = get_rank()\n device_num = get_group_size()\n elif config.device_target == \"Ascend\":\n device_id = 
get_device_id()\n device_num = get_device_num()\n else:\n raise ValueError(\"Not support device_target.\")\n\n if device_id % min(device_num, 8) == 0 and not os.path.exists(sync_lock):\n print(\"Zip file path: \", zip_file_1)\n print(\"Unzip file save dir: \", save_dir_1)\n unzip(zip_file_1, save_dir_1)\n print(\"===Finish extract data synchronization===\")\n try:\n os.mknod(sync_lock)\n except IOError:\n pass\n\n while True:\n if os.path.exists(sync_lock):\n break\n time.sleep(1)\n\n print(\"Device: {}, Finish sync unzip data from {} to {}.\".format(device_id, zip_file_1, save_dir_1))\n\n config.ckpt_path = os.path.join(config.output_path, config.ckpt_path)", "title": "" }, { "docid": "c39bc838fe64492c50e98365a188c9f1", "score": "0.5894084", "text": "def train_model(model):\n # Add your code here\n\n #Preprocessing \n # Data augmentation - creation of more images to train on\n train_datagen = ImageDataGenerator(\n rescale = 1./255,\n shear_range=0.2,\n zoom_range=0.2,\n width_shift_range=0.2,\n\t\theight_shift_range=0.2,\n rotation_range=20,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale = 1./255)\n\n training_set = train_datagen.flow_from_directory( #how can I check if I am actually making many more instances of pictures.\n '/Users/harryrodger/Desktop/data',\n target_size=(64,64),\n batch_size=32,\n class_mode='categorical'\n )\n\n test_set = test_datagen.flow_from_directory( #how can I check if I am actually making many more instances of pictures.\n '/Users/harryrodger/Desktop/ProjectCOMP309/ProjectCode/data/test',\n target_size=(64,64),\n batch_size=32,\n class_mode='categorical'\n )\n\n print('Data augmentation complete')\n\n model.fit_generator( \n training_set,\n steps_per_epoch=8000, \n epochs = 50,\n validation_data=test_set,\n validation_steps = 15) \n\n return model", "title": "" }, { "docid": "fe9b3efa9a7c37bc9e00105bf47a1071", "score": "0.58931035", "text": "def run_epoch(self):\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n # early_phase = batch_idx % self.opt.log_frequency == 0 #and self.step < 2000\n # late_phase = self.step % 2000 == 0\n\n if batch_idx % self.opt.log_frequency == 0:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "title": "" }, { "docid": "c6d3b308306367e67749ba6631d7f6b2", "score": "0.5882096", "text": "def prepare_generator(self):\n self.train_ds = self.train_dataset.shuffle(self.shuffle_buffer).batch(\n self.train_batch_size\n ) # .repeat(epochs_to_repeat)\n self.valid_ds = self.valid_dataset.batch(self.valid_batch_size)\n self.test_ds = self.test_dataset.batch(self.test_batch_size)", "title": "" }, { "docid": "4741bbe3cd56c60769baddc8bd9f7f41", "score": "0.5880263", "text": "def on_train_end(self, model):\n pass", "title": "" }, { "docid": "d6f68c93c471315269222ccd819b5241", "score": "0.58765817", "text": "def _train_epoch(self, epoch):\n if epoch > 40: \n self.len_epoch = len(self.train_dataloader)\n\n if epoch > 60 and self.config[\"arch\"][\"args\"][\"rnn2\"]==True:\n assert self.dec_weight_loss == 1\n self.dec_weight_loss = 0.1\n\n if epoch >= self.start_BEloc_epoch:\n 
self.use_BE_localiser = True\n\n self.model.train()\n batch_time = AverageMeter(\"batch_time\")\n data_time = AverageMeter(\"data_time\")\n losses_kws = AverageMeter(\"losses_kws\")\n losses_dec = AverageMeter(\"losses_dec\")\n losses_loc = AverageMeter(\"losses_loc\")\n top1 = AverageMeter(\"top1\")\n end = time.time()\n \n pbar = tqdm(total=len(self.train_dataloader))\n for i, lstVwidx in enumerate(self.train_dataloader):\n count = []\n positives = 0\n for k in range(0,len(lstVwidx)):\n for l in lstVwidx[k][1]:\n if l != -1:\n positives +=1\n if l not in count:\n count.append(l)\n if len(count)>1: \n input, lens, widx, target, localization_mask,localization_mask_boundaries= transform_batch(lstVwidx, self.train_dataset.get_word_mask(),\n self.num_words, self.config)\n target = torch.from_numpy(target).cuda(async=True)\n input = torch.from_numpy(input).float().cuda(async=True)\n localization_mask = torch.from_numpy(localization_mask).float().cuda(async=True)\n widx = torch.from_numpy(widx).cuda(async=True)\n grapheme = []\n phoneme = []\n p_lens = []\n for w in widx:\n p_lens.append(len(self.train_dataset.get_GP(w)[0]))\n grapheme.append(self.train_dataset.get_GP(w)[0])\n phoneme.append(self.train_dataset.get_GP(w)[1])\n input_var = Variable(input)\n p_lens = np.asarray(p_lens)\n target_var = Variable(target.view(-1,1)).float()\n if self.g2p:\n graphemeTensor = Variable(self.train_dataset.grapheme2tensor_g2p(grapheme)).cuda()\n phonemeTensor = Variable(self.train_dataset.phoneme2tensor_g2p(phoneme)).cuda()\n preds = self.model(vis_feat_lens=lens, p_lengths=p_lens, phonemes=phonemeTensor[:-1].detach(),\n graphemes=graphemeTensor.detach(), vis_feats=input_var, use_BE_localiser\n =self.use_BE_localiser, epoch=epoch, config=self.config)\n tdec = phonemeTensor[1:]\n else:\n graphemeTensor = Variable(self.train_dataset.grapheme2tensor(grapheme)).cuda()\n phonemeTensor = Variable(self.train_dataset.phoneme2tensor(phoneme)).cuda()\n preds = self.model(vis_feat_lens=lens, p_lengths=p_lens, phonemes=phonemeTensor.detach(),\n graphemes=graphemeTensor[:-1].detach(), vis_feats=input_var, use_BE_localiser\n =self.use_BE_localiser, epoch=epoch, config=self.config) #changed vis_feat_lens from lens to p_lens\n tdec = graphemeTensor[1:]\n loss_dec = module_loss.nll_loss(preds[\"odec\"].view(preds[\"odec\"].size(0)*preds[\"odec\"].size(1),-1),\n tdec.view(tdec.size(0)*tdec.size(1)))\n loss_kws = self.BCE_loss(preds[\"max_logit\"], target_var)\n if self.loc_weight_loss:\n localization_mask = localization_mask*-1000000\n o_logits = localization_mask + preds[\"o_logits\"].squeeze(-1)\n max_localised = o_logits.max(1)[0]\n loss_loc = self.BCE_loss(max_localised.unsqueeze(1), target_var)\n loss_total = self.kws_weight_loss*loss_kws + self.dec_weight_loss*loss_dec + self.loc_weight_loss*loss_loc\n else: \n loss_total = self.kws_weight_loss*loss_kws+ self.dec_weight_loss*loss_dec\n loss_loc = loss_total\n PTrue = preds[\"keyword_prob\"]\n PFalseTrue = torch.cat((PTrue.add(-1).mul(-1),PTrue),1)\n prec1 = module_met.accuracy(PFalseTrue.data, target, topk=(1,))[0]\n losses_kws.update(loss_kws.item(), input.size(0))\n losses_dec.update(loss_dec.item(), input.size(0))\n losses_loc.update(loss_loc.item(), input.size(0))\n top1.update(prec1[0], input.size(0))\n self.optimizer.zero_grad()\n loss_total.backward()\n clip_grad_norm(self.model.parameters(), self.clip, 'inf') #this might not work\n self.optimizer.step()\n batch_time.update(time.time() - end)\n end = time.time()\n\n \n pbar.update(1)\n 
self.writer.set_step(epoch)\n self.writer.add_scalar(\"loss_kws\", losses_kws.avg)\n self.writer.add_scalar(\"loss_loc\", losses_loc.avg)\n self.writer.add_scalar(\"loss_dec\", losses_dec.avg)\n self.writer.add_scalar(\"acc\", top1.avg)\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) \\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) \\t'\n 'Loss_kws {loss_kws.val:.4f} ({loss_kws.avg:.4f})\\t'\n 'Loss_loc{loss_loc.val:.4f} ({loss_loc.avg:.4f})\\t'\n 'Loss_dec {loss_dec.val:.4f} ({loss_dec.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch, i, len(self.train_dataloader), batch_time=batch_time, data_time= data_time,\n loss_kws=losses_kws, loss_loc=losses_loc, loss_dec=losses_dec, top1=top1))\n self.logger.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) \\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) \\t'\n 'Loss_kws {loss_kws.val:.4f} ({loss_kws.avg:.4f})\\t'\n 'Loss_loc {loss_loc.val:.4f} ({loss_loc.avg:.4f})\\t'\n 'Loss_dec {loss_dec.val:.4f} ({loss_dec.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch, i, len(self.train_dataloader), batch_time=batch_time, data_time= data_time,\n loss_kws=losses_kws, loss_loc=losses_loc, loss_dec=losses_dec, top1=top1))\n\n pbar.close()\n \n if self.do_validation:\n self._valid_epoch(epoch) \n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()", "title": "" }, { "docid": "c72f936a13636564ba9b525b40b91777", "score": "0.5874249", "text": "def __init__(self, params):\n self.logger = params['logger']\n self.reload = params['reload']\n self.metrics = params['metrics']\n self.shuffle = params['shuffle']\n self.verbose = params['verbose']\n self.n_epochs = params['n_epochs']\n self.eval_each = params['eval_each']\n self.batch_size = params['batch_size']\n self.eval_on_sets = params['eval_on_sets']\n self.epochs_for_save = params['epochs_for_save']\n self.eval_each_epochs = params['eval_each_epochs']\n self.parallel_loaders = params['parallel_loaders']\n self.data_augmentation = params['data_augmentation']\n self.sampling_save_mode = params['sampling_save_mode']\n self.homogeneous_batches = params['homogeneous_batches']\n self.start_eval_on_epoch = params['start_eval_on_epoch']\n self.outputs_ids_dataset = params['outputs_ids_dataset']\n\n # Preparing dict for training\n self.training_params = self.get_training_params(params)\n\n ## If reload option is used try first to look for an existing model\n # before creating one\n if self.reload:\n self.model = model_zoo.load_or_create_model(params)\n else:\n self.model = model_zoo.create_model(params)\n\n self.dataset = build_dataset(params)\n\n ########### Callbacks\n self.buildCallbacks(params, self.model, self.dataset)\n ###########\n\n ########### Training\n self.train()\n\n ###########", "title": "" }, { "docid": "65e3b103002061a0d1293d4a791f745d", "score": "0.58717483", "text": "def _prepare_for_training(self, batch_size, log_training):\n\n self.reset()\n\n if self.X_train is None:\n raise TypeError(\"Training data cannot be empty!\")\n\n y_input = tf.placeholder(tf.float32, shape=(None, self.classes_num))\n\n with tf.variable_scope('cnn'):\n self.inference = self._inference()\n\n self.logits = self.inference\n\n with tf.name_scope('training'):\n loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=y_input, logits=self.logits))\n optimize = tf.train.AdamOptimizer().minimize(loss)\n\n self.sess.run([tf.global_variables_initializer(), 
tf.local_variables_initializer()])\n\n self.saver = tf.train.Saver()\n\n if log_training:\n self.train_writer = tf.summary.FileWriter('logs/', self.sess.graph)\n\n # I MIGHT clean this up someday, but this day has yet to come\n # Prepare tensors and dataset for training data\n self.y_train_tensor = tf.constant(self.y_train)\n self.X_train_tensor = tf.constant(self.X_train)\n\n self.tr_data = tf.data.Dataset.from_tensor_slices((self.X_train_tensor, self.y_train_tensor))\n self.tr_data = self.tr_data.map(self._input_parser, num_parallel_calls=4)\n self.tr_data = self.tr_data.batch(batch_size)\n self.tr_data = self.tr_data.prefetch(buffer_size=2 * batch_size)\n\n self.training_iterator = tf.data.Iterator.from_structure(self.tr_data.output_types,\n self.tr_data.output_shapes)\n self.next_training_batch = self.training_iterator.get_next()\n self.training_init_op = self.training_iterator.make_initializer(self.tr_data)\n\n if self.X_test is not None:\n # Prepare tensors and dataset for test data\n self.y_test_tensor = tf.constant(self.y_test)\n self.X_test_tensor = tf.constant(self.X_test)\n\n self.val_data = tf.data.Dataset.from_tensor_slices((self.X_test_tensor, self.y_test_tensor))\n self.val_data = self.val_data.map(self._input_parser, num_parallel_calls=4)\n self.val_data = self.val_data.batch(batch_size)\n self.val_data = self.val_data.prefetch(buffer_size=2 * batch_size)\n\n self.test_iterator = tf.data.Iterator.from_structure(self.val_data.output_types,\n self.val_data.output_shapes)\n self.next_test_batch = self.test_iterator.get_next()\n self.test_init_op = self.test_iterator.make_initializer(self.val_data)\n\n return y_input, loss, optimize", "title": "" }, { "docid": "d11a057e1c4ae5b17be414e4fb9e0b08", "score": "0.5869519", "text": "def on_train_begin(self, logs=None):", "title": "" }, { "docid": "6a95351de5990a5e5e51c3e92c52657e", "score": "0.58661246", "text": "def _train_epoch(data_loader, model, criterion, optimizer):\n for i, (X, y) in enumerate(data_loader):\n # clear parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n output = model(X)\n loss = criterion(output, y)\n loss.backward()\n optimizer.step()", "title": "" }, { "docid": "d29bd4382c0e1d803783dc30b8b519bb", "score": "0.58621556", "text": "def start_training(self):\n # 加载训练数据 并进行数据扩增\n # load training data & do data augmentation\n transform_train = self._get_transforms(train_mode=True)\n transform_test = self._get_transforms(train_mode=False)\n\n # 得到可用于torch的DataLoader\n # get data loader\n train_loader = self._get_dataloader(transform_train, train_mode=True)\n test_loader = self._get_dataloader(transform_test, train_mode=False)\n\n # 开始训练\n # start training network\n self.logger.info(\" ======= Training =======\\n\")\n\n for epoch in range(self.last_epoch + 1, self.config.epochs):\n self.epoch = epoch\n\n # train one epoch\n train_loss, _ = self.train_step(train_loader)\n # validate network\n if epoch == 0 or (epoch + 1) % self.config.eval_freq == 0 or epoch == self.config.epochs - 1:\n self.test(test_loader)\n\n # 更新学习率lr\n # adjust learning rate\n if self.lr_scheduler:\n if self.config.lr_scheduler.type == 'ADAPTIVE':\n if self.config.lr_scheduler.mode == 'max':\n self.lr_scheduler.step(self.best_prec, epoch)\n elif self.config.lr_scheduler.mode == 'min' and train_loss is not None:\n self.lr_scheduler.step(train_loss, epoch)\n else:\n self.lr_scheduler.step(epoch)\n\n self.logger.info(f\"======== Training Finished. 
best_test_acc: {self.best_prec:.3%} ========\")", "title": "" }, { "docid": "2e8cb8d9131959a42a11795b863ede89", "score": "0.58590937", "text": "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n \n sum_ = 0\n right_ = 0\n for batch_idx, (data, target) in enumerate(tqdm(self.data_loader)):\n data = data.to(self.device)\n target = target.to(self.device)\n self.optimizer.zero_grad()\n _,output = self.model(data)\n # Compute accuracy\n pred_label = torch.argmax(output, dim=1)\n right_ += torch.sum((pred_label == target).float()).cpu().item()\n sum_+=data.shape[0]\n # 点云的分类损失\n loss_cls = F.cross_entropy(output,target)\n loss = loss_cls\n loss.backward()\n self.optimizer.step()\n # log\n if batch_idx > 0 and batch_idx % self.log_step == 0:\n # 写入当前step\n step = (epoch - 1) * self.len_epoch + batch_idx\n self.writer.set_step(step)\n # 写入损失曲线\n if type(loss_cls) == type(loss): self.train_metrics.update('loss_cls', loss_cls.item())\n self.train_metrics.update('loss', loss.item())\n self.train_metrics.update('acc', right_/sum_)\n # 控制台log\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f} ACC : {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item(),\n right_/sum_))\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n # self.do_validation = False\n if self.do_validation and epoch%self.config['trainer']['save_period'] == 0:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "title": "" }, { "docid": "e7dd935ea830972926efa9a5eaae0dea", "score": "0.585067", "text": "def preprocessing(self):\n print_verbose(\"\\n\\n####PreProcess####\\n\")\n # prune parts without annotations\n self.train.prune_empty_parts()\n\n # prepare features\n print_verbose(\"##PreparePipeline##\")\n self.pipeline.execute(self.train)\n self.pipeline.serialize(self.train, to_file=self.debug_file)\n\n # labeling\n print_verbose(\"##Labeling##\")\n self.labeler.label(self.train)\n\n print_verbose(len(self.train.documents), \"documents are prepared for training dataset.\")", "title": "" }, { "docid": "502293aeb55bb3e646fb3f8f643877cb", "score": "0.5850565", "text": "def __init__(self, gradient_checkpointing=False, latent_size=768):\n super(EncoderNewsVAE, self).__init__()\n\n self.model = VAE_Encoder_RobertaModel.from_pretrained(\"roberta-base\",\n add_pooling_layer=False,\n return_dict=True,\n gradient_checkpointing=gradient_checkpointing)\n\n # Add a fresh pooling layer\n self.model.pooler = PoolerEncoderNewsVAE(hidden_size=self.model.config.hidden_size,\n latent_size=latent_size)\n self.model.pooler.dense.weight.data.normal_(mean=0.0, std=self.model.config.initializer_range)", "title": "" }, { "docid": "9b3e2fed7b5cf807f91b90a6718fa60a", "score": "0.5847083", "text": "def train_model(data_train, data_valid, num_epochs, latent_dimension, lr_enc, lr_dec, KLD_alpha, sample_num, encoding_alphabet):\n\n print('num_epochs: ',num_epochs)\n\n # initialize an instance of the model\n optimizer_encoder = torch.optim.Adam(model_encode.parameters(), lr=lr_enc)\n optimizer_decoder = torch.optim.Adam(model_decode.parameters(), lr=lr_dec)\n\n data_train = data_train.clone().detach()\n data_train=data_train.to(device)\n\n #print(data)\n quality_valid_list=[0,0,0,0];\n for epoch in range(num_epochs):\n x = [i for i in range(len(data_train))] # random shuffle input\n shuffle(x)\n\n data_train = data_train[x]\n start = time.time()\n for batch_iteration in 
range(num_batches_train): # batch iterator\n\n loss, recon_loss, kld = 0., 0., 0.\n\n # manual batch iterations\n current_smiles_start, current_smiles_stop = batch_iteration * batch_size, (batch_iteration + 1) * batch_size\n inp_smile_hot = data_train[current_smiles_start : current_smiles_stop]\n\n # reshaping for efficient parallelization\n inp_smile_encode = inp_smile_hot.reshape(inp_smile_hot.shape[0], inp_smile_hot.shape[1] * inp_smile_hot.shape[2])\n latent_points, mus, log_vars = model_encode(inp_smile_encode)\n latent_points = latent_points.reshape(1, batch_size, latent_points.shape[1])\n\n # standard Kullback–Leibler divergence\n kld += -0.5 * torch.mean(1. + log_vars - mus.pow(2) - log_vars.exp())\n\n # initialization hidden internal state of RNN (RNN has two inputs and two outputs:)\n # input: latent space & hidden state\n # output: onehot encoding of one character of molecule & hidden state\n # the hidden state acts as the internal memory\n hidden = model_decode.init_hidden(batch_size = batch_size)\n\n # decoding from RNN N times, where N is the length of the largest molecule (all molecules are padded)\n decoded_one_hot = torch.zeros(batch_size, inp_smile_hot.shape[1], inp_smile_hot.shape[2]).to(device)\n for seq_index in range(inp_smile_hot.shape[1]):\n decoded_one_hot_line, hidden = model_decode(latent_points, hidden)\n decoded_one_hot[:, seq_index, :] = decoded_one_hot_line[0]\n\n\n decoded_one_hot = decoded_one_hot.reshape(batch_size * inp_smile_hot.shape[1], inp_smile_hot.shape[2])\n _, label_atoms = inp_smile_hot.max(2)\n label_atoms = label_atoms.reshape(batch_size * inp_smile_hot.shape[1])\n\n # we use cross entropy of expected symbols and decoded one-hot\n criterion = torch.nn.CrossEntropyLoss()\n recon_loss += criterion(decoded_one_hot, label_atoms)\n\n loss += recon_loss + KLD_alpha * kld\n\n # perform back propogation\n optimizer_encoder.zero_grad()\n optimizer_decoder.zero_grad()\n loss.backward(retain_graph=True)\n nn.utils.clip_grad_norm_(model_decode.parameters(), 0.5)\n optimizer_encoder.step()\n optimizer_decoder.step()\n\n if batch_iteration % 30 == 0:\n end = time.time()\n\n # assess reconstruction quality\n _, decoded_max_indices = decoded_one_hot.max(1)\n _, input_max_indices = inp_smile_hot.reshape(batch_size * inp_smile_hot.shape[1], inp_smile_hot.shape[2]).max(1)\n\n differences = 1. - torch.abs(decoded_max_indices - input_max_indices)\n differences = torch.clamp(differences, min = 0., max = 1.).double()\n quality = 100. * torch.mean(differences)\n quality = quality.detach().cpu().numpy()\n\n qualityValid=quality_in_validation_set(data_valid)\n\n new_line = 'Epoch: %d, Batch: %d / %d,\\t(loss: %.4f\\t| quality: %.4f | quality_valid: %.4f)\\tELAPSED TIME: %.5f' % (epoch, batch_iteration, num_batches_train, loss.item(), quality, qualityValid, end - start)\n print(new_line)\n start = time.time()\n\n\n\n qualityValid = quality_in_validation_set(data_valid)\n quality_valid_list.append(qualityValid)\n\n # only measure validity of reconstruction improved\n quality_increase = len(quality_valid_list) - np.argmax(quality_valid_list)\n if quality_increase == 1 and quality_valid_list[-1] > 50.:\n corr, unique = latent_space_quality(latent_dimension,sample_num = sample_num, encoding_alphabet=encoding_alphabet)\n else:\n corr, unique = -1., -1.\n\n new_line = 'Validity: %.5f %% | Diversity: %.5f %% | Reconstruction: %.5f %%' % (corr * 100. / sample_num, unique * 100. 
/ sample_num, qualityValid)\n\n print(new_line)\n with open('results.dat', 'a') as content:\n content.write(new_line + '\\n')\n\n if quality_valid_list[-1] < 70. and epoch > 200:\n break\n\n if quality_increase > 20:\n print('Early stopping criteria')\n break", "title": "" }, { "docid": "6f37f0873d30457da9c022c5dd53580e", "score": "0.5844625", "text": "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n for batch_idx, inputs in enumerate(self.train_loader):\n before_op_time = time.time()\n outputs, losses = self.process_batch(inputs)\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n duration = time.time() - before_op_time\n # log less frequently after the first 2000 steps to save time & disk space\n # early_phase = batch_idx % self.opt.log_frequency == 0 #and self.step < 2000\n # late_phase = self.step % 2000 == 0\n if batch_idx % self.opt.log_frequency == 0:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n self.step += 1", "title": "" }, { "docid": "38d2c5be600b6d61f43fb7e6b0e39b7a", "score": "0.5837761", "text": "def _on_training_end(self) -> None:\n pass", "title": "" }, { "docid": "d13376a2ac0b7b2f3bb755a9d50c5361", "score": "0.58325905", "text": "def main(self):\n if self.data_loader.directories.make_dirs:\n print(\"+++++ START RUN | saved files in {} +++++\".format(\\\n self.data_loader.directories.result_dir_no_prefix))\n else:\n print(\"+++++ START RUN +++++ | no save mode\")\n self._save_model_params_to_file()\n training = Training(self)\n testing = Testing(self)\n start = self.epoch if self.epoch else 1\n for epoch in range(start, self.epochs+1):\n epoch_watch = time.time()\n epoch_metrics = EpochMetrics()\n training.train(epoch_metrics)\n train_loss = self._save_train_metrics(epoch, epoch_metrics)\n print(\"====> Epoch: {} train set loss avg: {:.4f}\".format(epoch, train_loss))\n testing.test(epoch, epoch_metrics)\n test_loss = self._save_test_metrics(epoch_metrics)\n print(\"====> Test set loss avg: {:.4f}\".format(test_loss))\n self._sample(epoch)\n if self.lr_scheduler:\n self.lr_scheduler.step()\n if self.save_model_state:\n self.epoch = epoch+1 # signifying to continue from epoch+1 on.\n torch.save(self, self.data_loader.directories.result_dir + \"/model_state.pt\")\n print(\"{:.2f} seconds for epoch {}\".format(time.time() - epoch_watch, epoch))\n if self.data_loader.directories.make_dirs:\n self._save_final_model()\n print(\"+++++ RUN IS FINISHED +++++\")", "title": "" }, { "docid": "599fedab7c14a441ff09c3e457382c54", "score": "0.58261883", "text": "def train(self):\n\t\tfor epoch in range(self.current_epoch, self.config.max_epoch):\n\t\t\tself.current_epoch = epoch\n\t\t\tself.train_one_epoch()\n\t\tself.current_epoch = self.config.max_epoch\n\n\t\treturn", "title": "" }, { "docid": "09d4bcb48b8ce93ec1585a9a264cd47e", "score": "0.58256704", "text": "def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.train_indices)", "title": "" }, { "docid": "71e6ccaf184a0e11175e8c89f61e3700", "score": "0.5821108", "text": "def train_one_epoch(self):\n raise NotImplementedError", "title": "" }, { "docid": "353b383ce93625f88cc6d09cd6253d87", "score": "0.5821049", "text": "def _mini_train_step(self):\n raise NotImplementedError", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.5817916", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.5817916", "text": 
"def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.5817916", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.5817916", "text": "def train(self):\n pass", "title": "" }, { "docid": "b445c78d591fe23619d33cebdffba76f", "score": "0.5816419", "text": "def on_epoch_begin(self, epoch, logs=None):", "title": "" }, { "docid": "ec6164d0f1c1be3d8ce5ee6e1a11b1c3", "score": "0.58155805", "text": "def train_epoch(self):\n raise NotImplementedError", "title": "" }, { "docid": "122f1718a32077334dcbf66ac3001652", "score": "0.58142114", "text": "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n self.model.zero_grad()\n acc_loss, acc_output, acc_target = 0.0, None, None\n for batch_idx, (data, token_ids, attn_mask, target, mlm_target) in enumerate(self.data_loader):\n data, token_ids, attn_mask, target, mlm_target = data.to(self.device), token_ids.to(self.device), attn_mask.to(self.device), target.to(self.device), mlm_target.to(self.device)\n\n output = self.model(data, token_ids, attn_mask)\n mlm_output = output[1]\n output = output[0]\n\n if self.config.config.get('pos_neg_ratio'):\n loss_binary = self.criterion(output, target, self.config['pos_neg_ratio'])\n else:\n loss_binary = self.criterion(output, target)\n\n if self.config['n_gpu'] > 1:\n mlm_loss = cross_entropy_loss(mlm_output.view(-1, self.model.module.config.vocab_size), mlm_target.view(-1))\n else:\n mlm_loss = cross_entropy_loss(mlm_output.view(-1, self.model.config.vocab_size), mlm_target.view(-1))\n \n loss = loss_binary + mlm_loss\n\n if self.config['trainer']['accumulation_steps'] > 1:\n loss = loss / self.config['trainer']['accumulation_steps']\n loss.backward()\n\n acc_loss += loss.item()\n if isinstance(acc_output, torch.Tensor) and isinstance(acc_target, torch.Tensor):\n acc_output = torch.cat([acc_output, output], dim=0)\n acc_target = torch.cat([acc_target, target], dim=0)\n else:\n acc_output = output\n acc_target = target\n\n if (batch_idx + 1) % self.config['trainer']['accumulation_steps'] == 0:\n\n global_batch_index = int(batch_idx / self.config['trainer']['accumulation_steps'])\n\n if self.config['clip_grad']:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config['clip_grad'])\n self.optimizer.step()\n\n if self.lr_scheduler is not None:\n if self.config['lr_scheduler']['step_every_batch']:\n if type(self.lr_scheduler) is torch.optim.lr_scheduler.ReduceLROnPlateau:\n self.lr_scheduler.step(val_log['loss'])\n else:\n self.lr_scheduler.step()\n\n output = acc_output.sigmoid()\n\n self.model.zero_grad()\n\n\n self.writer.set_step((epoch - 1) * self.len_epoch + global_batch_index)\n self.train_metrics.update('loss', acc_loss)\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, acc_target))\n\n\n\n if global_batch_index % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(global_batch_index),\n acc_loss))\n #self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n acc_loss, acc_output, acc_target = 0.0, None, None\n\n if global_batch_index == self.len_epoch:\n break\n log = self.train_metrics.result()\n log_total = self.train_metrics.total()\n\n precision, recall, f1 = calculate_prec_rec_f1(log_total)\n\n if self.do_validation:\n val_log, val_log_total = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n val_precision, 
val_recall, val_f1 = calculate_prec_rec_f1(val_log_total)\n\n if self.lr_scheduler is not None:\n if not self.config['lr_scheduler']['step_every_batch']:\n if type(self.lr_scheduler) is torch.optim.lr_scheduler.ReduceLROnPlateau:\n self.lr_scheduler.step(val_log['loss'])\n else:\n self.lr_scheduler.step()\n\n\n if self.do_validation:\n additional_log = {\"tp\": log_total['tp'], \"fp\": log_total['fp'], \"tn\": log_total['tn'],\n \"fn\": log_total['fn'], \"precision\": precision, \"recall\": recall, \"f1\": f1,\n \"val_tp\": val_log_total['tp'], \"val_fp\": val_log_total['fp'],\n \"val_tn\": val_log_total['tn'], \"val_fn\": val_log_total['fn'],\n \"val_precision\": val_precision, \"val_recall\": val_recall, \"val_f1\": val_f1}\n else:\n additional_log = {\"tp\": log_total['tp'], \"fp\": log_total['fp'], \"tn\": log_total['tn'],\n \"fn\": log_total['fn'], \"precision\": precision, \"recall\": recall, \"f1\": f1}\n log.update(additional_log)\n return log", "title": "" }, { "docid": "fb703e4180e27f39aa4059848a2d0749", "score": "0.5808232", "text": "def preprocess(self):\n\n log.logger.info(\"Starting preprocessing...\")\n\n preprocessed = lambda G: without_low_degree_nodes(\n without_selfloops(G), minimum=self.config['min_degree'])\n\n if self.test:\n networks = [preprocessed(G) for G in (self.training, self.test)]\n self.training, self.test = without_uncommon_nodes(networks)\n else: # Only a training network\n self.training = preprocessed(self.training)\n\n log.logger.info(\"Finished preprocessing.\")", "title": "" } ]
fff3bec00f214119f70bab2f9b98afd1
Contrast stretching by break point (number provided by Rick Kohrs)
[ { "docid": "936971accc3d801e92a7f02eea46286e", "score": "0.0", "text": "def breakpoint_stretch(C, breakpoint):\n lower = normalize(C, 0, 10) # Low end\n upper = normalize(C, 10, 255) # High end\n\n # Combine the two datasets\n # This works because if upper=1 and lower==.7, then\n # that means the upper value was out of range and the\n # value for the lower pass was used instead.\n combined = np.minimum(lower, upper)\n\n return combined", "title": "" } ]
[ { "docid": "ca537003831770effb6236db0cfa5019", "score": "0.63466364", "text": "def adj_contrast(src):\n clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(2, 2))\n return clahe.apply(src)", "title": "" }, { "docid": "b7df05c15d94e7d7f048f8f1c1ef6835", "score": "0.6288289", "text": "def contrast_stretching(img):\n p2, p98 = np.percentile(img, (2, 98))\n img_con = exposure.rescale_intensity(img, in_range=(p2, p98))\n logging.info('Contrast stretching performed!')\n return img_con", "title": "" }, { "docid": "3a266fa2d0a650fb740080ce11012061", "score": "0.6095392", "text": "def contrast_stretch(im):\n in_min = np.percentile(im, 5)\n in_max = np.percentile(im, 95)\n\n out_min = 0.0\n out_max = 255.0\n\n out = im - in_min\n out *= ((out_min - out_max) / (in_min - in_max))\n out += in_min\n\n return out", "title": "" }, { "docid": "6d9fafd5d7cd7dd0b8ed324f0bfd84ca", "score": "0.60911137", "text": "def contrast_stretch(im):\n in_min = np.percentile(im, 5)\n in_max = np.percentile(im, 95)\n\n out_min = 0.0\n out_max = 255.0\n\n out = im - in_min\n out *= ((out_min - out_max) / (in_min - in_max))\n out += in_min\n\n return out", "title": "" }, { "docid": "f548c548aa7c76bb88ad496f564540d2", "score": "0.6065221", "text": "def snellen_contrast_game(start_contrast=32, size=10.0, timer=15, p=7./9, exam=False):\n\n contrast_step = start_contrast/2\n contrast = start_contrast\n\n fig, ax = _setup_figure()\n\n tstart = time.time()\n\n _passed = True\n record = []\n while True:\n record.append(contrast)\n\n if exam:\n precision = float(contrast_step)/contrast\n if precision < exam or contrast_step == 1:\n print ('\\nContrast threshold measured to be {}/256 to a precision of {:.2g}%.'\n ''.format(contrast, 100*precision))\n break\n elif (time.time() - tstart)/60. > timer:\n print \"\\nOut of time.\"\n break\n\n ax.cla()\n n = _get_n(size, fig)\n brighter = contrast/2\n darker = contrast - brighter\n color = [(256/2 - brighter)/256.0]*3 + [1.0]\n bgcolor = [(256/2 + darker)/256.0]*3 + [1.0]\n ax.set_axis_bgcolor(bgcolor)\n\n print 'contrast = {}/256'.format(contrast)\n passed = _draw_test(fig, ax, size, color=color, bgcolor=bgcolor, exam=exam, p=p)\n\n _contrast = contrast\n istep = round(contrast_step)\n\n contrast = contrast - istep if passed else contrast + istep\n if contrast > 256: contrast = 256\n if contrast < 1: contrast = 1\n contrast_step = contrast_step*_step_grow if passed == _passed else contrast_step*_step_shrink\n if contrast_step < 1: contrast_step = 1\n\n\n if passed and _passed and _contrast == 1:\n print \"Smallest posible contrast reached. 
Reducing text size instead and resetting start contrast.\"\n contrast = start_contrast\n contrast_step = 256/2\n size *= 0.8\n\n _passed = passed\n\n n = 10\n avg = sum(record[-n:])/float(n)\n print '\\nAverage of last {} measurements = {:.3g}'.format(n, avg)\n\n _plt.close(fig)", "title": "" }, { "docid": "31d02b8838c40fc9a64286a57501e876", "score": "0.6047526", "text": "def adjust_contrast(img, contrast_level):\n assert (contrast_level >= 0.0), \"contrast_level too low.\"\n assert (contrast_level <= 1.0), \"contrast_level too high.\"\n return (1 - contrast_level) / 2.0 + img.dot(contrast_level)", "title": "" }, { "docid": "409fc40abc8dcaddbaf85a5e429d4862", "score": "0.5950373", "text": "def imLinescanPrep(im):\n #14.01.02-21.11: created function to maximize contrast of linescan previews\n #debug(\"auto-contrasting the PIL way.\")\n im=ImageOps.autocontrast(im)\n return im", "title": "" }, { "docid": "93c3f546d8f3ea1f8601bbdf82c84f60", "score": "0.59426296", "text": "def adjustContrast(frame, clipLimit=2.0, tileGridSize=(8,8)):\r\n # clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\r\n clahe = cv.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)\r\n return clahe.apply(frame)", "title": "" }, { "docid": "185c5a64a2ef6e0b1ca1bb9b9ecdedf6", "score": "0.5888752", "text": "def auto_contrast(image):\r\n hist = histogram(image)\r\n p5 = shade_at_percentile(hist, .01)\r\n p95 = shade_at_percentile(hist, .99)\r\n a = 255.0 / (p95 + p5)\r\n b = -1.0 * a * p5\r\n\r\n result = (image.astype(float) * a) + b\r\n result = result.clip(0, 255.0)\r\n\r\n return image", "title": "" }, { "docid": "9ca672f7f85ad4252bc686eb76ac1f49", "score": "0.5885132", "text": "def contrast(self, img, scale):\n sp_img = self.resize(img, scale)\n sum_contrast = 0\n for x in range(sp_img.shape[0]):\n for y in range(sp_img.shape[1]):\n px = sp_img[x, y]\n l = 0.0\n d = 0\n if x > 0:\n l += abs(px - sp_img[x - 1][y])\n d += 1\n if y > 0:\n l += abs(px - sp_img[x][y - 1])\n d += 1\n if x < sp_img.shape[0] - 1:\n l += abs(px - sp_img[x + 1][y])\n d += 1\n if y < sp_img.shape[1] - 1:\n l += abs(px - sp_img[x][y + 1])\n d += 1\n local_contrast = l / d\n sum_contrast += local_contrast\n avg_contrast = sum_contrast / (sp_img.shape[0] * sp_img.shape[1])\n return avg_contrast", "title": "" }, { "docid": "02a894d2d6f6a14cf5e708648fd9a228", "score": "0.58796114", "text": "def contrast(img, w_c=1):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return np.absolute(cv2.Laplacian(gray, cv2.CV_32F)) ** w_c + 1", "title": "" }, { "docid": "019e7f9e856749680ebd12331b8d346d", "score": "0.5806571", "text": "def increase_contrast(bgr_img):\n # Convert image to LAB color space\n lab_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2LAB)\n\n # Split the 3 channels\n l, a, b = cv2.split(lab_img)\n\n # Apply CLAHE to the L-channel\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n cl = clahe.apply(l)\n\n # Merge the CLAHE enhanced L-channel with the other channels\n l_img = cv2.merge((cl, a, b))\n\n final = cv2.cvtColor(l_img, cv2.COLOR_LAB2BGR)\n return final", "title": "" }, { "docid": "25b022816d41e6b56e184287a6e10ca7", "score": "0.578645", "text": "def adjust_contrast(colordef, contrast, format, verbose):\n # check stdin for more colors\n if len(colordef) < 2 and not sys.stdin.isatty():\n stdincolors = from_stdin()\n colordef = (list(colordef) + list(stdincolors))[:2]\n\n if len(colordef) == 0:\n click.echo(\"At least one color definition required!\")\n sys.exit(1)\n\n c1 = repacolors.Color(colordef[0])\n # still not 
enough colors, fall back to black/white\n if len(colordef) < 2:\n # using #757575 as middle point\n c2 = repacolors.Color(\"#fff\") if c1.luminance < .178 else repacolors.Color(\"#000\")\n else:\n c2 = repacolors.Color(colordef[1])\n\n adjc1, adjc2 = c1.adjust_contrast(c2, contrast)\n\n if not verbose or not sys.stdout.isatty():\n adjc1.print(format)\n adjc2.print(format)\n\n else:\n if c1 == adjc1 and c2 == adjc2:\n print(f\"Contrast was already OK! ({c1.contrast_ratio(c2)} > {contrast})\")\n print(f\"{adjc2.termfg}{adjc1.termbg} {adjc1.lhex} \")\n print(f\"{adjc1.termfg}{adjc2.termbg} {adjc2.lhex} \")\n else:\n print(f\"Colors adjusted. ({c1.contrast_ratio(c2):.4f} => {adjc1.contrast_ratio(adjc2):.4f})\")\n print(f\"{c2.termfg}{c1.termbg} {c1.lhex} {c2.termreset} => {adjc2.termfg}{adjc1.termbg} {adjc1.lhex} \")\n print(f\"{c1.termfg}{c2.termbg} {c2.lhex} {c1.termreset} => {adjc1.termfg}{adjc2.termbg} {adjc2.lhex} \")\n\n print(c1.termreset)\n adjc1.print(format)\n adjc2.print(format)", "title": "" }, { "docid": "93d0f3cb54b8eb883531db1e836e9d30", "score": "0.5785027", "text": "def _difference_score(self,foreground255,background255):\r\n penalty=0\r\n bd=self._brightness_difference(foreground255,background255)-125\r\n if bd<0: penalty+=100\r\n hd=self._hue_difference(foreground255,background255)-500\r\n if hd<0: penalty+=100\r\n if hd<-400: penalty+=500\r\n return bd+hd-penalty", "title": "" }, { "docid": "549089e74fb4599aaea85542fa1d08eb", "score": "0.57171017", "text": "def kontrast_image(self):\n #We read input image as numpy array\n img = imageio.imread(self.kontrast_image_path)\n #We map it to 0-1 color space\n img = img.astype(float) / 255.0\n\n #We get the laplace of the image for each channel\n laplace = np.zeros((img.shape[0], img.shape[1], 3))\n if img.ndim == 3:\n for i in range(3):\n laplace[:, :, i] = ndimage.laplace(img[:, :, i])\n else:\n for i in range(3):\n laplace[:, :, i] = ndimage.laplace(img)\n\n #We read number of iterations specified by the user\n if self.iterationKontrast.text() != \"\":\n iterations = int(self.iterationKontrast.text())\n else:\n iterations = 1\n\n #We ead contrast value specified by the user\n if self.kontrastValue.text() != \"\":\n kontrastValue = int(self.kontrastValue.text())\n else:\n kontrastValue = 1\n\n #We get the number of channels for the image\n channels = 0\n if img.ndim == 3:\n channels = 3\n elif img.ndim == 2:\n channels = 1\n img_ = np.zeros((img.shape[0], img.shape[1], channels))\n img_[:, :, 0] = img\n img = img_\n\n #We loop trough all channels of the image, solve poisson and assemble image\n for i in range(channels):\n ig = poi.poisson(img[:, :, i], iterations, mode='kf', laplace=laplace[:, :, i], k=kontrastValue) # We Sovle poisson by specifying laplacen of the channel and contrast values as parameters\n img[:, :, i] = ig[:, :, -1]\n\n #We map image back to 0-255 color space\n prep_img = img * 255\n prep_img = prep_img.astype(np.uint8)\n self.kontrast_img = prep_img\n\n #We display iamge to user as output image\n if prep_img.shape[2] == 1:\n self.MplWidget_con.canvas.ax.imshow(prep_img[:, :, 0].copy() / 255, plt.cm.gray)\n elif prep_img.shape[2] == 3:\n self.MplWidget_con.canvas.ax.imshow(prep_img)\n\n self.MplWidget_con.canvas.draw()", "title": "" }, { "docid": "951b2138140b6d691b2b63b0cb72d853", "score": "0.57045615", "text": "def calculate_colour(c):\n c = c / 255.0\n if c <= 0.03928:\n c = c / 12.92\n else:\n c = math.pow((c + 0.055) / 1.055, 2.4)\n return c", "title": "" }, { "docid": "04bf8e0710676997530c7b465d7f191a", 
"score": "0.5688644", "text": "def brightness(table):\n value = random.uniform(0.8, 1.2)\n contrast = ImageEnhance.Brightness(table)\n contrast = contrast.enhance(value)\n return contrast", "title": "" }, { "docid": "b5f420fbe47b76f60c9a000567b71559", "score": "0.56657064", "text": "def Improve_Contrast(Image, Min, Max):\r\n Image[Image > Max] = Max\r\n Image[Image < Min] = Min\r\n Image = (Image - Min)/(Max - Min)\r\n return Image", "title": "" }, { "docid": "c262ec04bd9e28b671e09e71ac004207", "score": "0.56516683", "text": "def expand_contrast(img: np.ndarray, lower=0, upper=255) -> np.ndarray:\n return np.interp(img, [np.min(img), np.max(img)], [lower, upper])", "title": "" }, { "docid": "ecaccb3d3e1f50b1c2d8911d90da10cf", "score": "0.5623491", "text": "def ContrastEnhance(self, data, kwidth, bias, scaling, out = None):\n assert scaling == 1\n return filter.ContrastEnhance(data, kwidth, kwidth, bias = bias,\n out_data = out)", "title": "" }, { "docid": "9d0bb8348fb1e9849d086e75e3638fdc", "score": "0.56161296", "text": "def get_display_contrast(self):\n\n return int(self.query(\"BRIGT?\"))", "title": "" }, { "docid": "cecc63bf7912d1d1fa5004ab328b18a4", "score": "0.5611633", "text": "def npContrast(a,minimum=0,maximum=255):\n #debug(\"auto-contrasting the Numpy way.\")\n #14.01.02-21.11: implimented numpy contrast rather than ImageChops for LS preview.\n r=maximum-minimum #range\n a=a-np.min(a) # start at zero\n a=a/np.max(a) # now span 0-1\n a=a*r #now span correct range\n a=a+minimum #now go from minimum to maximum\n return a", "title": "" }, { "docid": "3d4bce1537e789e191b3f36e75e191a0", "score": "0.55975485", "text": "def Improve_Contrast_Colors(Image, Minima, Maxima):\r\n for i, m, M in zip(range(3), Minima, Maxima):\r\n Image[:, :, i] = (Image[:, :, i] - m)/(M - m)\r\n Image[Image > 1] = 1\r\n Image[Image < 0] = 0\r\n return Image", "title": "" }, { "docid": "f42da0ea66fab6e9ddfdb987c1919645", "score": "0.55637467", "text": "def adjust_contrast(img, factor=1.):\n gray_img = bgr2gray(img)\n hist = np.histogram(gray_img, 256, (0, 255))[0]\n mean = round(np.sum(gray_img) / np.sum(hist))\n degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype)\n degenerated = gray2bgr(degenerated)\n contrasted_img = cv2.addWeighted(\n img.astype(np.float32), factor, degenerated.astype(np.float32),\n 1 - factor, 0)\n return contrasted_img.astype(img.dtype)", "title": "" }, { "docid": "98e60f4a42a5621baeffcdfa84f93850", "score": "0.55560976", "text": "def difference1(source, color):\n return (source - color) / (255.0 - color)", "title": "" }, { "docid": "08a364ff6c8623ea39ffae2eec01608a", "score": "0.5523611", "text": "def color_adjust(img):\n logger.trace(\"Color adjusting image\")\n return img.astype('float32') / 255.0", "title": "" }, { "docid": "50e66d8421c8c1a0cdbcd9cc75cb15a2", "score": "0.5523325", "text": "def contrast(Im):\r\n Cx = np.abs(Im - np.pad(Im, ((1, 0), (0, 0)), 'constant')[:-1, :])\r\n Cy = np.abs(Im - np.pad(Im, ((0, 0), (1, 0)), 'constant')[:, :-1])\r\n return Cx, Cy", "title": "" }, { "docid": "2d6d276fab6d44b6604c5c05f77ccfa6", "score": "0.5523274", "text": "def cooling(img):\n c_r, c_g, c_b = cv2.split(img)\n c_r = cv2.LUT(c_r, incr_ch_lut).astype(np.uint8)\n c_b = cv2.LUT(c_b, decr_ch_lut).astype(np.uint8)\n img = cv2.merge((c_r, c_g, c_b))\n c_b = cv2.LUT(c_b, decr_ch_lut).astype(np.uint8)\n\n # increase color saturation\n c_h, c_s, c_v = cv2.split(cv2.cvtColor(img, cv2.COLOR_RGB2HSV))\n c_s = cv2.LUT(c_s, incr_ch_lut).astype(np.uint8)\n\n return 
cv2.cvtColor(cv2.merge((c_h, c_s, c_v)), cv2.COLOR_HSV2RGB)", "title": "" }, { "docid": "45d62af320210f101921ad97a8714860", "score": "0.5499785", "text": "def _augment(xs):\n\n # `xs` has shape [depth, height, width] with value in [0, 1].\n brt_gamma, brt_gain = np.random.uniform(low=0.9, high=1.1, size=2)\n aj_bright = adjust_gamma(xs, brt_gamma, brt_gain)\n contrast_gain = np.random.uniform(low=5, high=10)\n aj_contrast = adjust_sigmoid(aj_bright, gain=contrast_gain)\n return aj_contrast", "title": "" }, { "docid": "1e712af88c7d71987501d59c6bcfa1c0", "score": "0.54905856", "text": "def aberration_low(R):\n return -_lk4 / R", "title": "" }, { "docid": "a6b019c138a7b5c038d879c1b518ab1b", "score": "0.5405324", "text": "def warming(img):\n c_r, c_g, c_b = cv2.split(img)\n c_r = cv2.LUT(c_r, decr_ch_lut).astype(np.uint8)\n c_b = cv2.LUT(c_b, incr_ch_lut).astype(np.uint8)\n img = cv2.merge((c_r, c_g, c_b))\n\n # decrease color saturation\n c_h, c_s, c_v = cv2.split(cv2.cvtColor(img, cv2.COLOR_RGB2HSV))\n c_s = cv2.LUT(c_s, decr_ch_lut).astype(np.uint8)\n return cv2.cvtColor(cv2.merge((c_h, c_s, c_v)), cv2.COLOR_HSV2RGB)", "title": "" }, { "docid": "71617113cb99ff721eca826ed9087666", "score": "0.53935355", "text": "def adjust_brightness(self, pixel):\n r,g,b = pixel\n r = self.GAMMA[r]\n if r < 3: r = 0\n g = self.GAMMA[g]\n if g < 3: g = 0\n b = self.GAMMA[b]\n if b < 3: b = 0\n return (r,g,b)", "title": "" }, { "docid": "aebef2fc0d2db0b7a2a137cf2f68d8ff", "score": "0.5392146", "text": "def rgb_brightness_of_one_line(image, margin, line_index, do_x):\n brightness = 0\n points = 0\n for position in xrange(margin, image.size[0] - margin if do_x else image.size[1] - margin, 1):\n point = (position, line_index) if do_x else (line_index, position)\n brightness += rgb_brightness(image.getpixel(point))\n points += 1\n \n return brightness / points if points > 0 else 0", "title": "" }, { "docid": "d138d69391dabd7678bd82366d64da34", "score": "0.53775924", "text": "def difference2(source, color):\n return (color - source) / color", "title": "" }, { "docid": "dec13ba3d448c77073b76b0a2313a57b", "score": "0.53735155", "text": "def contrast(img, alpha = 2.0, beta = 10):\n\n return cv2.convertScaleAbs(img, alpha = alpha, beta = beta)", "title": "" }, { "docid": "6ce2eae2d8bc0babb257c64fab03bfc8", "score": "0.5352605", "text": "def augment_with_contrast(dataset, filename):\n\n contrast_connectors = ['but', 'however', 'yet']\n scalar_slots = get_scalar_slots()\n\n alignments = []\n\n print('Augmenting MRs with contrast in ' + str(filename))\n\n # Read in the data\n data_cont = data_loader.init_test_data(os.path.join(config.DATA_DIR, dataset, filename))\n mrs, utterances = data_cont['data']\n slot_sep, val_sep, val_sep_closing = data_cont['separators']\n\n for i, mr in enumerate(mrs):\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, slot_orig, _ = data_loader.parse_slot_and_value(slot_value, val_sep, val_sep_closing)\n mr_dict[slot] = value\n mrs[i] = mrs[i].replace(slot_orig, slot)\n\n alignments.append(find_alignment(utterances[i], mr_dict))\n\n for i in range(len(utterances)):\n for contrast_conn in contrast_connectors:\n contrast_pos = utterances[i].find(contrast_conn)\n if contrast_pos >= 0:\n slot_before = None\n value_before = None\n slot_after = None\n value_after = None\n\n for pos, slot, value in alignments[i]:\n if pos > contrast_pos:\n if not slot_before:\n break\n if slot in scalar_slots:\n slot_after = slot\n 
value_after = value\n break\n else:\n if slot in scalar_slots:\n slot_before = slot\n value_before = value\n\n if slot_before and slot_after:\n if slot_before in scalar_slots and slot_after in scalar_slots:\n if scalar_slots[slot_before][value_before] - scalar_slots[slot_after][value_after] == 0:\n mrs[i] += ', ' + CONCESSION_TOKEN + '[{0} {1}]'.format(slot_before, slot_after)\n else:\n mrs[i] += ', ' + CONTRAST_TOKEN + '[{0} {1}]'.format(slot_before, slot_after)\n\n break\n\n new_df = pd.DataFrame(columns=['mr', 'ref'])\n new_df['mr'] = mrs\n new_df['ref'] = utterances\n\n filename_out = ''.join(filename.split('.')[:-1]) + '_augm_contrast.csv'\n new_df.to_csv(os.path.join(config.DATA_DIR, dataset, filename_out), index=False, encoding='utf8')", "title": "" }, { "docid": "8fc61ed3b483c3f5a1693d6e466499b7", "score": "0.53472745", "text": "def contrast_info(self):\n pass", "title": "" }, { "docid": "0943671dad8a1eae2fe204e46a7871b0", "score": "0.53280604", "text": "def paint_fence(self, in_n, in_k):\n if in_n == 0:\n return 0\n if in_n == 1:\n return in_k\n same_color = 0\n diff_color = in_k\n total = same_color + diff_color\n for i in range(1, in_n):\n same_color = diff_color\n diff_color = (in_k - 1)*total\n total = same_color + diff_color\n return total", "title": "" }, { "docid": "bca9fd911cfbae1aa8dc85e065ef33f6", "score": "0.53251976", "text": "def color_augmentation(img: PIL.Image.Image, background: np.ndarray, norm_factor: np.ndarray,\r\n deconv_m: np.ndarray) -> PIL.Image.Image:\r\n ary = img_to_ary(img)\r\n ary = rgb_to_od(ary)\r\n ary = ary - background\r\n ary = np.dot(ary, deconv_m)\r\n\r\n def get_random_param(min, max):\r\n return np.random.rand(3) * (max - min) + min\r\n\r\n stain_factor_c = random.uniform(-0.8, 0.8)\r\n stain_factor = get_random_param(-0.4, 0.4) + stain_factor_c\r\n stain_factor = np.exp(stain_factor)\r\n result_background = get_random_param(0.0, 0.4)\r\n\r\n conv_m_h = np.exp(get_random_param(-1.0, 1.0))\r\n conv_m_h = h_od * conv_m_h\r\n conv_m_h = conv_m_h / np.linalg.norm(conv_m_h)\r\n conv_m_e = np.exp(get_random_param(-1.0, 1.0))\r\n conv_m_e = e_od * conv_m_e\r\n conv_m_e = conv_m_e / np.linalg.norm(conv_m_e)\r\n conv_m_r = np.cross(conv_m_h, conv_m_e)\r\n conv_m_r = conv_m_r / np.linalg.norm(conv_m_r)\r\n conv_m = np.array([conv_m_h, conv_m_e, conv_m_r])\r\n \r\n ary = ary * norm_factor * stain_factor\r\n ary = np.dot(ary, conv_m)\r\n ary = ary + result_background\r\n ary = np.clip(od_to_rgb(ary), 0.0, 255.0)\r\n img = PIL.Image.fromarray(np.uint8(ary))\r\n\r\n saturation = math.exp(random.uniform(-0.3, 0.3))\r\n contrast = math.exp(random.uniform(-0.3, 0.3))\r\n brightness = math.exp(random.uniform(-0.3, 0.3))\r\n img = PIL.ImageEnhance.Color(img).enhance(saturation)\r\n img = PIL.ImageEnhance.Contrast(img).enhance(contrast)\r\n img = PIL.ImageEnhance.Brightness(img).enhance(brightness)\r\n\r\n return img", "title": "" }, { "docid": "8d21e3e7751e29df83db7ae432d7049f", "score": "0.53046584", "text": "def meas(self):\n self.update_edgecolor('b')", "title": "" }, { "docid": "488cd8ab0204d522b01393624644967a", "score": "0.53038937", "text": "def _adjust_lightness(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])", "title": "" }, { "docid": "9837619ff002378157109034d101a546", "score": "0.5297394", "text": "def adjust_contrast(img, contrast_factor):\n # much faster to use 
the LUT construction than anything else I've tried\n # it's because you have to change dtypes multiple times\n if not _is_numpy_image(img):\n raise TypeError('img should be numpy Image. Got {}'.format(type(img)))\n table = np.array([ (i-74)*contrast_factor+74 for i in range (0,256)]).clip(0,255).astype('uint8')\n # enhancer = ImageEnhance.Contrast(img)\n # img = enhancer.enhance(contrast_factor)\n if img.shape[2]==1:\n return cv2.LUT(img, table)[:,:,np.newaxis]\n else:\n return cv2.LUT(img,table)", "title": "" }, { "docid": "cc5abd59f5abcc7f84c21f73121c4f82", "score": "0.52633286", "text": "def contrast_siavash(image_array) -> float:\n\n max_pixel_value = np.max(image_array)\n min_pixel_value = np.min(image_array)\n return float((max_pixel_value - min_pixel_value) / (max_pixel_value + min_pixel_value))", "title": "" }, { "docid": "c0f643d87b5b3b910987885da5e11112", "score": "0.52566135", "text": "def test_interpolated_bright(self):\n lf.LumFilter.initialization()\n errors, total = TestLumFilter.interpolate_colors(\n lf.LumFilter.bright_interp, lambda l: l < 0.5)\n self.assertLess(errors, total // 5)", "title": "" }, { "docid": "d0128176434f4552ae093171767667a8", "score": "0.5254331", "text": "def _model(xs, cen, stretch, scale, background):\n return background + scale * erf(stretch * (xs - cen))", "title": "" }, { "docid": "85b2b62de87663a3b98a985eb2712a3e", "score": "0.52379334", "text": "def contrast_gray(image, limit):\n # create a CLAHE object (Arguments are optional).\n clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=(8,8))\n return clahe.apply(image.copy())", "title": "" }, { "docid": "2b5e4e9c7ce6c480dc716b9fb7297beb", "score": "0.5221906", "text": "def enhance_contrast(im, cmap='pink', display_contrast=1):\n im = ensure_positive(im)\n color_img = ind2cmap(im, cmap)\n color_img = color_img * 255 * display_contrast\n color_img[color_img > 255] = 255\n color_img = np.uint8(color_img)\n return color_img", "title": "" }, { "docid": "8f01b94b526b2b58988a41dee92602ed", "score": "0.5218918", "text": "def count_rates_to_contrast(co, c1):\n\n\n contrast = (c1-co) / (co+c1)\n avrg_count_rate = (co+c1) / 2\n\n return contrast, avrg_count_rate", "title": "" }, { "docid": "0d28b60da8b876ef3f2e9c026bc157d2", "score": "0.52131206", "text": "def secondary_shade(self, relative_lightness):\n fg_weight = relative_lightness\n bg_weight = 1 - relative_lightness\n l = self.bg.lab_l + relative_lightness * self.contrast\n a = fg_weight * self.fg.lab_a + bg_weight * self.bg.lab_a\n b = fg_weight * self.fg.lab_b + bg_weight * self.bg.lab_b\n return (l, a, b)", "title": "" }, { "docid": "9a194631a9ac923328b167a23addafc6", "score": "0.52112377", "text": "def extreme_contrast(image): \r\n new_image = copy(image)\r\n for x, y, (r, g, b) in image:\r\n if(0 <= r <= 127):\r\n r = 0\r\n else:\r\n r = 255\r\n if(0 <= g <= 127):\r\n g = 0\r\n else:\r\n g = 255 \r\n if(0 <= b <= 127):\r\n b = 0\r\n else:\r\n b = 255 \r\n color = create_color(r,g,b)\r\n set_color(new_image,x,y,color)\r\n return new_image", "title": "" }, { "docid": "8079402921d252390bfc41c560e89394", "score": "0.52102184", "text": "def auto_contrast(im):\n im = im.copy()\n if numpy.prod(im.shape) == 0:\n return im\n (minimum, maximum) = (im.min(), im.max())\n # Check that the image isn't binary\n if numpy.any((im > minimum) & (im < maximum)):\n im -= im.min()\n if im.max() > 0:\n im /= im.max()\n return im", "title": "" }, { "docid": "92972be25f5979d3c385ca1e365e30eb", "score": "0.52053046", "text": "def stretchgridedge(self, edge, width=1, 
factor=2, \n stretching='linear'):", "title": "" }, { "docid": "ef181d3a4156e5398893b53c76655656", "score": "0.52019906", "text": "def contrast_filter(image):\n \n contrast_min = 0\n contrast_max = 0\n\n data = []\n image_data = get_image_data(image)\n\n # get image min and max contrast\n for i in range(len(image_data)):\n current_tuple = list(image_data[i])\n avg = (current_tuple[0] + current_tuple[1] + current_tuple[2]) / 3\n contrast_min = min(avg, contrast_min)\n contrast_max = max(avg, contrast_max)\n \n # apply formula of contrast using min and max contrast\n for i in range(len(image_data)):\n current_tuple = list(image_data[i])\n avg = (current_tuple[0] + current_tuple[1] + current_tuple[2]) / 3\n\n if (avg == 0): continue\n\n # new luminosity\n new = 255 * (avg - contrast_min) / (contrast_max - contrast_min)\n\n current_tuple[0] = int(current_tuple[0] * new / avg)\n current_tuple[1] = int(current_tuple[1] * new / avg)\n current_tuple[2] = int(current_tuple[2] * new / avg)\n \n # saving the image\n footer(image, data, \"luminosity_filter\")", "title": "" }, { "docid": "4538b2f8b230048e2d69486d402408bb", "score": "0.5193418", "text": "def normalize(self):\n s = sum(self.chroma)\n if s > 0:\n self.chroma = [i / s for i in self.chroma]", "title": "" }, { "docid": "333bd1c8d12f6de70ab058054a22a387", "score": "0.5186167", "text": "def contrast_to_count_rates_avrg(contrast, avrg_count_rate):\n\n\n c1 = (1 + contrast) * avrg_count_rate\n co = (1 - contrast) * avrg_count_rate\n\n return co, c1", "title": "" }, { "docid": "d4ea84bc88e842efeb391af89deb9e41", "score": "0.5180941", "text": "def linearWeight(pixel_value):\n # WRITE YOUR CODE HERE.\n if pixel_value <= 127.5:\n return float(pixel_value)\n else:\n return float(255 - pixel_value)\n\n raise NotImplementedError", "title": "" }, { "docid": "5159a266010efcebf20a15f34dfb4521", "score": "0.5179835", "text": "def redStrike(self):\t\t\t\t\t\n\t\tself.points += 3", "title": "" }, { "docid": "d63d1879bb5ff94470bbd34340124683", "score": "0.5176832", "text": "def match_importance(self) -> int:", "title": "" }, { "docid": "92a853bbea14aeb7dc1b80dbc67d4308", "score": "0.5156222", "text": "def weighted_blend(feat_a_pre, w_a_pre, re_feat_bp_pre):\n return feat_a_pre*w_a_pre + re_feat_bp_pre*(1-w_a_pre)", "title": "" }, { "docid": "729fc84afb21dc1944cf5d5f9a08f930", "score": "0.5154965", "text": "def ajustarContrasteImagem(image, contrast): \n\n correction_factor = (259 * (255 + contrast))/(255 * (259 - contrast))\n\n if(image.mode == \"L\"):\n image_np = np.array(image)\n\n for y in range(image_np.shape[1]):\n for x in range(image_np.shape[0]):\n image_np[x][y] = truncar(correction_factor * (image_np[x][y] - 128) + 128)\n \n else:\n if(image.mode != \"RGB\"):\n image = converterImagemParaRGB(image)\n \n image_np = np.array(image)\n \n for y in range(image_np.shape[1]):\n for x in range(image_np.shape[0]):\n r,g,b = image_np[x][y]\n \n r = truncar(correction_factor * (r - 128) + 128)\n g = truncar(correction_factor * (g - 128) + 128)\n b = truncar(correction_factor * (b - 128) + 128)\n\n image_np[x][y] = (r,g,b) \n \n return Image.fromarray(image_np)", "title": "" }, { "docid": "f5dcb034040dee76ff553d8c9f6d0ab2", "score": "0.51516956", "text": "def rating_to_rbg_color(rating):\n limit = 30\n if rating <= limit:\n rating = 0\n return tuple(\n color * 100\n for color in colorsys.hsv_to_rgb((rating - limit) / (100 - limit) / 3, 1, 1)\n )", "title": "" }, { "docid": "2b129895375372b55f2271859d0dc782", "score": "0.5147896", "text": "def 
stretching(img):\n (width, height) = img.size\n newimg = Image.new(\"L\", (width, height), 0)\n newpixel_access = newimg.load()\n pixel_access = img.load()\n (mini, maxi) = img.getextrema()\n #calculation of the stretching coeficient\n coef = 255/max(maxi-mini, 1)\n for i in range(width):\n for j in range(height):\n #calculation of the new pixel's value\n newpixel_access[i, j] = int(coef*(pixel_access[i, j]-mini))\n return newimg", "title": "" }, { "docid": "02e57c2ee86c81520305d85776ecdb3f", "score": "0.51461655", "text": "def contrast(self, contrast):\n buf = bytearray([SET_CONTRAST, min(contrast, 255)])\n self._displayBus.send(REG_CMD, buf)", "title": "" }, { "docid": "212f7036a1d4964912ebe81a8b3cf5d6", "score": "0.5136969", "text": "def augment_with_contrast_tgen(dataset, filename):\n\n contrast_connectors = ['but', 'however', 'yet']\n scalar_slots = get_scalar_slots()\n\n alignments = []\n contrasts = []\n\n print('Augmenting MRs with contrast in ' + str(filename))\n\n # Read in the data\n data_cont = data_loader.init_test_data(os.path.join(config.DATA_DIR, dataset, filename))\n mrs, utterances = data_cont['data']\n slot_sep, val_sep, val_sep_closing = data_cont['separators']\n\n for i, mr in enumerate(mrs):\n mr_dict = OrderedDict()\n\n # Extract the slot-value pairs into a dictionary\n for slot_value in mr.split(slot_sep):\n slot, value, slot_orig, _ = data_loader.parse_slot_and_value(slot_value, val_sep, val_sep_closing)\n mr_dict[slot] = value\n mrs[i] = mrs[i].replace(slot_orig, slot)\n\n alignments.append(find_alignment(utterances[i], mr_dict))\n\n for i in range(len(utterances)):\n contrasts.append(['none', 'none', 'none', 'none'])\n for contrast_conn in contrast_connectors:\n contrast_pos = utterances[i].find(contrast_conn)\n if contrast_pos >= 0:\n slot_before = None\n value_before = None\n slot_after = None\n value_after = None\n\n for pos, slot, value in alignments[i]:\n if pos > contrast_pos:\n if not slot_before:\n break\n if slot in scalar_slots:\n slot_after = slot\n value_after = value\n break\n else:\n if slot in scalar_slots:\n slot_before = slot\n value_before = value\n\n if slot_before and slot_after:\n if scalar_slots[slot_before][value_before] - scalar_slots[slot_after][value_after] == 0:\n contrasts[i][2] = slot_before\n contrasts[i][3] = slot_after\n else:\n contrasts[i][0] = slot_before\n contrasts[i][1] = slot_after\n\n break\n\n new_df = pd.DataFrame(columns=['mr', 'ref', 'contrast1', 'contrast2', 'concession1', 'concession2'])\n new_df['mr'] = mrs\n new_df['ref'] = utterances\n new_df['contrast1'] = [tup[0] for tup in contrasts]\n new_df['contrast2'] = [tup[1] for tup in contrasts]\n new_df['concession1'] = [tup[2] for tup in contrasts]\n new_df['concession2'] = [tup[3] for tup in contrasts]\n\n filename_out = ''.join(filename.split('.')[:-1]) + '_augm_contrast_tgen.csv'\n new_df.to_csv(os.path.join(config.DATA_DIR, dataset, filename_out), index=False, encoding='utf8')", "title": "" }, { "docid": "74fa6bf1ee47bb180bfaffa3f49e556c", "score": "0.5128692", "text": "def increase_brightness(img, coord):\r\n img_brightened = 1.5 * img / 255.0\r\n return img_brightened, coord", "title": "" }, { "docid": "36976e00dce4393277591039652e7e10", "score": "0.51252127", "text": "def increase_contrast(image_collection):\n result = []\n\n for img in image_collection:\n img = img.copy()\n for i in range(img.shape[-1]):\n img[:, :, i] -= img[:, :, i].min()\n img[:, :, i] /= img[:, :, i].max()\n result.append(img)\n\n return result", "title": "" }, { "docid": 
"36976e00dce4393277591039652e7e10", "score": "0.51252127", "text": "def increase_contrast(image_collection):\n result = []\n\n for img in image_collection:\n img = img.copy()\n for i in range(img.shape[-1]):\n img[:, :, i] -= img[:, :, i].min()\n img[:, :, i] /= img[:, :, i].max()\n result.append(img)\n\n return result", "title": "" }, { "docid": "4bdfbd947cf3142f456ffdc63bd9006a", "score": "0.5124622", "text": "def ACF_contrast(ACF, q, wavelength, pupil_diameter, magnitude, sigma=5, figure_size=(5, 4)):\n plate_scale = wavelength / (pupil_diameter * q) * 206265. # (arcsec/pixel)\n rad_ACF = radial_data(np.abs(ACF), annulus_width=2)\n ACF_cc = -2.5 * np.log10((1. - np.sqrt(1. - (2 * (sigma * rad_ACF.std)) ** 2)) / (2 * (sigma * rad_ACF.std)))\n ACF_xax = np.array(range(len(rad_ACF.mean))) * plate_scale # arcsec\n ACF_fr = 10 ** (-ACF_cc / 2.5) # flux ratio for second y-axis\n\n fig, ax1 = plt.subplots(figsize=figure_size)\n color = 'tab:blue'\n label = 'V = ' + str(magnitude) + ' mag'\n\n ax1.set_xlabel(r'Separation (arcsec)')\n ax1.set_ylabel(r'' + str(sigma) + ' $\\sigma$ Contrast (mag)') \n ax1.plot(ACF_xax, ACF_cc, label=label, lw=3, color=color)\n plt.legend(loc='lower left')\n plt.gca().invert_yaxis()\n\n ax2 = ax1.twinx() # second y-axis\n ax2.set_ylabel('Flux Ratio') \n ax2.plot(ACF_xax, ACF_fr, color=color)\n plt.yscale(\"log\") # log scale\n\n plt.title('VIPER Conventional Speckle')\n fig.tight_layout() \n plt.show()", "title": "" }, { "docid": "d7e2fb26c06770e3901045effddb9d59", "score": "0.51075536", "text": "def __rred(r_1, r_2):\n if (r_1 == 0 or abs(r_1) == float('inf')) and r_2 != 0:\n r_red = r_2\n elif (r_2 == 0 or abs(r_2) == float('inf')) and r_1 != 0:\n r_red = r_1\n elif (r_1 == 0 or abs(r_1) == float('inf')) and \\\n (r_2 == 0 or abs(r_2) == float('inf')):\n r_red = 0\n elif r_1 == -r_2:\n r_red = 0\n else:\n r_red = 1 / (1 / r_1 + 1 / r_2)\n return r_red", "title": "" }, { "docid": "955c0d7e5b00789e52458145b28102f3", "score": "0.510436", "text": "def pegtop_blending(rgba, norm_intensities):\n # get rgb of normalized data based on cmap\n rgb = rgba[:, :, :3]\n \n # form an rgb eqvivalent of intensity\n d = norm_intensities.repeat(3).reshape(rgb.shape)\n \n # simulate illumination based on pegtop algorithm.\n return 2 * d * rgb + (rgb ** 2) * (1 - 2 * d)", "title": "" }, { "docid": "ba30051ae0ea4806bdf8dba422c59a0b", "score": "0.51026636", "text": "def skewness():", "title": "" }, { "docid": "15510e31851fd0c5761122efcc99b869", "score": "0.509828", "text": "def adjust_contrast(img, contrast_factor):\n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Contrast(img)\n img = enhancer.enhance(contrast_factor)\n return img", "title": "" }, { "docid": "558de9289d288272f03897bf4a9bac93", "score": "0.5097599", "text": "def hist_stretching(result_img, brightness_range):\n def inner_hist_stretching(result_img, stretching_range):\n before_normalize_max = 2 ** (result_img.dtype.itemsize * 8) - 1\n prc_min = before_normalize_max * stretching_range[0] / 100\n prc_max = before_normalize_max * stretching_range[1] / 100\n k_min = random.randint(0, prc_min)\n k_max = random.randint(prc_max, before_normalize_max)\n ret = cv2.normalize(result_img, result_img, k_min, k_max, cv2.NORM_MINMAX)\n return ret\n return inner_hist_stretching(result_img, brightness_range)", "title": "" }, { "docid": "385b72936bdcf78fe4977069f0321cc3", "score": "0.5097583", "text": "def augment_brightness(image):\n\timage = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n\trandom_bright = .25+np.random.uniform()\n\t\n\t# scaling up or down the V channel of HSV\n\timage[:,:,2] = image[:,:,2]*random_bright\n\treturn image", "title": "" }, { "docid": "0f9cec38bbaab728c67dbb211cf4cdde", "score": "0.5092839", "text": "def get_inline_contrast_keyboard(): # keyboard for image contrast level\n keyboard = [\n [\n InlineKeyboardButton(\"+0.1\", callback_data=CALLBACK_BUTTON_01), # increases contrast by 0.1\n InlineKeyboardButton(\"+0.5\", callback_data=CALLBACK_BUTTON_05) # increases contrast by 0.5\n ],\n [\n InlineKeyboardButton(\"-0.1\", callback_data=CALLBACK_BUTTON_m01), # decreases contrast by 0.1\n InlineKeyboardButton(\"-0.5\", callback_data=CALLBACK_BUTTON_m05) # decreases contrast by 0.5\n ],\n [\n InlineKeyboardButton(\"PERFECT!\", callback_data=CALLBACK_BUTTON_FIN) # finishes the editing of contrast\n ]\n ]\n return InlineKeyboardMarkup(keyboard)", "title": "" }, { "docid": "b974b92691f09a7b1f4ec08157b5861c", "score": "0.50858635", "text": "def contrast_to_count_rates_max(contrast, c1):\n\n\n co = (1 - contrast) / (1 + contrast) * c1\n\n return co, c1", "title": "" }, { "docid": "1086aab133cadf0ce7a9e8703045c1a7", "score": "0.5079538", "text": "def test_remapping(self):\n\n cpt = CPT(test_filename) \n\n cpt.normalise()\n \n assert allclose(cpt.segments[0].lower_bound, 0)\n assert allclose(cpt.segments[0].upper_bound, 1./3)\n \n assert allclose(cpt.segments[1].lower_bound, 1./3)\n assert allclose(cpt.segments[1].upper_bound, 2./3) \n \n assert allclose(cpt.segments[2].lower_bound, 2./3)\n assert allclose(cpt.segments[2].upper_bound, 1.0) \n \n # Test that colours and flags are unchanged\n assert allclose(cpt.segments[0].rgb_min, [0,0,0])\n assert allclose(cpt.segments[0].rgb_dif, [85,85,85])\n \n assert allclose(cpt.segments[1].rgb_min, [85,85,85]) \n assert allclose(cpt.segments[1].rgb_dif, [170-85,170-85,170-85]) \n \n assert allclose(cpt.segments[2].rgb_min, [170,170,170])\n assert allclose(cpt.segments[2].rgb_dif, [85,85,85]) \n \n assert cpt.segments[0].color_segment_boundary == 'L'\n assert cpt.segments[1].color_segment_boundary == ''\n assert cpt.segments[2].color_segment_boundary == 'U' \n\n\n cpt.rescale(-10, 20)\n assert allclose(cpt.segments[0].lower_bound, -10)\n assert allclose(cpt.segments[0].upper_bound, 0)\n \n assert allclose(cpt.segments[1].lower_bound, 0)\n assert allclose(cpt.segments[1].upper_bound, 10) \n \n assert allclose(cpt.segments[2].lower_bound, 10)\n assert allclose(cpt.segments[2].upper_bound, 20) \n \n\n # Test that colours and flags are unchanged\n assert allclose(cpt.segments[0].rgb_min, [0,0,0])\n 
assert allclose(cpt.segments[0].rgb_dif, [85,85,85])\n \n assert allclose(cpt.segments[1].rgb_min, [85,85,85]) \n assert allclose(cpt.segments[1].rgb_dif, [170-85,170-85,170-85]) \n \n assert allclose(cpt.segments[2].rgb_min, [170,170,170])\n assert allclose(cpt.segments[2].rgb_dif, [85,85,85]) \n \n assert cpt.segments[0].color_segment_boundary == 'L'\n assert cpt.segments[1].color_segment_boundary == ''\n assert cpt.segments[2].color_segment_boundary == 'U'", "title": "" }, { "docid": "97d515aff377d3eca7b3a0e20cb8e318", "score": "0.5075817", "text": "def pastel(colour, weight=2.4):\n rgb = np.asarray(colorConverter.to_rgb(colour))\n # scale colour\n #maxc = max(rgb)\n #if maxc < 1.0 and maxc > 0:\n # # scale colour\n # scale = 1.0 / maxc\n # rgb = rgb * scale\n # now decrease saturation\n total = sum(rgb)\n slack = 0\n for x in rgb:\n slack += 1.0 - x\n\n # want to increase weight from total to weight\n # pick x s.t. slack * x == weight - total\n # x = (weight - total) / slack\n x = (weight - total) / slack\n\n rgb = [c + 0.75*(x * (1.0-c)) for c in rgb]\n\n return rgb", "title": "" }, { "docid": "39500b6507b5447387700aa239d489ac", "score": "0.5074618", "text": "def adjustHue(self, adjustment: float) -> None: \n\n image = np.array(tf.image.adjust_hue(self.CVImage, adjustment))\n self.update(image)", "title": "" }, { "docid": "f42fc61a0c5dca55bcde62796113c6d9", "score": "0.507205", "text": "def hm_to_rel(hm_img):", "title": "" }, { "docid": "7559d3133be100a6aacf2db2a2bdde57", "score": "0.50693023", "text": "def _bg_correct(self, raw, bg):\n return raw / bg", "title": "" }, { "docid": "9958c4929175725fea1b67278edc005b", "score": "0.5067848", "text": "def post_image_contrast_stretch():\n content = request.get_json()\n p_low = request.args.get(\"l\", 10)\n p_high = request.args.get(\"h\", 90)\n percentile = (p_low, p_high)\n\n user_image_id = db.get_current_image_id(content[\"email\"])\n current_image = db.find_image(user_image_id, content[\"email\"])\n new_image = _link_new_image(current_image)\n\n image_data, new_image[\"processing_time\"] = \\\n Processing(b64str_to_numpy(current_image.image_data)\n ).contrast_stretch(percentile)\n new_image = _populate_image_meta(new_image, image_data)\n new_image[\"image_data\"] = numpy_to_b64str(image_data,\n format=new_image[\"format\"])\n new_image[\"image_data\"], _ = _get_b64_format(new_image[\"image_data\"])\n new_image[\"histogram\"] = _get_b64_histogram(image_data)\n new_image[\"process\"] = \"contrast_stretch\"\n db.update_user_process(content[\"email\"], new_image[\"process\"])\n return jsonify(new_image)", "title": "" }, { "docid": "085e2d2cf3ce73524fd8dfceec7479f8", "score": "0.5061316", "text": "def equalize_image(infile):\n import matplotlib\n import matplotlib.pyplot as plt\n import numpy as np\n\n from skimage import data, img_as_ubyte\n from skimage import exposure, io, color\n\n import warnings\n import gc\n\n matplotlib.rcParams['font.size'] = 8\n\n # %matplotlib inline\n\n # Load image\n img = data.imread(infile)\n\n # Contrast stretching\n p_lower, p_upper = np.percentile(img, (2, 95))\n img_rescale = exposure.rescale_intensity(img,\n in_range=(p_lower, p_upper))\n\n # Equalization\n img_eq = exposure.equalize_hist(img)\n\n # Adaptive Equalization\n img_adapteq = exposure.equalize_adapthist(img,\n clip_limit=0.1,\n nbins=2**16)\n\n # Display results\n fig = plt.figure(figsize=(8, 4))\n axes = np.zeros((2, 4), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 4, 1)\n for i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i,\n 
sharex=axes[0, 0],\n sharey=axes[0, 0])\n for i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n\n ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])\n ax_img.set_title('Low contrast image')\n\n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n\n ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])\n ax_img.set_title('Contrast stretching')\n\n ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])\n ax_img.set_title('Histogram equalization')\n\n ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])\n ax_img.set_title('Adaptive equalization')\n\n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n\n # prevent overlap of y-axis labels\n with warnings.catch_warnings(): # Ignore warning about tight layout\n warnings.simplefilter(\"ignore\")\n fig.tight_layout()\n plt.savefig(infile[:-4] + '_diag.pdf')\n plt.savefig(infile[:-4] + '_diag.tif', dpi=300)\n plt.close(fig)\n\n # Convert to 8 bit and add color channels\n img_eq = color.gray2rgb((img_eq * 2**8).astype(np.uint8, copy=False))\n img_adapteq = color.gray2rgb((img_adapteq * 2**8).astype(np.uint8,\n copy=False))\n\n with warnings.catch_warnings(): # Ignore warning about 16 bit to 8 bit\n warnings.simplefilter(\"ignore\")\n img_rescale = color.gray2rgb(img_as_ubyte(img_rescale))\n\n collected = gc.collect()\n # print(\"equalize_image: collected %d objects.\" % collected)\n\n # Consider dictionary {'img_eq':img_eq, 'img_adapteq':img_adapteq, 'img_rescale':img_rescale}\n return img_eq, img_adapteq, img_rescale", "title": "" }, { "docid": "d288fd8c3f7e76d97fce1852db45251b", "score": "0.5046553", "text": "def as_though(self, condition, anomalise=False, _hex=False):\n # first check something\n if condition == \"normal\":\n if _hex:\n return \"#{:02x}{:02x}{:02x}\".format(*self.rgb.astype(numpy.int32))\n return self.rgb\n elif condition == \"achroma\": # this one is special as it's the lack of colour\n # make the grey point\n z = (numpy.array([0.212656, 0.715158, 0.072186]) * self.rgb).sum() * numpy.ones(self.rgb.size)\n if anomalise:\n v = 1.75\n n = v + 1\n z = (v * z + self.rgb) / n\n\n z = numpy.round_(z).astype(numpy.int32)\n\n if _hex:\n return \"#{:02x}{:02x}{:02x}\".format(z[0], z[1], z[2])\n else:\n return z\n\n # now for the others\n # get the style of colour blindness\n style = _blindness_type[condition]\n\n # we can get the confusion line by finding the slope from our source colour in xyy and the known\n # confusion points from the colour blindness type\n confuse_slope = (self.xyy[1] - style[\"y\"]) / (self.xyy[0] - style[\"x\"])\n\n # from this we extract the y-intercept\n y_int = self.xyy[1] - self.xyy[0] * confuse_slope\n\n # now we find the change in x and y resulting from the confusion line being different to the\n # colour axis for the blindness type\n dx = (style[\"yi\"] - y_int) / (confuse_slope - style[\"m\"])\n dy = (confuse_slope * dx) + y_int # the change in y should just be propto the change in x\n dY = 0 # not sure what this is yet, I believe it's the reference white point?\n\n # now we can find the simulated colours in XYZ (from the white point?)\n z = self.xyy[2] * numpy.array([dx / dy, 1, (1 - (dx + dy)) / dy])\n\n # we then calculate the distance this colour is from the neutral grey in XYZ\n dX = 0.312713 * self.xyy[2] / 0.329016 - z[0]\n dZ = 0.358271 * self.xyy[2] / 0.329016 - z[2]\n\n # convert the distance into linear rgb, and also convert 
the simulated colour\n distance = _xyz_srgb_matrix @ numpy.array([dX, dY, dZ]).T\n new_rgb = _xyz_srgb_matrix @ z.T\n\n # the next thing to do is to figure out how to make the adjustment to our new_rgb\n # first find the ratio of the rgb to the distance\n ratio = ((new_rgb >= 0) - new_rgb) / distance # not quite sure what this is...\n ratio[(ratio < 0) | (ratio > 1)] = 0 # remove blown out values\n\n # the adjustment factor we use is just the largest of these factors\n adjustment = ratio.max()\n\n # apply the shift\n new_rgb += adjustment * distance\n\n # and apply the companding\n new_rgb[new_rgb < 0] = 0 # remove the blown out values\n new_rgb[new_rgb > 1] = 1\n new_rgb = new_rgb**(1/2.2) # this is the gamma correction\n new_rgb = (255 * new_rgb)\n\n # and finally apply any anomalise correction to it\n if anomalise:\n v = 1.75\n n = v + 1\n new_rgb = (v * new_rgb + self.rgb) / n\n\n new_rgb = numpy.round_(new_rgb).astype(numpy.int32)\n\n if _hex:\n return \"#{:02x}{:02x}{:02x}\".format(*new_rgb)\n else:\n return new_rgb", "title": "" }, { "docid": "6757895c48c7c0493b2c5b93eb2ada21", "score": "0.50426996", "text": "def CSUMBBlend(pic):\n amount = 0.5\n for x in range(0, getWidth(pic)/3):\n for y in range(0, getHeight(pic)):\n pixel = getPixel(pic, x, y)\n px = getColor(pixel)\n newBBRed = 0*amount + getRed(pixel)*(1-amount) #r = 0 in Bay Blue\n newBBGreen = 47*amount + getGreen(pixel)*(1-amount) #g = 47 in Bay Blue\n newBBBlue = 93*amount + getBlue(pixel)*(1-amount) #b = 93 in Bay Blue\n newColor = makeColor(newBBRed, newBBGreen, newBBBlue)\n setColor(pixel, newColor)\n for x in range(getWidth(pic)/3, getWidth(pic)*2/3):\n for y in range(0, getHeight(pic)):\n pixel = getPixel(pic, x, y)\n px = getColor(pixel)\n newGSRed = 132*amount + getRed(pixel)*(1-amount) #r = 132 in Golden Sand\n newGSGreen = 114*amount + getGreen(pixel)*(1-amount) #g = 114 in Golden Sand\n newGSBlue = 72*amount + getBlue(pixel)*(1-amount) #b = 72 in Golden Sand\n newColor = makeColor(newGSRed, newGSGreen, newGSBlue)\n setColor(pixel, newColor)\n for x in range(getWidth(pic)*2/3, getWidth(pic)):\n for y in range(0, getHeight(pic)):\n pixel = getPixel(pic, x, y)\n px = getColor(pixel)\n newVGRed = 0*amount + getRed(pixel)*(1-amount) #r = 0 in Valley Green\n newVGGreen = 120*amount + getGreen(pixel)*(1-amount) #g = 120 in Valley Green\n newVGBlue = 86*amount + getBlue(pixel)*(1-amount) #b = 86 in Valley Green\n newColor = makeColor(newVGRed, newVGGreen, newVGBlue)\n setColor(pixel, newColor)\n return pic", "title": "" }, { "docid": "54992083348adfb4b439995f04c15962", "score": "0.5029483", "text": "def high_contrast_pink(self) -> CDispatch:\n\n # Define the Background Color Object.\n series_color = win32.Dispatch(\"Illustrator.RGBColor\")\n series_color.Red = 248\n series_color.Green = 5\n series_color.Blue = 254\n\n return series_color", "title": "" }, { "docid": "6a8409f9e456cef5953848758a49e27c", "score": "0.5024995", "text": "def colorwheel(n: float) -> int:\n ...", "title": "" }, { "docid": "05dc74533b69dd0275bf353eefeaedc7", "score": "0.50208026", "text": "def OnContrastSlider(self, pressed):\n self.contrastval = self.slider_constrast.value()\n if not self.running:\n self.update_display(a, gx, gy, phi, rx, ry)", "title": "" }, { "docid": "f3cd6fb7018e6b1689a7204c968cc6b1", "score": "0.501999", "text": "def color_dist(c1, c2):\n dr2, dg2, db2 = (c1 - c2)**2\n avg_r = (c1[0] + c2[0]) / 2\n return (2 * dr2) + (4 * dg2) + (3 * db2) + (avg_r * (dr2 - db2) / 256)", "title": "" }, { "docid": 
"3f92f5f02cec0bbf798bd6c51d3241e4", "score": "0.5010076", "text": "def adjust_intensity(img, img_ref, alpha_threshold=0.5):\n fg = np.array(img)\n bg = np.array(img_ref)\n \n fg_flat = fg.reshape(fg.shape[0] * fg.shape[1], fg.shape[2])\n bg_flat = bg.reshape(bg.shape[0] * bg.shape[1], bg.shape[2])\n \n ### calculate intensities \n def get_intensity(img):\n if img.shape[1] == 3:\n img_tmp = img\n else:\n img_tmp = img[img[:,3] > alpha_threshold]\n return np.array(map(max, img_tmp[:,0:3]))\n I_fg = get_intensity(fg_flat)\n I_bg = get_intensity(bg_flat)\n \n I_bg.sort()\n argsort_I_fg = I_fg.argsort() # sorting indices\n len_ratio = len(I_bg)/float(len(I_fg)) # calculating length ratio\n \n ### obtaining adjusted intensity values\n I_adjust = np.empty(len(argsort_I_fg))\n for i,idx in enumerate(argsort_I_fg):\n idx_ini = int(len_ratio*i)\n idx_fin = int(len_ratio*(i+1))\n if idx_ini == idx_fin:\n I_adjust[idx] = I_bg[idx_ini] \n else:\n I_adjust[idx] = np.mean(I_bg[idx_ini:idx_fin]) \n \n #normalize pixel values of the object\n j=0\n for i in range(len(fg_flat)):\n if fg_flat[i][3] > alpha_threshold:\n if I_fg[j] == 0:\n fg_flat[i][0:3] = I_adjust[j]\n else:\n fg_flat[i][0:3] = fg_flat[i][0:3] * (I_adjust[j] / I_fg[j])\n j=j+1\n \n# ### check results \n# obj_orig=img\n# obj=fg\n# I_obj_orig = get_intensity(obj_orig.reshape(obj_orig.shape[0]*obj_orig.shape[1], 4))\n# I_obj_matched = get_intensity(obj.reshape(obj.shape[0]*obj.shape[1], 4))\n# I_bg = get_intensity(bg.reshape(bg.shape[0]*bg.shape[1], 3))\n# \n# I_obj_orig.sort()\n# I_obj_matched.sort()\n# I_bg.sort()\n# \n# plt.figure(0)\n# plt.subplot(311)\n# plt.plot(I_obj_orig, label='Original')\n# plt.xlim(0, len(I_obj_orig))\n# plt.legend(loc=4)\n# plt.subplot(312)\n# plt.plot(I_obj_matched, label='Normalized')\n# plt.xlim(0, len(I_obj_matched))\n# plt.legend(loc=4)\n# plt.subplot(313)\n# plt.plot(I_bg, label='Background')\n# plt.xlim(0, len(I_bg))\n# plt.legend(loc=4)\n# \n# ### SHOW OBJECTS\n# plt.figure(1)\n# plt.imshow(obj_orig)\n# plt.title(\"original object\")\n# \n# plt.figure(2)\n# plt.imshow(obj)\n# plt.title(\"normalized object\")\n# \n# plt.show()\n \n return fg", "title": "" }, { "docid": "c4991f2b52334d3852458417899a0fbb", "score": "0.5005448", "text": "def darken(self, scale):\n \tfor y in range(self.rows):\n \t\tfor x in range(self.columns):\n \t\t\tself.data[x][y] = scale*self.data[x][y]", "title": "" }, { "docid": "6771bfab56c80e7aaf39b21a46aab8e0", "score": "0.5005043", "text": "def compute_pyramid_patch_weight_loss(width: int, height: int) -> np.ndarray:\n xc = width * 0.5\n yc = height * 0.5\n xl = 0\n xr = width\n yb = 0\n yt = height\n Dc = np.zeros((width, height))\n De = np.zeros((width, height))\n\n Dcx = np.square(np.arange(width) - xc + 0.5)\n Dcy = np.square(np.arange(height) - yc + 0.5)\n Dc = np.sqrt(Dcx[np.newaxis].transpose() + Dcy)\n\n De_l = np.square(np.arange(width) - xl + 0.5) + np.square(0.5)\n De_r = np.square(np.arange(width) - xr + 0.5) + np.square(0.5)\n De_b = np.square(0.5) + np.square(np.arange(height) - yb + 0.5)\n De_t = np.square(0.5) + np.square(np.arange(height) - yt + 0.5)\n\n De_x = np.sqrt(np.minimum(De_l, De_r))\n De_y = np.sqrt(np.minimum(De_b, De_t))\n De = np.minimum(De_x[np.newaxis].transpose(), De_y)\n\n alpha = (width * height) / np.sum(np.divide(De, np.add(Dc, De)))\n W = alpha * np.divide(De, np.add(Dc, De))\n return W, Dc, De", "title": "" }, { "docid": "02f7b33b27c193c0c69782e4ff87177f", "score": "0.50044566", "text": "def effective_potental_ext(r):\n effec = 1/(r**2) - 2/r\n 
return effec", "title": "" }, { "docid": "6b223e8638704da37163f8b6b8573674", "score": "0.500356", "text": "def halve_brightness(img):\n for pixel in img:\n x, y, col = pixel\n r, g, b = col\n \n r *= 0.5\n g *= 0.5\n b *= 0.5\n \n col = create_color (r, g, b)\n set_color(img, x, y, col)", "title": "" }, { "docid": "9bcf2308955b624b96ed6fa58922b6b3", "score": "0.4986226", "text": "def adjust_brightness(self):\n if self.image:\n hsv = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV) # convert it to hsv\n\n h, s, v = cv2.split(hsv)\n v += np.clip(v + random.randint(-5, 15), 0, 255).astype('uint8')\n final_hsv = cv2.merge((h, s, v))\n\n image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2RGB)\n self.image = image", "title": "" }, { "docid": "ca49db40cf3947ae052045890c3dc6b3", "score": "0.49843436", "text": "def get_strand_color(is_rev):\n if is_rev is True:\n return 240\n else:\n return 70", "title": "" }, { "docid": "4551e1e24e706827ecad74ede30981fd", "score": "0.49762565", "text": "def contrast(self, x):\n\n factor = tf.random.uniform((x.shape[0], 1, 1, 1, 1)) + 0.5\n x_mean = tf.reduce_mean(x, axis=-1, keepdims=True)\n return (x - x_mean) * factor + x_mean", "title": "" }, { "docid": "edec51ea8df63c9c21482f337dabbfd7", "score": "0.4970951", "text": "def ContrastNormalized(frame, percentile : list) -> np_.ndarray:\r\n\r\n kernel = np_.array([[1,2,1],[2,4,2],[1,2,1]]) \r\n kernel = kernel/np_.sum(kernel) # normalized gaussian filter\r\n \r\n #convolution\r\n edges = convolve2d(frame, kernel, mode='same') \r\n \r\n\r\n # contrast stretching\r\n p_inf = np_.percentile(edges, percentile[0])\r\n p_sup = np_.percentile(edges, percentile[1])\r\n img = exposure.rescale_intensity(frame, in_range=(p_inf, p_sup)) #stretching image intensity\r\n \r\n smooth_frm = filters.gaussian(img, sigma=(5,3), multichannel=None) # smooth image with a gaussian filter\r\n \r\n\r\n #pl_.matshow(smooth_frm)\r\n \r\n return smooth_frm", "title": "" }, { "docid": "8921fe4a145e1a5998699159b2d2d825", "score": "0.49697638", "text": "def getKerningPairAdjustments(self, glyphs: list[int]) -> list[int] | None:", "title": "" }, { "docid": "43cd0dc91a1315d2f20bc3d871c6ab4f", "score": "0.4953807", "text": "def getChordScale(self, n):", "title": "" }, { "docid": "62b7349ab9ff7585957b7001e7edeba9", "score": "0.49483627", "text": "def get_red_palace(self):\r\n return self.red_palace", "title": "" } ]
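The code snippets collected in the list above all circle the same basic operation: rescaling pixel intensities around a reference value to raise or lower contrast. A minimal, self-contained sketch of that pattern in plain NumPy follows; the function name simple_contrast, the choice of the image mean as the pivot, and the 0-255 value range are illustrative assumptions rather than details taken from any snippet above.

import numpy as np

def simple_contrast(image, factor):
    """Scale pixel values away from (factor > 1) or toward (factor < 1) the image mean."""
    image = image.astype(np.float64)
    pivot = image.mean()  # reference value the contrast stretch is centred on
    adjusted = (image - pivot) * factor + pivot
    return np.clip(adjusted, 0, 255).astype(np.uint8)

# Example: doubling the contrast of a small random 8-bit grayscale image.
img = np.random.randint(0, 256, size=(4, 4), dtype=np.uint8)
print(simple_contrast(img, 2.0))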
80bb542c4b50552131d7e18b214540f7
Returns ``True`` if there were any warnings.
[ { "docid": "c1c4bffdf737a4995d54951d0a165374", "score": "0.8521827", "text": "def has_warnings(self):\n return len(self._warnings) > 0", "title": "" } ]
[ { "docid": "c28235583242444733b88c1d6dd7ca90", "score": "0.8699406", "text": "def has_warnings(self):\r\n if self.get_warnings():\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "9b6d3ab68cfedf5e1c8745e5c6147a81", "score": "0.8603895", "text": "def hasWarnings(self):\n return self._warnings != 0", "title": "" }, { "docid": "cfae84a8bcfb11c613b341824303ead5", "score": "0.84909356", "text": "def has_warnings(self):\n return bool(len(self._warnings))", "title": "" }, { "docid": "b3dae154945481abde73d96e43ec6033", "score": "0.8292995", "text": "def has_warnings(self):\n return len(self.warnings) + len(self.persistent_warnings) > 0", "title": "" }, { "docid": "5c615ea487f21b239395ff59bda1008c", "score": "0.7518544", "text": "def has_warning(self):\n return self._data.get(\"warning\", False)", "title": "" }, { "docid": "f12b3e6b0b03e5a1ed2ae890cbd586b0", "score": "0.7260097", "text": "def isWarning(self):\n return _libsbml.XMLError_isWarning(self)", "title": "" }, { "docid": "aaa9ff4fd323ed6568df35378f479b7d", "score": "0.7238779", "text": "def failed(self, fail_on_warnings=True):\n\n return bool(self.errors) or (fail_on_warnings and bool(self.warnings))", "title": "" }, { "docid": "ed28ae686d2794a1c109a350455e1358", "score": "0.69932014", "text": "def warned(self, category):\n return category in self.warn_categories", "title": "" }, { "docid": "bbfd43b699a5f6b91ccae7a8d51d822e", "score": "0.6904108", "text": "def has_validation_warning(self):\n return self._validation_paragraph('warning').present", "title": "" }, { "docid": "a9e837c7dd2edf7c0ece8b4d4dd3842c", "score": "0.6719626", "text": "def is_warning(self):\n return self in [MachineStatus.WAITING_FOR_INPUT,\n MachineStatus.UNKNOWN,\n MachineStatus.UNRECOGNISED]", "title": "" }, { "docid": "ac857b3c6697f13a6194d8dd5cf5cd6f", "score": "0.6662607", "text": "def num_warnings(self):\n return len(self.warnings)", "title": "" }, { "docid": "072e675953736d7e6d0b51f728f51410", "score": "0.66551906", "text": "def areInCommandErrorsAndWarningsShown(self):\n return bool()", "title": "" }, { "docid": "0786c100abad935910f9adc3bfcefb95", "score": "0.65222144", "text": "def has_errors(self):\r\n if self.get_errors():\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "0d68142189e6230039672ce66f0e61f0", "score": "0.63280714", "text": "def HasErrors(self):\n query = u'SELECT COUNT(*) FROM {0:s}'.format(u'extraction_error')\n self._cursor.execute(query)\n\n row = self._cursor.fetchone()\n return row and row[0] != 0", "title": "" }, { "docid": "85f1e4ec83def922c44cf87f45b4f320", "score": "0.62917393", "text": "def hasErrors(self):\n return self._errors != 0", "title": "" }, { "docid": "0db8a5e43206ae3697973bb2737ced51", "score": "0.62794226", "text": "def HasErrors(self):", "title": "" }, { "docid": "e097794b22aa64c490a53df250dbc39a", "score": "0.6260585", "text": "def warnings(self):\n return self._warnings", "title": "" }, { "docid": "e097794b22aa64c490a53df250dbc39a", "score": "0.6260585", "text": "def warnings(self):\n return self._warnings", "title": "" }, { "docid": "e097794b22aa64c490a53df250dbc39a", "score": "0.6260585", "text": "def warnings(self):\n return self._warnings", "title": "" }, { "docid": "91c4a0b81c5701a65fefe88ce8d78dd2", "score": "0.62156373", "text": "def _IsFailureFatal(self, failing, inflight, no_stat):\n sanity_builders = self._run.config.sanity_check_slaves or []\n sanity_builders = set(sanity_builders)\n return not sanity_builders.issuperset(failing | inflight | no_stat)", "title": "" }, 
{ "docid": "4bb17f65848547d8a581773d629c5ac9", "score": "0.6183162", "text": "def number_of_warnings(self) -> int:\n return len(self.warnings)", "title": "" }, { "docid": "2cfa4b9f0b022276634d5ec50e4fdec8", "score": "0.61684763", "text": "def is_valid(self, ignore_warnings=False):\n if not super(WarningsForm, self).is_valid():\n return False\n\n if self.warnings and not self.should_ignore_warnings():\n return False\n\n return True", "title": "" }, { "docid": "c0a3438d160ef35cd94dc15ac6af7f72", "score": "0.6151328", "text": "def _is_error(self):\n if self.exit_code:\n msg = self.SUCCESS_EXIT_CODES.get(self.exit_code)\n if msg:\n log.info(msg)\n msg = self.WARNING_EXIT_CODES.get(self.exit_code)\n if msg:\n log.warning(msg)\n\n return (\n self.exit_code not in self.SUCCESS_EXIT_CODES\n and self.exit_code not in self.WARNING_EXIT_CODES\n )", "title": "" }, { "docid": "015e5fc60c8e6c857fd974d25b07305e", "score": "0.6144257", "text": "def has_script_errors(self):\r\n if self.get_script_errors():\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "6dd4b6c23af24673e46d6483d9fda4a9", "score": "0.607476", "text": "def warn(self, key):\n return (\n not self.quiet and not self.warn_none\n and (self.warn_all or getattr(self, \"warn_%s\" % key))\n )", "title": "" }, { "docid": "6366e9600cf915532b4da0942574870f", "score": "0.6021171", "text": "def warnings(self):\n return self.select(AbinitWarning)", "title": "" }, { "docid": "f64060a4079c3a2f8f1e38cc10a84dc9", "score": "0.59926766", "text": "def HasErrors(self):\n return self._has_errors", "title": "" }, { "docid": "187abae733eff0e9682edbf0e1b52a0b", "score": "0.59866697", "text": "def test_warning(self):\r\n output = self.get_output('')\r\n self.assertIn(\r\n '** Consider using file patterns to speed up the process **',\r\n output)", "title": "" }, { "docid": "648006516e38b461bd513c52b39f9e0a", "score": "0.5971423", "text": "def test_no_warning(self):\n output = self._collectstatic_output(clear=True)\n self.assertNotIn(self.warning_string, output)", "title": "" }, { "docid": "f5cf5bc68e8d216e65a6d08a0ea2ad9d", "score": "0.5967976", "text": "def warnings(self) -> Optional[List[str]]:\n return self.__warnings", "title": "" }, { "docid": "d27a88069f5317bd75250a20ee8af75b", "score": "0.5946829", "text": "def has_error(self):\n return self._count > 0", "title": "" }, { "docid": "8808440c60caa3639a33ef46cb7f2bd2", "score": "0.59417677", "text": "def print_warnings(self):\n pass", "title": "" }, { "docid": "85e96bbdb602ffc91a06a9fa56e8ac6f", "score": "0.5934859", "text": "def test_Warning(self):\n self.assertTrue(issubclass(myconn.errors.Warning,StandardError),\n \"Warning is not subclass of StandardError\")", "title": "" }, { "docid": "517fb51def627cc7d07418818e16c1f7", "score": "0.5913581", "text": "def checkSanity(self):\n ret = True\n return ret", "title": "" }, { "docid": "7a0eaebe6f490a734c81af02f6a04860", "score": "0.5906016", "text": "def warningCount(self):\n return self._warnings", "title": "" }, { "docid": "86c78d26a2a549beaa7d9515325a01ea", "score": "0.5889763", "text": "def count(self):\n return len(self._warnings)", "title": "" }, { "docid": "9a1cff35481eddfc55bd658cd71a2a31", "score": "0.58495617", "text": "def is_warning_active(self, warning, description=None):\n\n return warning.name in self._active_warnings", "title": "" }, { "docid": "9c8d6aac2159e138d937e1bca9f25404", "score": "0.58324105", "text": "def _print_warnings(self):\n # Only do something if warnings exist.\n if self.warnings_dic:\n for msg in 
self.warnings_dic.itervalues():\n print(\"WARNING: {0}\".format(msg))\n self.rows_printed += 1", "title": "" }, { "docid": "e44d173502ef14c41b18e464bd76c0a0", "score": "0.5821443", "text": "def check_errors(cls, score_groups, verbose):\n errors_occurred = False\n for checker, rpair in score_groups.items():\n errors = rpair[-1]\n if len(errors):\n errors_occurred = True\n print(\n \"WARNING: The following exceptions occurred during the %s checker (possibly indicate compliance checker issues):\"\n % checker,\n file=sys.stderr,\n )\n for check_name, epair in errors.items():\n print(\n f\"{checker}.{check_name}: {epair[0]}\",\n file=sys.stderr,\n )\n\n if verbose > 0:\n traceback.print_tb(\n epair[1].tb_next.tb_next,\n ) # skip first two as they are noise from the running itself @TODO search for check_name\n print(file=sys.stderr)\n\n return errors_occurred", "title": "" }, { "docid": "50083e289cdc62d0edcb9c087ef2feda", "score": "0.5796149", "text": "def test_170504_falsepositive(dbcursor):\n prod = vtecparser(get_test_file(\"NPW/NPWFFC.txt\"))\n prod.sql(dbcursor)\n res = [x.find(\"duplicated VTEC\") > 0 for x in prod.warnings]\n assert not any(res)", "title": "" }, { "docid": "03b98665b72278be93523d25786ed11c", "score": "0.5790927", "text": "def ignore_warnings(record: logging.LogRecord) -> bool:\n return record.levelno < logging.WARNING", "title": "" }, { "docid": "3b90e1711d74b1229ca5a77a0e3e9c68", "score": "0.57893133", "text": "def wasSuccessful(self):\n failure_types = {\"addError\", \"addFailure\", \"addSubTest\", \"addUnexpectedSuccess\"}\n return all(e[0] not in failure_types for e in self.events)", "title": "" }, { "docid": "9e1f1f6e2a486700e34135f3f2204d99", "score": "0.5780428", "text": "def test50():\n assert isinstance(p, Pod)\n warnings = p.get_type_warnings()\n wstrings = \"\\n\".join(w.warning for w in warnings)\n assert not warnings, f\"warnings: {wstrings}\"", "title": "" }, { "docid": "6a86424f2b9c7d0b656d2c268874a447", "score": "0.5759886", "text": "def has_validation_not_configured_warning(self):\n return self._validation_paragraph('not-configured').present", "title": "" }, { "docid": "c30581dfb8426fad1800fe7006d796a7", "score": "0.57551837", "text": "def is_health_warn(remote):\n return health_overall_status(remote) == 'HEALTH_WARN'", "title": "" }, { "docid": "ea02a52d68d313bac78b2900d2ba35f1", "score": "0.57534534", "text": "def test_warnings(self):\n arr = np.random.randint(0, 1000, (1000, 1000))\n df = pd.DataFrame(arr)\n # Ensure that building a pipeline warns users that it is an experimental feature\n with pytest.warns(\n UserWarning,\n match=\"The Batch Pipeline API is an experimental feature and still under development in Modin.\",\n ):\n pipeline = PandasQueryPipeline(df)\n with pytest.warns(\n UserWarning,\n match=\"No outputs to compute. Returning an empty list. 
Please specify outputs by calling `add_query` with `is_output=True`.\",\n ):\n output = pipeline.compute_batch()\n assert output == [], \"Empty pipeline did not return an empty list.\"", "title": "" }, { "docid": "25ad4e5fd66eb7803224d2f22e0dbd3c", "score": "0.57114816", "text": "def wasSuccessful(self):\n # type: () -> bool\n return (len(self.failures) == len(self.errors) ==\n len(self.unexpectedSuccesses) == 0)", "title": "" }, { "docid": "7be49969cf78f714e800afe1073acbfd", "score": "0.5703645", "text": "def is_clean(self):\n return not self.errors", "title": "" }, { "docid": "4809220652af1daffa2a52e84a784091", "score": "0.5680422", "text": "def check(self):\n self.outdata = QCOutput(self.output_file).data\n self.errors = self.outdata.get(\"errors\")\n self.warnings = self.outdata.get(\"warnings\")\n # If we aren't out of optimization cycles, but we were in the past, reset the history\n if \"out_of_opt_cycles\" not in self.errors and len(self.opt_error_history) > 0:\n self.opt_error_history = []\n # If we're out of optimization cycles and we have unconnected fragments, no need to handle any errors\n if \"out_of_opt_cycles\" in self.errors and self.outdata[\"structure_change\"] == \"unconnected_fragments\":\n return False\n return len(self.errors) > 0", "title": "" }, { "docid": "48a6947f12e591534b58b215c0c2e19d", "score": "0.5670275", "text": "def validate():\n _get_manager().validate(mode=\"warn\")\n return {\n \"success\": True,\n \"message\": (\"Ran validation routine (warnings have been displayed if \"\n \"necessary.)\")\n }", "title": "" }, { "docid": "9fc0a6ab817209720fd611d42d1f1e7b", "score": "0.56552887", "text": "def ok(self):\n if self.error:\n return False\n return True", "title": "" }, { "docid": "01201ad2f62de32e3c38aab4b0c865e8", "score": "0.56526154", "text": "def _get_ignore_errors(self) -> bool:\n errcontrol = self.context.get_variable('__errorcontrol')\n return (errcontrol & 1) == 1", "title": "" }, { "docid": "aa5ec91b4090e010c516a343dd54fe0d", "score": "0.5645951", "text": "def convergence_check(self) -> bool:\n for job in self.iter_jobs(convert_to_object=False):\n if job.status not in [\"finished\", \"warning\"]:\n return False\n return True", "title": "" }, { "docid": "85ec160f8bd7c2bafa6b6f4887db5807", "score": "0.56405836", "text": "def isInfo(self):\n return _libsbml.XMLError_isInfo(self)", "title": "" }, { "docid": "54aadae551c6f0a6a78ce385a62b7a34", "score": "0.5640339", "text": "def check(self):\n\n return (False, 'Nothing happened', [])", "title": "" }, { "docid": "eaaf5e3353d212148c34eab91ca7b6c5", "score": "0.56369925", "text": "def is_valid(self):\n return len(self.errors) == 0", "title": "" }, { "docid": "169cc9c1e218adff003310234f5b8669", "score": "0.56272036", "text": "def test_201006_invalid_warning(dbcursor):\n prod = vtecparser(get_test_file(\"MWWPQR/MWWPQR_0.txt\"))\n prod.sql(dbcursor)\n assert not filter_warnings(prod.warnings)\n prod = vtecparser(get_test_file(\"MWWPQR/MWWPQR_1.txt\"))\n prod.sql(dbcursor)\n assert not filter_warnings(prod.warnings)", "title": "" }, { "docid": "83897cd949738dba10cf35106ad69a59", "score": "0.55986995", "text": "def check(self):\n return True", "title": "" }, { "docid": "32f967b544abb13afd9bc7314d2fe931", "score": "0.55821174", "text": "def find_warnings(self):\n w_i, r_i, lr_i, n_o_i = self.values\n if w_i < r_i:\n return \"Work interval is shorter than rest interval. \"\n elif r_i > lr_i:\n return \"Rest interval is longer than long rest interval. 
\"\n elif r_i > w_i * 0.5:\n return \"Rest interval is too long for your chosen work interval. \"\n elif n_o_i < 4:\n return \"The number of work intervals per session is too low. \"\n return False", "title": "" }, { "docid": "de6b558eb8d40f0a5d746a552dbb8e5b", "score": "0.5572628", "text": "def _check_incorrect_no_warning(self):\n warning = self.data.get('options', {}).get('warning', 'yes')\n srcs = self.data.get('srcs', [])\n if not srcs or warning != 'no':\n return\n\n keywords_list = self.blade.get_sources_keyword_list()\n for keyword in keywords_list:\n if keyword in self.current_source_path:\n return\n\n illegal_path_list = []\n for keyword in keywords_list:\n illegal_path_list += [s for s in srcs if not keyword in s]\n\n if illegal_path_list:\n console.warning(\"//%s:%s : warning='no' is only allowed \"\n \"for code in thirdparty.\" % (\n self.key[0], self.key[1]))", "title": "" }, { "docid": "42861514dbaa5df5108d0fbde02c4584", "score": "0.5544629", "text": "def failed(self) -> bool:\n updated_status = self._fetch_status()\n return any(updated_status == s for s in self._failed_statuses)", "title": "" }, { "docid": "cd926fc9f2cdc05731f3d4b266113930", "score": "0.55433065", "text": "def is_failed(self):\n if self.unit is None:\n return True\n\n if self.data is None:\n return True\n\n if self.error is not None:\n return True\n\n if do_iteration_samples_look_like_error(self.data):\n return True\n\n return False", "title": "" }, { "docid": "1284f94643406974f797458250911c1d", "score": "0.5538136", "text": "def areInCommandErrorsAndWarningsShown(self, areInCommandErrorsAndWarningsShown):\n pass", "title": "" }, { "docid": "dee61ee790899341e6cec80e7bb34000", "score": "0.55379814", "text": "def is_valid(self):\n if len(self.sents) == 0:\n log.warning(f\"{self.filename} is empty\")\n return False\n if len(self.ann) < 1:\n log.warning(f\"{self.filename} has no annotations\")\n return False\n return True", "title": "" }, { "docid": "4c045f287c8bcd183c43c8d221190408", "score": "0.5533272", "text": "def in_error(self):\n return not self._error.empty()", "title": "" }, { "docid": "071acf41f3727e0336ba4e0b37d6552d", "score": "0.55309147", "text": "def has_error(self):\n return self.error != Error.ok", "title": "" }, { "docid": "8557797bf3c4bab20248d1a88e265481", "score": "0.552776", "text": "def get_warnings(self):\n ret_val = self._warnings[:]\n del self._warnings[:]\n return ret_val", "title": "" }, { "docid": "4db97ad0a1b2c3962ee4aabfabaca0d7", "score": "0.5511297", "text": "def sanity_checks() -> bool:\n with open(\"/proc/sys/kernel/core_pattern\") as core_patten_fp:\n if core_patten_fp.read()[0] != '|':\n return True\n print(\"System is configured to send core dump notifications to an external utility. \"\n \"This will prevent afl-fuzz from starting. \")\n change_core_pattern = helpers.utils.query_yes_no(\"Do you want me to change that for you?\")\n if not change_core_pattern:\n return False\n else:\n try:\n with open(\"/proc/sys/kernel/core_pattern\", \"w\") as core_patten_fp:\n core_patten_fp.write(\"core\")\n return True\n except PermissionError:\n print(\"Permission denied!\")\n return False", "title": "" }, { "docid": "0641da4fea43ea26ae0eca7654c3cda4", "score": "0.5510212", "text": "def check_warning_detector(self):\n # If the warning detector fires we record the position\n # and reset. 
We take the most recent warning as the\n # start of out window, assuming this warning period\n # contains elements of the new state.\n # the change detector monitors likelihood, so we care about a DECREASE\n # in likelihood only\n if get_ADWIN_decrease(self.fingerprint_similarity_detector_warn):\n # When we use stdevs, look for increase\n\n self.warning_detected = False\n self.last_warning_point = self.ex\n self.fingerprint_similarity_detector_warn = make_detector(\n s=get_warning_sensitivity(self.get_current_sensitivity()), update_gap=self.active_head_monitor_gap)\n if not self.in_warning:\n self.last_warning_point = max(0, self.ex - 100)", "title": "" }, { "docid": "89223bd53977acbd23fdd0ea412925a4", "score": "0.55060655", "text": "def warning(self, msg):", "title": "" }, { "docid": "9bda0f4eb68159beb8f3b79be47f6c8d", "score": "0.54764336", "text": "def is_valid(self):\n return not self.errors['errors']", "title": "" }, { "docid": "ff83ededcfade6a055fe3ee3bafadc9f", "score": "0.5476351", "text": "def warnings(self) -> str:\n return self._warnings", "title": "" }, { "docid": "0bf5a5c40d3ec048326595072118a0bc", "score": "0.5468697", "text": "def warning(self):\n return self.__warning", "title": "" }, { "docid": "f9dc140071e6cda15b69f67644980da5", "score": "0.54677635", "text": "def is_error(self):\n return False", "title": "" }, { "docid": "fd01bbf444074aaf7eb7ef245e0e33c5", "score": "0.5464567", "text": "def isValueError(self):\n return bool()", "title": "" }, { "docid": "70b23295c98e991c3caee01a72644276", "score": "0.54627234", "text": "def has_failed(self) -> bool:\n return self.status == \"error\"", "title": "" }, { "docid": "2235489005d4a9f1f47246caa4124537", "score": "0.54597944", "text": "def is_error(self):\n return True", "title": "" }, { "docid": "07d5b32c89399e1509bee80a3c8c85a1", "score": "0.5455823", "text": "def is_benign_message(self, message):\n benign_messages = [\n # Fixes spyder-ide/spyder#14928\n # Fixes spyder-ide/spyder-kernels#343\n 'DeprecationWarning',\n # Fixes spyder-ide/spyder-kernels#365\n 'IOStream.flush timed out'\n ]\n\n return any([msg in message for msg in benign_messages])", "title": "" }, { "docid": "24aacc7e504ae9a51a6aeced7490f001", "score": "0.54536545", "text": "def check_for_errors(self):\n pass", "title": "" }, { "docid": "a0b50daddffb3c9b7da8e47a026f0bb8", "score": "0.54493606", "text": "def test_support_warning(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n sk = SinkhornKnopp()\n P = np.array([[0, 0], [1, 0]])\n sk.fit(P)\n self.assertEqual(w[0].category, UserWarning)", "title": "" }, { "docid": "26475ab0b81ac2da064e8a252b2999e3", "score": "0.5445519", "text": "def alive(self):\n return len(self.incorrects) < 6", "title": "" }, { "docid": "25625e5f2d5614fc090a89840628e417", "score": "0.54410225", "text": "def validate(self) -> tuple:\n response = self.run()\n\n if self.aw:\n self.exe = self.exe.append(\"WARNING\")\n\n prevent = list(filter(lambda rule: rule.mark not in self.exe, response))\n\n return bool(not prevent), prevent", "title": "" }, { "docid": "7828a942e6d097da35fdd026d1f07147", "score": "0.54333544", "text": "def is_sf_external_invite_warn(self):\n return self._tag == 'sf_external_invite_warn'", "title": "" }, { "docid": "c87afad0fc85b8ecf6d9892347e66c8e", "score": "0.54258585", "text": "def WARNINGS():\n ...", "title": "" }, { "docid": "98c6c74ec8583b9fbefb19344fdc2714", "score": "0.5420725", "text": "def is_error(self) -> bool:\n\n return not self.is_success", "title": "" }, { "docid": 
"98c6c74ec8583b9fbefb19344fdc2714", "score": "0.5420725", "text": "def is_error(self) -> bool:\n\n return not self.is_success", "title": "" }, { "docid": "00b439f2465b7518e54539fafe277772", "score": "0.540717", "text": "def HasAnalysisReports(self):\n query = u'SELECT COUNT(*) FROM {0:s}'.format(u'analysis_report')\n self._cursor.execute(query)\n\n row = self._cursor.fetchone()\n return row and row[0] != 0", "title": "" }, { "docid": "86f97c1179eaaf27a1cb57607f43b261", "score": "0.540393", "text": "def is_data_correct(self):\n return self.data_errors == []", "title": "" }, { "docid": "7662fdf73b8ffd9cafc597b31b4545bc", "score": "0.53986216", "text": "def warnings(self) -> List[WarningTuple]: # noqa: D401\n return self._warnings", "title": "" }, { "docid": "2365ed3d2a1fb4f0aa7ebd49ea51fbe1", "score": "0.53981024", "text": "def has_tracking_error(scans):\n if scans is None:\n return False\n for scan in scans:\n if scan.info.telescope.has_tracking_error:\n return True\n else:\n return False", "title": "" }, { "docid": "315847cbb1082557a1f0eaa4a2bb449d", "score": "0.53699243", "text": "def _warn_and_signal_if_skip(self, fileinfo: FileInfo) -> bool:\n for warning_handler in self._get_warning_handlers():\n if warning_handler(fileinfo):\n # On the first warning handler that returns a signal to skip\n # immediately propagate this signal and no longer check\n # the other warning handlers as no matter what the file will\n # be skipped.\n return True\n return False", "title": "" }, { "docid": "6c32b701c6cba5b063f9f1580994a69f", "score": "0.53665394", "text": "def check_warnings(*filters, **kwargs): # pragma: no cover\n quiet = kwargs.get(\"quiet\")\n\n if not filters:\n filters = ((\"\", Warning),)\n # Preserve backward compatibility\n\n if quiet is None:\n quiet = True\n\n return _filterwarnings(filters, quiet)", "title": "" }, { "docid": "9870e85fc3f543f4f48495c49d42dd1f", "score": "0.5365128", "text": "def warnings(self, warnings):\n\n self._warnings = warnings", "title": "" }, { "docid": "7e8eb6a74ebcb4bd730ee1967e200b95", "score": "0.53623396", "text": "def check_permissions_error(check_errors):\n\n if ('permitted' in check_errors or 'permissions' in check_errors) and 'DEPRECATION' not in check_errors:\n return True\n\n return False", "title": "" }, { "docid": "0931d9987f93f6bda427222bfebcc59c", "score": "0.5359618", "text": "def version_warnings(cls):\n return None", "title": "" }, { "docid": "d5aaa1833ad9080cb87d785af627ebe5", "score": "0.534911", "text": "def get_warnings(self):\n warnings = self.warnings\n self.warnings = []\n return warnings", "title": "" }, { "docid": "ff029678adc1bf37385b67621f163727", "score": "0.53352", "text": "def valid(self):\n\n return bool(self._notifier)", "title": "" }, { "docid": "b00c5b25bf564caab9b2dee16c90700d", "score": "0.5334448", "text": "def check_build_success():\n\n def check(status, warning):\n assert \"build succeeded\" in status.getvalue()\n warnings = warning.getvalue().strip()\n assert warnings == \"\"\n\n return check", "title": "" }, { "docid": "1f9fd47eb2b9a5a806bc3a1c3e0c4642", "score": "0.5328164", "text": "def test_passThrough(self):\n self.assertEqual(self.runWithWarningsSuppressed([], lambda: 4), 4)", "title": "" }, { "docid": "d140950e234d29e8a456f8e893f157f0", "score": "0.532305", "text": "def isErrors(self):\n return self._zcmerge", "title": "" } ]
5955f2344749e17e519010ce65509e09
Computes the closest distances for all the atoms
[ { "docid": "48e9f35731dbbe50f315b81df81e0041", "score": "0.6647483", "text": "def close_distances(self):\n if self._pairs is None or self._distances is None:\n\n if self.structure.is_periodic:\n pcm_log.debug('Computing distances from scratch...')\n pairs_dict = {}\n distances_list = []\n index = 0\n for i, j in itertools.combinations(range(self.structure.natom), 2):\n if index % 100 == 0:\n pcm_log.debug('Computing distance between atoms %d and %d' % (i, j))\n ret = self.structure.lattice.distance2(self.structure.reduced[i], self.structure.reduced[j],\n radius=self.radius)\n for k in ret:\n if str(i) not in pairs_dict:\n pairs_dict[str(i)] = [index]\n else:\n pairs_dict[str(i)].append(index)\n if str(j) not in pairs_dict:\n pairs_dict[str(j)] = [index]\n else:\n pairs_dict[str(j)].append(index)\n ret[k]['pair'] = (i, j)\n distances_list.append(ret[k])\n index += 1\n for i in range(self.structure.natom):\n ret = self.structure.lattice.distance2(self.structure.reduced[i], self.structure.reduced[i])\n for k in ret:\n if str(i) not in pairs_dict:\n pairs_dict[str(i)] = [index]\n else:\n pairs_dict[str(i)].append(index)\n ret[k]['pair'] = (i, i)\n distances_list.append(ret[k])\n index += 1\n self._pairs = pairs_dict\n self._distances = distances_list\n else:\n dm = self.structure.distance_matrix()\n dm += np.eye(len(dm)) * max(dm.flatten())\n pairs_dict = {}\n distances_list = []\n for i in range(self.structure.natom):\n index = dm[:, i].argmin()\n pairs_dict[str(i)] = [index]\n distances_list.append(dm[index, i])\n self._pairs = pairs_dict\n self._distances = distances_list\n\n return self._pairs, self._distances", "title": "" } ]
[ { "docid": "34a6f16d64add96004fd651c569f8446", "score": "0.6619735", "text": "def get_dist(self,atom):\n d = self.r-self.r[:,atom].reshape(3,1) # subtract the location col from the matrix for each atom\n dist = np.sqrt((d*d).sum(axis=0))\n dist_copy = dist.copy()\n dist_copy[dist_copy >=self.sim_params['nbCutoff']] = 0\n dist_copy[self.bonded_atoms[atom]] = dist[self.bonded_atoms[atom]]\n mask = dist_copy>0\n mask[0] = False # take care of the zero position\n return dist, mask", "title": "" }, { "docid": "18daa13a98d6d5c998f6691988564d5a", "score": "0.65297484", "text": "def calculate_distances(self):\n self.approx_distances = np.linalg.norm(self.tmp_array[1:, :] - self.tmp_array[0:-1, :], axis=1)", "title": "" }, { "docid": "54efa02e454dab229305db1f6b5b15bc", "score": "0.6392249", "text": "def _calculate_atom_distance(i, j):\n return ((i[0] - j[0])**2 + (i[1] - j[1])**2 + (i[2] - j[2])**2) ** 0.5", "title": "" }, { "docid": "32efcfa42bf8202e1b840133810cb6b7", "score": "0.63702893", "text": "def get_distances(list_of_locs:np.ndarray, point:np.ndarray) -> np.ndarray:\n return np.linalg.norm(list_of_locs - point, ord=2, axis=1)", "title": "" }, { "docid": "b939ba4051274d3673b76ac58d5fd224", "score": "0.63335913", "text": "def closest(data, centers):\n\n # Validate input\n n, d = data.shape\n k, d_ = centers.shape\n assert d == d_\n\n result = np.zeros(n).astype('int')\n for i, p in enumerate(data):\n curr_min_dist = np.float('inf')\n corresponding_idx = 0\n for j, c in enumerate(centers):\n distance = np.sum((p - c)**2)\n if distance < curr_min_dist:\n curr_min_dist = distance\n corresponding_idx = j\n result[i] = corresponding_idx\n assert result.shape[0] == n\n return result", "title": "" }, { "docid": "e5e16ae6d1029c4314b59f237337428b", "score": "0.6271826", "text": "def resolve_closest_distance(data):\n wire1 = plot_path(data[0])\n wire2 = plot_path(data[1])\n distance = None\n position = None\n for pos in wire1:\n if pos in wire2:\n d = int(abs(pos.real)) + int(abs(pos.imag))\n if not distance or d < distance:\n distance = d\n position = pos\n return (distance, position)", "title": "" }, { "docid": "ffab5cbfc925200989635884a855261d", "score": "0.6269107", "text": "def get_distance(self, sample, ignore_closest=False):\n\n sample = np.asarray(sample)\n\n # these slots are not yet filled... so filter them out\n others = self.s if self.size == len(self.s) else self.s[:self.size]\n\n # L2 seems to work the best in my other experiments on density estimation\n distances = np.linalg.norm(sample - others, ord=2, axis=1)\n\n distances = sorted(distances)\n if ignore_closest:\n top_5 = distances[1:6]\n else:\n top_5 = distances[:5]\n\n if len(top_5) == 0:\n return 0\n else:\n return np.mean(top_5)", "title": "" }, { "docid": "ee8a369fda8ba6105c4be714e441a665", "score": "0.6230956", "text": "def compute_distance(self, correlation_matrix, attrs):\n return np.sqrt(2 * np.clip(1. 
- correlation_matrix ** 2, 0., 2.))", "title": "" }, { "docid": "13a93833975446113620bb0709747467", "score": "0.6227011", "text": "def closest_clusters(self):\n min_dist = sys.maxsize\n # arrays of clusters\n min_c1 = []\n min_c2 = []\n for c1 in self.clusters:\n for c2 in self.clusters:\n if c1 is c2:\n pass # so we don't compare the same cluster to itself\n else:\n dist = self.cluster_distance(c1, c2)\n if dist < min_dist:\n min_dist = dist\n min_c1 = c1\n min_c2 = c2\n return min_c1, min_c2, min_dist", "title": "" }, { "docid": "2074dbc1c7954f11b7b7789d646a6e6f", "score": "0.6174893", "text": "def _calculate_od_distances(self, mn):\n\n # iterate through all od pairs\n for od in mn.iter_od_pairs():\n od.calc_distance(mn.links)", "title": "" }, { "docid": "f91d68fea6984a69c530b15c0da3e42a", "score": "0.6163151", "text": "def distance_food(self):\n\t\tfor food_loc in self.ants.food():\n\t\t\tfor ant_loc in self.ants.my_ants():\n\t\t\t\tdist = self.ants.distance(ant_loc, food_loc)\n\t\t\t\tyield (dist, ant_loc, food_loc)", "title": "" }, { "docid": "1f2bb48c679e33412cbd56b3c4b8d83e", "score": "0.6155801", "text": "def _compute_distances_raw(l1, l2, embedder, n_closest=-1, return_distances=False):\n assert n_closest <= len(l2)\n inds = np.zeros((len(l1), n_closest), dtype=int)\n if return_distances:\n dists = np.zeros((len(l1), n_closest))\n \n batch_size = 4000\n # if one of the lists is short enough, precompute its embeddings\n # and normalize them\n m1_pre, m2_pre = None, None\n if len(l1) < batch_size:\n m1_pre = embedder.embed(l1)\n m1_pre = m1_pre / np.linalg.norm(m1_pre, ord=2, axis=-1, keepdims=True)\n if len(l2) < batch_size:\n m2_pre = embedder.embed(l2)\n m2_pre = m2_pre / np.linalg.norm(m2_pre, ord=2, axis=-1, keepdims=True)\n \n for m1_start in tqdm(range(0, len(l1), batch_size), desc='Calculating distances'):\n # normalize a batch of rows from m1\n if m1_pre is not None:\n m1_norm = m1_pre\n else:\n m1_norm = embedder.embed(l1[m1_start:m1_start + batch_size])\n m1_norm = m1_norm / np.linalg.norm(m1_norm, ord=2, axis=-1, keepdims=True)\n\n m1_size = min(batch_size, len(l1) - m1_start)\n curr = [[] for i in range(m1_size)] # set of closest rows\n for m2_start in tqdm(range(0, len(l2), batch_size), leave=False):\n # normalize a batch of rows from m2\n if m2_pre is not None:\n m2_norm = m2_pre \n else:\n m2_norm = embedder.embed(l2[m2_start:m2_start + batch_size])\n m2_norm = m2_norm / np.linalg.norm(m2_norm, ord=2, axis=-1, keepdims=True)\n # calculate and sort distances\n curr_dists = 1. 
- np.matmul(m1_norm, m2_norm.T)\n s_ids = m2_start + np.argsort(curr_dists, axis=-1)[:, :n_closest]\n s_dists = np.sort(curr_dists, axis=-1)[:, :n_closest]\n # merge to keep 'n_closest' rows from m2\n for i in range(0, m1_size):\n curr[i] = sorted(curr[i] + list(zip(s_ids[i], s_dists[i])), key=lambda x: x[1])[:n_closest] \n \n # store the indices of n closest targets\n inds[m1_start:m1_start + batch_size] = np.array([[x[0] for x in row] for row in curr])\n if return_distances:\n dists[m1_start:m1_start + batch_size] = np.array([[x[1] for x in row] for row in curr])\n if return_distances:\n return inds, dists\n return inds", "title": "" }, { "docid": "1871b3842d57764a06990bdaaab205e5", "score": "0.61550176", "text": "def _compute_pairwise_distances(self, traj):\n # Compute distances between atoms specified in atom_pairs, end direct well potential\n self.distances = md.compute_distances(traj,\n self.atom_pairs,\n periodic=False)\n return", "title": "" }, { "docid": "0ac5c728c04eeb3f05922a736dc46104", "score": "0.6145763", "text": "def closest_point(src, dests):\n distances = {}\n for dest in dests:\n distances[dest] = math.sqrt((dest[0] - src[0])**2 + (dest[1] - src[1])**2)\n return min(distances, key=distances.get)", "title": "" }, { "docid": "d1ba19a2824f3dae33fee0332c053d8b", "score": "0.6137282", "text": "def centerAndFindDistances(nodes):\n max_distances = []\n for n in nodes:\n disp = n.getCentroid()\n n.translate(-disp)\n coords = n.coords\n max_distance = 0.0\n for coord in coords:\n distance = np.sqrt(coord.dot(coord))\n if distance > max_distance:\n max_distance = distance\n max_distances.append(max_distance)\n return max_distances", "title": "" }, { "docid": "76d680d5211f27ba9d414dd7af1cfbd2", "score": "0.60955405", "text": "def exactMPD(S):\n max_dist = 0\n for i in range(len(S)-1):\n # d(S[i],S[j]) = d(S[j],S[i]) therefore skip it.\n for j in range(i+1, len(S)):\n curr_dist = quad_distance(S[i], S[j]) # does not execute the sqrt\n if max_dist < curr_dist:\n max_dist = curr_dist\n return math.sqrt(max_dist)", "title": "" }, { "docid": "47082fe682f8f24ae0ba134f3b8fd07f", "score": "0.60926384", "text": "def distance(atom1, atom2):\n return math.sqrt((atom1[0] - atom2[0])**2 + (atom1[1] - atom2[1])**2 + (atom1[2] - atom2[2])**2)", "title": "" }, { "docid": "c728a3a87aa252af1c48dc5fd724e535", "score": "0.6081265", "text": "def minimum_distances(fr,**kwargs):\n\tglobal pts_ions,pts_lipids,vecs,midplanes\n\tdistance_metric = kwargs.pop('distance_metric','r')\n\tif kwargs: raise Exception('unprocessed kwargs')\n\tvec = vecs[fr]\n\tpts_fore_unstuffed = pts_ions[fr]\n\tpts_fore = boxstuff(pts_fore_unstuffed,vec)\n\tif distance_metric=='r':\n\t\tpts_back_unstuffed = pts_lipids[fr]\n\t\tpts_back = boxstuff(pts_back_unstuffed,vec)\n\t\t#---! why does vec need to be twice as long? 
(tested that the limits work though)\n\t\ttry: tree = scipy.spatial.ckdtree.cKDTree(pts_back,boxsize=np.concatenate((vec,vec)))\n\t\t#---KDTree failures are blanked\n\t\texcept: return np.array([])\n\t\tclose,nns = tree.query(pts_fore,k=1)\n\telif distance_metric=='z':\n\t\t#---no PBCs implemented here and we really just take the distance to the average-z\n\t\tclose = np.abs(pts_fore[:,2]-midplanes[fr])\n\telse: raise Exception('unclear distance metric: %s'%distance_metric)\n\treturn close", "title": "" }, { "docid": "cafb85e5e0e7029f1f05e59f06b39e0c", "score": "0.6077467", "text": "def closest3 (dataset, index):\n manhattan_distances = all_distances(dataset,index)\n sort = sorted(manhattan_distances, key=lambda i: i['distance'])[1:4]\n\n for res in sort:\n print (str(res['coordinate']) + \" Manhattan Distance: \" + str(res['distance']))", "title": "" }, { "docid": "0f609d10f8c686178e9cb3ed37d8905f", "score": "0.6069147", "text": "def all_distances(self, seed=None):\n if self.E>0:\n w = self.weights.copy()\n self.weights = np.absolute(self.weights)\n dg = self.floyd(seed)\n dg[dg==(np.sum(self.weights)+1)] = np.infty\n self.weights = w\n return dg\n else:\n return np.array([])", "title": "" }, { "docid": "1817e8df8f0a45c726681b55b3ec5d06", "score": "0.60598016", "text": "def distance_to_closest(samples) :\n dist_with_samples = rbf_kernel(samples, samples) # Distance between each samples (symetric matrix)\n np.fill_diagonal(dist_with_samples, np.NINF) # Do not take into acount the distance between a sample and itself (values on the diagonal)\n dist_with_closest = dist_with_samples.max(axis=0) # For each sample, the distance to the closest other sample\n\n return dist_with_closest", "title": "" }, { "docid": "d7e6d76ed38410fafe8aa96ba5dfc387", "score": "0.6057108", "text": "def compute_distances(self):\n current_lat, current_lon = 0.0, 0.0\n dist = 0\n x, t = 0, 0\n tp = Trace_point.objects.filter(trace=self).order_by('time')\n for p in tp:\n x = lib.getDistance(current_lat, current_lon, p.latitude, p.longitude)\n dist = dist + x\n p.distance = dist\n p.save()\n current_lat = p.latitude\n current_lon = p.longitude\n transaction.commit()", "title": "" }, { "docid": "3dbd1fb10ba72c01746d82a9cd0a3a7c", "score": "0.6042636", "text": "def slow_closest_pairs(cluster_list):\r\n length = len(cluster_list)\r\n dist_dict = {}\r\n for index1 in range(length):\r\n for index2 in range(index1+1, length):\r\n dist = cluster_list[index1].distance(cluster_list[index2])\r\n if dist not in dist_dict.keys():\r\n dist_dict[dist]=[(index1, index2)]\r\n else:\r\n dist_dict[dist].append((index1, index2))\r\n #print dist_dict\r\n min_dist = min(dist_dict)\r\n dist_pairs = dist_dict[min_dist]\r\n #print min_dist\r\n #print dist_pairs[0][1]\r\n result = set([])\r\n for index in range(len(dist_pairs)):\r\n result.add((min_dist, dist_pairs[index][0], dist_pairs[index][1]))\r\n return result", "title": "" }, { "docid": "e55560b55cedc71e696849e175503de8", "score": "0.60247344", "text": "def get_closest(self):\n return self._target_dist(lambda a, b: a < b)", "title": "" }, { "docid": "2db19ae490e6feefec872ce6bfac600d", "score": "0.6023819", "text": "def test_compute_min_dist():\n mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], np.float)\n mol2_pos = np.array([[3, 3, 3], [3, 4, 5]], np.float)\n mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], np.float)\n assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) == np.sqrt(3)", "title": "" }, { "docid": "7d3758d1df97af0f9acc4515023b821f", "score": "0.60000473", "text": "def 
closest_prons(self, pron1):\n result = []\n for pron2 in self._pronunciations:\n result.append((self.expand(pron2), self.__memoize__(pron1, pron2)))\n result.sort(lambda x, y: cmp(x[1], y[1]))\n return [x for x in result if x[1] <= _MAX_DISTANCE]", "title": "" }, { "docid": "fc6afb37b91db7c38ab33ec7da3642d4", "score": "0.5995108", "text": "def closest(vec, n=2):#10):\n all_dists = [(w, torch.dist(vec, get_word_vector(w))) for w in we.itos]\n return sorted(all_dists, key=lambda t: t[1])[:n]", "title": "" }, { "docid": "cac8e4f983a844d66f2a4a8df681146d", "score": "0.59738487", "text": "def get_closest_atoms(self, coords: torch.Tensor,\n cells: torch.Tensor) -> torch.Tensor:\n N_atoms, n_cells, ndim, M_nbrs = (self.N_atoms, self.n_cells, self.ndim,\n self.M_nbrs)\n # Tile both cells and coords to form arrays of size (N_atoms*n_cells, ndim)\n tiled_cells = torch.reshape(torch.tile(cells, (1, N_atoms)),\n (N_atoms * n_cells, ndim))\n\n # Shape (N_atoms*n_cells, ndim) after tile\n tiled_coords = torch.tile(coords, (n_cells, 1))\n\n # Shape (N_atoms*n_cells)\n coords_vec = torch.sum((tiled_coords - tiled_cells)**2, dim=-1)\n # Shape (n_cells, N_atoms)\n coords_norm = torch.reshape(coords_vec, (n_cells, N_atoms))\n\n # Find k atoms closest to this cell.\n # Tensor of shape (n_cells, M_nbrs)\n closest_inds = torch.topk(coords_norm, k=M_nbrs, largest=False)[1]\n\n return closest_inds", "title": "" }, { "docid": "c2523162f5c971385448447dbeaf960a", "score": "0.59635824", "text": "def distance(self):", "title": "" }, { "docid": "c2523162f5c971385448447dbeaf960a", "score": "0.59635824", "text": "def distance(self):", "title": "" }, { "docid": "87392ed7f5223ce923db496d6b0cb4eb", "score": "0.59619653", "text": "def calc_neighbor_dist(self):\n nb = (self.elem.dot(t) for t in SLNode._T)\n return (SLNode.cache_dist(c, self.cache) for c in nb)", "title": "" }, { "docid": "29d2595d8a3f9070debcded043368517", "score": "0.59587306", "text": "def get_distances(centroid, points):\n return np.linalg.norm(points - centroid, axis=1)", "title": "" }, { "docid": "58b7c5b3923c0e452080531a6750f53e", "score": "0.5952934", "text": "def get_closest(self, distances):\n sortedkeys = distances.keys()\n sortedkeys.sort(cmp=lambda a, b: cmp(distances[a], distances[b]))\n MIN = 0\n MAX = len(sortedkeys) - 1\n if self.cl is True:\n return sortedkeys[MIN]\n else:\n return sortedkeys[MAX]", "title": "" }, { "docid": "f43b566e3e3b605c58d26088a97d5709", "score": "0.59498274", "text": "def atom_distances(\n positions,\n neighbors,\n cell=None,\n cell_offsets=None,\n return_vecs=False,\n normalize_vecs=False,\n neighbor_mask=None,\n):\n\n # Construct auxiliary index vector\n n_batch = positions.size()[0]\n idx_m = torch.arange(n_batch, device=positions.device, dtype=torch.long)[\n :, None, None\n ]\n # Get atomic positions of all neighboring indices\n pos_xyz = positions[idx_m, neighbors[:, :, :], :]\n\n # Subtract positions of central atoms to get distance vectors\n dist_vec = pos_xyz - positions[:, :, None, :]\n\n # add cell offset\n if cell is not None:\n B, A, N, D = cell_offsets.size()\n cell_offsets = cell_offsets.view(B, A * N, D)\n offsets = cell_offsets.bmm(cell)\n offsets = offsets.view(B, A, N, D)\n dist_vec += offsets\n\n # Compute vector lengths\n distances = torch.norm(dist_vec, 2, 3)\n\n if neighbor_mask is not None:\n # Avoid problems with zero distances in forces (instability of square\n # root derivative at 0) This way is neccessary, as gradients do not\n # work with inplace operations, such as e.g.\n # -> 
distances[mask==0] = 0.0\n tmp_distances = torch.zeros_like(distances)\n tmp_distances[neighbor_mask != 0] = distances[neighbor_mask != 0]\n distances = tmp_distances\n\n if return_vecs:\n tmp_distances = torch.ones_like(distances)\n tmp_distances[neighbor_mask != 0] = distances[neighbor_mask != 0]\n\n if normalize_vecs:\n dist_vec = dist_vec / tmp_distances[:, :, :, None]\n return distances, dist_vec\n return distances", "title": "" }, { "docid": "308a1c36befae803a1f5a804485fcfaf", "score": "0.5936027", "text": "def distances(self):\n dists = list()\n for p0, p1 in self.edges():\n dists.append(distance(p0, p1))\n return np.array(dists)", "title": "" }, { "docid": "baf3103db9c5513456e595daa651a23e", "score": "0.5931504", "text": "def compute_distances(points: Points) -> PointsWithDistance:\r\n points_with_distances = []\r\n total_distance = 0\r\n\r\n for i, point in enumerate(points):\r\n lat, lng, alt = point\r\n\r\n if i == 0:\r\n points_with_distances.append((lat, lng, alt, 0))\r\n else:\r\n # Haversine function returns a distance in kilometers\r\n # But we want a distance in meters\r\n total_distance += haversine(coords_from_point(point), coords_from_point(points[i - 1])) * 1000\r\n\r\n points_with_distances.append((lat, lng, alt, int(total_distance)))\r\n\r\n return points_with_distances", "title": "" }, { "docid": "49559abd88971ac5cc46c278a32666aa", "score": "0.5927783", "text": "def _init_min_dist(self):\n points = self.points\n n = len(points)\n fn = self.fn_visibles\n m = self.min_dist\n for j in xrange(n):\n adj_j = []\n for i in xrange(n):\n if i==j:\n m[i,j]=0\n continue\n ix0, jx0 = i,j\n if fn(points[i],points[j]):\n ix1, jx1 = i,j\n assert((points[ix0]==points[ix1]) and\n (points[jx0]==points[jx1]))\n m[i,j] = abs(points[i]-points[j])\n adj_j.append(i)\n else:\n m[i,j] = bignum\n self.adj.append(adj_j)", "title": "" }, { "docid": "066a911041d52a4b0b309da97a082b34", "score": "0.59232354", "text": "def distance_to_closest(self, iterator) -> Union[int, float]:\n assert iterator\n closest_distance_squared = inf\n for po2 in iterator:\n if not isinstance(po2, Point2):\n po2 = po2.position\n distance = (self[0] - po2[0]) ** 2 + (self[1] - po2[1]) ** 2\n if distance < closest_distance_squared:\n closest_distance_squared = distance\n return closest_distance_squared ** 0.5", "title": "" }, { "docid": "29ea46cfcccc560000eb76e948192930", "score": "0.5923077", "text": "def get_distance(input_lattice, mc_lattice_size, lattice_constant):\n distances_travelled_matrix = np.zeros([mc_lattice_size[0], mc_lattice_size[1], mc_lattice_size[2]]\n ) # array the same size as the mc_lattice, to be filled with the starting coordinates of each atom, in the location it currently occupies\n # list of the net distances travelled, loses starting point info\n distances_travelled_list = []\n # sum of deltat i and delta j coordinates of all atoms\n d_j_distance_ij = np.array([0., 0.])\n for i, j, k in itertools.product(range(mc_lattice_size[0]), range(mc_lattice_size[1]), range(mc_lattice_size[2])):\n atom_starting_point_indices = input_lattice[i][j][k]\n # is there an atom in the site? 
for vaccancies the starting point index will be [-1,0,0]\n if atom_starting_point_indices[0] >= 0:\n # adding to deal with atoms that have looped around the periodic boundaries\n i_index = i + \\\n mc_lattice_size[0] * \\\n round(float(atom_starting_point_indices[2]) / 100)\n j_index = j + mc_lattice_size[1] * (atom_starting_point_indices[2] - round(\n float(atom_starting_point_indices[2]) / 100) * 100)\n distances_travelled_matrix[i][j][k] = np.sqrt(np.abs((atom_starting_point_indices[0] - i_index)**2 + (atom_starting_point_indices[1] - j_index)**2 + (\n atom_starting_point_indices[0] - i_index) * (atom_starting_point_indices[1] - j_index))) # hexagonal distance between atom start point and where it is now, matrix form\n # distance travelled per atom, in list form, no order to it though\n distances_travelled_list = np.append(\n distances_travelled_list, distances_travelled_matrix[i][j][k])\n d_j_distance_ij += [(atom_starting_point_indices[0] - i_index),\n (atom_starting_point_indices[1] - j_index)]\n d_j_distance = np.sqrt(np.abs(d_j_distance_ij[0]**2 + (d_j_distance_ij[1]) **\n 2 + d_j_distance_ij[0] * d_j_distance_ij[1])) # vector sum of all distances\n return distances_travelled_matrix * lattice_constant, distances_travelled_list * lattice_constant, d_j_distance * lattice_constant", "title": "" }, { "docid": "888d41beb653cb261780928282362fdc", "score": "0.59214205", "text": "def dist(positions):\n return array([abs(array(positions) - i) for i in positions])", "title": "" }, { "docid": "425f3821a14b58c4722253ca3645a9b4", "score": "0.5919702", "text": "def compute_distances(self, x1, x2=None):\n callbacks = StepwiseCallbacks(self.callback, [40, 30, 30])\n\n if self.normalize:\n x1 = x1 - self.means\n x1 /= np.sqrt(2 * self.vars)\n # adapted from sklearn.metric.euclidean_distances\n xx = row_norms(x1.T, squared=True)[:, np.newaxis]\n distances = _safe_sparse_dot(x1.T, x1, dense_output=True,\n callback=callbacks.next())\n distances *= -2\n distances += xx\n distances += xx.T\n with np.errstate(invalid=\"ignore\"): # Nans are fixed below\n np.maximum(distances, 0, out=distances)\n distances.flat[::distances.shape[0] + 1] = 0.0\n\n fixer = _distance.fix_euclidean_cols_normalized if self.normalize \\\n else _distance.fix_euclidean_cols\n fixer(distances, x1, self.means, self.vars, callbacks.next())\n return _interruptible_sqrt(distances, callback=callbacks.next())", "title": "" }, { "docid": "b980177874b9aa8a03c401546a55f1e0", "score": "0.5913501", "text": "def closest_intersect(self):\n # where did the wires cross?\n crossed = self.wire1_points & self.wire2_points\n\n print(crossed)\n manhatten_dist = None\n for point in crossed:\n this_dist = self.man_dist(point)\n if manhatten_dist is None:\n manhatten_dist = this_dist\n elif this_dist < manhatten_dist:\n manhatten_dist = this_dist\n return manhatten_dist", "title": "" }, { "docid": "8275ae0f46f30df77b00b6e470ad4fdd", "score": "0.5902141", "text": "def distances(article, articles):\n allDistances = []\n targetVec = article.wordvecCentroidForArticleText\n\n for a in articles:\n euclideanDist = np.linalg.norm(targetVec - a.wordvecCentroidForArticleText)\n allDistances.append((euclideanDist, a))\n\n # (optinal) come back and refactor to list comp\n return allDistances", "title": "" }, { "docid": "05936bb93162602deae506a31b90f2ab", "score": "0.58917534", "text": "def get_epicentral_distances(self, lon, lat):\n\t\tdistances = geodetic.spherical_distance(lon, lat, self.get_longitudes(),\n\t\t\t\t\t\t\t\t\t\t\t\tself.get_latitudes())\n\t\treturn 
distances / 1000.", "title": "" }, { "docid": "7bde1deeb4f7c696d9759e4042eb3bde", "score": "0.5890775", "text": "def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n distances = np.full((N, K), -1, dtype=np.float32)\n distances[mask] = np.linalg.norm(\n ((preds - targets) / normalize[:, None, :])[mask], axis=-1)\n return distances.T", "title": "" }, { "docid": "4f8fc7cf3e0f3b5963645e575c30f5ce", "score": "0.5886527", "text": "def closest_euclidean(self, point, centroids):\n point_distance = []\n\n for centroid in centroids:\n point_distance.append(self.euclidean(point, centroid))\n\n min_dist, min_index = [], np.argmin(point_distance)\n \n for m in range(len(point_distance)):\n if (point_distance[m] - point_distance[min_index]) < (10 ** -10):\n min_dist.append(m)\n\n return np.random.choice(min_dist)", "title": "" }, { "docid": "fcd3ee06b1c9a0dd54a71698e15ac10c", "score": "0.5881926", "text": "def closest_approach(self, i1, i2):\n\n x1 = self.pos_hist(i1, 0)\n x2 = self.pos_hist(i2, 1)\n\n r1 = self.solution[:, i1 * self.d:(i1 + 1) * self.d]\n r2 = self.solution[:, i2 * self.d:(i2 + 1) * self.d]\n v1 = self.solution[:, self.d*(self.no_masses + i1):self.d * (self.no_masses + i1 + 1)]\n v2 = self.solution[:, self.d*(self.no_masses + i2):self.d * (self.no_masses + i2 + 1)]\n\n mod_r = np.sqrt(np.sum((r1-r2)**2, axis=1))\n mod_v = np.sqrt(np.sum((v1-v2)**2, axis=1))\n\n min_r = np.min(mod_r)\n max_v = np.max(mod_v) # max_v occurs at min_r\n\n return min_r, max_v", "title": "" }, { "docid": "64113d1a807e379b2ead34a94c37e71a", "score": "0.5874901", "text": "def GetDistance(self):\r\n\r\n temp = self.__tan1 * self.__tan1 + self.__tan2 * self.__tan2 - 2. * self.__tan1 * self.__tan2 * self.__cos3;\r\n w = np.where(temp < 0)[0]\r\n temp[w] = 0.0\r\n self.__temp = temp # used by other functions ??\r\n distance = np.sqrt(temp)\r\n\r\n return distance", "title": "" }, { "docid": "89ef4a73a4d7b235aae388250d5fe201", "score": "0.58727086", "text": "def distance_to_nearest(residues, distance_matrix=None):\n\n if distance_matrix is None:\n distance_matrix = residue_distance_matrix(residues)\n\n residue_indices = [np.array([seq1(r.get_resname()) == aa for r in residues]) for\n aa in protein_alphabet.letters]\n\n for res_index in range(len(residues)):\n dists = distance_matrix[res_index,]\n\n non_self = np.ones_like(dists, dtype=bool)\n non_self[res_index] = False\n\n yield np.array([min(dists[aa & non_self]) if any(aa & non_self) else np.inf for\n aa in residue_indices])", "title": "" }, { "docid": "e2fea41ce9875c51882a27499fc0e107", "score": "0.58679074", "text": "def calculate_distances(fhandle, cutoff, inter):\n\n coord_re = re.compile('^(ATOM|HETATM)')\n fhandle = fhandle\n cutoff = cutoff\n inter = inter\n\n atoms = []\n for line in fhandle:\n line = line.strip()\n if coord_re.match(line):\n # atom name, altloc, res number, chain name\n atom_uid = (line[12:16].strip(), line[17:20].strip(), line[22:26].strip(), line[21])\n x, y, z = line[30:38], line[38:46], line[46:54]\n x, y, z = float(x), float(y), float(z)\n\n atoms.append((atom_uid, (x, y, z)))\n\n for i, atom_i in enumerate(atoms):\n for atom_j in atoms[i+1:]:\n # Avoid intra-residue calculations & intra chain if requested\n if not ((atom_i[0][1:] == atom_j[0][1:]) or (inter and atom_i[0][3] == atom_j[0][3])):\n d_ij = _calculate_atom_distance(atom_i[1], atom_j[1])\n if d_ij <= cutoff:\n print(_OUTPUT_FORMAT.format(atom_i[0], atom_j[0], d_ij))", "title": "" }, { "docid": "9c74b3c57cf2601410615f2a6be4824a", "score": 
"0.5867786", "text": "def findClosestAtoms(self, obj_verts, atoms,\n cutoff_from=3.5, cutoff_to=BHTree_CUT,instanceMatrices=None ):\n if not len(obj_verts):\n return None\n from bhtree import bhtreelib\n atom_coords = atoms.coords\n natoms = len(atom_coords)\n if instanceMatrices:\n coordv = Numeric.ones(natoms *4, \"f\")\n coordv.shape = (natoms, 4)\n coordv[:,:3] = atom_coords[:]\n new_coords = []\n ## for im in instanceMatrices:\n## for v in coordv:\n## atom_coords.append(list(Numeric.dot(im, v)))\n \n for m in instanceMatrices:\n new_coords.append(Numeric.dot(coordv, \\\n Numeric.transpose(m))[:, :3])\n atom_coords = Numeric.concatenate(new_coords)\n print((\"len atom_coords: \", len(atom_coords)))\n bht = bhtreelib.BHtree( atom_coords, None, 10)\n cl_atoms = []\n mdist = cutoff_from\n print (\"** Bind Geometry to Molecule Info: **\")\n print((\"** looking for closest atoms (cutoff range: %2f-%2f)... **\"%(cutoff_from, cutoff_to)))\n cl_atoms = bht.closestPointsArray(obj_verts, mdist)\n while len(cl_atoms) == 0 and mdist <cutoff_to:\n print((\"** ... no closest atoms found for cutoff = %2f; continue looking ... **\"%mdist))\n mdist=mdist+0.2\n cl_atoms = bht.closestPointsArray(obj_verts, mdist)\n #print \"mdist: \", mdist, \" len cl_atoms: \", len(cl_atoms)\n print((\"**... done. %d closest atoms found within distance: %2f **\"%(len(cl_atoms) , mdist)))\n if instanceMatrices:\n if cl_atoms:\n return [x%natoms for x in cl_atoms]\n return cl_atoms", "title": "" }, { "docid": "f98c84eaf4c934646b9857be1f1940d2", "score": "0.5860692", "text": "def distance_from_nearest_object(self):\n pos_drone = self.c.getMultirotorState().kinematics_estimated.position\n x, y = pos_drone.x_val, pos_drone.y_val\n return min([self._distance_from_object(x, y, obj_id) for obj_id in self.obstacles])", "title": "" }, { "docid": "a9b27e4981b03ff08c0b903eda7bf4ce", "score": "0.58539516", "text": "def closest(self, x) -> float:\n if self.size == 0:\n return float(\"+inf\")\n x = np.array(x, dtype=float)\n return np.linalg.norm(self.coordinates - x[None, :], axis=1).min()", "title": "" }, { "docid": "b13f81dcf782f4678f94c16935a66d0a", "score": "0.58425796", "text": "def nearest(x):\n return np.argmin(distance_fcn(nodes, x[:, None]))", "title": "" }, { "docid": "fe736c6831920adba7d5e877485fe05a", "score": "0.58339596", "text": "def get_distance(self, a: list) -> float:\n dist_x = a[0] ** 2\n dist_y = a[1] ** 2\n return (dist_x + dist_y) ** 0.5", "title": "" }, { "docid": "af6e1b24435a2a987b8fe9b202b7f89a", "score": "0.5829329", "text": "def get_distance_dask(__tan1, __tan2, __cos3):\r\n\r\n temp = __tan1 * __tan1 + __tan2 * __tan2 - 2.0 * __tan1 * __tan2 * __cos3\r\n\r\n temp = da.where(temp < 0, 0, temp)\r\n\r\n # w = np.where(temp < 0)[0]\r\n # temp[w] = 0.0\r\n\r\n # TODO\r\n # self.__temp = temp # used by other functions ??\r\n\r\n return da.sqrt(temp)", "title": "" }, { "docid": "f9fd7abf8ef4491d99cf40b09ca14129", "score": "0.58280134", "text": "def get_atoms_dist(traj, prmtop, atom_pairs, file_out):\n\n u = Universe(prmtop, traj)\n atom_group_1 = [u.select_atoms(''.join(['bynum ', str(pair[0])])) for pair in atom_pairs]\n atom_group_2 = [u.select_atoms(''.join(['bynum ', str(pair[1])])) for pair in atom_pairs]\n\n # get atom distance on frame 0\n atom_dist_ref = np.array([dist(atom1, atom2)[2][0] for atom1, atom2 in zip(atom_group_1, atom_group_2)])\n\n with open('.'.join([file_out, 'dat']), 'w') as f_out, open('_'.join([file_out, 'relative.dat']), 'w') as f_out_rel:\n # write file header\n 
f_out.write(''.join(['frame\\t', ''.join([str(pair) + '\\t' for pair in atom_pairs]).strip(), '\\n']))\n f_out_rel.write(''.join(['frame\\t', ''.join([str(pair) + '\\t' for pair in atom_pairs]).strip(), '\\n']))\n\n for ts in u.trajectory:\n atom_dist = [dist(atom1, atom2)[2][0] for atom1, atom2 in zip(atom_group_1, atom_group_2)]\n\n # format distance to tab separated values\n atom_dist_abs = np.array(atom_dist)\n atom_dist_rel = atom_dist_abs - atom_dist_ref\n\n f_out.write(''.join([str(ts.frame), '\\t', _format_atom_dist(atom_dist_abs), '\\n']))\n f_out_rel.write(''.join([str(ts.frame), '\\t', _format_atom_dist(atom_dist_rel), '\\n']))\n\n _plot_atom_dist(file_out)\n _plot_atom_dist('_'.join([file_out, 'relative']))", "title": "" }, { "docid": "b695b8831290e8bb83ab026f7f56a949", "score": "0.58260834", "text": "def _pairwise_distances(self, embeddings, squared=False):\n dot_product = torch.matmul(embeddings, embeddings.t())\n\n # Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.\n # This also provides more numerical stability (the diagonal of the result will be exactly 0).\n # shape (batch_size,)\n square_norm = torch.diag(dot_product)\n\n # Compute the pairwise distance matrix as we have:\n # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2\n # shape (batch_size, batch_size)\n distances = square_norm.unsqueeze(0) - 2.0 * dot_product + square_norm.unsqueeze(1)\n\n # Because of computation errors, some distances might be negative so we put everything >= 0.0\n distances[distances < 0] = 0\n\n if not squared:\n # Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)\n # we need to add a small epsilon where distances == 0.0\n mask = distances.eq(0).float()\n distances = distances + mask * 1e-16\n\n distances = (1.0 -mask) * torch.sqrt(distances)\n \n self.pair_dist = distances\n return distances", "title": "" }, { "docid": "1a4e153f5034184ee36ca7afa00cf89c", "score": "0.58234185", "text": "def naive_closest_pair(plane):\n if len(plane)==1:\n return plane\n a=[dist(plane[0],plane[1]),plane[0],plane[1]]\n \n for i in range(len(plane)):\n for j in range(len(plane)):\n if i!=j:\n e=dist(plane[i],plane[j])\n if a[0]>e:\n a[0]=e\n a[1]=plane[i]\n a[2]=plane[j]\n return a", "title": "" }, { "docid": "880438411ec8bc694495b5b49be3d36a", "score": "0.5822974", "text": "def distances(coordinates):\n r = []\n checked = [] # Keep list to ensure no doublecounting\n for i in coordinates:\n x1,y1,z1 = float(i[1][0]),float(i[1][1]),float(i[1][2])\n checked.append(i[1])\n name1 = i[0]\n for u in [x for x in coordinates if x[1] not in checked]:\n name2 = u[0]\n x2,y2,z2 = float(u[1][0]),float(u[1][1]),float(u[1][2])\n r.append((name1,name2,m.sqrt((x2-x1)**2.0+((y2-y1)**2.0)+(z2-z1)**2.0)))\n return r", "title": "" }, { "docid": "5447f92b07ca8f507db632180bc72ced", "score": "0.5822207", "text": "def dist(*args):\n if len(args) == 4:\n return math.sqrt(\n sum([(a - b) ** 2 for a, b in zip(args[:2], args[2:])]))\n else:\n assert (len(args) == 6)\n return math.sqrt(\n sum([(a - b) ** 2 for a, b in zip(args[:3], args[3:])]))", "title": "" }, { "docid": "df2d115ce1582708c6347ddd738dcc71", "score": "0.5813539", "text": "def dist(iter_a, iter_b):\r\n return math.sqrt(sum((a - b)**2 for a, b in zip_longest(iter_a, iter_b,\r\n fillvalue=0)))", "title": "" }, { "docid": "36a88d5d6dad75194eb7b5c52ae2566c", "score": "0.58085537", "text": "def find_nearest_neighbor(s, spheres):\n t = [(mag(s.pos - s2.pos), s2) for s2 in spheres if s2 is not s]\n return min(t)", 
"title": "" }, { "docid": "e4ebb2ebc0bc826ae8fc207be83ade19", "score": "0.5788029", "text": "def edges_distance(self, edges):\n embed_edges_0 = [self.dict_projections[edge[0]] for edge in edges]\n embed_edges_1 = [self.dict_projections[edge[1]] for edge in edges]\n if self.norm == set('L1 Norm'):\n norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 1, axis=1)\n elif self.norm == set('L2 Norm'):\n norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 2, axis=1)\n elif self.norm == set('cosine'):\n all_norms = cosine_similarity(embed_edges_0, embed_edges_1)\n norms = [all_norms[i, i] for i in range(len(all_norms))]\n else:\n raise ValueError(f\"Wrong name of norm, {self.norm}\")\n final_norms = np.array(norms).reshape(-1, 1)\n return final_norms", "title": "" }, { "docid": "f77dc008fa17d5c206d9cfad7efc46af", "score": "0.5786575", "text": "def distance(a, b):\n [a_tonnetz, b_tonnetz] = [_to_tonnetz(x) for x in [a, b]]\n return np.linalg.norm(b_tonnetz - a_tonnetz)", "title": "" }, { "docid": "7ff30da324796fda8a7f9cf11d92c646", "score": "0.57780606", "text": "def compute_distances(self, model, alpha=None, ncat=4, tolerance=1e-6):\n return pairdists(self, model, alpha, ncat, tolerance)", "title": "" }, { "docid": "f0c655a401e906bd71d673cab0cdb03a", "score": "0.5762525", "text": "def _pairwise_distances(embeddings, squared=False):\n # Get the dot product between all embeddings\n # shape (batch_size, batch_size)\n dot_product = np.matmul(embeddings, np.transpose(embeddings))\n\n # Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.\n # This also provides more numerical stability (the diagonal of the result will be exactly 0).\n # shape (batch_size,)\n square_norm = dot_product.diagonal()\n\n # Compute the pairwise distance matrix as we have:\n # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2\n # shape (batch_size, batch_size)\n distances = np.expand_dims(square_norm, 1) - 2.0 * dot_product + np.expand_dims(square_norm, 0)\n\n # Because of computation errors, some distances might be negative so we put everything >= 0.0\n distances = np.maximum(distances, 0.0)\n\n if not squared:\n # Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)\n # we need to add a small epsilon where distances == 0.0\n mask = (np.equal(distances, 0.0)).astype(float)\n distances = distances + mask * 1e-16\n\n distances = np.sqrt(distances)\n\n # Correct the epsilon added: set the distances on the mask to be exactly 0.0\n distances = distances * (1.0 - mask)\n\n return distances", "title": "" }, { "docid": "192d3717988e87dbe6d19f366669b810", "score": "0.57553464", "text": "def distance():\n return point() + point() ^ star(Distance)", "title": "" }, { "docid": "9a55fe092ed392cd74230622d9ced4e1", "score": "0.57536966", "text": "def points_distance(self, points, return_nearest=False):\n pass", "title": "" }, { "docid": "6b2883aafc51644fe068e8f3a872200f", "score": "0.5749668", "text": "def calc_distance(intersections):\n return [sum(map(abs, coord)) for coord, _, _ in intersections]", "title": "" }, { "docid": "8f27247e30db3a2d1c2dff06df17a636", "score": "0.5741955", "text": "def closest(\n numbers: Iterable[Computable],\n number: Computable,\n distance_func: Callable[[Computable, Computable], Computable] = euclidean_distance,\n) -> Computable:\n\n return min(numbers, key=partial(distance_func, number))", "title": "" }, { "docid": "b1e826acf3518a1b12d18567b6fb0f10", "score": "0.5740616", "text": "def beam_distances(self):\n dist = 
np.zeros(len(self.beams))\n for n, beam in enumerate(self.beams):\n dist[n] = beam.total_distance()\n return dist", "title": "" }, { "docid": "4af5d93c9af1ff1c38991bf033cf6418", "score": "0.5739776", "text": "def calculate_distance(atom1_coord, atom2_coord) :\n x_distance = atom1_coord[0] - atom2_coord[0]\n y_distance = atom1_coord[1] - atom2_coord[1]\n z_distance = atom1_coord[2] - atom2_coord[2]\n distance = numpy.sqrt((x_distance**2)+(y_distance**2)+(z_distance**2))\n return distance", "title": "" }, { "docid": "f5f5bc8ed8343dff0721e8a85e6225a2", "score": "0.57237065", "text": "def get_tower_distances(self, num_towers=4):\n # TODO: get num of towers from param server\n towers = []\n for t in range(1,num_towers+1):\n tower = self.get_trans_wrt_robot(\"/tower_\"+str(t))\n towers.append(Point(tower[0],tower[1],0))\n\n distances = []\n for t in range(num_towers):\n if t != num_towers-1:\n distances.append(round(spatial.distance.euclidean((towers[t].x,towers[t].y) , (towers[t+1].x,towers[t+1].y)),3))\n else:\n distances.append(round(spatial.distance.euclidean((towers[t].x,towers[t].y), (towers[0].x,towers[0].y)),3))\n \n return towers, distances", "title": "" }, { "docid": "823ef4e6cf7227e346f7e39cd1f1eff1", "score": "0.5723467", "text": "def getDistances(self) -> np.ndarray:\n\t\treturn self._deltas", "title": "" }, { "docid": "1391fac83def9a4d4063957076ddb8f2", "score": "0.5719997", "text": "def get_pairwise_distances(\n self, subtract_radius: bool = False, grid: Optional[GridBase] = None\n ) -> np.ndarray:\n if grid is None:\n\n def get_distance(p1, p2):\n \"\"\"helper function calculating the distance between points\"\"\"\n return np.linalg.norm(p1 - p2)\n\n else:\n get_distance = grid.distance_real\n\n # calculate pairwise distance and return it in requested form\n num = len(self)\n dists = np.zeros((num, num))\n # iterate over all droplet pairs\n for i in range(num):\n for j in range(i + 1, num):\n d1, d2 = self[i], self[j]\n dist = get_distance(d1.position, d2.position)\n if subtract_radius:\n dist -= d1.radius + d2.radius\n dists[i, j] = dists[j, i] = dist\n\n return dists", "title": "" }, { "docid": "eb4d2cdd996a66a216487fdc0e4c1360", "score": "0.5718138", "text": "def all_query_nearest(similarity_matrix, selected_vector_set, queries):\n dist_mat = compute_squared_distance_no_loops(selected_vector_set, queries)\n chosen_indices = np.argmin(dist_mat, axis = 1)\n\n return dist_mat, chosen_indices", "title": "" }, { "docid": "d3b8c6511130be02c4c3b83dc8bc84d4", "score": "0.5716584", "text": "def calc_distances(self, wrt='master'):\r\n\t\tfrom mapping.geotools.geodetic import spherical_distance\r\n\r\n\t\tdistances = {}\r\n\r\n\t\tif wrt == 'master':\r\n\t\t\tmaster_solution = self.get_master_solution()\r\n\t\t\tfor agency in self.get_secondary_agencies():\r\n\t\t\t\tsolution = self.solutions[agency]\r\n\t\t\t\td = spherical_distance(master_solution.lon, master_solution.lat,\r\n\t\t\t\t\t\t\t\t\t\t\tsolution.lon, solution.lat) / 1000.\r\n\t\t\t\tdistances[agency] = d\r\n\t\telif wrt == 'all':\r\n\t\t\tfrom itertools import combinations\r\n\t\t\tagencies = self.get_agencies()\r\n\t\t\tfor (agency1, agency2) in combinations(agencies, 2):\r\n\t\t\t\tsolution1 = self.solutions[agency1]\r\n\t\t\t\tsolution2 = self.solutions[agency2]\r\n\t\t\t\td = spherical_distance(solution1.lon, solution1.lat,\r\n\t\t\t\t\t\t\t\t\t\t\tsolution2.lon, solution2.lat) / 1000.\r\n\t\t\t\tdistances[tuple(sorted([agency1, agency2]))] = d\r\n\r\n\t\treturn distances", "title": "" }, { "docid": 
"fe5f6d5aaac3f192e41fbff46ddd3406", "score": "0.571297", "text": "def calculateDistances(self, n = 10, offset = 0, stage = all, valid = False):\n \n data = self.data(label = ['x', 'y'], stage = stage, valid = valid);\n dx = np.concatenate([[np.NaN], np.asarray(np.diff(data[:,0]), dtype = float)]);\n dy = np.concatenate([[np.NaN], np.asarray(np.diff(data[:,1]), dtype = float)]);\n \n dx0 = dx.copy();\n dy0 = dy.copy();\n \n delta = np.zeros((data.shape[0], n)); \n delta[:,offset] = np.sqrt(dx * dx + dy * dy);\n \n for i in range(1,n-offset):\n dx0 = np.concatenate([[np.NaN], dx0[:-1]]);\n dy0 = np.concatenate([[np.NaN], dy0[:-1]]);\n dx += dx0;\n dy += dy0;\n delta[:,i+offset] = np.sqrt(dx * dx + dy * dy); \n \n if offset > 0:\n dx = np.concatenate([[np.NaN], np.asarray(np.diff(data[:,0]), dtype = float)]);\n dy = np.concatenate([[np.NaN], np.asarray(np.diff(data[:,1]), dtype = float)]);\n dx0 = dx.copy();\n dy0 = dy.copy();\n\n delta[:,offset-1] = np.sqrt(dx * dx + dy * dy);\n \n for i in range(1,offset):\n dx0 = np.concatenate([dx0[1:], [np.NaN]]);\n dy0 = np.concatenate([dy0[1:], [np.NaN]]);\n dx += dx0;\n dy += dy0;\n delta[:,offset-i-1] = np.sqrt(dx * dx + dy * dy); \n \n return delta;", "title": "" }, { "docid": "5555ab08faba01d4776c0c967fac8d65", "score": "0.5711904", "text": "def distance(a, b):\n return norm(subs3(a, b))", "title": "" }, { "docid": "504dfdc0e0b1fc63ca403544db420d22", "score": "0.5710202", "text": "def distance(atom1, atom2):\n (Z1,pos1) = atom1\n (Z2,pos2) = atom2\n d = norm(array(pos1) - array(pos2))\n return d", "title": "" }, { "docid": "08a2c9fa437d258abda07054c6b13c8e", "score": "0.5705143", "text": "def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in xrange(num_test):\n dists[i] = np.linalg.norm(self.X_train - X[i], axis=1)\n return dists", "title": "" }, { "docid": "cab4cb30e05bd265b2cf1b23235cd489", "score": "0.57049716", "text": "def distM(self, x):\n\n result = np.zeros((len(x), len(x)))\n for i in range(len(x)):\n for j in range(len(x)):\n result[i,j] = euclid_distance(x[i],x[j])\n return result", "title": "" }, { "docid": "78f0b5c34e5c98f7f657e8519581a383", "score": "0.5704114", "text": "def compute_distances(self, x1, x2=None):\n callbacks = StepwiseCallbacks(self.callback, [20, 10, 50, 5, 15])\n\n if self.continuous.any():\n data1, data2 = self.continuous_columns(\n x1, x2, self.means, np.sqrt(2 * self.vars))\n\n # adapted from sklearn.metric.euclidean_distances\n xx = row_norms(data1, squared=True)[:, np.newaxis]\n if x2 is not None:\n yy = row_norms(data2, squared=True)[np.newaxis, :]\n else:\n yy = xx.T\n distances = _safe_sparse_dot(data1, data2.T, dense_output=True,\n callback=callbacks.next())\n distances *= -2\n distances += xx\n distances += yy\n with np.errstate(invalid=\"ignore\"): # Nans are fixed below\n np.maximum(distances, 0, out=distances)\n if x2 is None:\n distances.flat[::distances.shape[0] + 1] = 0.0\n fixer = _distance.fix_euclidean_rows_normalized if self.normalize \\\n else _distance.fix_euclidean_rows\n fixer(distances, data1, data2,\n self.means, self.vars, self.dist_missing2_cont,\n x2 is not None, callbacks.next())\n else:\n distances = np.zeros((x1.shape[0],\n (x2 if x2 is not None else x1).shape[0]))\n\n if np.any(self.discrete):\n data1, data2 = self.discrete_columns(x1, x2)\n _distance.euclidean_rows_discrete(\n distances, data1, data2, self.dist_missing_disc,\n self.dist_missing2_disc, x2 is not None, 
callbacks.next())\n\n if x2 is None:\n _distance.lower_to_symmetric(distances, callbacks.next())\n return _interruptible_sqrt(distances, callback=callbacks.next())", "title": "" }, { "docid": "58e8e477f9f4c251d3ff1467b786f098", "score": "0.5694595", "text": "def slow_closest_pair(cluster_list):\n dist_tuple = (float(\"inf\"), -1, -1)\n for outer in range(len(cluster_list)):\n for inner in range(len(cluster_list)):\n if inner == outer:\n continue\n curr_dist = pair_distance(cluster_list, outer, inner)\n if curr_dist[0] < dist_tuple[0]:\n dist_tuple = curr_dist\n return dist_tuple", "title": "" }, { "docid": "7b18f1771f6c7b3aa98fec0a4b90b55d", "score": "0.56847805", "text": "def calculate_path_distances(coords):\n\n offset = np.roll(coords, (1, 1))\n\n # get the latitude and longitude differences, in radians\n diff = (coords - offset)[1:] * np.pi / 180\n diff_lat, diff_lng = np.split(diff, 2, axis=1)\n diff_lat = np.squeeze(diff_lat)\n diff_lng = np.squeeze(diff_lng)\n\n # get the mean latitude for every latitude, in radians\n mean_lat = ((coords + offset)[1:, 0] * np.pi / 180) / 2\n cosine_mean_lat = np.cos(mean_lat)\n\n # multiply the latitude difference with the cosine_mean_latitude\n diff_lng_adjusted = cosine_mean_lat * diff_lng\n\n # square, sum and square-root\n square_lat = np.square(diff_lat)\n square_lng = np.square(diff_lng_adjusted)\n square_sum = square_lat + square_lng\n\n path_distances = constants.EARTH_RADIUS * np.sqrt(square_sum)\n\n return path_distances", "title": "" }, { "docid": "c094c1b60219a51acf70fbf5b4ccc894", "score": "0.5682819", "text": "def get_distances(self):\r\n\r\n distances = []\r\n for k, v in self.distances.items():\r\n tmp = (k[0], k[1], v)\r\n distances.append(tmp)\r\n\r\n return distances", "title": "" }, { "docid": "8fdaf4b4e38a8a5c8adcd1ad6cc9b15a", "score": "0.56714857", "text": "def distance(self, ast_weight=0.8, fcall_weight=1.5):\n assert self.literal_dist >= 0.0 # reject calls if model not initialised fully\n ast_prod = self.ast_dist * ast_weight\n fcall_prod = self.function_dist * fcall_weight\n return (ast_prod + fcall_prod) * (fcall_prod + self.literal_dist) + sum([ast_prod, fcall_prod, self.literal_dist])", "title": "" }, { "docid": "f0aa2de817676a8d63d1810032de93a8", "score": "0.56711054", "text": "def steer_candidates(x_nearest, x_rand):\n new_paths = [sim(x_nearest, ui, opts['lambda']) for ui in u_c]\n new_free = np.where([world.obstacle_free(traj_i) for traj_i in new_paths])[0]\n valid_new_paths = [new_paths[i] for i in new_free]\n \n if np.any(new_free):\n dist_to_x_rand = [distance_fcn(xi[:, -1], x_rand) for xi in valid_new_paths]\n else:\n dist_to_x_rand = -1\n\n return valid_new_paths, dist_to_x_rand", "title": "" }, { "docid": "a8474008a9b070d77ef87a0de4be35b0", "score": "0.5669161", "text": "def minIntramolDist(coords, mols):\n dist_min = 0\n for i, mol in enumerate(mols):\n natoms = len(coords[i])\n for j in range(0, natoms - 1):\n for k in range(j + 1, natoms):\n if not mol.close_atoms[j][k]:\n diff = coords[i][j] - coords[i][k]\n dist = np.sqrt(diff.dot(diff))\n if dist_min > dist or dist_min == 0:\n dist_min = dist\n return dist_min", "title": "" }, { "docid": "605e19abc5880afbd61a44951cdef7e2", "score": "0.5660234", "text": "def get_dist(pos_list):\n return([ np.sqrt(np.sum(list(map(lambda m: m**2, p)))) for p in pos_list ])", "title": "" }, { "docid": "dc530b29ee1db79526763602cdf92732", "score": "0.5658574", "text": "def computeNearestNeighbor(data, username):\n distances = []\n for instance in data:\n if instance != 
username:\n distance = pearson(data[username],\n data[instance])\n if abs(distance) != 0:\n distances.append((instance, distance))\n # sort based on distance -- closest first\n distances.sort(key=lambda artistTuple: artistTuple[1],\n reverse=True)\n return distances", "title": "" }, { "docid": "c70c2c3ee1f8a40a8befb76dd55d00a6", "score": "0.56583726", "text": "def calculate_distances(train: np.ndarray, test: np.ndarray) -> np.ndarray:\n vector_diff = test[:, np.newaxis] - train\n distances = np.linalg.norm(vector_diff, ord=2, axis=2)\n return distances", "title": "" }, { "docid": "c40a067a85ccb08a03383f50e280e090", "score": "0.56555676", "text": "def nearest_neighbor(nodes):\n # This code is inspired by\n # https://github.com/wouterkool/attention-learn-to-route/blob/master/problems/tsp/tsp_baseline.py.\n\n # distances[i][j] is the Euclidean distance from nodes[i] to nodes[j].\n distances = scipy.spatial.distance_matrix(nodes, nodes)\n current_node = 0\n tour = [current_node]\n tour_cost = 0.0\n distance_to_start = distances[current_node].copy()\n\n for _ in range(len(nodes) - 1):\n # current_node is no longer a valid neighbor (of any other node).\n distances[:, current_node] = np.Inf\n\n neighbor = distances[current_node].argmin()\n tour_cost += distances[current_node][neighbor]\n tour.append(neighbor)\n current_node = neighbor\n\n tour_cost += distance_to_start[current_node]\n return tour_cost, tour", "title": "" }, { "docid": "8922dd1745ff861ea3d7d5091f0663e0", "score": "0.56534785", "text": "def compute_distances(self, x1, x2):\n callbacks = StepwiseCallbacks(self.callback, [5, 5, 60, 30])\n\n if self.continuous.any():\n data1, data2 = self.continuous_columns(\n x1, x2, self.medians, 2 * self.mads)\n distances = _distance.manhattan_rows_cont(\n data1, data2, x2 is not None, callbacks.next())\n if self.normalize:\n _distance.fix_manhattan_rows_normalized(\n distances, data1, data2, x2 is not None, callbacks.next())\n else:\n _distance.fix_manhattan_rows(\n distances, data1, data2,\n self.medians, self.mads, self.dist_missing2_cont,\n x2 is not None, callbacks.next())\n else:\n distances = np.zeros((x1.shape[0],\n (x2 if x2 is not None else x1).shape[0]))\n\n if np.any(self.discrete):\n data1, data2 = self.discrete_columns(x1, x2)\n # For discrete attributes, Euclidean is same as Manhattan\n _distance.euclidean_rows_discrete(\n distances, data1, data2, self.dist_missing_disc,\n self.dist_missing2_disc, x2 is not None, callbacks.next())\n\n if x2 is None:\n _distance.lower_to_symmetric(distances, callbacks.next())\n return distances", "title": "" }, { "docid": "95beb43f4434de788e47aa68f0ed4cfe", "score": "0.5651158", "text": "def distance(position):\n return sum(abs(i) for i in position)", "title": "" }, { "docid": "d0673224d69ffa2acfb6596188577645", "score": "0.5640221", "text": "def determine_shortest_path_distances(tree):\n distances = defaultdict(int)\n queue = set([0])\n while queue:\n u = queue.pop()\n for v, w in tree.successors(u, with_weight=True):\n distances[v] = distances[u] + w\n queue.add(v)\n return distances", "title": "" }, { "docid": "c1c560be93810e64ecf7dbc96bfe9fe1", "score": "0.56340516", "text": "def naive_closest_pair(plane):\n d=dist(plane[0],plane[1])\n p1=plane[0]\n p2=plane[1]\n for i in range(len(plane)):\n for j in range(i+1,len(plane)):\n d1=dist(plane[i],plane[j])\n if(d>d1 and d1!=0):\n d=d1\n p1=plane[i]\n p2=plane[j]\n return [d,p1,p2]", "title": "" }, { "docid": "5e013cf8dd165ecae8459a47ad7585b2", "score": "0.5633987", "text": "def closest_euclidean(X, 
Pts):\n if Pts.size <= 0:\n return np.inf\n else:\n return np.min(np.linalg.norm(Pts - X, axis=1))", "title": "" }, { "docid": "9056556ef96aaf0906c9a86dd7b92e80", "score": "0.56339043", "text": "def distance(t1, t2):\n e2 = 0\n for a, b in zip(t1, t2):\n e2 += (a - b) ** 2\n return math.sqrt(e2)", "title": "" } ]
4638e155b95bdf2494f442218efe45ac
Gets the parameters for the grid search testing with bagging.
[ { "docid": "ccf7f78b93c8282a23d02b26fe7fdd86", "score": "0.78553545", "text": "def get_bagging_grid_parameters():\n return dict(\n n_estimators=[5, 25, 100],\n bootstrap_features=[False, True],\n bootstrap=[False, True])", "title": "" } ]
[ { "docid": "b89661190b87e1ffca562123b1d18608", "score": "0.71491104", "text": "def get_boosting_grid_parameters():\n return dict(\n n_estimators=[5, 25, 100],\n learning_rate=[0.5, 1, 2])", "title": "" }, { "docid": "3f8730fa6fbd992972292a7c161543f3", "score": "0.67523724", "text": "def get_pipe_params_for_gridsearch(self, algo, grid):\n# keys = []\n clf_grid = {}\n if len(grid) == 0:\n clf_grid = {}\n else:\n for key, value in grid.items():\n clf_grid[algo+'__'+key] = value\n\n return clf_grid", "title": "" }, { "docid": "75bc14ce652071dc2062c674d01cff68", "score": "0.6298128", "text": "def get_GS_params_lightgbm():\n params_grid = {\n 'estimator__boosting_type': ['gbdt'],\n 'estimator__objective': ['binary'],\n 'estimator__num_boost_round': [200], \n 'estimator__learning_rate': [0.01, 0.1],\n 'estimator__max_depth': [6, 100],\n 'estimator__reg_alpha': [0, 0.1],\n 'estimator__min_data_in_leaf': [5, 10],\n 'estimator__learning_rate': [0.01],\n 'estimator__scale_pos_weight': [0.2, 1, 3, 10],\n 'estimator__verbose': [-1]\n }\n\n estimator = lgb.LGBMClassifier()\n return estimator, params_grid", "title": "" }, { "docid": "e312440bdd139b5d31abe4ab3a54680d", "score": "0.62707114", "text": "def get_stacking_grid_parameters():\n return dict(\n stack_method=['auto', 'predict_proba', 'decision_function', 'predict'],\n passthrough=[False, True])", "title": "" }, { "docid": "e78418dceca0ac2c62792b97aae480c3", "score": "0.6269222", "text": "def get_params():\n params = {\n 'learning_rate': 0.01,\n 'n_estimators': 1000,\n 'verbose': 1,\n 'max_depth': 6,\n 'min_child_weight': 4,\n 'gamma': 0.6,\n 'subsample': 0.8,\n 'colsample_bytree': 0.8,\n 'reg_alpha': 5e-05,\n 'max_depth': 10,\n 'objective': 'binary:logistic',\n 'nthread': 20,\n # 'scale_pos_weight': w,\n 'seed': 42}\n return params", "title": "" }, { "docid": "003071b357f34d512a9458b24d8540c1", "score": "0.6244513", "text": "def grid_search(self, base_estimator, grid : dict) -> (dict, dict):\n estimator = copy.deepcopy(base_estimator)\n best_grids = {}\n best_scores = {}\n for label in self.label_list:\n train_df = self.processed_train_df[self.processed_train_df[self.filter_col_name]].dropna()\n train_features = train_df[self.feature_list]\n train_target = train_df[label]\n test_df = self.processed_test_df[self.processed_test_df[self.filter_col_name]].dropna()\n test_features = test_df[self.feature_list]\n test_target = test_df[label]\n best_grid = None\n best_score = 0\n for g in ParameterGrid(grid):\n estimator.set_params(**g)\n estimator.fit(train_features, train_target)\n test_prediction = estimator.predict(test_features)\n score = precision_score(test_target, test_prediction)\n if score > best_score:\n best_score = score\n best_grid = g\n best_scores[label] = best_score\n best_grids[label] = best_grid\n return best_scores, best_grids", "title": "" }, { "docid": "28d40f51fadefc0c459b8aa5a36c1815", "score": "0.61652166", "text": "def getSearchParams(self):\n\n if self.params is None:\n raise ValueError('Define the space search params.')\n\n elif type(self.params) != dict:\n raise ValueError('Space search params must be a Dict. (e.g. 
{\"lr\": [0.01,0.001], \"optimizer\": [\"adam\",\"sgd\"], \"epochs\": [100, 1000] }')\n\n return list(dict(zip(self.params, x)) for x in itertools.product(*self.params.values()))", "title": "" }, { "docid": "d9985543c3ba8eab8a1ddf87f4e18060", "score": "0.616108", "text": "def get_params(self):\n pass", "title": "" }, { "docid": "d9985543c3ba8eab8a1ddf87f4e18060", "score": "0.616108", "text": "def get_params(self):\n pass", "title": "" }, { "docid": "f6b90e933ca5b518125ffe37f068303b", "score": "0.6159354", "text": "def get_params(self):\n\n pass", "title": "" }, { "docid": "5636f94f2ea25b2953a243c3c748d2c7", "score": "0.60611826", "text": "def get_hyperparameter_grid(self):\n self.config.model_components_param = self.apply_prophet_model_components_defaults(\n model_components=self.config.model_components_param,\n time_properties=self.time_properties)\n # Returns a set of parameters for grid search\n hyperparameter_grid = {\n \"estimator__growth\": self.config.model_components_param.growth[\"growth_term\"],\n \"estimator__seasonality_mode\": self.config.model_components_param.seasonality[\"seasonality_mode\"],\n \"estimator__seasonality_prior_scale\": self.config.model_components_param.seasonality[\"seasonality_prior_scale\"],\n \"estimator__yearly_seasonality\": self.config.model_components_param.seasonality[\"yearly_seasonality\"],\n \"estimator__weekly_seasonality\": self.config.model_components_param.seasonality[\"weekly_seasonality\"],\n \"estimator__daily_seasonality\": self.config.model_components_param.seasonality[\"daily_seasonality\"],\n \"estimator__add_seasonality_dict\": self.config.model_components_param.seasonality[\"add_seasonality_dict\"],\n \"estimator__holidays\": [self.config.model_components_param.events[\"holidays_df\"]],\n \"estimator__holidays_prior_scale\": self.config.model_components_param.events[\"holidays_prior_scale\"],\n \"estimator__changepoint_prior_scale\": self.config.model_components_param.changepoints[\"changepoint_prior_scale\"],\n \"estimator__changepoints\": self.config.model_components_param.changepoints[\"changepoints\"],\n \"estimator__n_changepoints\": self.config.model_components_param.changepoints[\"n_changepoints\"],\n \"estimator__changepoint_range\": self.config.model_components_param.changepoints[\"changepoint_range\"],\n \"estimator__mcmc_samples\": self.config.model_components_param.uncertainty[\"mcmc_samples\"],\n \"estimator__uncertainty_samples\": self.config.model_components_param.uncertainty[\"uncertainty_samples\"],\n \"estimator__add_regressor_dict\": self.config.model_components_param.regressors[\"add_regressor_dict\"]\n }\n\n # Overwrites values by `model_components.hyperparameter_override`\n # This may produce a list of dictionaries for grid search.\n hyperparameter_grid = update_dictionaries(\n hyperparameter_grid,\n overwrite_dicts=self.config.model_components_param.hyperparameter_override)\n\n # Ensures all items have the proper type for\n # `sklearn.model_selection.RandomizedSearchCV`.\n # List-type hyperparameters are specified below\n # with their accepted non-list type values.\n hyperparameter_grid = dictionaries_values_to_lists(\n hyperparameter_grid,\n hyperparameters_list_type={\"estimator__changepoints\": [None]}\n )\n return hyperparameter_grid", "title": "" }, { "docid": "2bdd37878f406a749291f8ea48aabfc0", "score": "0.6058149", "text": "def get_global_parameters(self):", "title": "" }, { "docid": "3b984ed9e2f69c058645448bb2295ca2", "score": "0.6025352", "text": "def getParameters(self):\t \n\t\treturn []", "title": "" 
}, { "docid": "4b9c9aa6260f526d82a6781625e3f056", "score": "0.59862345", "text": "def get_params_for_gridsearch(self, level, params_):\n keys = []\n for key, value in params_.items():\n keys.append(key)\n if type(value) is int:\n CV = value\n else:\n p_grid = value\n CV = 10\n if level == 'light':\n p_grid = {}\n CV = 2\n elif level == 'medium':\n p_grid = {}\n else:\n pass\n \n return p_grid, CV", "title": "" }, { "docid": "428620765b90e95b6707768956829552", "score": "0.597835", "text": "def parameters(self):\n\n\n return self.varstest", "title": "" }, { "docid": "eab605082c41c4666de78b13e438aefa", "score": "0.59545165", "text": "def get_params(self):\n parameter_dict = {\n 'gluon_model': self.gluon_model,\n 'epochs': self.epochs,\n 'learning_rate': self.learning_rate,\n 'context_length': self.context_length,\n 'regression_type': self.regression_type,\n }\n return parameter_dict", "title": "" }, { "docid": "24a41b465af115ecdbba04a7603b9fd3", "score": "0.5947067", "text": "def get_test_params(cls, parameter_set=\"default\"):\n params1 = {\"k_factors\": 1, \"factor_order\": 1}\n params2 = {\"maxiter\": 25, \"low_memory\": True}\n\n return [params1, params2]", "title": "" }, { "docid": "ec648ad5eb3e14680395bb0fb22da261", "score": "0.5943957", "text": "def params(self):\n return self.config['params']", "title": "" }, { "docid": "592fd0fe5bd29fa722262c9a1ad44a6f", "score": "0.59437007", "text": "def get_param_grid(**parameters):\n\n classifier = parameters.get('classify') # get the classifier\n # get the other params\n pipeline_grids = PARAM_COMBINATIONS.get(classifier, None)\n if pipeline_grids is None:\n raise ValueError('classifier must be \"logistic\", \"svm\", \"tree\", \"random\" or \"deep\"')\n\n # create the estimator and parameter combinations\n estimator = create_pipeline(**parameters)\n\n # pop the other params\n for param in ['classify', 'use_numeric', 'use_classes', 'use_ids',\n 'use_tags', 'height', 'depth']:\n parameters.pop(param, None)\n\n # return the estimator and the parameter grid\n param_grid = list(dict_combinations(*pipeline_grids, [parameters]))[0]\n return estimator, param_grid", "title": "" }, { "docid": "3267d30afb78d6b9e1441bae4cbee3ff", "score": "0.5929704", "text": "def get_params():\n pretrained = \"glove.840B.300d\" # fixed\n embed_dim = 300 # fixed\n batch_sz = 64 # powers of 2\n lr = np.random.choice([1, 3]) * np.random.choice([1e-3]) # categorical X log of 10\n x_dim = embed_dim + np.random.randint(-3, -1) * 50 # higher than embedding\n hidden = x_dim + np.random.randint(0, 3) * 50 # higher than embedding and x_dim\n zy_dim = np.random.randint(1, 2) * 20 # no constraint but not too big\n zg_dim = np.random.randint(1, 2) * 50 # no constraint\n llm = np.random.randint(70, 100) # 20 to 50\n glm = np.random.randint(70, 100) # 20 to 50\n llm = glm = 1\n return pretrained, embed_dim, batch_sz, lr, x_dim, hidden, zy_dim, zg_dim, llm, glm", "title": "" }, { "docid": "c4545caaa74dc0feb3cb7ad28133ffe6", "score": "0.5927663", "text": "def get_parameters():\n\n params = {\n 'bronze_bucket': ssm_client.get_parameter(Name='/emr_demo/bronze_bucket')['Parameter']['Value'],\n 'silver_bucket': ssm_client.get_parameter(Name='/emr_demo/silver_bucket')['Parameter']['Value']\n }\n\n return params", "title": "" }, { "docid": "2a7fad2f810f09a4a7cc876db33ef285", "score": "0.5919669", "text": "def get_test_params(cls, parameter_set=\"default\"):\n params = [{}, {\"p\": 1, \"q\": 1}]\n return params", "title": "" }, { "docid": "3ba4b24b2192c8cfb0f815ed8a26d32a", "score": "0.58964926", 
"text": "def get_params(self):\n return []", "title": "" }, { "docid": "3ba4b24b2192c8cfb0f815ed8a26d32a", "score": "0.58964926", "text": "def get_params(self):\n return []", "title": "" }, { "docid": "98cd45d61596485f2da975e3dac64169", "score": "0.58953035", "text": "def get_params(self):\n\n raise Exception(\"not implemented yet\")", "title": "" }, { "docid": "88cd4db8578cc937ae04697140299bf9", "score": "0.5871237", "text": "def get_params(self):\n return self.top_rbm.get_params()", "title": "" }, { "docid": "254b4cd9e4f23e275b5f4f2d8a6b46f8", "score": "0.58636165", "text": "def get_params(self):\n\t\treturn {'nsgp_params':self.nsgp_params, 'l_gp_params':self.l_gp_params, 'n_gp_params':self.n_gp_params}", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.5845009", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.5845009", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.5845009", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.5845009", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "6fbc48ca667e638b85f19b2c536ba70a", "score": "0.58422834", "text": "def params(self) -> dict:", "title": "" }, { "docid": "7642f3174fc620e8337d38830e42a7fe", "score": "0.5841496", "text": "def get_best_params(self):\n return self._cma.result.xbest", "title": "" }, { "docid": "22855b6aa591234d569f16bd8f602f17", "score": "0.5833012", "text": "def hyperparameters(self):\n return []", "title": "" }, { "docid": "22855b6aa591234d569f16bd8f602f17", "score": "0.5833012", "text": "def hyperparameters(self):\n return []", "title": "" }, { "docid": "684d876084adc0132c18e8a9ba9706e0", "score": "0.58005667", "text": "def get_params(self):\n return [self.kernel, self.b]", "title": "" }, { "docid": "11326756ae5aa785e676039844fc7b56", "score": "0.5790779", "text": "def _gridsearch(self, models_param_grid, scoring):\n chosen_models, all_results = self._perform_gridsearch(models_param_grid, scoring)\n self._gridsearch_results = self._wrap_results_dataframe(self._create_gridsearch_results_dataframe(all_results))\n return chosen_models", "title": "" }, { "docid": "c0dc29a5fcf4842c445b49d6688c70e4", "score": "0.5781614", "text": "def KB_test_param():\n phil_scope = phil.parse(\n \"\"\"\n include scope dials.algorithms.scaling.scaling_options.phil_scope\n include scope dials.algorithms.scaling.model.model.model_phil_scope\n include scope dials.algorithms.scaling.scaling_refiner.scaling_refinery_phil_str\n \"\"\",\n process_includes=True,\n )\n\n parser = ArgumentParser(phil=phil_scope, check_format=False)\n parameters, _ = parser.parse_args(args=[], quick_parse=True, show_diff_phil=False)\n parameters.model = \"KB\"\n parameters.reflection_selection.method = \"use_all\"\n return parameters", "title": "" }, { "docid": "9a6b5a1e1a2aea240d16f6d0975e4128", "score": "0.5776802", "text": "def hyper_parameters(self) -> Dict[str, Any]:\n return {}", "title": "" }, { "docid": "c3dd797209eee426ddf5faf828a16309", "score": "0.5767918", "text": "def get_params(self, *args, **kwargs):\n return self.clf.regressor.get_params(*args, **kwargs)", "title": "" }, { "docid": "46bb8606da0b2681fe6a1e498c34f353", "score": "0.5766607", "text": "def getparams(self):\n return self.params", "title": "" }, { "docid": 
"ce03871db2df2ebbdccabdaac59d8bc4", "score": "0.5760161", "text": "def _get_params(self):\r\n return param.get_params(self._evaluator, self.base, self.var_args)", "title": "" }, { "docid": "7021b8acd88e74187deb1f9d56a78e4d", "score": "0.5731372", "text": "def get_parameters():\n if rospy.has_param('~k_p'):\n global k_p\n k_p = rospy.get_param('~k_p')\n if rospy.has_param('~k_i'):\n global k_i\n k_i= rospy.get_param('~k_i')\n if rospy.has_param('~k_d'):\n global k_d\n k_d = rospy.get_param('~k_d')", "title": "" }, { "docid": "0addec9c5750b1b2c2d54052decbfc10", "score": "0.5721322", "text": "def _get_parameters(self, trial: Trial) -> CustomDict:\r\n params = super()._get_parameters(trial)\r\n\r\n if self._get_param(\"bootstrap_type\", params) == \"Bernoulli\":\r\n params.pop(\"bagging_temperature\", None)\r\n elif self._get_param(\"bootstrap_type\", params) == \"Bayesian\":\r\n params.pop(\"subsample\", None)\r\n\r\n return params", "title": "" }, { "docid": "6238a3a61347881f16bdff200905d177", "score": "0.5716831", "text": "def get_parameters(self):\n # Get global parameters\n params = {'dim': self.dim, 'edge_embed_method': self.edge_embed_method}\n\n # Get data related parameters\n params.update(self.traintest_split.get_parameters())\n\n return params", "title": "" }, { "docid": "8ab6d72bc3e6779963f890128934746a", "score": "0.5715652", "text": "def get_params(self):\n raise NotImplementedError()", "title": "" }, { "docid": "8ab6d72bc3e6779963f890128934746a", "score": "0.5715652", "text": "def get_params(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ac9f8c2f86019c895e381f86820ef9b1", "score": "0.569982", "text": "def get_test_params(cls, parameter_set=\"default\"):\n params = [{}, {\"p\": 1}]\n return params", "title": "" }, { "docid": "6f7521944fe4dd1e91aeac9095f3ca99", "score": "0.56940436", "text": "def get_params(self, deep=False):\n return {'n_bins': self.n_bins}", "title": "" }, { "docid": "34bfda06864cb67b0a109b8db57a7611", "score": "0.5681332", "text": "def getParameters(self):\n return []", "title": "" }, { "docid": "91bbf1ee625301dff4a673b3880420b4", "score": "0.56744695", "text": "def getParameters(self):\n return (self.tag1Combo.currentText(),\n self.tag2Combo.currentText(),\n self.targetCombo.currentText(),\n self.forceCheckBox.isChecked())", "title": "" }, { "docid": "efb1395e70522adb18a651e5deb0931a", "score": "0.5671469", "text": "def gbpPy_params():\n # Set/fetch all the project details we need\n project = prj.project()\n\n # Print project information\n project.print()", "title": "" }, { "docid": "3efade939530d25c5284b2a005015d9e", "score": "0.5664936", "text": "def parameters(self):\n return []", "title": "" }, { "docid": "3efade939530d25c5284b2a005015d9e", "score": "0.5664936", "text": "def parameters(self):\n return []", "title": "" }, { "docid": "fbf98f9c2149eda8283f3334de1666d8", "score": "0.56516093", "text": "def prepare_randomsearch_estimators(self) -> Iterator[Estimator]:\n grid = ParameterSampler(\n self.param_grid, n_iter=self.n_iter, random_state=config.RANDOM_STATE\n )\n\n yield from (\n clone(self.estimator).set_params(\n **{\n k: np.array(v).item() # To ensure compatibility with skopt Spaces\n for k, v in p.items()\n }\n )\n for p in grid\n )", "title": "" }, { "docid": "59d199b4c6f949061f2c24f7e356dd99", "score": "0.5643293", "text": "def _prepare_hyperparam_grid(self, cfg_method: dict):\n space = cfg_method['base_param']\n space.update(cfg_method['model_param'])\n param_grid = list(ParameterGrid(space))\n return param_grid", "title": "" 
}, { "docid": "dbdb6dc282d686e3faee7bf25e2a4cdc", "score": "0.56396145", "text": "def get_env_params():\n global na3x_cfg\n global IS_TEST\n return na3x_cfg[NA3X_ENV][CFG_ENV_TEST if IS_TEST else CFG_ENV_PROD]", "title": "" }, { "docid": "6adb4cef63aad4cb129072d88a64a279", "score": "0.5630755", "text": "def get_params(self):\n return {\n 'window_size': self.window_size,\n 'input_dim': self.input_dim,\n 'output_dim': self.output_dim,\n 'normalize_window': self.normalize_window,\n 'max_windows': self.max_windows,\n 'regression_type': self.regression_type,\n 'regression_model': self.regression_model,\n }", "title": "" }, { "docid": "dcb615420e4358f342e0eb20ace9034d", "score": "0.56297857", "text": "def __create_default_sgd_grid_parameters():\n grid_parameters = {\n 'vect__min_df': (0.01, 0.025, 0.05, 0.075, 0.1),\n 'vect__max_df': (0.25, 0.5, 0.75, 0.95, 1.0),\n # 'vect__max_features': (None, 5000, 10000, 50000),\n # 'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams\n 'tfidf__use_idf': (True, False),\n 'tfidf__norm': ('l1', 'l2'),\n 'clf__alpha': (0.00001, 0.000001),\n 'clf__penalty': ('l1', 'l2', 'elasticnet'),\n 'clf__n_iter': (10, 50, 75, 100),\n # 'clf__loss': ('hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'),\n }\n return grid_parameters", "title": "" }, { "docid": "f241757765c68e44ae511274c84a4894", "score": "0.5628675", "text": "def _test_params(self, idx, **kwargs):\n\n self.semaphore.acquire()\n self.logger.debug(\"Grid search parameters %s\" % dict(**kwargs))\n clst = self._clustering(**kwargs)\n clst.fit(self._reference_input[self._algorithm_name])\n self.logger.debug(\"Model fit done %s\" % dict(**kwargs))\n self._gridsearch_results.register(\n new_results=pd.DataFrame([[idx] + self.assess_clustering(labels=clst.labels_)],\n columns=[\"params_idx\", \"n_clst\", \"clst_size\", \"clst_qscore\", \"silhouette_score\",\n \"singletons\"]))\n self.logger.debug(\"Done with grid search %s\" % dict(**kwargs))\n self.semaphore.release()", "title": "" }, { "docid": "0599b749a5a074ddf5062cdd5c787c73", "score": "0.5628131", "text": "def params(data):\n \n names = data['Company'].values\n \n # parameters\n companies = data['Company'].index\n betas = data['Beta'].values\n returns = data['Expected return'].values\n sustainabilities = data['ESG score'].values\n cleans = data['Clean200']+data['ScienceBasedTargets']\n dys = data['Dividend yield'].values\n pes = data['P/E'].values\n params = [companies,betas,returns,sustainabilities,dys,cleans,pes]\n return names, params", "title": "" }, { "docid": "1d27a3325664e3d7a9128e2556d6362a", "score": "0.5625303", "text": "def summarize_gridsearch(cv):\n results = pd.DataFrame(cv.cv_results_)\n # Now look at the 10 best results to get a sense of the parameters\n bestind = (-results['mean_test_score']).argsort().values\n best = results.iloc[bestind[0:10]]\n # Let's get a list of all keys used in the param grid search\n param_grid_keys = set(cv.param_grid[0].keys())\n for param in cv.param_grid[1:]:\n param_grid_keys.union(list(param.keys()))\n param_grid_keys = ['param_' + key for key in list(param_grid_keys)]\n summary_keys = ['mean_test_score'] + param_grid_keys\n print('Best parameters summary for broad hyperparameter search')\n print(best[summary_keys])", "title": "" }, { "docid": "e48ace54689aded0935beeae57bb1869", "score": "0.561184", "text": "def getHyperParams(self):\n kernel_params = self.gp.kernel_.get_params()\n rbf_var = kernel_params['k1__k1__constant_value']\n rbf_len = kernel_params['k1__k2__length_scale']\n noise = 
kernel_params['k2__noise_level']\n hyperparams = GaussianProcessHyperparams(rbf_len, rbf_var, noise)\n return hyperparams", "title": "" }, { "docid": "559fc2d290800e613ed7a0ee7f68dca4", "score": "0.5606776", "text": "def grid_search(verbose=0):\n\n\n return", "title": "" }, { "docid": "681a7d60eff3c342f0fce8fda524c424", "score": "0.56031865", "text": "def find_best_params(self):\n parameters = {\n 'n_estimators': [40, 80, 160, 320],\n 'max_depth': range(7, 13), 'criterion': ['entropy', 'gini']}\n rf = RandomForestClassifier(self.base_args)\n clf = grid_search.GridSearchCV(rf, parameters)\n train_data = get_data('../data/train.csv')\n if 'SexuponOutcome' in train_data.columns:\n train_data = train_data[train_data.SexuponOutcome.notnull()]\n train_data = train_data[train_data.AgeuponOutcome.notnull()]\n train_data = select_features(train_data, self.animal_type)\n X = train_data.drop(['OutcomeType'], axis=1)\n y = train_data['OutcomeType']\n clf.fit(X, y)\n print clf.best_params_", "title": "" }, { "docid": "4e95304c741c2280a945ce4117f89fd7", "score": "0.56007904", "text": "def define_search_grid(param_grid, output_file=None):\n m = ParameterSearch(output_file=output_file)\n for param_setting in ParameterGrid(param_grid):\n m.add_parameter_setting(param_setting)\n return m", "title": "" }, { "docid": "f8be5ea5eda6e4722edca156743d785e", "score": "0.56003183", "text": "def get_params(self) -> Dict[str, object]:\n return {\n \"window_size\": self.window_size,\n \"window_size_unit\": self.window_size_unit,\n \"group_by\": self.group_by,\n \"country\": self.country,\n \"continent\": self.continent,\n }", "title": "" }, { "docid": "42862f56b9e2c8ee33ac6418c53c734c", "score": "0.55790865", "text": "def _getbestfitparams(self): \n \n fname = \"{:s}Results.txt\".format(self._pathoutputs)\n file_res = open(fname, 'r')\n res = \"\"\n for line in file_res:\n res = res + line\n file_res.close()\n\n res = res.split(\"Best-fitting parameters\")[1]\n res = res.split(\"Site\")[0]\n res = res.replace(\"\\n\", \"\").split(\" \")\n res = [a for a in res if a != \"\"]\n res = [a for a in res if a != \"=\"]\n res = np.reshape(res, (len(res)/2, 2))\n\n bf = dict()\n [bf.update({res.T[0][i]: float(res.T[1][i])}) for i in range(res.shape[0])]\n self._bf = bf", "title": "" }, { "docid": "96b0590a7650646847b260debbfee300", "score": "0.55737764", "text": "def _get_parameters( self ) :\n\n # Set observation if not done before\n if self.obs().is_empty() :\n self._require_inobs( 'csdmatter::get_parameters' )\n self.obs( self._get_observations() )\n\n # Set Obs statistic\n self._set_obs_statistic( gammalib.toupper( self[ 'statistic' ].string() ) )\n\n # Set Models\n #if self.obs().models().is_empty() :\n # self.obs().models( self[ 'inmodel' ].filename() )\n\n # Query source name\n self[ 'srcname' ].string()\n\n # Collect number of unbinned, binned and OnOff obs\n # in observation container\n n_unbinned = 0\n n_binned = 0\n n_onoff = 0\n\n for obs in self.obs() :\n if obs.classname() == 'GCTAObservation' :\n if obs.eventtype() == 'CountsCube' :\n n_binned += 1\n else :\n n_unbinned += 1\n elif obs.classname() == 'GCTAOnOffObservation' :\n n_onoff += 1\n n_cta = n_unbinned + n_binned + n_onoff\n n_other = self.obs().size() - n_cta\n\n # Query other parameters\n self[ 'edisp' ].boolean()\n self[ 'calc_ulim' ].boolean()\n self[ 'calc_ts' ].boolean()\n self[ 'fix_bkg' ].boolean()\n self[ 'fix_srcs' ].boolean()\n\n # Query all dark-matter related parameters\n self[ 'mmin' ].real()\n self[ 'mmax' ].real()\n self[ 'mnumpoints' 
].integer()\n self[ 'process' ].string()\n self[ 'channel' ].string()\n self[ 'ewcorrections' ].boolean()\n self[ 'logsigmav' ].real()\n self[ 'logastfactor' ].real()\n self[ 'redshift' ].real()\n self[ 'eblmodel' ].string()\n self[ 'emin' ].real()\n self[ 'emax' ].real()\n self[ 'modtype' ].string()\n\n # Query parameters according to the Source Model type\n if self[ 'modtype' ].string() == 'PointSource' :\n\n self[ 'ra' ].real()\n self[ 'dec' ].real()\n\n if self[ 'modtype' ].string() == 'DiffuseSource' :\n\n self[ 'map_fits' ].filename()\n\n\n # Set mass points\n self._mlogspace()\n\n # self[ 'dmass' ].real()\n # self[ 'sigmav' ].real()\n\n # Read ahead output parameters\n if self._read_ahead() :\n self[ 'outfile' ].filename()\n\n # Write into logger\n self._log_parameters( gammalib.TERSE )\n\n # Set number of processes for multiprocessing\n self._nthreads = mputils.nthreads( self )\n\n self._log_header1( gammalib.TERSE , 'DM analysis' )\n self._log_value( gammalib.TERSE , 'Unbinned observations' , n_unbinned )\n self._log_value( gammalib.TERSE , 'Binned observations' , n_binned )\n self._log_value( gammalib.TERSE , 'OnOff Observations' , n_onoff )\n self._log_value( gammalib.TERSE , 'NonCTA Observations' , n_other )\n\n if n_other == 0 :\n\n if n_unbinned == 0 and n_binned != 0 and n_onoff == 0 :\n self._binned_mode = True\n\n elif n_unbinned == 0 and n_binned == 0 and n_onoff != 0 :\n self._onoff_mode = True\n\n elif n_unbinned == 0 and n_binned != 0 and n_onoff != 0 :\n msg = 'Mixing of binned and OnOff Observations'\n raise RuntimeError( msg )\n\n elif n_unbinned != 0 and ( n_binned != 0 or n_onoff != 0 ) :\n msg = 'Mixing of different CTA Observations'\n raise RuntimeError( msg )\n\n else :\n\n msg = 'csdmatter only supports CTA-observations'\n raise RuntimeError( msg )\n\n return", "title": "" }, { "docid": "8a49ed1dff3f9c58c5d20339babe6cce", "score": "0.5554195", "text": "def get_param_grid(modelname):\n param_combs = dict(\n max_vocabsize=[100_000, 200_000, 300_000],\n max_seqlen=[20, 40],\n # max_tgtlen=[1, 4],\n max_tgtlen=[1], # for SLO, we just need 1 token\n profile=[False, True],\n max_prflen=[20, 40],\n dropout=[0.1, 0.3, 0.6, 0.9], # an advice from Shaukat\n lr=[0.1, 0.01, 0.001, 0.0001],\n validation_split=[0.2],\n epochs=[200], # note that early_stopping is applied\n batch_size=[32, 64, 128, 256],\n patience=[30] # early stopping\n )\n if modelname in ['crossnet', 'cn', 'CrossNet', 'crossNet']:\n param_combs['dim_lstm'] = [100, 200, 300]\n param_combs['num_reason'] = [1, 2, 3]\n param_combs['dim_dense'] = [100, 200, 300]\n elif modelname in ['memnet', 'MemNet', 'mn', 'memNet', 'AttNet', 'attnet']:\n param_combs['dim_lstm'] = [100, 200, 300]\n param_combs['num_layers'] = [1, 2, 3, 4]\n elif modelname in ['tf', 'transformer', 'Transformer']:\n param_combs['target'] = [False, True]\n # param_combs['dim_pff'] = [64, 128, 256, 512]\n param_combs['num_head'] = [2, 4, 8]\n param_combs['num_layers'] = [1, 2, 3, 4]\n else:\n raise NotImplementedError\n\n return list(ParameterGrid(param_combs))", "title": "" }, { "docid": "daed6293ea3954102a65746df141dace", "score": "0.555288", "text": "def get_params(self):\n return self.params", "title": "" }, { "docid": "daed6293ea3954102a65746df141dace", "score": "0.555288", "text": "def get_params(self):\n return self.params", "title": "" }, { "docid": "8a4945696fd878ed82d69644d0a20153", "score": "0.55482626", "text": "def test_get_parameters(self):\n self.assert_initialize_driver()\n startup_params = 
self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]\n for key, value in startup_params.items():\n self.assert_get(key, value)", "title": "" }, { "docid": "268acf2bb9ad34e27f638bfdc8c18ba1", "score": "0.5546985", "text": "def getParameters(self):\n return dict();", "title": "" }, { "docid": "d61b8611ab3fa5f18bee4c2afe5cef4b", "score": "0.55456", "text": "def run_gridsearch(X, y, clf, param_grid, cv=5):\r\n\t\t\r\n\t\tgrid_search = GridSearchCV(clf,param_grid=param_grid,cv=cv)\r\n\t\tstart \t\t= time()\r\n\t\tgrid_search.fit(X, y)\r\n\t\tprint((\"\\nGridSearchCV took {:.2f} \"\"seconds for {:d} candidate \"\"parameter settings.\").format(time() - start,len(grid_search.grid_scores_)))\r\n\t\ttop_params = report(grid_search.grid_scores_, 3)\r\n\t\treturn top_params", "title": "" }, { "docid": "3b73fd6ab2d549a48f27af3c4df14169", "score": "0.5542438", "text": "def test_custom_params(self) -> None:\n model = GSCV(self.valid_clf, self.x, self.y, n_jobs=-1, params=[\n {\n 'kernel': ['rbf'],\n 'C': [100, 1000],\n 'gamma': [0.01, 'auto']\n },\n ]\n )\n self.assertEqual(\n model.params, [\n {\n 'kernel': ['rbf'],\n 'C': [100, 1000],\n 'gamma': [0.01, 'auto']\n },\n ]\n )", "title": "" }, { "docid": "ba32d217155a2dd268bbe06d53b01794", "score": "0.55402964", "text": "def distributed_params(self) -> Dict:\n pass", "title": "" }, { "docid": "8b201d6ab12121e53921abde2fe8bde7", "score": "0.5539902", "text": "def parameters(self):\n return {'symbolicated': 1,\n 'page': self._page,\n 'sort': self._sort,\n 'order': self._order}", "title": "" }, { "docid": "8b201d6ab12121e53921abde2fe8bde7", "score": "0.5539902", "text": "def parameters(self):\n return {'symbolicated': 1,\n 'page': self._page,\n 'sort': self._sort,\n 'order': self._order}", "title": "" }, { "docid": "28372f72595b9805af1afbbeb9dea177", "score": "0.55374664", "text": "def parameters(self):\n return self.trainer_parameters", "title": "" }, { "docid": "96130333391a87558d3d7a4d5329ebb3", "score": "0.5531114", "text": "def get_params(self):\n return {\n 'gru_layer_size': self._gru_layer_size,\n 'num_gru_layers': self._num_gru_layers,\n 'embedding_layer_size': self._embedding_layer_size\n }", "title": "" }, { "docid": "4dad30eb87a0c9efdfc0bd6bd089c323", "score": "0.55304575", "text": "def get_GS_params_RFClassifier():\n params_grid = {'estimator__bootstrap': [True],\n 'estimator__criterion': ['entropy'],\n 'estimator__max_depth': [3,6],\n 'estimator__max_features': [3,10],\n 'estimator__min_samples_leaf': [4],\n 'estimator__min_samples_split': [3]}\n\n estimator = RandomForestClassifier()\n\n return estimator, params_grid", "title": "" }, { "docid": "34b0ba001faa89ff0b31e5b57b0b6e79", "score": "0.5522207", "text": "def params():\n raise NotImplementedError", "title": "" }, { "docid": "34b0ba001faa89ff0b31e5b57b0b6e79", "score": "0.5522207", "text": "def params():\n raise NotImplementedError", "title": "" }, { "docid": "3aef46f875584d7ffeca1496b8a11bc2", "score": "0.55210674", "text": "def grid_search(configs: dict, train_data: typing.Union[typing.Tuple, DataLoader],\n test_data: typing.Union[typing.Tuple, DataLoader] = None, max_iter: int = None,\n cv: typing.Union[int, typing.List] = 1, random_state: int = 42, ):\n configs = prepare_search(configs)\n return base_search(configs, train_data, test_data, 'grid', max_iter, cv, random_state)", "title": "" }, { "docid": "b1fcccbcb3761d3c6101948b88350953", "score": "0.5517643", "text": "def get_params(self) -> dict:\n return {}", "title": "" }, { "docid": "7d9488d48ef0ab76217a4336c09f9d8d", 
"score": "0.55152476", "text": "def hyperopt_parameters() -> Dict[str, Any]:\n\n return {\n \"consider_prior\": True,\n \"prior_weight\": 1.0,\n \"consider_magic_clip\": True,\n \"consider_endpoints\": False,\n \"n_startup_trials\": 20,\n \"n_ei_candidates\": 24,\n \"gamma\": hyperopt_default_gamma,\n \"weights\": default_weights,\n }", "title": "" }, { "docid": "4821966687e743f47b72e48f962e232f", "score": "0.5514461", "text": "def run_gridsearch(X, y, clf, param_grid,cv=5 ):\n grid_search = GridSearchCV(clf,param_grid=param_grid,cv=cv)\n start = time()\n grid_search.fit(X, y)\n extra_name=self.data_name\n filename = extra_name + str(clf)[:6]+str(self.feature_number)\n cwd = os.getcwd()\n filename_suffix = 'model'\n pathfile = os.path.join(cwd, filename + \".\" + filename_suffix)\n with open(pathfile, \"wb\") as fp:\n pickle.dump(grid_search, fp)\n\n print((\"\\nGridSearchCV took {:.2f} \"\n \"seconds for {:d} candidate \"\n \"parameter settings.\").format(time() - start,\n len(grid_search.grid_scores_)))\n\n top_params = report(grid_search.grid_scores_, 3)\n return top_params", "title": "" }, { "docid": "29b99c1a9263b85ef09b0d04e57ca696", "score": "0.5512495", "text": "def _get_common_params(self):\n return ctx.get_params(self.tc)", "title": "" }, { "docid": "22a1a7b04f4587f094e7a4e750b952dc", "score": "0.5509202", "text": "def getGridParams(self):\n self.setParams({'MM': True, 'ELE': 1})\n gd = self._forceFields['ELE'].grid_data\n dims = gd['counts']\n center = factor * (gd['counts'] * gd['spacing'] / 2. + gd['origin'])\n spacing = factor * gd['spacing'][0]\n return (dims, center, spacing)", "title": "" }, { "docid": "d1080399a852907f0eb9bb18a445facb", "score": "0.5505685", "text": "def get_params(self):\n return self.parameters", "title": "" }, { "docid": "e6fbf793bd79463e91c5a334844f3b49", "score": "0.54962426", "text": "def get_params(self):\n return {\n 'model': self.model,\n 'model_parameters': self.model_parameters,\n 'decomposition': self.decomposition,\n 'n_components': self.n_components,\n }", "title": "" }, { "docid": "947c1afb233198ab61f09df394be99e6", "score": "0.5488683", "text": "def parameters(self):\n return self.get_parameters()", "title": "" }, { "docid": "a2f639bc08067ffc8ebebd72b5a13954", "score": "0.548097", "text": "def _get_parameters(self, trial: Trial) -> CustomDict:\r\n params = super()._get_parameters(trial)\r\n\r\n if not self._get_param(\"bootstrap\", params):\r\n params.pop(\"max_samples\", None)\r\n\r\n return params", "title": "" }, { "docid": "a2f639bc08067ffc8ebebd72b5a13954", "score": "0.548097", "text": "def _get_parameters(self, trial: Trial) -> CustomDict:\r\n params = super()._get_parameters(trial)\r\n\r\n if not self._get_param(\"bootstrap\", params):\r\n params.pop(\"max_samples\", None)\r\n\r\n return params", "title": "" }, { "docid": "a2f639bc08067ffc8ebebd72b5a13954", "score": "0.548097", "text": "def _get_parameters(self, trial: Trial) -> CustomDict:\r\n params = super()._get_parameters(trial)\r\n\r\n if not self._get_param(\"bootstrap\", params):\r\n params.pop(\"max_samples\", None)\r\n\r\n return params", "title": "" }, { "docid": "99267e883d93e40444a2df2459d95c89", "score": "0.54766554", "text": "def parameters(self):\n return {}", "title": "" }, { "docid": "700087cde4e1f4bea8234bdd8138287d", "score": "0.54612595", "text": "def define_clfs_params(grid_size):\n\n large_grid = { \n 'RF': {'n_estimators': [1,10,100,1000,10000], 'max_depth': [1,5,10,20,50,100], 'max_features': ['sqrt','log2'],\n 'min_samples_split': [2,5,10,50,100], 'n_jobs': 
[-1], 'random_state': [SEED]},\n 'ET': {'n_estimators': [1,10,100,1000,10000], 'criterion' : ['gini', 'entropy'] ,'max_depth': [1,5,10,20,50,100],\n 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10,50,100], 'n_jobs': [-1], 'random_state': [SEED]},\n 'AB': {'algorithm': ['SAMME', 'SAMME.R'], 'n_estimators': [1,10,100,1000,10000], 'random_state': [SEED]},\n 'GB': {'n_estimators': [1,10,100,1000,10000], 'learning_rate' : [0.001,0.01,0.05,0.1,0.5],'subsample' : [0.1,0.5,1.0],\n 'max_depth': [1,5,10,20,50,100], 'random_state': [SEED]},\n 'KNN': {'n_neighbors': [1,5,10,25,50,100],'weights': ['uniform','distance'],'algorithm': ['auto','ball_tree','kd_tree']},\n 'DT': {'criterion': ['gini', 'entropy'], 'max_depth': [1,5,10,20,50,100], 'min_samples_split': [2,5,10,50,100], 'random_state': [SEED]},\n 'SVM': {'C' :[0.00001,0.0001,0.001,0.01,0.1,1,10], 'random_state': [SEED]},\n 'LR': {'penalty': ['l1','l2'], 'C': [0.00001,0.0001,0.001,0.01,0.1,1,10], 'random_state': [SEED]},\n 'BAG': {'n_estimators': [1,10,100,1000,10000], 'n_jobs': [-1], 'random_state': [SEED]},\n 'NB': {'alpha': [0.00001,0.0001,0.001,0.01,0.1,1,10], 'fit_prior': [True, False]}\n }\n \n small_grid = {\n 'RF': {'n_estimators': [10,100,1000], 'max_depth': [1,5,10,20], 'max_features': ['sqrt','log2'],\n 'min_samples_split': [2,10,50], 'n_jobs': [-1], 'random_state': [SEED]},\n 'ET': {'n_estimators': [10,100,1000], 'criterion' : ['gini', 'entropy'] ,'max_depth': [1,5,10,20],\n 'max_features': ['sqrt','log2'],'min_samples_split': [2,10,50], 'n_jobs': [-1], 'random_state': [SEED]},\n 'AB': {'algorithm': ['SAMME', 'SAMME.R'], 'n_estimators': [10,100,1000], 'random_state': [SEED]},\n 'GB': {'n_estimators': [10,100,1000], 'learning_rate' : [0.001,0.01],'subsample' : [0.1,0.5],\n 'max_depth': [1,5,10,20], 'random_state': [SEED]},\n 'KNN': {'n_neighbors': [1,10,25,50],'weights': ['uniform','distance'],'algorithm': ['auto','ball_tree','kd_tree']},\n 'DT': {'criterion': ['gini', 'entropy'], 'max_depth': [1,5,10,20], 'min_samples_split': [2,10,50], 'random_state': [SEED]},\n 'SVM': {'C' :[0.01,0.1,1,10], 'random_state': [SEED]},\n 'LR': {'penalty': ['l1','l2'], 'C': [0.01,0.1,1,10], 'random_state': [SEED]},\n 'BAG': {'n_estimators': [10,100,1000], 'n_jobs': [-1], 'random_state': [SEED]},\n 'NB': {'alpha': [0.01,0.1,1,10], 'fit_prior': [True, False]}\n }\n \n test_grid = {\n 'RF': {'n_estimators': [100], 'max_depth': [5], 'max_features': ['sqrt'], 'min_samples_split': [10], 'n_jobs': [-1], 'random_state': [SEED]},\n 'ET': {'n_estimators': [100], 'criterion' : ['gini'] ,'max_depth': [5],\n 'max_features': ['sqrt'],'min_samples_split': [10], 'n_jobs': [-1], 'random_state': [SEED]},\n 'AB': {'algorithm': ['SAMME.R'], 'n_estimators': [5], 'random_state': [SEED]},\n 'GB': {'n_estimators': [5], 'learning_rate' : [0.1],'subsample' : [0.5], 'max_depth': [5], 'random_state': [SEED]},\n 'KNN': {'n_neighbors': [1],'weights': ['uniform'],'algorithm': ['auto']},\n 'DT': {'criterion': ['gini'], 'max_depth': [5], 'min_samples_split': [10], 'random_state': [SEED]},\n 'SVM': {'C' :[10], 'random_state': [SEED]},\n 'LR': {'penalty': ['l1'], 'C': [10], 'random_state': [SEED]},\n 'BAG': {'n_estimators': [1], 'n_jobs': [-1], 'random_state': [SEED]},\n 'NB': {'alpha': [1], 'fit_prior': [True, False]} \n }\n\n \n if (grid_size == 'large'):\n return large_grid\n elif (grid_size == 'small'):\n return small_grid\n elif (grid_size == 'test'):\n return test_grid\n else:\n return 0, 0", "title": "" }, { "docid": "872f96edef6243cad6e8c08f3c0b8b5c", "score": 
"0.54597384", "text": "def params(self):\n return self.model.get_params()", "title": "" }, { "docid": "95701b602bdfa5f76edd97f729f95e8e", "score": "0.5453483", "text": "def get_new_params(method='random'):\n if method != 'full':\n ensemble_choice = random.choices(\n [\n None,\n ['simple'],\n ['simple', 'horizontal-max'],\n [\n 'simple',\n \"distance\",\n \"horizontal\",\n \"horizontal-max\",\n ],\n ],\n [0.3, 0.1, 0.2, 0.2],\n )[0]\n max_generations = random.choices([5, 15, 25, 50], [0.2, 0.5, 0.1, 0.4])[0]\n else:\n max_generations = random.choices([15, 25, 50, 200], [0.2, 0.5, 0.2, 0.1])[0]\n ensemble_choice = random.choices(\n [\n None,\n ['simple'],\n ['simple', 'horizontal-max'],\n [\n 'simple',\n \"distance\",\n \"horizontal\",\n \"horizontal-max\",\n \"mosaic\",\n 'mosaic-window',\n 'mosaic-crosshair',\n \"subsample\",\n \"mlensemble\",\n ],\n ],\n [0.3, 0.1, 0.2, 0.2],\n )[0]\n if method in [\"full\", \"fast\", \"superfast\"]:\n metric_weighting = {\n 'smape_weighting': random.choices([0, 1, 5, 10], [0.3, 0.2, 0.3, 0.1])[\n 0\n ],\n 'mae_weighting': random.choices([0, 1, 3, 5], [0.1, 0.3, 0.3, 0.3])[0],\n 'rmse_weighting': random.choices([0, 1, 3, 5], [0.1, 0.3, 0.3, 0.3])[0],\n 'made_weighting': random.choices([0, 1, 3, 5], [0.7, 0.3, 0.1, 0.05])[\n 0\n ],\n 'mage_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'mle_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'imle_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'spl_weighting': random.choices([0, 1, 3, 5], [0.1, 0.3, 0.3, 0.3])[0],\n 'oda_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'mqae_weighting': random.choices([0, 1, 3, 5], [0.4, 0.2, 0.1, 0.0])[0],\n 'dwae_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'maxe_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'containment_weighting': random.choices(\n [0, 1, 3, 5], [0.9, 0.1, 0.05, 0.0]\n )[0],\n 'contour_weighting': random.choices(\n [0, 1, 3, 5], [0.7, 0.2, 0.05, 0.05]\n )[0],\n 'runtime_weighting': random.choices(\n [0, 0.05, 0.3, 1], [0.1, 0.6, 0.2, 0.1]\n )[0],\n 'uwmse_weighting': random.choices(\n [0, 0.05, 0.3, 1, 5], [0.1, 0.6, 0.2, 0.1, 0.1]\n )[0],\n 'smoothness_weighting': random.choices(\n [0, 0.05, 3, 1, -0.5, -3], [0.4, 0.1, 0.1, 0.1, 0.2, 0.1]\n )[0],\n 'ewmae_weighting': random.choices(\n [0, 0.05, 0.3, 1, 5], [0.1, 0.6, 0.2, 0.1, 0.1]\n )[0],\n }\n validation_method = random.choices(\n ['backwards', 'even', 'similarity', 'seasonal 364', 'seasonal'],\n [0.4, 0.1, 0.3, 0.3, 0.2],\n )[0]\n else:\n metric_weighting = {\n 'smape_weighting': random.choices([0, 1, 5, 10], [0.3, 0.2, 0.3, 0.1])[\n 0\n ],\n 'mae_weighting': random.choices([0, 1, 3, 5], [0.1, 0.3, 0.3, 0.3])[0],\n 'rmse_weighting': random.choices([0, 1, 3, 5], [0.1, 0.3, 0.3, 0.3])[0],\n 'made_weighting': random.choices([0, 1, 3, 5], [0.7, 0.3, 0.1, 0.05])[\n 0\n ],\n 'mage_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'mle_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'imle_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'spl_weighting': random.choices([0, 1, 3, 5], [0.1, 0.3, 0.3, 0.3])[0],\n 'oda_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'mqae_weighting': random.choices([0, 1, 3, 5], [0.4, 0.2, 0.1, 0.0])[0],\n 'maxe_weighting': random.choices([0, 1, 3, 5], [0.8, 0.1, 0.1, 0.0])[0],\n 'containment_weighting': random.choices(\n [0, 1, 3, 5], [0.9, 0.1, 0.05, 0.0]\n )[0],\n 
'contour_weighting': random.choices(\n [0, 1, 3, 5], [0.7, 0.2, 0.05, 0.05]\n )[0],\n 'runtime_weighting': random.choices(\n [0, 0.05, 0.3, 1], [0.1, 0.6, 0.2, 0.1]\n )[0],\n }\n validation_method = random.choices(\n ['backwards', 'even', 'similarity', 'seasonal 364'],\n [0.4, 0.1, 0.3, 0.3],\n )[0]\n preclean_choice = random.choices(\n [\n None,\n {\n \"fillna\": \"ffill\",\n \"transformations\": {0: \"EWMAFilter\"},\n \"transformation_params\": {\n 0: {\"span\": 3},\n },\n },\n {\n \"fillna\": \"mean\",\n \"transformations\": {0: \"EWMAFilter\"},\n \"transformation_params\": {\n 0: {\"span\": 7},\n },\n },\n {\n \"fillna\": None,\n \"transformations\": {0: \"StandardScaler\"},\n \"transformation_params\": {0: {}},\n },\n {\n \"fillna\": None,\n \"transformations\": {0: \"QuantileTransformer\"},\n \"transformation_params\": {0: {}},\n },\n {\n \"fillna\": None,\n \"transformations\": {0: \"AnomalyRemoval\"},\n \"transformation_params\": {\n 0: {\n \"method\": \"IQR\",\n \"transform_dict\": {},\n \"method_params\": {\n \"iqr_threshold\": 2.0,\n \"iqr_quantiles\": [0.4, 0.6],\n },\n \"fillna\": 'ffill',\n }\n },\n },\n {\n 'fillna': None,\n 'transformations': {\n '0': 'ClipOutliers',\n '1': 'RegressionFilter',\n '2': 'ClipOutliers',\n },\n 'transformation_params': {\n '0': {\n 'method': 'remove',\n 'std_threshold': 2.5,\n 'fillna': None,\n }, # \"SeasonalityMotifImputerLinMix\"\n '1': {\n \"sigma\": 2,\n \"rolling_window\": 90,\n \"run_order\": \"season_first\",\n \"regression_params\": {\n \"regression_model\": {\n \"model\": \"ElasticNet\",\n \"model_params\": {},\n },\n \"datepart_method\": ['common_fourier'],\n \"polynomial_degree\": None,\n \"transform_dict\": None,\n \"holiday_countries_used\": False,\n },\n \"holiday_params\": None,\n },\n '2': {\n 'method': 'remove',\n 'std_threshold': 3.0,\n 'fillna': \"SeasonalityMotifImputerLinMix\",\n },\n },\n },\n {\n 'fillna': None,\n 'transformations': {\n '0': 'ClipOutliers',\n '1': \"LevelShiftMagic\",\n '2': 'RegressionFilter',\n '3': 'ClipOutliers',\n },\n 'transformation_params': {\n '0': {\n 'method': 'remove',\n 'std_threshold': 2.5,\n 'fillna': None,\n }, # \"SeasonalityMotifImputerLinMix\"\n '1': {\n 'window_size': 90,\n 'alpha': 2.5,\n 'grouping_forward_limit': 3,\n 'max_level_shifts': 5,\n 'alignment': 'average',\n },\n '2': {\n \"sigma\": 2,\n \"rolling_window\": 90,\n \"run_order\": \"season_first\",\n \"regression_params\": {\n \"regression_model\": {\n \"model\": \"ElasticNet\",\n \"model_params\": {},\n },\n \"datepart_method\": ['common_fourier'],\n \"polynomial_degree\": None,\n \"transform_dict\": None,\n \"holiday_countries_used\": False,\n },\n \"holiday_params\": None,\n },\n '3': {\n 'method': 'remove',\n 'std_threshold': 3.0,\n 'fillna': \"SeasonalityMotifImputerLinMix\",\n },\n },\n },\n {\n \"fillna\": None,\n \"transformations\": {\"0\": \"LocalLinearTrend\"},\n \"transformation_params\": {\n \"0\": {\n 'rolling_window': 30,\n 'n_tails': 0.1,\n 'n_future': 0.2,\n 'method': 'mean',\n 'macro_micro': True,\n },\n },\n },\n 'random',\n ],\n [0.9, 0.1, 0.05, 0.1, 0.1, 0.1, 0.1, 0.05, 0.15, 0.1],\n )[0]\n if preclean_choice == \"random\":\n preclean_choice = RandomTransform(\n transformer_list=\"fast\", transformer_max_depth=2\n )\n if method == 'full':\n model_list = random.choices(\n [\n 'fast',\n 'superfast',\n 'default',\n 'fast_parallel_no_arima',\n 'all',\n 'motifs',\n 'no_shared_fast',\n 'multivariate',\n 'univariate',\n 'all_result_path',\n 'regressions',\n 'best',\n 'regressor',\n 'probabilistic',\n 
'no_shared',\n ],\n [\n 0.2,\n 0.4,\n 0.1,\n 0.2,\n 0.01,\n 0.1,\n 0.1,\n 0.05,\n 0.05,\n 0.05,\n 0.05,\n 0.05,\n 0.05,\n 0.05,\n 0.05,\n ],\n )[0]\n elif method == 'fast':\n model_list = random.choices(\n [\n 'fast',\n 'superfast',\n 'motifs',\n 'no_shared_fast',\n 'fast_parallel_no_arima',\n ],\n [\n 0.2,\n 0.3,\n 0.2,\n 0.2,\n 0.05,\n ],\n )[0]\n elif method == \"superfast\":\n model_list = 'superfast'\n else:\n model_list = random.choices(\n [\n 'fast',\n 'superfast',\n 'default',\n 'fast_parallel',\n 'motifs',\n 'no_shared_fast',\n ],\n [0.2, 0.3, 0.2, 0.2, 0.05, 0.1],\n )[0]\n\n return {\n 'max_generations': max_generations,\n 'model_list': model_list,\n 'transformer_list': random.choices(\n ['all', 'fast', 'superfast'],\n [0.2, 0.5, 0.3],\n )[0],\n 'transformer_max_depth': random.choices(\n [1, 2, 4, 6, 8, 10],\n [0.1, 0.2, 0.3, 0.3, 0.2, 0.1],\n )[0],\n 'num_validations': random.choices(\n [0, 1, 2, 3, 4, 6], [0.1, 0.2, 0.3, 0.2, 0.1, 0.05]\n )[0],\n 'validation_method': validation_method,\n 'models_to_validate': random.choices(\n [0.15, 0.10, 0.25, 0.35, 0.45], [0.3, 0.1, 0.3, 0.3, 0.1]\n )[0],\n 'ensemble': ensemble_choice,\n 'initial_template': random.choices(\n ['random', 'general+random'], [0.8, 0.2]\n )[0],\n 'subset': random.choices([None, 10, 100], [0.9, 0.05, 0.05])[0],\n 'models_mode': random.choices(['random', 'regressor'], [0.95, 0.05])[0],\n # 'drop_most_recent': random.choices([0, 1, 2], [0.8, 0.1, 0.1])[0],\n 'introduce_na': random.choice([None, True, False]),\n 'prefill_na': None,\n 'remove_leading_zeroes': False,\n 'constraint': random.choices(\n [\n None,\n {\n \"constraint_method\": \"stdev_min\",\n \"constraint_regularization\": 0.7,\n \"upper_constraint\": 1,\n \"lower_constraint\": 1,\n \"bounds\": True,\n },\n {\n \"constraint_method\": \"stdev\",\n \"constraint_regularization\": 1,\n \"upper_constraint\": 2,\n \"lower_constraint\": 2,\n \"bounds\": False,\n },\n {\n \"constraint_method\": \"quantile\",\n \"constraint_regularization\": 0.9,\n \"upper_constraint\": 0.99,\n \"lower_constraint\": 0.01,\n \"bounds\": True,\n },\n {\n \"constraint_method\": \"quantile\",\n \"constraint_regularization\": 0.4,\n \"upper_constraint\": 0.9,\n \"lower_constraint\": 0.1,\n \"bounds\": False,\n },\n ],\n [0.9, 0.1, 0.1, 0.1, 0.1],\n )[0],\n 'preclean': preclean_choice,\n 'metric_weighting': metric_weighting,\n }", "title": "" } ]
a2a3b4e78852fea42966da891506ed81
Parses the ranking CSVs and writes them to the database.
[ { "docid": "f86987d541533bda041b2f677783d392", "score": "0.0", "text": "def fillrankingdb(path=os.path.join(SCRAPED_DATA_DIR, \"rankings\", \"bgg\")):\n django.core.management.call_command(\"fillrankingdb\", path)", "title": "" } ]
[ { "docid": "b0c5db9da7df4319b33a769deacddba5", "score": "0.63729155", "text": "def save_ranks(self):\n ranks_out = os.path.join(self.out_dir, \"ranks.tsv\")\n self.ranks.to_csv(ranks_out, sep=\"\\t\")", "title": "" }, { "docid": "bf5de976bf2b4566427a0d2912a11649", "score": "0.6218092", "text": "def tbs2_ranking_intz():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n player_ranking_tbs2 = tbs2_ranking.tbs2_ranking(row['MALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n tbs2_ranking_points_list_mens.append(player_ranking_tbs2)", "title": "" }, { "docid": "ffd4b0dd694f33d18732c2bbcaf88aa4", "score": "0.5942872", "text": "def process_csv(session):\n import csv\n file = 'event_datafile_new.csv'\n with open(file, encoding = 'utf8') as f:\n csvreader = csv.reader(f)\n next(csvreader) # skip header\n for line in csvreader:\n session.execute(insert_query1, (int(line[8]), int(line[3]),line[0],line[9],float(line[5])))\n session.execute(insert_query2, (int(line[10]),int(line[8]), int(line[3]),line[0],line[9],line[1],line[4]))\n session.execute(insert_query3, (line[9],int(line[10]),line[1],line[4]))", "title": "" }, { "docid": "d4e91bc4b2943ebfa6e513b3a591307f", "score": "0.59027725", "text": "def taw11_ranking_intz():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n player_ranking_taw11 = taw11_ranking.taw11_ranking(row['MALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n taw11_ranking_points_list_mens.append(player_ranking_taw11)", "title": "" }, { "docid": "a41a46672c9583c00fb1ece9a078fcff", "score": "0.58899355", "text": "def tbs2_ranking_intzW():#initialize male data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n player_ranking_tbs2 = tbs2_ranking.tbs2_ranking(row['FEMALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n tbs2_ranking_points_list_womens.append(player_ranking_tbs2)", "title": "" }, { "docid": "38dc043a7f497a460f57a188cdb9af23", "score": "0.58713216", "text": "def tae21_ranking_intz():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n player_ranking_tae21 = tae21_ranking.tae21_ranking(row['MALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n tae21_ranking_points_list_mens.append(player_ranking_tae21)", "title": "" }, { "docid": "9fcd4e723a405a41afe85eeb6b59ed0a", "score": "0.5845593", "text": "def validateRankTable(self):\n rankData = []\n if not self.mySql.tableExists('ranks'):\n self.mySql.createTable('ranks', json.dumps(model.RANKS))\n rankDataPath = \"%s/%s\"%(self.assetsFolder, \"rankData.json\")\n with open(os.path.normpath(rankDataPath), 'r') as _file:\n rankData = json.load(_file)\n if rankData:\n for rank in rankData:\n rankDict = {'rankName':rank['rankName'],\n 'rankDescription':rank['Rank'].replace(u\"\\xa0\", u\" \"),\n 'rankCategory':rank['Category'],\n 'minTime': rank['Minimum Time'],\n 'sortNum':rank['Numerical Order']}\n self.mySql.addValue('ranks', json.dumps(rankDict))", "title": "" }, { "docid": "41d9a469588c3d7216171d06918a8df6", "score": "0.58365047", "text": "def tac1_ranking_intz():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n 
player_ranking_tac1 = tac1_ranking.tac1_ranking(row['MALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n tac1_ranking_points_list_mens.append(player_ranking_tac1)", "title": "" }, { "docid": "5c42ad78670ae8e679028faa8646db34", "score": "0.58361924", "text": "def compileRanks(level):\n\tdf = pd.read_csv(outputPath(f\"\"\"{level}/fullDataset.gzip\"\"\"), compression='gzip', index_col=0)\n\trank = df.select_dtypes('number').copy()\n\tfor col in rank.columns:\n\t\tprint(col)\n\t\trank[col] = rank.groupby(df['date'])[col].rank(pct=True)\n\t\t\n\trank['date'] = df['date']\n\trank['subreddit'] = df['subreddit']\n\n\trank.to_csv(outputPath(f\"\"\"{level}/ranks.gzip\"\"\"), compression='gzip')", "title": "" }, { "docid": "c6d11622b601ce3807942e8e9efcc3e2", "score": "0.5835561", "text": "def process_csv():\r\n\t# Process the CSV file that contains submissions data from Devpost by assigning tables and recording what prizes a project was entered for\r\n\twith open(\"submissions.csv\") as csv_file:\r\n\t\tcsv_reader = csv.reader(csv_file, delimiter=\",\")\r\n\t\ttable_number = 0\r\n\t\tfor row in csv_reader:\r\n\t\t\tif table_number == 0:\r\n\t\t\t\ttable_number += 1\r\n\t\t\telse:\r\n\t\t\t\t# Add a project name to the dictionary with a table number\r\n\t\t\t\tprojects[table_number] = {\"project\":row[0], \"num_of_our_prizes\":1, \"num_judges\":{}} \r\n\t\t\t\t# num_of_prizes is 1 by default bc they are entered for general prize\r\n\t\t\t\t# Enter into General category by default\r\n\t\t\t\tour_tracks[\"General\"].append(table_number)\r\n\t\t\t\tprojects[table_number][\"num_judges\"][\"General\"] = 0\r\n\t\t\t\tdesired_prizes = row[6].split(\", \")\r\n\r\n\t\t\t\t# Submit table number for entry for each prize they entered for on Devpost\r\n\t\t\t\tfor prize in desired_prizes:\r\n\t\t\t\t\tif prize in our_tracks.keys():\r\n\t\t\t\t\t\tprojects[table_number][\"num_of_our_prizes\"] += 1\r\n\t\t\t\t\t\tour_tracks[prize].append(table_number)\r\n\t\t\t\t\t\tprojects[table_number][\"num_judges\"][prize] = 0\r\n\t\t\t\t\telif prize in sponsor_tracks.keys():\r\n\t\t\t\t\t\tsponsor_tracks[prize].append(table_number)\r\n\t\t\t\ttable_number += 1\r\n\t\ttotal_projects = table_number - 1\r\n\t\tprint(\"Processed {0} projects.\".format(total_projects))\r\n\t\t#print(our_tracks)\r\n\treturn projects", "title": "" }, { "docid": "0683bf648101b82418c45b2d2f825b8a", "score": "0.56947255", "text": "def run_csv():\n print(\"\\n\\n====\")\n peopledata = [\n ('John', 'second guitar', 117.45),\n ('Paul', 'bass', 22.01),\n ('George', 'lead guitar', 45.99),\n ('Ringo', 'drume', 77.0),\n ('Roger', 'vocals', 12.5),\n ('Keith', 'drums', 6.25),\n ('Pete', 'guitar', 0.1),\n ('John', 'bass', 89.71)\n ]\n print(\"Step 8: Write csv file\")\n with open('../data/rockstars.csv', 'w') as people:\n peoplewriter = csv.writer(people)\n peoplewriter.writerow(peopledata)\n\n print(\"Step 9: Read csv file back\")\n with open('../data/rockstars.csv', 'r') as people:\n people_reader = csv.reader(people, delimiter=',', quotechar='\"')\n for row in people_reader:\n pprint.pprint(row)", "title": "" }, { "docid": "b1cee722058f8ca983642c6c88d7e0cb", "score": "0.5692272", "text": "def process_csv_file(csv_file):\n\n try: # bit of a long try / catch but anything here can break due to file formatting\n csv_data = [row for row in csv.reader(csv_file.read().splitlines())]\n csv_headers = csv_data[0]\n del csv_data[0:1]\n # Rest all data\n Employee.objects.all().delete()\n User.objects.filter(is_superuser=False).delete()\n 
Expense.objects.all().delete()\n for csv_record in csv_data: # Process each record\n\n json_record = csv_row_to_json(csv_record, csv_headers)\n first_name, last_name = json_record.get('employee name').strip().split()\n user, _ = User.objects.get_or_create(first_name=first_name, last_name=last_name,\n username='{0}_{1}'.format(first_name.lower(), last_name.lower()))\n user.save()\n\n employee, _ = Employee.objects.get_or_create(user=user)\n employee.address = json_record.get('employee address', None)\n employee.save()\n\n expense = Expense()\n date = json_record.get('date', '')\n if date:\n date = datetime.datetime.strptime(date, '%m/%d/%Y')\n expense.date = date\n expense.employee = employee\n expense.description = json_record.get('expense description', None)\n expense.amount = decimal.Decimal(json_record.get('pre-tax amount').replace(\",\", \"\"))\n expense.tax_name = json_record.get('tax name')\n expense.tax_amount = decimal.Decimal(json_record.get('tax amount'))\n expense.tax_rate = decimal.Decimal(expense.tax_amount/expense.amount)\n expense.category = json_record.get('category')\n expense.total_amount = expense.tax_amount + expense.amount\n expense.save()\n except (AttributeError, csv_error) as e:\n #raise Exception(\"Oops! Looks like there was a problem processing your file: {0}\".format(e))\n raise Exception(\"Oops! Looks like there was a problem processing your file: {0}\".format(e))", "title": "" }, { "docid": "49d9dcfe0cef7d02cf2173fbdb16f6a5", "score": "0.56512785", "text": "def test_csv_import(self):\r\n towns = get_towns_from_csv()\r\n\r\n for town in towns:\r\n save_town_and_parents_to_db(town)\r\n\r\n self.assertEqual(Town.objects.count(), len(towns))", "title": "" }, { "docid": "b2dae52ede3ba31d50e201007ff7f9f3", "score": "0.5648601", "text": "def taw11_ranking_intzW():#initialize male data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n player_ranking_taw11 = taw11_ranking.taw11_ranking(row['FEMALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n taw11_ranking_points_list_womens.append(player_ranking_taw11)", "title": "" }, { "docid": "d3a53d47b0e4f5e1290c1272d7604f3e", "score": "0.56390196", "text": "def run_csv_habitat():\n with open('CSVs/monster_habitat.csv', 'rb') as ifile:\n data = csv.reader(ifile)\n for row in data:\n monster_habitat_save(row)", "title": "" }, { "docid": "73083f8e9d54b50caeda23fa5b4c48ef", "score": "0.5602464", "text": "def tac1_ranking_intzW():#initialize male data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n player_ranking_tac1 = tac1_ranking.tac1_ranking(row['FEMALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n tac1_ranking_points_list_womens.append(player_ranking_tac1)", "title": "" }, { "docid": "9788f96c50d504b14b11b4a436daa868", "score": "0.555817", "text": "def tae21_ranking_intzW():#initialize male data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n player_ranking_tae21 = tae21_ranking.tae21_ranking(row['FEMALE PLAYERS'],0,0) #parameters - player name, ranking, prizeMoney\n tae21_ranking_points_list_womens.append(player_ranking_tae21)", "title": "" }, { "docid": "b020805acf46e4f91ac1b7140ee03e06", "score": "0.55148774", "text": "def parse_input_csv(self):\n for i, row in enumerate(self.in_csvfile):\n # if tweet_id is seen before, pass it\n if row[8] in 
self.tweet_id_seen:\n continue\n row = [ entry for entry in row ]\n # Tracer()()\n self.inq.put( (i, row) )\n\n for i in range(self.numprocs):\n self.inq.put(\"STOP\")", "title": "" }, { "docid": "e14f403729692c6d987e80058b5e1c54", "score": "0.5502141", "text": "def annotate_rank_summary_with_pscore(filename, delimiter=','):\n\tmotif_dir = os.path.dirname(filename)\n\tin_csv = csv.DictReader(open(filename), delimiter=delimiter) # cmfinder rank summaries are comma-separated\n\twith open(filename + '.pscore_added', 'w') as out_f:\n\t\tif in_csv.fieldnames is None:\n\t\t\tprint >> sys.stderr, \"file {0} is odd. IGNORE now\".format(filename)\n\t\t\treturn\t\n\t\tnew_fieldnames = in_csv.fieldnames + ['pscore']\n\t\tout_csv = csv.DictWriter(out_f, new_fieldnames, delimiter=delimiter)\n\t\t# need to write out the field names\n\t\t#out_csv.writeheader()# lol this function only in 2.7 and i have 2.6 Orz \n\t\tout_f.write(delimiter.join(new_fieldnames) + '\\n')\n\t\tfor obj in in_csv:\n\t\t\tmotif_full_path = os.path.join(motif_dir, obj['motif'])\n\t\t\tpscore = os.popen(\"grep \\\"Total pair posterior\\\" {0}.pscoreout\".format(motif_full_path)).read().strip()\n\t\t\tobj['pscore'] = float( pscore[len('Total pair posterior '):] )\n\t\t\tout_csv.writerow(obj)", "title": "" }, { "docid": "fcf66a95950813dba7dde5bbdc5c4055", "score": "0.54964685", "text": "def data_ranking(year):\n df = pd.read_csv('data.csv', header=None)\n print df.size\n rank_words={}\n for x in xrange(0, 2770):\n ab=datetime.datetime.strptime(df.iloc[x][0], \"%Y-%m-%d %H:%M:%S.%f\").date()\n if(ab.year>year):\n file_name=str(year)+\".json\"\n with io.FileIO(file_name, \"w\") as file:\n data=[]\n for a in rank_words:\n word={}\n word[\"text\"]=a\n word[\"size\"]=rank_words.get(a)\n data.append(word)\n json.dump(data, file, indent=1)\n rank_words={}\n year=ab.year\n for y in xrange(1, df.iloc[x].size):\n if not pd.isnull(df.iloc[x][y]):\n df.iloc[x][y]=df.iloc[x][y].lower()\n if df.iloc[x][y] in rank_words.keys():\n # print str(rank_words[df.iloc[x][y]])+\":\"+str(y)\n rank_words[df.iloc[x][y]]=rank_words[df.iloc[x][y]]+(21-y)\n else:\n rank_words[df.iloc[x][y]]=21-y\n #print df.iloc[x][y]+\" \"+str(rank_words[df.iloc[x][y]])\n pass\n pass \n with io.FileIO(\"ranked_data.json\", \"w\") as file:\n data=[]\n for a in rank_words:\n word={}\n word[\"text\"]=a\n word[\"size\"]=rank_words.get(a)\n data.append(word)\n json.dump(data, file, indent=1)\n #print sorted_x\n pass", "title": "" }, { "docid": "b035583641c71c921d98aaeac6be41af", "score": "0.5473313", "text": "def import_players():\n # All sources are expected to have the same column names:\n # player_name, pos, projection, team\n csv_list = [\"espn_Players.csv\", \"fantasydata_Players.csv\"]\n api_list = [\"https://www.fantasyfootballdatapros.com/api/projections\"]\n\n print('Removing all current projections...')\n Projection.query.delete()\n\n print('Adding from csv...')\n for csv_path in csv_list:\n print(f' - {csv_path}')\n with open(csv_path, newline='', encoding='utf-8-sig') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n add_projection(row)\n\n print('Adding from apis...')\n for api_path in api_list:\n print(f' - {api_path}')\n response = requests.get(api_path)\n if response.status_code == 200:\n players = response.json()\n for player in players:\n add_projection(row=player)\n print('Committing changes')\n db.session.commit()", "title": "" }, { "docid": "d30cc5559a89e1e6a0c3d794512c41f2", "score": "0.5439127", "text": "def csv_to_tables():\n conn 
= None\n try:\n # read the connection parameters\n params = config()\n # connect to the PostgreSQL server\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n # read from csv file\n with open('user_visits.csv', 'r') as f:\n # skip the header row\n next(f)\n cur.copy_from(f, 'user_visits', sep=',')\n # commit the changes\n conn.commit()\n print('Data conversion from csv is completed!')\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "title": "" }, { "docid": "36aea657a3e347c6f427ddef0d28f3d0", "score": "0.5407696", "text": "def run_mtoq_csv():\n with open('CSVs/monstertoquest.csv', 'rb') as ifile:\n data = csv.reader(ifile)\n for row in data:\n add_monster(row)", "title": "" }, { "docid": "104d6dc10c8707a3ea79b3779ded477b", "score": "0.5396435", "text": "def run_csv_monster():\n with open('CSVs/monsters.csv', 'rb') as ifile:\n data = csv.reader(ifile)\n for row in data:\n monster_save(row)", "title": "" }, { "docid": "d289894e54f917da3c8ec364aef99652", "score": "0.5335074", "text": "def parse_file_to_csv(data_dir, dataset_name):\n\n # Use random seed as parameter\n np.random.seed(0)\n\n # Load the file as DataFrame\n file_path = os.path.join(data_dir, dataset_name)\n df = load_file_to_df(file_path, sort=False)\n\n # Get the info of users who have more than 20 ratings on items\n grouped = df.groupby(_USER_COLUMN)\n df = grouped.filter(lambda x: len(x) >= _MIN_NUM_RATINGS)\n original_users = df[_USER_COLUMN].unique()\n original_items = df[_ITEM_COLUMN].unique()\n\n # Map the ids of user and item to 0 based index for following processing\n tf.logging.info(\"Generating user_map and item_map...\")\n user_map = {user: index for index, user in enumerate(original_users)}\n item_map = {item: index for index, item in enumerate(original_items)}\n\n df[_USER_COLUMN] = df[_USER_COLUMN].apply(lambda user: user_map[user])\n df[_ITEM_COLUMN] = df[_ITEM_COLUMN].apply(lambda item: item_map[item])\n assert df[_USER_COLUMN].max() == len(original_users) - 1\n assert df[_ITEM_COLUMN].max() == len(original_items) - 1\n\n # Generate data for train and test\n all_ratings, test_ratings, test_negs = generate_train_eval_data(\n df, original_users, original_items)\n\n # Serialize to csv file. 
Each csv file contains three columns\n # (user_id, item_id, interaction)\n # As there are only two fields (user_id, item_id) in all_ratings and\n # test_ratings, we need to add a fake rating to make three columns\n df_train_ratings = pd.DataFrame(all_ratings)\n df_train_ratings[\"fake_rating\"] = 1\n train_ratings_file = os.path.join(\n FLAGS.data_dir, dataset_name + \"-\" + constants.TRAIN_RATINGS_FILENAME)\n df_train_ratings.to_csv(\n train_ratings_file,\n index=False, header=False, sep=\"\\t\")\n tf.logging.info(\"Train ratings is {}\".format(train_ratings_file))\n\n df_test_ratings = pd.DataFrame(test_ratings)\n df_test_ratings[\"fake_rating\"] = 1\n test_ratings_file = os.path.join(\n FLAGS.data_dir, dataset_name + \"-\" + constants.TEST_RATINGS_FILENAME)\n df_test_ratings.to_csv(\n test_ratings_file,\n index=False, header=False, sep=\"\\t\")\n tf.logging.info(\"Test ratings is {}\".format(test_ratings_file))\n\n df_test_negs = pd.DataFrame(test_negs)\n test_negs_file = os.path.join(\n FLAGS.data_dir, dataset_name + \"-\" + constants.TEST_NEG_FILENAME)\n df_test_negs.to_csv(\n test_negs_file,\n index=False, header=False, sep=\"\\t\")\n tf.logging.info(\"Test negatives is {}\".format(test_negs_file))", "title": "" }, { "docid": "0ee8c28dc1b4a8ba57c298aef76fc54b", "score": "0.53265804", "text": "def read_data(self):\r\n # This matrix has the following shape: num_movies x num_users\r\n # The values stored in each row i and column j is the rating for\r\n # movie i by user j\r\n # A dictionary from spelling variant to actual movie title.\r\n self.titlesDict = {}\r\n # For example, this would map \"The Matrix\" and \"Matrix, The\" to the\r\n # same object\r\n self.titlesWithoutYear = set([])\r\n self.titlesWithYear = set([])\r\n self.titles, self.ratings = ratings()\r\n self.binarize()\r\n # Collect a list of all titles\r\n self.titlesPlain = [(title[0]) for title in self.titles]\r\n for title in self.titlesPlain:\r\n year = title[-7:]\r\n self.titlesDict[title] = title\r\n # For each title:\r\n # Get the set of all spelling variants using this method ([:-7]\r\n # ignores the year)\r\n handled = self.handleArticle(title[:-7])\r\n for variant in handled:\r\n # For each variant, add it to the titlesDict\r\n self.titlesDict[variant] = title\r\n self.titlesDict[variant+year] = title\r\n self.titlesWithoutYear = self.titlesWithoutYear.union(\r\n handled) # Keep track of all the existing titles\r\n # print self.titlesWithoutYear.union(handled)\r\n # print \"\\n\\n\"\r\n self.titlesWithYear = self.titlesWithYear.union(set(\r\n [(handledTitle, title[-5:-1]) for handledTitle in handled])) # Keep track with years\r\n reader = csv.reader(open('data/sentiment.txt', 'rb'))\r\n self.sentiment = dict(reader)", "title": "" }, { "docid": "fd56de01f7965c652cac7f665b88c8c0", "score": "0.53252447", "text": "def import_stock_records(filename):\n account_bank = Account.query.filter(Account.name == \"신한 입출금\").first()\n account_stock = Account.query.filter(Account.name == \"신한 주식\").first()\n with open(filename) as fin:\n for parsed in parse_stock_records(fin):\n insert_stock_record(parsed, account_stock, account_bank)", "title": "" }, { "docid": "aa4124bb9b3c60d1b49e418ae3430ded", "score": "0.53231597", "text": "def read_file_build_database(table,errortable,myfile):\n Query=\"DROP TABLE IF EXISTS \"+table\n print(Query)\n cursor.execute(Query)\n Query=\"CREATE TABLE IF NOT EXISTS \"+table+\" (Id INTEGER PRIMARY KEY, Date TEXT, StockTicker TEXT, Rank INTEGER)\"\n print(Query)\n cursor.execute(Query)\n\n 
Query=\"DROP TABLE IF EXISTS \"+errortable\n print(Query)\n cursor.execute(Query)\n Query=\"CREATE TABLE IF NOT EXISTS \"+errortable+\" (Id INTEGER PRIMARY KEY, Date TEXT, StockTicker TEXT, Rank INTEGER)\"\n print(Query)\n cursor.execute(Query)\n\n counter=0\n countermod=0\n for line in myfile:\n \"\"\" if this is a blank line, reset our counter \"\"\"\n if not line.strip():\n counter=-1\n countermod=3\n\n \"\"\" if processing a comment line, reset \"\"\"\n if line[0]=='#': \n counter=-1\n countermod=3\n\n \"\"\" countermod === 0 on the Date line, this is used to delineate the beginning of a record \"\"\"\n if countermod == 0:\n Date=line[:-1].strip() #chop off the newline at the end\n\n \"\"\"this comment must be indented otherwise elif barfs\"\"\"\n \"\"\"the next line *MUST* be a data line of tickers. I should perform an error check in case \"\"\"\n elif countermod == 1:\n String=line[:-1].split()#split variable on delimiter, removing newline at the end\n counter2=0\n for each in String: \n Ticker=String[counter2].strip().upper()\n Rank=counter2+1\n #here we ignore the DotW check since they sorta have moved and I imagine that if they moved in the future it'd break a whole bunch. in the end I don't think I really care if I have the date 100% right.\n query='INSERT INTO '+table+' VALUES(null,\"'+Date+'\",\"'+String[counter2].strip().upper()+'\",'+str(counter2+1)+')' #needed to quote the string for entry, also needed to quote the date or it gets chopped when its sqlite enters and validates/converts it.\n cursor.execute(query)\n\n counter2+=1\n counter+=1\n countermod=counter%2", "title": "" }, { "docid": "5dd0701124ad47f429bb79086033a3e2", "score": "0.5315432", "text": "def run_csv_mstatus():\n with open('CSVs/monster_status.csv', 'rb') as ifile:\n data = csv.reader(ifile)\n for row in data:\n monster_status_save(row)", "title": "" }, { "docid": "5ebfe92f88f967a94c4bbdc591a14100", "score": "0.5303378", "text": "def run_csv():\n log.info(\"\\nStep 1: define baseball team data\")\n baseball_teams = [\n ('Mariners', 'Seattle', 'blue/teal'),\n ('Angels', 'Anaheim', 'red'),\n ('Athletics', 'Oakland', 'green/yellow'),\n ('Rangers', 'Texas', 'blue/red'),\n ('Giants', 'San Francisco', 'orange/black'),\n ('Dodgers', 'Los Angeles', 'blue'),\n ('Padres', 'San Diego', 'brown/yellow'),\n ('Diamondbacks', 'Arizona', 'red/teal')\n ]\n log.info(\"Step 2: Write csv file\")\n with open('../data/baseball_teams.csv', 'w') as bball_teams:\n bball_writer = csv.writer(bball_teams)\n bball_writer.writerow(baseball_teams)\n\n log.info(\"Step 9: Read csv file back\")\n with open('../data/baseball_teams.csv', 'r') as bball_teams:\n bball_reader = csv.reader(bball_teams, delimiter=',', quotechar='\"')\n for row in bball_reader:\n pprint.pprint(row)", "title": "" }, { "docid": "46cd3f991f163c39ef3f1b909ee496cc", "score": "0.5288448", "text": "def run_skill_tree_csv():\n with open('CSVs/skill_tree.csv', 'rb') as ifile:\n data = csv.reader(ifile)\n for row in data:\n skill_tree_save(row)", "title": "" }, { "docid": "c908e1e2307da614074ae9b96f2ab417", "score": "0.52770483", "text": "def parse_results(fname):\n for l in open(fname,\"r\"):\n fields=l.split()\n query_name=fields[0]\n ranks=[int(rank) for rank in fields[1::2]]\n yield (query_name,zip(ranks,fields[2::2]))", "title": "" }, { "docid": "8bde0761ad396b5de0a48d52357ef71a", "score": "0.52713966", "text": "def calculate_karma(subreddit, list_of_scores, filename):\n\n csvname = \"{} {}.csv\".format(subreddit, filename)\n\n with open(csvname, \"ab\") as csvfile:\n 
KarmaWriter = csv.writer(csvfile)\n for score in list_of_scores:\n KarmaWriter.writerow([score])", "title": "" }, { "docid": "9b512ee07785a94075fcac3780129c8d", "score": "0.5267227", "text": "def run_skill_csv():\n with open(\"CSVs/skills.csv\", 'rb') as ifile:\n data = csv.reader(ifile)\n for row in data:\n skill_save(row)", "title": "" }, { "docid": "5adc9d35e0e1fa9272187c8458c7c7f6", "score": "0.52624834", "text": "def process(lines, filename):\n for line in lines:\n if line.strip() is not \"\":\n try:\n lon, lat, ew, ns = line.strip().split()\n except ValueError:\n continue\n\n # we are cool with NaN as a value in Postgres.\n data = { 'lat': lat, 'lon': lon, 'ew': ew, 'ns': ns }\n\n data.update(parse_filename(filename))\n write_sql(data)\n print \"finished %s\" % filename", "title": "" }, { "docid": "051580da89765146159a6f359094824b", "score": "0.52596235", "text": "def save_scrape_to_db(conn, results):\n for result in results:\n insert_record_to_db(conn, result)", "title": "" }, { "docid": "281fd50efb452a0198ee0d1cb5528a06", "score": "0.52589273", "text": "def ProcessCSV(FileName):\n Train_URLs = []\n for line in open(FileName):\n line = line.split(',')\n line[1] = int(line[1])\n Train_URLs.append(line)\n return Train_URLs", "title": "" }, { "docid": "1d916dfde981afa05ffed63fe929b2ef", "score": "0.5258085", "text": "def load_ratings(rating_filename):\n print(\"Ratings\")\n\n for i, row in enumerate(open(rating_filename)):\n row = row.rstrip()\n\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n rating = Rating(user_id=user_id,\n movie_id=movie_id,\n score=score)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(rating)\n # provide some sense of progress\n if i % 1000 == 0:\n print(i)\n\n # An optimization: if we commit after every add, the database\n # will do a lot of work committing each record. However, if we\n # wait until the end, on computers with smaller amounts of\n # memory, it might thrash around. 
By committing every 1,000th\n # add, we'll strike a good balance.\n\n db.session.commit()\n\n # Once we're done, we should commit our work\n db.session.commit()", "title": "" }, { "docid": "a22acbcd79c28ed5e4d82cdbecdb1d4e", "score": "0.5246554", "text": "def process_csv(self):\n self._clean()\n self._write_cleaned_csv()\n self._write_uncleaned_csv()\n self._move_original_file()", "title": "" }, { "docid": "59cddb31cc88ec23cb6f9406c7e6be99", "score": "0.5245768", "text": "def read_data(filename, rank, size, sub_dic, db):\n\n with open(filename) as f:\n list = {}\n next(f)\n i = 0\n line = f.readline()\n while line:\n line_validate = line.strip('\\n')\n try:\n if line_validate[-1] == \",\":\n line_validate = line_validate[:-1]\n elif line_validate == \"]}\":\n break\n except IndexError:\n # invalid line\n break\n else:\n if i % size == rank:\n data = json.loads(line_validate)\n processor = tweet_processer.Proccesser()\n text = data[\"doc\"][\"text\"]\n hashtags = data[\"doc\"][\"entities\"][\"hashtags\"]\n try:\n dic_tweet = processor.get_formated_tweet(data[\"doc\"], text, hashtags, sub_dic)\n except:\n pass\n # Save to db\n try:\n db.save(dic_tweet)\n print(\"success\")\n except:\n print(\"Duplicated tweet or system error\")\n pass\n i += 1\n line = f.readline()", "title": "" }, { "docid": "7912d5d56f488d3c3f228fc7e0ed03b5", "score": "0.5237944", "text": "def dataMalePlayersAll():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataAll = Player_Data_All.Player_Data_All(row['MALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n malePlayerNamesDatatableAll.append(playerdataAll)", "title": "" }, { "docid": "22160d292da709d69e6bcd99e0d5c61a", "score": "0.52379423", "text": "def dataFemalePlayersTaw11():#initialize female data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTaw11 = Player_Data_Taw11.Player_Data_Taw11(row['FEMALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n femalePlayerNamesDatatableTaw11.append(playerdataTaw11)", "title": "" }, { "docid": "43f385e6d24ae42674a096cc04ca3d98", "score": "0.5237179", "text": "def load_scores():\n\n # Loads the 50 busiest airports from a file, with both the code and their city/state\n load_airports_from_file()\n\n # Calculate the FlightScores and other stats for each airport\n airports_and_scores = calculate_stats()\n\n # Seed the table\n for airport in airports_and_scores.keys():\n if all_airports.get(airport):\n city = all_airports.get(airport)\n else:\n city = \"\"\n\n score = Score(airport_code=airport,\n city=city,\n score=airports_and_scores[airport][0],\n volume=airports_and_scores[airport][1],\n pct_delay=airports_and_scores[airport][2],\n avg_delay=airports_and_scores[airport][3])\n\n # Add each airport to the session\n db.session.add(score)\n\n # Once we're done, commit all the stats to the database\n db.session.commit()", "title": "" }, { "docid": "de960ebd1be84753ca7093ec3b79e1dc", "score": "0.52291244", "text": "def femalePlayers(): #initialize female players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n malePlayerNames = []\n for row in reader:\n malePlayerNames.append(row['FEMALE PLAYERS'])\n player = Player_Ranking_PrizeMoney.Player_Ranking_PrizeMoney(row['FEMALE PLAYERS'],0,0,0) #parameters - player name, ranking, prizeMoney\n 
femaleLeaderBoard.append(player)", "title": "" }, { "docid": "224d8291f3ed1a70aabaa24ca7bbfec1", "score": "0.52280116", "text": "def import_imdb_ratings():\n url = 'https://datasets.imdbws.com/title.ratings.tsv.gz'\n response = requests.get(url)\n layer = get_channel_layer()\n __send_data_to_channel(layer=layer, message=f\"Downloading file: {url}\")\n with open('title.ratings.tsv.gz', 'wb') as f:\n f.write(response.content)\n if response.status_code == 200:\n contents = __unzip_file('title.ratings.tsv.gz')\n reader = csv.reader(contents, delimiter='\\t')\n all_imdb_ids = Movie.objects.filter(fetched=True) \\\n .exclude(imdb_id__isnull=True)\\\n .exclude(imdb_id__exact='')\\\n .all()\\\n .values_list('imdb_id', flat=True)\n\n imdb_ids_length = len(all_imdb_ids)\n count = 0\n for chunk in __chunks(list(reader), 100):\n movies = dict()\n for movie in chunk:\n if movie[0] in all_imdb_ids:\n movies[movie[0]] = movie\n data = Movie.objects.filter(imdb_id__in=movies.keys())\n with transaction.atomic():\n for db_row in data:\n data = movies[db_row.imdb_id]\n db_row.imdb_vote_average = data[1]\n db_row.imdb_vote_count = data[2]\n db_row.weighted_rating = db_row.calculate_weighted_rating()\n db_row.save()\n count += len(movies.keys())\n __send_data_to_channel(layer=layer, message=f\"Processed {len(movies.keys())} ratings out of {count}/{imdb_ids_length}\")\n else:\n __send_data_to_channel(layer=layer, message=f\"Exception: {response.status_code} - {response.content}\")", "title": "" }, { "docid": "a685eb21bcb310d3d6275b465a889022", "score": "0.5224725", "text": "def test_ModelJudge_csv(self):\n judge = ModelJudge(EvalTest.list_of_names,\n EvalTest.list_of_pickles,\n EvalTest.pt_analogy_path)\n judge.compare()\n df = pd.read_csv(judge.filename_csv)\n best_df = df.nlargest(1, 'Score*Preci')\n best_model_from_csv = list(best_df[\"Name\"])[0]\n self.assertEqual(EvalTest.best_model,\n best_model_from_csv,\n msg=\"\\ndf = \\n {}\".format(best_df.to_string()))", "title": "" }, { "docid": "943297a2d0f0692161c914332e7d49b7", "score": "0.5224428", "text": "def parse_csv(self):\n\n # TODO Be more precise. 
Use the class member, consume it line by line.\n # Maybe via a generator.\n splited = self.raw_csv.split('\\n', 3)\n\n self.parse_header_line1(splited[0])\n self.parse_header_line2(splited[1])\n self.parse_header_line3(splited[2])\n\n # TODO get the stock object\n stock = None\n\n # Now the rest\n for row in splited[3].splitlines():\n #print row\n\n # Remove empty lines\n if row == '':\n continue\n\n self.consume_day_notation(row)", "title": "" }, { "docid": "b545b278813035f92721eae46356d5f0", "score": "0.5217548", "text": "def dataFemalePlayersTbs2():#initialize female data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTbs2 = Player_Data_Tbs2.Player_Data_Tbs2(row['FEMALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n femalePlayerNamesDatatableTbs2.append(playerdataTbs2)", "title": "" }, { "docid": "8406c77bd11fa540d01317ed931d4d24", "score": "0.52119046", "text": "def process_ratings_file(self, s3_source):\n df = self._read_df_csv(s3_source)\n\n # Create final dataframes for writing\n final_df = df.select(\n col(\"userId\").cast(IntegerType()).alias(\"user_id\"),\n col(\"movieId\").cast(IntegerType()).alias(\"movie_id\"),\n col(\"rating\").cast(FloatType()).alias(\"rating\"),\n col(\"timestamp\").cast(TimestampType()).alias(\"timestamp\"))\\\n .repartition(1)\n\n # Is the source file ratings.csv or ratings_small.csv?\n if s3_source.endswith(\"ratings.csv\"):\n name = \"ratings\"\n else:\n name = \"ratings_small\"\n\n # Write dataframes to s3 destinations\n self._write_df_json(final_df,\n self.base_s3_destination.format(name=name))", "title": "" }, { "docid": "df741b0d09608e99cdefac0c6eec29ac", "score": "0.52070457", "text": "def import_rentals(input_data):\n #start = time.time()\n # mydb = CLIENT.storedata\n # myrental = mydb.rental\n # rental_count_before = myrental.count_documents({})\n error_count = 0\n insert_count = 0\n LOGGER.info('Starting rental import')\n with open(input_data, 'r', newline='') as p_file:\n file_list = csv.DictReader(p_file, delimiter=',')\n for onerent in file_list:\n try:\n Rental(onerent['rental_id'], onerent['user_id'], onerent['product_id']).save(full_clean=True,\n force_insert=True)\n insert_count += 1\n except ValidationError as valerror:\n LOGGER.exception(\"Error importing data from csv: %s \", valerror.message)\n error_count += 1\n except OperationError as operror:\n LOGGER.exception(\"Error importing data from csv: %s \", operror)\n error_count += 1\n # rental_count_after = myrental.count_documents({})\n # end = time.time()\n # elasped_time = end - start\n # LOGGER.info(\"Time taken to execute import_rental %s\", elasped_time)\n return insert_count, error_count", "title": "" }, { "docid": "03b605fbb633ffd5c6fd321dc3cf0317", "score": "0.5204527", "text": "def run_quest_csv():\n with open('CSVs/quests.csv', 'rb') as ifile:\n data = csv.reader(ifile)\n for row in data:\n quests_save(row)", "title": "" }, { "docid": "286be5c05d4196d0ea1b78f28813e7ba", "score": "0.5204322", "text": "def dataMalePlayersTbs2():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTbs2 = Player_Data_Tbs2.Player_Data_Tbs2(row['MALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n malePlayerNamesDatatableTbs2.append(playerdataTbs2)", "title": "" }, { "docid": "242f264033056e7c8cee756c65b57678", "score": "0.51871496", "text": "def 
insert_database():\n global lines_read, valid_lines, insertions\n file_name = CSV_PATH + FILE_NAME + EXTENSION\n\n # get connection and cursor\n conn = get_conn()\n cur = conn.cursor()\n\n print(\"insertions: %d\" % (insertions))\n\n # read line by line, parsing the results and putting on database\n with open(file_name, newline = '', encoding = ENCODING) as fp:\n reader = csv.reader(fp)\n\n # skip first line\n next(reader, None)\n\n # iterate through the rows, inserting in the table\n for row in reader:\n parse_insert(row, cur, conn)\n\n \n # close connection\n close_conn(conn)\n\n print(\"insertions: %d\" % (insertions))", "title": "" }, { "docid": "7aee9ae361fe79bcf1e7b360e92bb830", "score": "0.5185915", "text": "def dataFemalePlayersAll():#initialize male data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataLadiesAll = Player_Data_All.Player_Data_All(row['FEMALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n femalePlayerNamesDatatableAll.append(playerdataLadiesAll)", "title": "" }, { "docid": "bf11efaad10b4f3d6999243b10464b12", "score": "0.51738334", "text": "def leaderboard(self):\n records = record.RecordList()\n self.read_data()\n for item in self._save_info:\n for index in item:\n records.append(record.Record(index[0],index[1],index[2]))\n records.sort()\n for item in records.get_list():\n self.load_records(str(item.write_record()))", "title": "" }, { "docid": "60141ffb4ebf0912607b800579703fc7", "score": "0.5171244", "text": "def splitrankings(\n src=os.path.join(SCRAPED_DATA_DIR, \"scraped\", \"bgg_rankings_GameItem.jl\"),\n dst_dir=os.path.join(SCRAPED_DATA_DIR, \"rankings\", \"bgg\", \"bgg\"),\n dst_file=f\"{DATE_FORMAT_COMPACT}.csv\",\n overwrite=False,\n):\n django.core.management.call_command(\n \"splitrankings\",\n src,\n out_dir=dst_dir,\n out_file=dst_file,\n overwrite=parse_bool(overwrite),\n )", "title": "" }, { "docid": "17c061fc4c0541bb857645f15c13bf92", "score": "0.51627624", "text": "def add_to_ranking():\n global scores_data\n for position, record in enumerate(scores_data):\n if score > record[\"score\"]:\n new_record = {\"name\": player_name, \"score\": score}\n scores_data.insert(position, new_record)\n break # prevents infinite loop\n scores_data = scores_data[0:3]", "title": "" }, { "docid": "fa06273d6107e2dd791b3e8c90e5cef7", "score": "0.51617616", "text": "def process_guppy_csv(guppy_csv):\n\n # modify ReadCountsDict to include this sample's sample_name and machine_run\n guppy_csv, sample_name, machine_run = process_sample_run_handle(guppy_csv)\n add_guppy_csv_to_dict(sample_name, machine_run)\n\n # open up the guppy csv\n csv_file = open(guppy_csv, 'r')\n csv_file.readline() # skip first line\n\n # Go through each line of the CSV file and collect edge_num, tax_id\n for line in csv_file:\n line_elts = line.split(\",\")\n edge_num = line_elts[3]\n tax_id = line_elts[10]\n\n # Add these values to dictionary if they're not already there and start counting:\n if edge_num not in ReadCountsDict[sample_name][machine_run]:\n ReadCountsDict[sample_name][machine_run][edge_num] = {}\n ReadCountsDict[sample_name][machine_run][edge_num]['tax_id'] = tax_id\n ReadCountsDict[sample_name][machine_run][edge_num]['raw_count'] = 1\n\n # and add it to the set of edge_num:\n edge_num_set.add(edge_num)\n\n # And if the edge_num is already there, increment the counter\n elif edge_num in ReadCountsDict[sample_name][machine_run]:\n 
ReadCountsDict[sample_name][machine_run][edge_num]['raw_count'] += 1\n\n # When we've gone through all the lines, we can multiply the raw_count by the norm_factor\n norm_factor = get_norm_factor(sample_name, machine_run)\n\n for edge_num in ReadCountsDict[sample_name][machine_run]:\n ReadCountsDict[sample_name][machine_run][edge_num]['norm_count'] = int(ReadCountsDict[sample_name][machine_run][edge_num]['raw_count']) * float(norm_factor)", "title": "" }, { "docid": "ae1cf4a8264c42b00bb685186c9e36eb", "score": "0.5158056", "text": "def main():\n conn = db_util.create_connection(sys.argv[1])\n with open(\"csv/vivo_author_nnumber.csv\", \"rb\") as authors_file:\n reader = csv.reader(authors_file)\n for row in reader:\n insert_person(conn, row)\n conn.commit()", "title": "" }, { "docid": "c832eb2faae724ef84af66932dc22a3b", "score": "0.5151109", "text": "def process_song_file(cur, filepath):\n\n # open song file\n df = pd.read_json(filepath, lines=True)\n #df2 = pd.read_csv('data/SongCSV.csv')\n\n # insert song record\n\n #song_df = df2.copy()[['SongID', 'Title', 'ArtistID', 'Year', 'Duration']]\n #song_df = song_df.applymap(lambda x: x[1:].strip(\"'\").strip('\"\"\"') if type(x) == str else x)\n\n song_data = df.copy()[['song_id', 'title', 'artist_id', 'year', 'duration']]\n for i, row in song_data.iterrows():\n\n try:\n cur.execute(song_table_insert, list(row))\n\n except psycopg2.Error as e:\n print(\"Error: data not inserted\")\n print(e)\n\n # for i, row in song_df.iterrows():\n #\n # try:\n # cur.execute(song_table_insert, list(row))\n #\n # except psycopg2.Error as e:\n # print(\"Error: data not inserted\")\n # print(e)\n\n # # insert artist record\n artist_data = df.copy()[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']]\n artist_data.columns = ['artist_id', 'artist_name', 'location', 'latitude', 'longitude']\n\n # artist_df = df2.copy()[['ArtistID', 'ArtistName', 'ArtistLocation', 'ArtistLatitude', 'ArtistLongitude']]\n # artist_df = artist_df.applymap(lambda x: x[1:].strip(\"'\").strip('\"\"\"') if type(x) == str else x)\n\n for i, row in artist_data.iterrows():\n\n try:\n cur.execute(artist_table_insert, list(row))\n\n except psycopg2.Error as e:\n print(\"Error: data not inserted\")\n print(e)\n\n # for i, row in artist_df.iterrows():\n #\n # try:\n # cur.execute(artist_table_insert, list(row))\n #\n # except psycopg2.Error as e:\n # print(\"Error: data not inserted\")\n # print(e)", "title": "" }, { "docid": "9ab378d2cb2ac458d9cce0522ee9277a", "score": "0.51466614", "text": "def run_csv():\n log.info(\"\\n\\n====\")\n pokemon = [\n ('Charizard', 'dragon', 'red', 117.45, 123.04),\n ('Pikachu', 'rodent', 'yellow', 22.01, 1.32),\n ('Blastoise', 'dinosaur', 'blue', 45.99, 98.54),\n ('Mew', 'cat', 'white', 77.0, 24.2),\n ('Mewtwo', 'cat', 'white', 12.5, 345.4),\n ('Muk', 'slime', 'purple', 6.25, 21.6),\n ('Zapados', 'bird', 'yellow', 0.1, 54.37),\n ('Chansey', 'thing', 'pink', 89.71, 3.23),\n ('Venusaur', 'dinosaur', 'green', 89.71, 7.43),\n ('Ditto', 'thing', 'pink', 89.71, 54.58)\n ]\n\n log.info(\"Writing to csv file\")\n with open('../data/pokemon_data.csv', 'w') as poke:\n poke_writer = csv.writer(poke)\n poke_writer.writerow(pokemon)\n\n log.info(\"Read csv file back\")\n with open('../data/pokemon_data.csv', 'r') as poke:\n poke_reader = csv.reader(poke, delimiter=',', quotechar='\"')\n for row in poke_reader:\n pprint.pprint(row)", "title": "" }, { "docid": "59324b3a46e23fed61ddf05360080d43", "score": "0.5145926", "text": "def 
insert_items(csv_file):\n\n with open(csv_file, newline=\"\") as f:\n reader = DictReader(f)\n for row in reader:\n item = models.Item(\n name=row[\"name\"],\n is_rainy=eval(row[\"is_rainy\"]),\n is_winter=eval(row[\"is_winter\"]),\n is_optional=eval(row[\"is_optional\"]),\n category=row[\"category\"],\n )\n\n models.db.session.add(item)\n\n models.db.session.commit()", "title": "" }, { "docid": "4abc93c5802b5e15c2373640f01c4fb1", "score": "0.51454234", "text": "def run_csv_file_into_sql(registry_df, stations):\n # setting values to create the inserts\n table_name = 'Registry'\n column_names = 'no2_Val, o3_Val, pm_val, date_generate, station_ID'\n values = '%s,%s,%s,%s,%s'\n\n for i,row in registry_df.iterrows():\n station = stations.query(f'name == \\'{row[\"Station\"].strip()}\\'')\n if (station.size == 0):\n print('no station found with name ',row[\"Station\"])\n # print('\\n\\nWARNING - row not saved \\n',row)\n continue\n station_ID = recover_integer(station.station_ID)\n if (station_ID == -1):\n print('no station saved with name ',row[\"Station\"])\n continue\n no2_value = recover_integer(row['NO2 Value'])\n o3_value = recover_integer(row['O3 Value'])\n pm10_value = recover_integer(row['PM10 Value'])\n date_generated = recover_date(str(row.Generated))\n row_values = [no2_value, o3_value, pm10_value, date_generated, int(station_ID)]\n db_conn.insert_values(table_name, column_names, values, row_values)", "title": "" }, { "docid": "074f708e1094d8fe810a87b283bc172f", "score": "0.51347876", "text": "def import_data(directory_name, product_file, customer_file, rental_file):\n product_count, customer_count, rental_count = 0, 0, 0\n product_error, customer_error, rental_error = 0, 0, 0\n\n product_file_path = path.join(directory_name, product_file)\n customer_file_path = path.join(directory_name, customer_file)\n rental_file_path = path.join(directory_name, rental_file)\n\n mongo = MongoDBConnection()\n with mongo:\n database = mongo.connection.media\n\n products = database[\"products\"]\n customers = database[\"customers\"]\n rentals = database[\"rentals\"]\n\n try:\n with open(product_file_path, encoding=\"utf-8-sig\") as csv_file:\n product_reader = csv.reader(csv_file)\n for row in product_reader:\n product_info = {\"product_id\": row[0],\n \"description\": row[1],\n \"product_type\": row[2],\n \"quantity_available\": row[3]}\n products.insert_one(product_info)\n product_count += 1\n except:\n product_error += 1\n\n try:\n with open(customer_file_path, encoding=\"utf-8-sig\") as csv_file:\n customer_reader = csv.reader(csv_file)\n for row in customer_reader:\n customer_info = {\"customer_id\": row[0],\n \"name\": row[1],\n \"address\": row[2],\n \"phone_number\": row[3],\n \"email\": row[4]}\n customers.insert_one(customer_info)\n customer_count += 1\n except:\n customer_error += 1\n\n try:\n with open(rental_file_path, encoding=\"utf-8-sig\") as csv_file:\n rental_reader = csv.reader(csv_file)\n for row in rental_reader:\n rental_info = {\"rental_id\": row[0],\n \"product_id\": row[1],\n \"customer_id\": row[2]}\n rentals.insert_one(rental_info)\n rental_count += 1\n except:\n rental_error += 1\n\n record_count = (product_count, customer_count, rental_count)\n fail_count = (product_error, customer_error, rental_error)\n total_count = record_count, fail_count\n return total_count", "title": "" }, { "docid": "9f9e9da851d847555905609f4fe5f66d", "score": "0.5124486", "text": "def malePlayers():#initialize male players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = 
csv.DictReader(csvfile)\n malePlayerNames = []\n for row in reader:\n malePlayerNames.append(row['MALE PLAYERS'])\n player = Player_Ranking_PrizeMoney.Player_Ranking_PrizeMoney(row['MALE PLAYERS'],0,0,0) #parameters - player name, ranking, prizeMoney\n maleLeaderBoard.append(player)", "title": "" }, { "docid": "1c130f7740d57a2a1d02ffd416ed5b90", "score": "0.5123486", "text": "def dataFemalePlayersTac1():#initialize female data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTac1 = Player_Data_Tac1.Player_Data_Tac1(row['FEMALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n femalePlayerNamesDatatableTac1.append(playerdataTac1)", "title": "" }, { "docid": "fa50fb248551d524cf3a7184234c7acc", "score": "0.512245", "text": "def dataMalePlayersTaw11():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTaw11 = Player_Data_Taw11.Player_Data_Taw11(row['MALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n malePlayerNamesDatatableTaw11.append(playerdataTaw11)", "title": "" }, { "docid": "ade762d5ea0a3cab174d3254fd40d494", "score": "0.5116837", "text": "def process_csv(self, questions=True):\n in_fn = self.questions_fn if questions else self.answers_fn\n cur_time = time.time()\n with open(in_fn, 'rb') as infile:\n reader = csv.reader(infile)\n count = 0\n for line in reader:\n count += 1\n if count == 1:\n continue\n if count % 10000 == 0:\n print count\n print 'time to process 10000: %.2f' % (time.time()-cur_time)\n cur_time = time.time()\n if questions:\n discussion_id, _, _, _, _, body = line\n else:\n _, _, _, discussion_id, _, body = line\n discussion_id = int(discussion_id)\n self.discussions.add(discussion_id)\n self.parse_terms(body, discussion_id)\n print 'read %d lines' % count", "title": "" }, { "docid": "f241f7c5921299c2d5d0550010e45d7f", "score": "0.511361", "text": "def create_ratings_DB(ratings_file):\n \n scores_dict = {}\n #open the file\n with open(ratings_file, 'r', encoding = 'utf-8') as csvfile:\n reader = csv.reader(csvfile)\n reader.__next__()\n for row in reader:\n scores_dict[row[0]] = [row[1], row[2]]\n #return the value \n return scores_dict", "title": "" }, { "docid": "60d6416a06e4db0d0cf26b8460c886e3", "score": "0.510527", "text": "def write_data_to_postgres():\n\n print 'Writing players data to database...'\n write_players_to_database()\n print 'Writing season totals to database...'\n write_season_totals()\n\n print 'Writing game logs to database...'\n seasons = nba_seasons(START_YEAR, END_YEAR)\n season_types = ['Regular Season', 'Playoffs']\n\n for i, s in enumerate(seasons):\n player_ids = player_ids_for_season(s)\n for st in season_types:\n df = all_game_logs(player_ids, s, st)\n write_game_logs_to_database(df)", "title": "" }, { "docid": "903a247a96ee3ebff4cdc702cc71a3fe", "score": "0.50740224", "text": "def restoreCSV():\r\n CSV_CURS.buildFakeFileNames()\r\n print 'loading CSV files into the database'\r\n executeCustomQueries('BEFORE_CSV_LOAD')\r\n loadCSVFiles()\r\n t('loadCSVFiles()')\r\n executeCustomQueries('BEFORE_RESTORE')\r\n t('TOTAL TIME TO LOAD CSV FILES', sinceBegin=True)\r\n buildIndexesAndFK()\r\n restoreAll_imdbIDs()\r\n executeCustomQueries('END')\r\n t('FINAL', sinceBegin=True)", "title": "" }, { "docid": "ce1cde25e93d7eb282247ee066074e3b", "score": "0.5072276", "text": "def batchImport(csvfile, ps):\n result = []\n 
with open(here+'/'+csvfile, 'r') as f:\n csv_reader = csv.reader(f, delimiter=';')\n for row in csv_reader:\n data = Data(row[0], ps)\n for p in range(ps):\n # Some awful string manipulation to parse numbers\n data.insertLocation(float(row[2*p+1]), [int(float(i)) for i in row[2*p+2].strip(' []').split(',')])\n result.append(data)\n return result", "title": "" }, { "docid": "2e655e737398814d5bddde5f53ced76c", "score": "0.5071505", "text": "def extreu_ranking(font, sortida):\r\n import csv\r\n from urllib.request import urlopen\r\n\r\n # Extreu els id de BGG del fitxer games.csv\r\n with open(font) as csv_file, open(sortida, 'w', newline='') as r:\r\n csv_reader = csv.reader(csv_file, delimiter=';')\r\n output = []\r\n urlapi = []\r\n headers = next(csv_reader)\r\n \r\n throttle = Throttle.Throttle(2)\r\n \r\n for row in csv_reader:\r\n \r\n print('Obtain ranquing for ', row[0])\r\n\r\n try:\r\n ranking = None\r\n if row[5].strip() != '':\r\n # per cada un dels id obtinguts, crea una url per la api\r\n urlapi = 'https://boardgamegeek.com/xmlapi2/thing?stats=1&id='+row[5]\r\n # Fer una crida a la API per cada url \r\n throttle.wait(urlapi)\r\n with urlopen(urlapi) as u: \r\n data=[x.decode().strip() for x in u.readlines()]\r\n # afegir [id, ranking] la llista 'rows'\r\n for linia in data:\r\n if \"<average value\" in linia:\r\n ranking=linia.split('\"')[1]\r\n ranking = ranking.replace('.', ',')\r\n break\r\n output.append(list(row) + [ranking])\r\n except:\r\n print('Exception obtaining rating for id ', row[0])\r\n\r\n # escriu un fitxer csv amb els id i el ranking corresponent\r\n with open(sortida, 'w') as r:\r\n write = csv.writer(r, delimiter=';', quotechar='\\'', quoting=csv.QUOTE_MINIMAL)\r\n write.writerow(list(headers) + ['ranking'])\r\n write.writerows(output)", "title": "" }, { "docid": "49f7c9d9ec3803866a63fc5613e64fa9", "score": "0.5069856", "text": "def save_recommender_ranking(recommender, dst, similarity_model=False):\n\n LOGGER.info(\n \"Saving <%s> ranking to <%s>...\",\n recommender.similarity_model if similarity_model else recommender.model,\n dst,\n )\n\n recommendations = recommender.recommend(users=(), similarity_model=similarity_model)\n if \"name\" in recommendations.column_names():\n recommendations.remove_column(\"name\", inplace=True)\n\n if similarity_model:\n recommendations = recommendations[recommendations[\"score\"] > 0]\n\n recommendations.export_csv(str(dst))", "title": "" }, { "docid": "570d98ef0ec7b33cd13ffc8b15f03625", "score": "0.50585", "text": "def seed_reviewers_table(s):\n\n t = time() \n try:\n file_name = \"transformed_data/reviewers_info_clean.csv\" #sample CSV file used: http://www.google.com/finance/historical?q=NYSE%3AT&ei=W4ikVam8LYWjmAGjhoHACw&output=csv\n fields = ['reviewer_name','number_Zurich_reviews']\n data=load_transformed_data(file_name,fields) \n indexes=[j for j in range(0,len(data),100)]\n indexes.append(len(data))\n indexes_slices=[[indexes[j],indexes[j+1]] for j in range(len(indexes)-1)]\n print('Load reviewers database...')\n for j in range(len(indexes_slices)):\n data_subset=data[indexes_slices[j][0]:indexes_slices[j][1]]\n for i in range(len(data_subset)):\n record = Reviewer(\n reviewer_name= data_subset[i]['reviewer_name'],\n reviewer_number_Zurich_reviews=data_subset[i]['number_Zurich_reviews']\n )\n s.add(record) #Add all the records\n s.commit() #Attempt to commit all the records\n print('...Done.')\n except:\n s.rollback() #Rollback the changes on error\n finally:\n s.close() #Close the connection\n print(\"Time elapsed: 
\" + str(time() - t) + \" s.\") #0.091s", "title": "" }, { "docid": "5b8cb3b241f7303ae07624129cb217c8", "score": "0.50572544", "text": "def import_flights():\n f = open(\"data/flights.csv\")\n reader = csv.reader(f)\n for origin, destination, duration in reader:\n flight = Flight(origin=origin, destination=destination,\n duration=duration)\n db.session.add(flight)\n db.session.commit()", "title": "" }, { "docid": "6c2834337d3df4900c33ea5e963c5bdc", "score": "0.505509", "text": "def load_csv_into_db(credentials, file_path, data_measure_type, patient_id):\n with open(file_path) as f:\n csv_data = csv.reader(f)\n data = [(int(row[0]) // 1000, float(row[1])) for row in csv_data]\n\n connection = pymysql.connect(\n host=credentials.host,\n user=credentials.user,\n password=credentials.password,\n db=credentials.db_name,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor\n )\n\n try:\n with connection.cursor() as cursor:\n if data_measure_type == DataMeasureType.HEART_RATE:\n sql_statement = \"INSERT IGNORE INTO heart_rate (unix_timestamp, heart_rate_measure, rr, \" \\\n \"patient_id) VALUES\"\n for index, row in enumerate(data):\n timestamp, bpm = row\n rr = 60 * 1000 / bpm\n sql_statement += \"{} ({}, {}, {}, {})\".format(\n \",\" if index > 0 else \"\",\n timestamp,\n bpm,\n rr,\n patient_id\n )\n cursor.execute(sql_statement)\n connection.commit()\n elif data_measure_type == DataMeasureType.ACTIVITY_TYPE:\n sql_statement = \"INSERT IGNORE INTO activity_type (unix_timestamp, activity_type, patient_id) VALUES\"\n for index, row in enumerate(data):\n timestamp = row[0]\n activity_type = int(row[1])\n sql_statement += \"{} ({}, {}, {})\".format(\n \",\" if index > 0 else \"\",\n timestamp,\n activity_type,\n patient_id\n )\n cursor.execute(sql_statement)\n connection.commit()\n elif data_measure_type == DataMeasureType.STEPS:\n sql_statement = \"INSERT IGNORE INTO steps (unix_timestamp, num_steps, patient_id) VALUES\"\n for index, row in enumerate(data):\n timestamp = row[0]\n num_steps = int(row[1])\n sql_statement += \"{} ({}, {}, {})\".format(\n \",\" if index > 0 else \"\",\n timestamp,\n num_steps,\n patient_id\n )\n cursor.execute(sql_statement)\n connection.commit()\n else:\n sys.exit(2)\n\n finally:\n connection.close()", "title": "" }, { "docid": "f89bee9c25cff002c092e0ac196238c3", "score": "0.50532705", "text": "def load_individuals_into_database():\n \n from schema.connection import session, metadata\n from schema.connection import Individual, Population, SNP, RefSeqGene\n# session.flush()\n print \"now we are connected to the database:\", metadata\n metadata.bind.echo = True\n \n samples_file = file('../../data/Annotations/samples_subset.csv', 'r')\n \n session.flush()\n \n parsers.samples_parser(samples_file)\n session.commit()\n print 'upload of tables finished'", "title": "" }, { "docid": "1175d88e5f4f6e97e0f34ddb0395f001", "score": "0.5046798", "text": "def dataFemalePlayersTae21():#initialize female data of players\n with open('FEMALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTAE21 = Player_Data_TAE21.Player_Data_TAE21(row['FEMALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n femalePlayerNamesDatatableTae21.append(playerdataTAE21)", "title": "" }, { "docid": "1e80990b4965710200d5ad8fce778de4", "score": "0.5029587", "text": "def _save_tourney_results(self, game_outcomes):\n fn1 = game_outcomes[0][1]\n fn2 = game_outcomes[0][2]\n fn1_wins, fn2_wins, draws = 0, 0, 0\n for idx, 
outcome_list in enumerate(game_outcomes):\n outcome_list[0] = idx+1 # Renumber games\n for game_num, p1_fn, p2_fn, outcome, move_count in game_outcomes:\n if outcome == 'player1_wins':\n if p1_fn == fn1: fn1_wins += 1\n if p1_fn == fn2: fn2_wins += 1 \n elif outcome == 'player2_wins': \n if p2_fn == fn1: fn1_wins += 1\n if p2_fn == fn2: fn2_wins += 1 \n elif outcome == 'draw':\n draws += 1\n fn1_wld = str(fn1_wins) + '/' + str(fn2_wins) + '/' + str(draws)\n fn2_wld = str(fn2_wins) + '/' + str(fn1_wins) + '/' + str(draws)\n summary_table = [[fn1, fn1_wld],[fn2, fn2_wld]]\n summary_headers = ['Neural Network', 'Wins/Losses/Draws']\n headers = ['Game Number', 'Player 1', 'Player 2', 'Outcome', 'Turn Count']\n filename = 'data/tournament_results/Tournament_' + \\\n self._create_timestamp() + '.txt'\n with open(filename, 'w') as file:\n file.write(tabulate(summary_table, tablefmt='fancy_grid',\n headers=summary_headers))\n file.write('\\n\\n')\n file.write(tabulate(game_outcomes, tablefmt='fancy_grid',\n headers=headers))\n return filename", "title": "" }, { "docid": "398c521934127a81582f2f1fa76fc1c9", "score": "0.5023759", "text": "def parse_csv(filepath, username, trip):\n with open(filepath, 'r') as csvfile:\n reader = csv.DictReader(csvfile.read().split('\\n'))\n i = 0\n b = BatchQuery()\n last_dt = None\n for i, line in enumerate(reader):\n if i % 1000 == 0:\n b.execute()\n b = BatchQuery()\n try:\n dt = parser.parse(line['time'])\n if dt == last_dt:\n continue\n pt = {'lon': line['lon'],\n 'lat': line['lat'],\n 'accurracy': line['accuracy'],\n 'username': username,\n 'created_at': dt,\n 'trip_id': trip}\n last_dt = dt\n Point.batch(b).create(**pt)\n except ValueError:\n continue\n b.execute()\n return username, trip", "title": "" }, { "docid": "70c08355fe6ede3f61790e8837333074", "score": "0.5016316", "text": "def _recompute_rank(self):\n\n ranks = [{'rank': rank, 'karma_min': rank.karma_min} for rank in\n self.env['gamification.karma.rank'].search([], order=\"karma_min DESC\")]\n\n # 3 is the number of search/requests used by rank in _recompute_rank_bulk()\n if len(self) > len(ranks) * 3:\n self._recompute_rank_bulk()\n return\n\n for user in self:\n old_rank = user.rank_id\n if user.karma == 0 and ranks:\n user.write({'next_rank_id': ranks[-1]['rank'].id})\n else:\n for i in range(0, len(ranks)):\n if user.karma >= ranks[i]['karma_min']:\n user.write({\n 'rank_id': ranks[i]['rank'].id,\n 'next_rank_id': ranks[i - 1]['rank'].id if 0 < i else False\n })\n break\n if old_rank != user.rank_id:\n user._rank_changed()", "title": "" }, { "docid": "de0771d265304519bb0bba84cbafbbbc", "score": "0.50147194", "text": "def create_records_table():\n with open(\"./data/processed/records.csv\", \"w+\", newline=\"\") as records_file:\n record_writer = csv.DictWriter(\n records_file,\n fieldnames=[\n \"date\",\n \"quadrat\",\n \"waypoint\",\n \"grid_reference\",\n \"photo_up\",\n \"photo_down\",\n \"wetness\",\n \"canopy\",\n \"species\",\n \"comments\",\n ],\n )\n record_writer.writeheader()\n\n for survey_file_path in SURVEY_FILE_PATHS:\n with open(survey_file_path, newline=\"\") as survey_file:\n survey_file_reader = csv.reader(survey_file, delimiter=\",\")\n # Prepare an empty record with the file date\n record = {\"date\": date_from_file(survey_file_path).isoformat(), \"comments\": \"\"}\n waypoint_comments = \"\" # to collect waypoint comments\n for row in survey_file_reader:\n # Read the waypoint information into the record.\n while \"species\" not in row[0]:\n record[row[0]] = row[1] # so just add 
it to the record\n if row[2]: # there is a comment\n waypoint_comments = waypoint_comments + row[2]\n row = next(survey_file_reader)\n # Get the individual species records\n while True:\n record[\"comments\"] = waypoint_comments\n record[\"species\"] = row[1]\n if row[2]: # there is a comment\n record[\"comments\"] = record[\"comments\"] + \" - \" + row[2]\n # write a species record\n record_writer.writerow(record)\n try:\n row = next(survey_file_reader)\n except StopIteration:\n break # at the end of the file\n if re.match(r\"species|^$\", row[0]) == None:\n # Next waypoint so add the read row to the record\n record[row[0]] = row[1]\n if row[2]: # there is a comment\n waypoint_comments = row[2]\n else:\n waypoint_comments = \"\"\n break # at the end of the species list", "title": "" }, { "docid": "6d413ddc1e79dd57921a145334e5cddf", "score": "0.5014033", "text": "def parse_file(self):\n with open(self.filename, 'rb') as fp:\n filereader = csv.DictReader(fp, delimiter=',')\n for trx_import in filereader:\n trx = self.map_fields(trx_import)\n self.set_accounts(trx)\n trx['duplicate'] = self.is_duplicate(trx)\n self.transactions.append(trx)", "title": "" }, { "docid": "032d3e474d334e5e2d389391b43290b9", "score": "0.50128543", "text": "def init_db():\n csv_files = listdir('input')\n conn = sq.connect('database.db')\n cur = conn.cursor()\n cur.execute('DROP TABLE IF EXISTS matches')\n cur.close()\n conn.commit()\n conn.close()\n\n for csv_file in csv_files:\n print csv_file\n temp_frame = pd.read_csv('input/{0}'.format(csv_file))\n temp_frame.dropna(how='all', inplace=True) # Remove empty rows\n temp_frame.dropna(axis=1, how='all', inplace=True) # Remove empty columns\n temp_frame.dropna(subset=['HTR', 'FTR'], inplace=True) # Remove matches without half time or full time results\n temp_frame['league'] = temp_frame['Div'] # Create new column for league name\n temp_frame['country'] = temp_frame['Div'] # Create new column for country name\n temp_frame.Date = p.map(convert_date, temp_frame.Date)\n temp_frame.country = p.map(country, temp_frame.country)\n temp_frame.league = p.map(league, temp_frame.league)\n temp_frame.HTR = p.map(h_d_a, temp_frame.HTR)\n temp_frame.FTR = p.map(h_d_a, temp_frame.FTR)\n temp_frame['HTFTR'] = p.map(int, temp_frame['HTR'].map(str) + temp_frame['FTR'].map(str))\n temp_frame.drop(['HS', 'AS', 'HST', 'AST', 'HF', 'AF', 'HC', 'AC', 'HY', 'AY', 'HR', 'AR', 'Div', 'BWH', 'BWD',\n 'BWA', 'IWH', 'IWD', 'IWA', 'LBH', 'LBD', 'LBA', 'PSH', 'PSD', 'PSA', 'WHH', 'WHD', 'WHA', 'VCH',\n 'VCD', 'VCA', 'Bb1X2', 'BbMxH', 'BbAvH', 'BbMxD', 'BbAvD', 'BbMxA', 'BbAvA', 'BbOU', 'BbMx>2.5',\n 'BbAv>2.5', 'BbMx<2.5', 'BbAv<2.5', 'BbAH', 'BbAHh', 'BbMxAHH', 'BbAvAHH', 'BbMxAHA', 'BbAvAHA',\n 'PSCH', 'PSCD', 'PSCA', 'BSH', 'BSD', 'BSA', 'Referee', 'GBH', 'GBA', 'GBD', 'SBH', 'SBD', 'SBA',\n 'SJH', 'SJD', 'SJA'], axis=1, inplace=True, errors='ignore')\n temp_frame.replace(\"\", np.nan)\n conn = sq.connect('database.db')\n temp_frame.to_sql('matches', conn, if_exists='append', index=False)\n conn.commit()\n conn.close()", "title": "" }, { "docid": "9388d3f2d29a5fcb49538209258b517b", "score": "0.50086415", "text": "def ingest_tourney_seeds(session, seeds):\n session.execute('''DELETE FROM tourney_seeds''')\n lines = seeds.to_dict('records')\n records = []\n for line in lines:\n records.append(Tourney(Season=line['Season'], Seed=line['Seed'], TeamID=line['TeamID']))\n session.add_all(records)\n session.commit()\n logger.info(f'{len(records)} records were added to the table')", "title": "" }, { 
"docid": "8d2834b9ed647a1ad59bf4bff5258d31", "score": "0.5006288", "text": "def dataMalePlayersTac1():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTac1 = Player_Data_Tac1.Player_Data_Tac1(row['MALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n malePlayerNamesDatatableTac1.append(playerdataTac1)", "title": "" }, { "docid": "968c4375132fa5983bd81b08f2429e09", "score": "0.5005436", "text": "def main():\n\n # Set decimal precision\n decimal.getcontext().prec = 4\n\n startTime = time.perf_counter()\n\n # Sample hardcoded test data\n # filePath = \"redditandtweetsv3.csv\"\n\n # Validate user argument length\n if len(sys.argv) != 2:\n print(\"Usage: py main.py dataFile.csv\")\n sys.exit(1)\n\n filePath = sys.argv[1]\n df = loadData(filePath)\n\n posDF, negDF, neuDF = analyseData(df)\n\n # Create output files in the relative path to the application folder\n createOutputCSVFile(posDF, \"positive\")\n createOutputCSVFile(negDF, \"negative\")\n createOutputCSVFile(neuDF, \"neutral\")\n\n stopTime = time.perf_counter()\n\n print(f\"\\nTime taken: {stopTime - startTime:.02f} seconds\")\n\n sys.exit(0)", "title": "" }, { "docid": "c35705b093d87d785af6be4254954496", "score": "0.49956295", "text": "def import_files(filelist, output_filename):\n BOOTH = 'Booth'\n AREA = 'Area'\n cols = [AREA,BOOTH ,\"Green Party\",\"Labour Party\",\"National Party\",\"New Zealand First Party\",\"Total Valid Party Votes\",\"Informal Party Votes\"]\n out_header = ['Election', 'Electorate'] + cols\n\n with open(output_filename,'w') as outputfile: \n csvwriter = csv.writer(outputfile)\n csvwriter.writerow(out_header)\n \n for filename in filelist:\n with open(filename,'r') as inputfile, open(output_filename,'a') as outputfile:\n csvwriter = csv.writer(outputfile)\n \n election_year = filename.split('/')[-1].split('_')[2]\n\n next(inputfile) # Skip first row. 
\n # First col: Electorate Name SPACE Electorate Code\n electorate_name, electorate_code = next(inputfile).strip().replace('\"','').replace(\"'\",\"\").split(',')[0].rsplit(' ',1)\n in_header = [col.strip() for col in next(inputfile).replace(\"'\",\"\").replace('\"','').split(',')]\n in_header[0] = cols[0]\n in_header[1] = cols[1] \n csvreader = csv.DictReader(inputfile, fieldnames= in_header)\n \n suburb = \"\"\n specials = False\n for row in csvreader:\n\n \n out_row = [row[x] for x in cols]\n \n \n if row[BOOTH] == FIRST_SPECIAL:\n specials = True\n out_row[cols.index(AREA)] = \"Other Vote\" \n elif specials:\n out_row[0] = \"Other Vote\"\n\n \n #if specials:\n # if \"-\" in row[BOOTH]:\n # out_row[0] = row[BOOTH].split(\" - \", 1)[0]\n \n \n \n if out_row[0] == \"\":\n out_row[0] = suburb\n else:\n suburb = out_row[0]\n\n out_row = [election_year, electorate_name] + out_row\n csvwriter.writerow(out_row) \n if row[BOOTH] == DATA_FINISHED:\n break", "title": "" }, { "docid": "b19a5a06397272bb225c23de971e3040", "score": "0.49951804", "text": "def _parse_tourney_results(self):\n for game_outcomes in self.game_outcomes:\n for game_num, p1_fn, p2_fn, outcome, move_count in game_outcomes:\n p1_idx = self.model_fn_list.index(p1_fn)\n p2_idx = self.model_fn_list.index(p2_fn)\n if outcome == 'player1_wins':\n self.table[p1_idx, p2_idx] += 1\n self.table[p2_idx, p1_idx] -= 1\n elif outcome == 'player2_wins': \n self.table[p1_idx, p2_idx] -= 1\n self.table[p2_idx, p1_idx] += 1 \n model_scores = np.sum(self.table, axis=1)\n self._plot_model_scores(model_scores)\n col_headers = self.model_iter_list + ['Total']\n table = np.hstack((self.table, np.transpose(model_scores[np.newaxis])))\n filename = 'data/final_eval/Checkers_Final_Evaluation_' + \\\n self._create_timestamp() + '.txt'\n with open(filename, 'w') as file:\n file.write(tabulate(table, headers=col_headers, \n showindex=self.model_iter_list, \n tablefmt='fancy_grid'))", "title": "" }, { "docid": "9622955dab584dcad39d5e49db8a410b", "score": "0.49935636", "text": "def _open_convert_csv_files(self):\n\t\tcomb_index = None\n\t\tfor s in self.symbol_list:\n\t\t\t# Load the CSV file with no header information, indexed on date\n\t\t\tself.symbol_data[s] = pd.io.parsers.read_csv(\n\t\t\t\tos.path.join(self.csv_dir, '%s.csv' % s),\n\t\t\t\theader=0, index_col=0,\n\t\t\t\tnames=['datetime', 'open', 'low', 'high', 'close', 'volume', 'oi']\n\t\t\t)\n\n\t\t\t# Combine the index to pad forward values\n\t\t\tif comb_index is None:\n\t\t\t\tcomb_index = self.symbol_data[s].index\n\t\t\telse:\n\t\t\t\tcomb_index.union(self.symbol_data[s].index)\n\n\t\t\t# Set the latest symbol_data to None\n\t\t\tself.latest_symbol_data[s] = []\n\n\t\t# Reindex the dataframes\n\t\tfor s in self.symbol_list:\n\t\t\tself.symbol_data[s] = self.symbol_data[s].reindex(index=comb_index, method='pad').iterrows()", "title": "" }, { "docid": "a4c79036e507f684a77f174b5a692952", "score": "0.49878836", "text": "def add_new_winner(filename: str, year: int, age: int, name:str, title:str):\n try:\n with open(filename, 'r+') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n next(csv_reader)\n for row in csv_reader:\n last_index = int(row[0])\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n csv_writer.writerow([int(last_index + 1), year, age, name, title])\n except OSError as e:\n print(e)", "title": "" }, { "docid": "c6b345894133e5626670f5b7c7d309b0", "score": "0.498404", "text": "def load_ratings():\n\n print \"Ratings\"\n\n 
# Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Rating.query.delete()\n\n # Read u.data file and insert data\n for row in open(\"seed_data/u.data\"):\n row = row.rstrip()\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n\n rating = Rating(movie_id=int(movie_id),\n user_id=int(user_id),\n score=int(score))\n\n # We need to add to the session or it won't ever be stored\n db.session.add(rating)\n\n # Once we're done, we should commit our work\n db.session.commit()", "title": "" }, { "docid": "96442c59556e763b4a969678612df784", "score": "0.49836972", "text": "def dataMalePlayersTae21():#initialize male data of players\n with open('MALE PLAYERS.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n playerdataTAE21 = Player_Data_TAE21.Player_Data_TAE21(row['MALE PLAYERS'],0,0,0,0,0) #parameters - player name, ranking, prizeMoney\n malePlayerNamesDatatableTae21.append(playerdataTAE21)", "title": "" }, { "docid": "cddd5a629b9ca8fde74e79214bcdd28d", "score": "0.49835047", "text": "def push_data(Umpire, Deliveries, Matches):\n session = Session()\n\n # country and number of umpire over the history of IPL.\n with open('assets/umpire_data.csv', 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n csv_reader.__next__()\n for line in csv_reader:\n session.add(Umpire(\n umpire=line[0], nationality=line[1],\n first_officiated=line[2], last_officiated=line[3],\n no_of_matches=line[4]))\n\n # Top batsman data and top runs scored by teams\n with open('assets/deliveries.csv', 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n csv_reader.__next__()\n for line in csv_reader:\n session.add(Deliveries(\n match_id=line[0], inning=line[1],\n batting_team=line[2], bowling_team=line[3],\n over=line[4], ball=line[5], batsman=line[6],\n non_striker=line[7], bowler=line[8],\n is_super_over=line[9], wide_runs=line[10],\n bye_runs=line[11], legbye_runs=line[12],\n noball_runs=line[13], penalty_runs=line[14],\n batsman_runs=line[15], extra_runs=line[16],\n total_runs=line[17], player_dismissed=line[18],\n dismissal_kind=line[19], fielder=line[20]))\n\n # Number of matches played by teams by season\n with open('assets/matches.csv', 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n csv_reader.__next__()\n for line in csv_reader:\n session.add(Matches(\n season=line[1], city=line[2],\n date=line[3], team1=line[4],\n team2=line[5], toss_winner=line[6],\n toss_decision=line[7], result=line[8],\n dl_applied=line[9], winner=line[10],\n win_by_runs=line[11], win_by_wickets=line[12],\n player_of_match=line[13], venue=line[14],\n umpire1=line[15], umpire2=line[16],\n umpire3=line[17]))\n\n session.commit()\n session.close()", "title": "" }, { "docid": "8ef06fb79ab5eef1dd92aed25a3b1f2e", "score": "0.49826473", "text": "def load_flights():\n\n for row in open(\"seed_data/flights.csv\"):\n row = row.strip()\n origin, destination, carrier, quarter, time, avg_delay, \\\n duration, num_flights, num_delay, num_can_div, score = row.split(\",\")\n\n flight = Flight(origin=origin, destination=destination, carrier=carrier,\n quarter=int(quarter), time=int(time), num_flights=int(num_flights),\n num_delayed=int(num_delay), num_cancel_divert=int(num_can_div), duration=int(duration), \n avg_delay=int(avg_delay), score=float(score))\n\n # Add each flight to the session\n db.session.add(flight)\n\n # Once we're done, commit all the flights to the database\n db.session.commit()\n return", "title": "" }, { "docid": 
"c470a4cda7199a81aa37c6bea16c9e78", "score": "0.49812776", "text": "def process_airport_file(cur, filepath, conn):\n # Read airport codes data file\n df2 = pd.read_csv('airport-codes_csv.csv')\n df2.fillna(0,inplace=True)\n\n #Fetching required columns from the data\n airports_data = df2.iloc[:,[0,2,1,6,7,8,10]].values.tolist()\n \n # insert airport codes data records\n for i in range(len(airports_data)):\n try:\n cur.execute(dimairports_table_insert, airports_data[i])\n except psycopg2.IntegrityError as e:\n conn.rollback()\n print('insert failure:',e)\n #continue\n except ValueError as e:\n conn.rollback()\n print('insert failure:',e)\n #continue\n else:\n conn.commit()", "title": "" }, { "docid": "5c5f1172504c7cc2885f2177af08a09d", "score": "0.49776614", "text": "def script():\n\trows ={}\n\tf = 0\n\tUN = open(\"UN-data.csv\",\"rU\")#U opens it as universal\n\treader =UN.read()\n\treader2 = reader.split('\\n')\n\t\n\t\n # csv.writer(load)\n\tfor k in reader2:\n\t\t\n\t\tdata = k.split(',')\n\t\tif k.startswith('prompt'):\n\t\t\tpass\n\t\telse:\n\t\t\trows[f]= data[:]\n\t\tf+=1 \n\t\t#print k\n\t#print rows\n\t\n\tload_higher = open(\"UN-dataHigher.csv\", 'wt')\n\tload_lower = open(\"UN-dataLower.csv\", 'wt')\n\tload_reverse = open(\"UN-dataReverse.csv\", 'wt')\n\n\tfor d in rows:\n\n\t\tif rows[d][1]== 'higher':\n\t\t\t\n\t\t\tload_higher.write(\"\\t\".join([str(p) for p in rows[d]])+\"\\n\")\n\t\t\t\n\t\telif rows[d][1]== 'lower':\n\t\t\tload_lower.write(\"\\t\".join([str(p) for p in rows[d]])+\"\\n\")\n\n\t\tfor w in rows:\n\t\t\tif int(rows[w][2]) <= 50:\n\t\t\t#for line in sorted(UN, key=lambda line: line.split()[2]):\n\t\t\t\tload_reverse.write(\"\\t\".join([str(p) for p in rows[w]])+\"\\n\")\n\n\tload_higher.close()\n\tload_lower.close()\n\tload_reverse.close()\n\t\n\t\n\treverse2 = open(\"UN-dataReverse.csv\",\"rU\").read()\n\t\n\tf ={}\n\th = reverse2.split('\\n')\n\tc = 0\n\t\n\tfor i in h:\n\t\ta = i.split('\\t')\n\t\tf[c]=a[:]\n\t\tc += 1\n\tprint f", "title": "" }, { "docid": "b78934158f5504ffd1fedbe72f211c7d", "score": "0.497576", "text": "def execute(db_host = 'localhost', \r\n db_name = 'nordicmicroalgae', \r\n db_user = 'root', \r\n db_passwd = ''):\r\n db = None\r\n cursor = None\r\n try:\r\n # Connect to db.\r\n db = mysql.connector.connect(host = db_host, db = db_name, \r\n user = db_user, passwd = db_passwd,\r\n use_unicode = True, charset = 'utf8')\r\n cursor=db.cursor()\r\n # Delete all rows.\r\n cursor.execute(\"\"\" delete from taxa_ranks \"\"\") \r\n # Insert new rows.\r\n# cursor.execute(\"\"\"\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Rootlevel', 0); \"\"\") # For internal use.\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Domain', 5); \"\"\") # Note: Empire or Domain.\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Kingdom', 15); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Subkingdom', 16); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Phylum', 25); \"\"\") # Note: Division in botany.\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Subphylum', 26); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Superclass', 34); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Class', 35); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Subclass', 36); \"\"\")\r\n 
cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Infraclass', 37); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Superorder', 44); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Order', 45); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Suborder', 46); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Infraorder', 47); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Superfamily', 54); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Family', 55); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Subfamily', 56); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Tribe', 65); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Genus', 75); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Subgenus', 76); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Species pair', 84); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Species', 8); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Subspecies', 86); \"\"\")\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Variety', 94); \"\"\") # Note: Botany.\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Form', 95); \"\"\") # Note: Zoology.\r\n cursor.execute(\"\"\" insert into taxa_ranks(rank, sort_order) values ('Hybrid', 96); \"\"\")\r\n #\r\n except mysql.connector.Error as e:\r\n print(\"ERROR: MySQL %d: %s\" % (e.args[0], e.args[1]))\r\n print(\"ERROR: Script will be terminated.\")\r\n sys.exit(1)\r\n finally:\r\n if cursor: cursor.close()\r\n if db: db.close()", "title": "" }, { "docid": "3c33576e9089f69b557fb93c58a8b2fa", "score": "0.49743506", "text": "def process_city_file(cur, filepath, conn):\n # Read cityies information data file\n df3 = pd.read_csv('us-cities-demographics.csv',sep = ';')\n df3.fillna(0,inplace=True)\n \n # using dictionary to convert specific columns \n convert_dict = {'Male Population': int, \n 'Female Population': int\n } \n df3 = df3.astype(convert_dict)\n\n #Fetching required columns from the data\n cities_data = df3.iloc[:,[0,1,9,2,3,4,5]].values.tolist()\n \n # insert city data records\n for i in range(len(cities_data)):\n try:\n cur.execute(dimcities_table_insert, cities_data[i])\n except psycopg2.IntegrityError as e:\n conn.rollback()\n print('insert failure:',e)\n #continue\n except ValueError as e:\n conn.rollback()\n print('insert failure:',e)\n #continue\n else:\n conn.commit()", "title": "" }, { "docid": "243e1b108bbf22e8bdcc996c8926b830", "score": "0.49712095", "text": "def save_data_into_database(file_path):\n # create database and table\n db.connect()\n if not UserOrder.table_exists():\n db.create_table(UserOrder)\n # insert data\n data_frame = pd.read_csv(file_path, sep=';')\n for idx, row in data_frame.iterrows():\n UserOrder.create_or_get(order_id=row['Order_ID'], order_date=row['Order_Date'], user_id=row['User_ID'],\n total_charges_usd=row['Total_Charges_USD'].replace(',', '.'))", "title": "" } ]
46c36c1019f8fb27aefa21a649fc8f63
Performs a scatter of data to the different available parallelizer processes.
[ { "docid": "3a86da7d6c1bf390c35945bfb4e57717", "score": "0.63945365", "text": "def scatter(self, data, **kwargs):\n\n if self.contract is not None:\n self.contract.handle_call(self, \"scatter\")\n if self.on_main:\n locs = list(self.comm.locations) # gotta be safe\n nlocs = len(locs)\n chunk_size = len(data) // nlocs\n chunk_sizes = [chunk_size] * nlocs\n chunk_coverage = (chunk_size*nlocs)\n for i in range(len(data)-chunk_coverage):\n chunk_sizes[i] += 1\n s = chunk_sizes[0]\n main_data = data[:s]\n for i, b in zip(locs[1:], chunk_sizes[1:]):\n chunk = data[s:s+b]\n self.comm.send(chunk, i, **kwargs)\n s+=b\n return main_data\n else:\n return self.comm.send(data, self.comm.location) # effectively a receive...", "title": "" } ]
[ { "docid": "6c8c17da0c38df782cee0143cc7a70a4", "score": "0.7503602", "text": "def scatter(self, data, **kwargs):\n raise NotImplementedError(\"Parallelizer is an abstract base class\")", "title": "" }, { "docid": "1dfb67fb390b7da012f3d07ed338dee0", "score": "0.6475183", "text": "def gather(self, data, **kwargs):\n raise NotImplementedError(\"Parallelizer is an abstract base class\")", "title": "" }, { "docid": "7213593fd6a04c80e6db3341d8d5d957", "score": "0.6470817", "text": "def launch_scatter(cls, db):\n gxapi_cy.WrapCHIMERA._launch_scatter(GXContext._get_tls_geo(), db.encode())", "title": "" }, { "docid": "345367d8a74760f8aec695fab78fa66b", "score": "0.64048016", "text": "def map(self, func, data, extra_args=None, extra_kwargs=None, **kwargs):\n\n # self.wait()\n self.print(\"Scattering Data\", log_level=Logger.LogLevel.MoreDebug)\n data = self.scatter(data, **kwargs)\n # self.wait()\n self.print(\"Broadcasting Extra Args\", log_level=Logger.LogLevel.MoreDebug)\n extra_args = self.broadcast(extra_args, **kwargs)\n # self.wait()\n self.print(\"Broadcasting Extra Kwargs\", log_level=Logger.LogLevel.MoreDebug)\n extra_kwargs = self.broadcast(extra_kwargs, **kwargs)\n if extra_args is None:\n extra_args = ()\n if extra_kwargs is None:\n extra_kwargs = {}\n # try:\n evals = [func(sub_data, *extra_args, **extra_kwargs) for sub_data in data]\n # except Exception as e:\n # self.gather(e, **kwargs)\n # raise\n res = self.gather(evals, **kwargs)\n if self.on_main:\n return sum(res, [])\n else:\n return Parallelizer.InWorkerProcess", "title": "" }, { "docid": "0b479aef1669511027bf065fad6d300f", "score": "0.6325013", "text": "def scatter(self, data, workers=None):\n return sync(self.loop, self._scatter, data, workers=workers)", "title": "" }, { "docid": "c55f5c60d80e945cbc009355cfa9f091", "score": "0.6124286", "text": "def scatter(\n self,\n data,\n workers=None,\n broadcast=False,\n direct=None,\n hash=True,\n timeout=no_default,\n asynchronous=None,\n ):\n if timeout == no_default:\n timeout = self._timeout\n if isinstance(data, pyQueue) or isinstance(data, Iterator):\n raise TypeError(\n \"Dask no longer supports mapping over Iterators or Queues.\"\n \"Consider using a normal for loop and Client.submit\"\n )\n\n try:\n local_worker = get_worker()\n except ValueError:\n local_worker = None\n return self.sync(\n self._scatter,\n data,\n workers=workers,\n broadcast=broadcast,\n direct=direct,\n local_worker=local_worker,\n timeout=timeout,\n asynchronous=asynchronous,\n hash=hash,\n )", "title": "" }, { "docid": "a021c4e1e1407b3febd87c25507754b3", "score": "0.6092579", "text": "def broadcast(self, data, **kwargs):\n raise NotImplementedError(\"Parallelizer is an abstract base class\")", "title": "" }, { "docid": "dfc6f54e65cbafcd962bcdb2a01e015c", "score": "0.59863514", "text": "def _scatter(self):\n if not cmds.objExists(self.scatter.scatter_objs[0]):\n MGlobal.displayError(\"One or more specified scatter objects do \"\n \"not exist. Please reselect.\")\n self.obj_le.clear()\n self._update_scatter_btn_state()\n return\n for target in self.scatter.target_objs + self.scatter.target_verts:\n if not cmds.objExists(target):\n MGlobal.displayError(\"One or more of the scatter targets does \"\n \"not exist. 
Please reselect.\")\n self.target_le.clear()\n self._update_scatter_btn_state()\n return\n self._set_scatter_properties_from_ui()\n self.scatter.scatter()", "title": "" }, { "docid": "a71575057d2ebbe2ec82abc85fa6702a", "score": "0.5932825", "text": "def starmap(self, func, data, extra_args=None, extra_kwargs=None, **kwargs):\n data = self.scatter(data, **kwargs)\n extra_args = self.broadcast(extra_args)\n extra_kwargs = self.broadcast(extra_kwargs)\n if extra_args is None:\n extra_args = ()\n if extra_kwargs is None:\n extra_kwargs = {}\n # try:\n evals = [func(*sub_data, *extra_args, **extra_kwargs) for sub_data in data]\n # except Exception as e:\n # self.gather(e, **kwargs)\n # raise\n res = self.gather(evals, **kwargs)\n if self.on_main:\n return sum(res, [])\n else:\n return Parallelizer.InWorkerProcess", "title": "" }, { "docid": "6d7c4bcb52b8f3d62c8c811859b02540", "score": "0.58954185", "text": "def run_all(self, threads):\n print(\"Running all data points...\")\n counter = mp.Value('i', 0)\n sims = []\n pool = mp.Pool(\n initializer=_init_counter, initargs=(counter, ), processes=threads)\n for p in range(self._num_data_points):\n cmd = os.path.join(self._output_dir, str(p), \"run.sh\")\n sims.append(pool.apply_async(_run_simulation, args=(cmd, )))\n for sim in sims:\n sim.get()\n pool.close()\n pool.join()", "title": "" }, { "docid": "55e6a03755fa0c0532e12c5407a392c3", "score": "0.5863241", "text": "def _cmd_scatter(args):\n pset_cvg = CNA.read(args.filename, args.sample_id)\n pset_seg = CNA.read(args.segment) if args.segment else None\n if args.range_list:\n with PdfPages(args.output) as pdf_out:\n for chrom, start, end in ngfrills.parse_regions(args.range_list,\n True):\n region = \"{}:{}-{}\".format(chrom, start, end)\n do_scatter(pset_cvg, pset_seg, args.vcf, args.chromosome, args.gene,\n region, args.background_marker, args.trend, args.width,\n args.sample_id, args.min_variant_depth)\n pyplot.title(region)\n pdf_out.savefig()\n pyplot.close()\n else:\n do_scatter(pset_cvg, pset_seg, args.vcf,\n args.chromosome, args.gene, args.range,\n args.background_marker, args.trend, args.width,\n args.sample_id, args.min_variant_depth)\n if args.output:\n pyplot.savefig(args.output, format='pdf', bbox_inches=\"tight\")\n echo(\"Wrote\", args.output)\n else:\n pyplot.show()", "title": "" }, { "docid": "3afb5d3e640f86bdfc1d9fd9d47150fe", "score": "0.57374126", "text": "async def scatter_to_workers(nthreads, data, rpc=rpc):\n assert isinstance(nthreads, dict)\n assert isinstance(data, dict)\n\n workers = list(concat([w] * nc for w, nc in nthreads.items()))\n names, data = list(zip(*data.items()))\n\n worker_iter = drop(_round_robin_counter[0] % len(workers), cycle(workers))\n _round_robin_counter[0] += len(data)\n\n L = list(zip(worker_iter, names, data))\n d = groupby(0, L)\n d = {worker: {key: value for _, key, value in v} for worker, v in d.items()}\n\n rpcs = {addr: rpc(addr) for addr in d}\n try:\n out = await All([rpcs[address].update_data(data=v) for address, v in d.items()])\n finally:\n for r in rpcs.values():\n await r.close_rpc()\n\n nbytes = merge(o[\"nbytes\"] for o in out)\n\n who_has = {k: [w for w, _, _ in v] for k, v in groupby(1, L).items()}\n\n return (names, who_has, nbytes)", "title": "" }, { "docid": "a72760f6c73dfba38e3c1aab811952d5", "score": "0.5722667", "text": "def scatter(self, message, forward=False):\n self.emit('data', message, forward)", "title": "" }, { "docid": "a3f0712a61bd8d7e5ca0185da25bea83", "score": "0.5648263", "text": "def _cmd_scatter(args):\n 
pset_cvg = _CNA.read(args.filename, args.sample_id)\n pset_seg = _CNA.read(args.segment) if args.segment else None\n if args.range_list:\n with PdfPages(args.output) as pdf_out:\n for chrom, start, end in _RA.read(args.range_list).coords():\n region = \"{}:{}-{}\".format(chrom, start, end)\n do_scatter(pset_cvg, pset_seg, args.vcf, False, False,\n region, args.background_marker, args.trend,\n args.width, args.sample_id, args.normal_id,\n args.min_variant_depth, args.y_min, args.y_max)\n pyplot.title(region)\n pdf_out.savefig()\n pyplot.close()\n else:\n if args.chromosome and (':' in args.chromosome or\n '-' in args.chromosome):\n show_range = args.chromosome\n args.chromosome = None\n else:\n show_range = None\n do_scatter(pset_cvg, pset_seg, args.vcf, args.chromosome, args.gene,\n show_range, args.background_marker, args.trend, args.width,\n args.sample_id, args.normal_id, args.min_variant_depth,\n args.y_min, args.y_max)\n if args.output:\n pyplot.savefig(args.output, format='pdf', bbox_inches=\"tight\")\n echo(\"Wrote\", args.output)\n else:\n pyplot.show()", "title": "" }, { "docid": "8ee420bf667604a4cf3309676b26f906", "score": "0.5645637", "text": "def scatter(self, data, root=0, shape=None, dtype=None, **kwargs):\n\n if isinstance(data, np.ndarray) or data is None:\n if root == self.location and data is None:\n raise TypeError(\"'None' is not scatterable. Try `broadcast`.\")\n if root == self.location:\n send_buf = np.ascontiguousarray(data)\n else:\n send_buf = None\n if shape is None:\n if root == self.location:\n shape = data.shape\n shape = self.broadcast(shape)\n if dtype is None:\n if root == self.location:\n dtype = data.dtype\n dtype = self.broadcast(dtype)\n np.empty(shape, dtype=dtype)\n ranks = self.comm.Get_size()\n ndat = shape[0]\n block_size = ndat // ranks\n block_remainder = ndat - (block_size*ranks)\n if block_remainder == 0:\n shape = (block_size,) + shape[1:]\n recv_buf = np.empty(shape, dtype=dtype)\n self.comm.Scatter(send_buf, recv_buf, root=root)\n return recv_buf\n else:\n # it turns out MPI4Py has only an unsophisticated\n # implementation of Scatterv that explicitly needs the\n # appropriate block offsets, since it basically expects\n # to have a flattened form of the array (just like MPI)\n block_sizes = [block_size]*ranks\n for i in range(block_remainder):\n block_sizes[i] += 1\n\n recv_buf = np.empty((block_sizes[self.location],) + shape[1:], dtype=dtype)\n\n block_offset = int(np.prod(shape[1:]))\n block_sizes = np.array(block_sizes)*block_offset\n block_offsets = np.concatenate([[0], np.cumsum(block_sizes)])[:-1]\n # print(block_offsets, block_offset, block_sizes)\n\n # self.parent.print(\"block sizes: {} offsets: {}\".format(block_sizes, block_offsets))\n self.comm.Scatterv(\n [\n send_buf,\n block_sizes,\n block_offsets,\n self.get_mpi_type(dtype)\n ],\n recv_buf,\n root=root\n )\n self.parent.print(\"sending\", send_buf, recv_buf, self.get_mpi_type(dtype).name, log_level=Logger.LogLevel.MoreDebug)\n return recv_buf\n else:\n return self.scatter_obj(data, root=root, **kwargs)", "title": "" }, { "docid": "b9936c4546522426b4c449dddfb2b96a", "score": "0.5640514", "text": "def scatter(self, message, forward=False):\n # if self.hasChildren():\n self.emit('data', message, forward)", "title": "" }, { "docid": "f2bfa682419341d06aa039c016ee048e", "score": "0.56394106", "text": "def dataParallel(self):\n print(\"=== Let's use\", torch.cuda.device_count(), \"GPUs!\")\n self.net = nn.DataParallel(self.net)\n self.optimizer = self.adamOptim(\n self.net, 
lr=self.config[\"model\"][\"learning_rate\"]\n )", "title": "" }, { "docid": "ed6eb2c4698aea5f5ce8a8f99354ea2e", "score": "0.5631969", "text": "def scatter(self, data, shape=None, **kwargs):\n\n if self.contract is not None:\n self.contract.handle_call(self, \"scatter\")\n return self.comm.scatter(data, root=self.root, shape=shape, **kwargs)", "title": "" }, { "docid": "1172952eb3cf51f69d7c264d10247350", "score": "0.5631233", "text": "def run(self, max_workers=1, executor=None):\n\n if(max_workers==1):\n # Default run, no parallelization\n if self._input is not None:\n beam = self.beam()\n \n if self._input['start']['type'] == \"cathode\":\n status = ParticleStatus.CATHODE\n else:\n status = ParticleStatus.ALIVE\n \n self.particles = ParticleGroup(data=beam.data(status=status))\n self.output = self.particles\n vprint(f'Created particles in .particles: \\n {self.particles}', self.verbose > 0, 1, True)\n else:\n print('No input data specified.')\n \n #return self.output\n\n else:\n\n vprint(f'Creating particles in parallel with {max_workers} workers', self.verbose > 0, 0, True)\n if(executor is None):\n \n executor = ProcessPoolExecutor()\n executor.max_workers = max_workers\n\n vprint(f'Setting up workers...', self.verbose > 0, 1, False)\n generators = set_up_worker_generators(self, n_gen=max_workers)\n inputs = [gen.input for gen in generators]\n vprint(f'done.', self.verbose > 0, 0, True)\n \n # Run\n vprint(f'Executing worker tasks...', self.verbose > 0, 1, False)\n with executor as p:\n ps = list(p.map(worker_func, inputs))\n vprint(f'done', self.verbose > 0, 0, True)\n\n vprint(f'Collecting beamlets...', self.verbose > 0, 1, False)\n \n #P = ps[0]\n #for Pi in ps[1:]: P = P + Pi\n #vprint(f'done', self.verbose > 0, 0, True)\n\n #data = {k:np.hstack([pg[k] for pg in ps]) for k in ps[0].data.keys() if k not in ['species']}\n #data['species'] = ps[0].species\n \n self.particles = join_particle_groups(*ps)\n self.output = self.particles\n vprint(f'Created particles in .particles: \\n {self.particles}', self.verbose > 0, 1, True)\n\n return self.output", "title": "" }, { "docid": "20ed4a58734dcfc38df899c1fcef986a", "score": "0.56254774", "text": "def generate_mouse_random_ext_data(self):\n print('INFO: Creating random data sets.')\n ###### MULTIPROCESSING INSERT ######\n if __name__ == '__main__':\n cores = (multiprocessing.cpu_count()-1)\n pool = multiprocessing.Pool(processes=cores)\n print('INFO: Multiprocessing started')\n pool.map(multiprocess_generate_random_mouse_ext_data, range(1,1001))\n print('INFO: Multiprocessing completed')\n ###### END MULTIPROCESSING INSERT ######\n\n return", "title": "" }, { "docid": "6042990e1a74def41a5dce8bad347fcc", "score": "0.5623495", "text": "def spawn_processes_p(self):\n dm = self.dm\n print(\"\\t GPUSpawnerPersistent::__spawn_processes_p\")\n _lock = Lock()\n\n self.data = dm.data\n\n #for j in range(self.nFolds):\n # self.jobs.put(j) # Add a job index to the shared queue.\n\n for _g in range(0,self.nGPUS): #debug 1, for removing GPU[0]\n #if _g == 1: continue\n p = Process(target=spawn_gpu_process, args=(self.dm,_g, _lock, self.jobs, self.data,self.auc_means, self.acc_means, self.cqs[_g]))\n self.processes.append(p)\n p.start()", "title": "" }, { "docid": "dd8afbd6bfdac3593d5276d18685c154", "score": "0.5614742", "text": "def sync_results(self):\n # waits until there is no unfinished task\n\tself.all_tasks_done.acquire()\n\twhile self.unfinished_tasks:\n\t\tself.all_tasks_done.wait()\n\tself.all_tasks_done.release()\n self.barrier.wait()\n\t# 
updates data based on teh values from the scatter stage\n self.data = self.copy[:]\n self.barrier.wait()", "title": "" }, { "docid": "b5c4f9c779d637503f3c87db523d7c22", "score": "0.5613525", "text": "def parallelism(self):", "title": "" }, { "docid": "67fda58a0e796087ae49d7f377cdcbb2", "score": "0.5588029", "text": "def Spawn(*plist):\n _parallel(plist, False)", "title": "" }, { "docid": "5468cd9367a3a69ddaf8d26cd66d22d5", "score": "0.55876696", "text": "def scatter_v(array_data, displacements, shapes, comm=MPI.COMM_WORLD, root=0):\n rank = comm.Get_rank()\n #Transmit information needed to reconstruct array\n displacements = broadcast_array(displacements, root=root)\n shapes = broadcast_array(shapes, root=root)\n\n#TODO: Look into str/char buffer send for this operation\n array_dtype = np.sctype2char(array_data.dtype) if rank == root else None\n array_dtype = comm.bcast(array_dtype, root=root)\n\n counts = [np.prod(shape) for shape in shapes]\n local_data = np.empty(shapes[rank], dtype=np.dtype(array_dtype))\n\n #Scatter the array\n mpi_dtype = MPI._typedict[array_dtype]\n comm.Scatterv([array_data, counts, displacements, mpi_dtype],\n local_data, root=root)\n\n return local_data", "title": "" }, { "docid": "f74db16f07425141dc57e6c3194498ff", "score": "0.5570273", "text": "def fit_catalogs(catalogs):\n from time import sleep\n full_cat = full_catalogs(catalogs)\n for catalog in catalogs:\n print(\"The number of CPUs: \", count_available_cpu)\n # create a list to store the proccesses\n jobs = []\n # Going through different cores\n for i in range(count_available_cpu):\n # Making a proccess for fitting the SEDs\n p = mp.Process(target=main, args=(i, catalog, full_cat))\n jobs.append(p)\n # Start the process\n p.start()\n sleep(1)\n # Wait untill all of the processes are finished\n for process in jobs:\n process.join()\n\n print(catalog.upper() + \"'s SED fitting is finished!'\")", "title": "" }, { "docid": "23bbc8a6661de3d939928a7681c942b6", "score": "0.5568814", "text": "def scatter(self):\n vertices = self.target_verts[:]\n for obj in self.target_objs[:]:\n vertices += cmds.ls(obj + \".vtx[*]\", flatten=True)\n if self.scatter_density < 1.0:\n vertices = self._sample_vertices(vertices)\n obj_counts = []\n if 100 not in self.obj_proportions:\n for idx in range(len(self.obj_proportions)):\n count = int(self.obj_proportions[idx] / 100 * len(vertices))\n obj_counts.append(count)\n vertices = random.sample(vertices, len(vertices))\n self._instance_scatter_objects(vertices, obj_counts)", "title": "" }, { "docid": "531bc09e78b66ad4745e74c7d281cb52", "score": "0.5549347", "text": "def process(self, datasets):\n raise Exception('process() not overloaded')", "title": "" }, { "docid": "531bc09e78b66ad4745e74c7d281cb52", "score": "0.5549347", "text": "def process(self, datasets):\n raise Exception('process() not overloaded')", "title": "" }, { "docid": "531bc09e78b66ad4745e74c7d281cb52", "score": "0.5549347", "text": "def process(self, datasets):\n raise Exception('process() not overloaded')", "title": "" }, { "docid": "e073bbd60f0621dc1b44d7aba87ad323", "score": "0.5512807", "text": "def parallel():\n p = Pool(CPU_COUNT)\n p.map(worker, gen(), chunksize=2)\n p.close()\n p.join()", "title": "" }, { "docid": "4352741c49a37ca9ba365c8e6f5678f0", "score": "0.5502961", "text": "def data_pipeline() -> co.Serial:\n # Dockerfile installs python, R, and conducto.\n image = co.Image(\n dockerfile=\"docker/Dockerfile.data\", context=\".\", copy_dir=\"./code\", reqs_py=[\"conducto\"]\n )\n\n data_dir = 
\"demo/data_science/data\"\n\n output = co.Serial(image=image, doc=co.util.magic_doc())\n output[\"usage\"] = co.Exec(\"conducto-data-pipeline --help\")\n\n output[\"parameter_search\"] = ps = co.Parallel()\n\n for window in [25, 50, 100]:\n ps[f\"window={window}\"] = w = co.Parallel()\n\n for mean in [.05, .08, .11]:\n w[f\"mean={mean}\"] = m = co.Parallel()\n\n for volatility in [.1, .125, .15, .2]:\n m[f\"volatility={volatility}\"] = co.Exec(\n f\"python data.py --window={window} --mean={mean} \"\n f\"--volatility={volatility} --data-dir={data_dir}\"\n )\n\n output[\"summarize\"] = co.Exec(f\"Rscript data.R {data_dir}\")\n\n return output", "title": "" }, { "docid": "d0fce2c54c28709a9549eb43c5142b93", "score": "0.5496973", "text": "def process_pairs(data_sets=all_data_sets, multithreaded=False):\n it = iter(data_sets)\n pairs = zip(it, it)\n for data_set1, data_set2 in pairs:\n print(f\"### Comapring graph populations defined by:\\n\"\n f\"\\t- {data_set1}\\n\"\n f\"\\t- {data_set2}\")\n results1 = process_data_set(data_set1, True, multithreaded)\n results2 = process_data_set(data_set2, True, multithreaded)\n plot_results2(data_set1, data_set2, results1, results2, \"horizontal\")\n plot_results2(data_set1, data_set2, results1, results2, \"vertical\")", "title": "" }, { "docid": "1c4e6d4ad6106d7da91823cab82db239", "score": "0.5484823", "text": "def _do_train_job(self, data_iterable:List[tuple], target:ndarray, memory:ndarray) -> [int, int]:\n raise NotImplementedError()", "title": "" }, { "docid": "33240fdfcbec5404cdb781c1b20860ad", "score": "0.54788", "text": "def assign_processes(self, processes):", "title": "" }, { "docid": "b8e8049d7a3a2e19db6edb5151e8c6c7", "score": "0.5472449", "text": "def plot_scatter(self):\n import matplotlib.pyplot as plt\n\n self.model.eval()\n\n pred, truth = {'train': [], 'valid': []}, {\n 'train': [], 'valid': []}\n\n for data in self.train_loader:\n data = data.to(self.device)\n truth['train'] += data.y.tolist()\n pred['train'] += self.model(data).reshape(-1).tolist()\n\n for data in self.valid_loader:\n data = data.to(self.device)\n truth['valid'] += data.y.tolist()\n pred['valid'] += self.model(data).reshape(-1).tolist()\n\n plt.scatter(truth['train'], pred['train'], c='blue')\n plt.scatter(truth['valid'], pred['valid'], c='red')\n plt.show()", "title": "" }, { "docid": "bf8f69146298887948a27146ef31f6da", "score": "0.5468657", "text": "def map(self, function, data, extra_args=None, extra_kwargs=None, **kwargs):\n raise NotImplementedError(\"Parallelizer is an abstract base class\")", "title": "" }, { "docid": "8c8eb99db4e66e133f45b1efc8683912", "score": "0.54424614", "text": "def parallel( self, *args, **kwargs ):\n return self._decorator( 'parallel', *args, **kwargs )", "title": "" }, { "docid": "8848e263f46875153d164973800439a4", "score": "0.54122376", "text": "def plot_everything(self, num_processors=70, num_points=3):\n\n values_to_plot = []\n corr = []\n for i, detuning in enumerate(self.detunings):\n values = (np.geomspace(1, 2**(num_points - 1), num_points) - 1)/(2**(num_points-1) - 1) * detuning[0]\n values = [-value for value in values[::-1]] + list(values[1:])\n # values = np.linspace(-detuning[0], detuning[0], num_points)\n # print(values)\n values_to_plot.append(values)\n corr.append(i)\n combinations = itertools.product(*values_to_plot)\n new_combinations = []\n for combo in combinations:\n new_combo = []\n for index in corr:\n new_combo.append(combo[index])\n new_combinations.append(new_combo)\n combinations = new_combinations\n pool = 
multiprocessing.Pool(num_processors)\n lst = [(self.controlset, self.ambient_hamiltonian, combo, self.dt,\n self.control_hamiltonians, self.target_operator, self.probs)\n for combo in combinations]\n projs_fidelities = pool.map(compute_dpn_and_fid, lst)\n pool.close()\n projs = [pf[0] for pf in projs_fidelities]\n fidelities = [pf[1] for pf in projs_fidelities]\n\n # projs2 = []\n # for proj in projs:\n # from numbers import Number\n # if not isinstance(proj, Number):\n # projs2.append(proj)\n #\n # projs = projs2\n projs = np.vstack(projs).T\n fidelities = np.vstack(fidelities).T\n plt.figure(1, figsize=(16, 8)) # the first figure\n plt.subplot(211) # the first subplot in the first figure\n\n tuple_length = len(combinations[0])\n standard_ordering = list(range(tuple_length))\n ordering = standard_ordering\n # Switch first index\n\n #ordering[0], ordering[1] = ordering[1], ordering[0]\n print(tuple_length)\n\n indices = generate_indices(len(values), ordering)\n\n for i, row in enumerate(projs[:-1, :]):\n reordered_row = np.array([row[j] for j in indices])\n plt.plot(range(len(row)), reordered_row)\n plt.plot(range(len(projs[-1, :])), [projs[-1, :][i] for i in indices], label=\"min\", color='k', linewidth=2, zorder=10)\n plt.legend()\n plt.ylabel(\"Absolute Sum of Off Diagonal Elements\")\n plt.semilogy()\n\n plt.subplot(212) # the second subplot in the first figure\n for i, row in enumerate(fidelities[:-1, :]):\n reordered_row = np.array([row[j] for j in indices])\n plt.plot(range(len(row)), -np.log(1 - reordered_row))\n plt.plot(range(len(fidelities[-1, :])), [-np.log(1 - fidelities[-1, :][i]) for i in indices], label=\"min\", color='k', linewidth=2, zorder=10)\n plt.legend()\n plt.ylabel(\"f\")\n samples = np.linspace(plt.ylim()[0], plt.ylim()[1], 11)\n labels = -(np.exp(-samples) - 1)\n plt.xlabel(\"Sample Index\")\n plt.tight_layout()\n plt.yticks(samples, labels)\n plt.tight_layout()", "title": "" }, { "docid": "cb9d0d86487a4c31d53d39cb0602f886", "score": "0.5380881", "text": "def compute(self, n_points=None, chunk_size=None, predictor=None):\r\n cluster_mode = core._cluster_mode()\r\n if n_points is None:\r\n n_points = self.batch_size\r\n if chunk_size is None:\r\n chunk_size = self.chunk_size\r\n \r\n graph_dict = core.get_graph_chunked(self.sampling.draw, \r\n self.simulator,\r\n self.summaries.compute, \r\n batch_size=n_points,\r\n chunk_size=chunk_size)\r\n pred = []\r\n if predictor is not None:\r\n if callable(predictor):\r\n pred = core.get_prediction(predictor, graph_dict[\"summarystats\"])\r\n else:\r\n raise ValueError(\"The predictor must be a callable function\")\r\n # persist at workers, will run in background\r\n if cluster_mode:\r\n params_res, processed_res, result_res, pred_res = persist(graph_dict[\"parameters\"], \r\n graph_dict[\"trajectories\"], \r\n graph_dict[\"summarystats\"],\r\n pred)\r\n # convert to futures\r\n futures = core.get_futures(result_res)\r\n f_pred = core.get_futures(pred_res)\r\n f_params = core.get_futures(params_res)\r\n f_ts = core.get_futures(processed_res)\r\n\r\n # keep track of indices...\r\n f_dict = {f.key: idx for idx, f in enumerate(f_pred)}\r\n # ..as we collect result on a \"as completed\" basis\r\n for f, pred in as_completed(f_pred, with_results=True):\r\n idx = f_dict[f.key]\r\n # get the parameter point\r\n params = f_params[idx].result()\r\n # get the trajatories\r\n trajs = f_ts[idx].result()\r\n #get summary stats\r\n stats = futures[idx].result()\r\n # add to data collection\r\n param = np.asarray(params)\r\n traj = 
np.asarray(trajs)\r\n stats = np.asarray(stats)\r\n pred = np.asarray(pred)\r\n self.data.add_points(inputs=param, time_series=traj,\r\n summary_stats=stats, user_labels=np.ones(len(stats))*-1,\r\n targets=pred)\r\n else:\r\n params_res, processed_res, result_res, pred_res = compute(graph_dict[\"parameters\"], \r\n graph_dict[\"trajectories\"], \r\n graph_dict[\"summarystats\"],\r\n pred)\r\n for e, pred in enumerate(pred_res):\r\n param = np.asarray(params_res[e])\r\n ts = np.asarray(processed_res[e])\r\n stats = np.asarray(result_res[e])\r\n pred = np.asarray(pred)\r\n self.data.add_points(inputs=param, time_series=ts,\r\n summary_stats=stats, user_labels=np.ones(len(pred))*-1,\r\n targets=pred)\r\n\r\n\r\n else:\r\n # TODO: avoid redundancy...\r\n if cluster_mode:\r\n params_res, processed_res, result_res = persist(graph_dict[\"parameters\"], \r\n graph_dict[\"trajectories\"], \r\n graph_dict[\"summarystats\"])\r\n\r\n # convert to futures\r\n futures = core.get_futures(result_res)\r\n f_params = core.get_futures(params_res)\r\n f_ts = core.get_futures(processed_res)\r\n\r\n # keep track of indices...\r\n f_dict = {f.key: idx for idx, f in enumerate(futures)}\r\n # ..as we collect result on a \"as completed\" basis\r\n for f, res in as_completed(futures, with_results=True):\r\n idx = f_dict[f.key]\r\n # get the parameter point\r\n params = f_params[idx].result()\r\n # get the trajatories\r\n trajs = f_ts[idx].result()\r\n # add to data collection\r\n param = np.asarray(params)\r\n traj = np.asarray(trajs)\r\n res = np.asarray(res)\r\n self.data.add_points(inputs=param, time_series=traj,\r\n summary_stats=res, user_labels=np.ones(len(res))*-1)\r\n else:\r\n params_res, processed_res, result_res = compute(graph_dict[\"parameters\"], \r\n graph_dict[\"trajectories\"], \r\n graph_dict[\"summarystats\"])\r\n for e, res in enumerate(result_res):\r\n param = np.asarray(params_res[e])\r\n ts = np.asarray(processed_res[e])\r\n res = np.asarray(res)\r\n self.data.add_points(inputs=param, time_series=ts,\r\n summary_stats=res, user_labels=np.ones(len(res))*-1)", "title": "" }, { "docid": "9a91856ec20e955a6d25b282afcf4e99", "score": "0.5359963", "text": "def draw(self):\n self.duplet = list(self._chunker(self.attr, 2))\n colors = self._set_colors(self.duplet)\n\n for i, duplet in enumerate(self.duplet, start=1):\n self.chart.make_scatter(self.source, duplet[0], duplet[1], i, colors[i - 1])", "title": "" }, { "docid": "1a922c285da04f73d8227a2ff8719086", "score": "0.5330845", "text": "def scatter_obj(self, data, root=0, **kwargs):\n if self.location == root:\n locs = list(self.locations) # gotta be safe\n nlocs = len(locs)\n chunk_size = len(data) // nlocs\n chunk_sizes = [chunk_size] * nlocs\n for i in range(len(data) - (chunk_size*nlocs)):\n chunk_sizes[i] += 1\n\n s = chunk_sizes[0]\n main_data = data[:s]\n for i, b in zip(locs[1:], chunk_sizes[1:]):\n chunk = data[s:s + b]\n self.send(chunk, i, **kwargs)\n s += b\n return main_data", "title": "" }, { "docid": "769accab07a1aee782b0a50918f1a2eb", "score": "0.5306661", "text": "def plot_scatter():\n pass", "title": "" }, { "docid": "f357657155ed036de794a5c2944c40ee", "score": "0.5284634", "text": "def _process_multi_core(self, capacity_class, tie_line_voltage,\n sc_point_gids, nn_sinks=2,\n clipping_buffer=1.05, barrier_mult=100,\n max_workers=2, save_paths=False, radius=None,\n expand_radius=True, mp_delay=3, simplify_geo=None):\n least_costs = []\n num_jobs = 0\n loggers = [__name__, 'reV', 'reVX']\n with SpawnProcessPool(max_workers=max_workers, 
loggers=loggers) as exe:\n futures = []\n for _, sc_point in self.sc_points.iterrows():\n gid = sc_point['sc_point_gid']\n if gid in sc_point_gids:\n sc_features, sc_radius = self._clip_to_sc_point(\n sc_point, tie_line_voltage, nn_sinks=nn_sinks,\n clipping_buffer=clipping_buffer, radius=radius,\n expand_radius=expand_radius)\n if sc_features.empty:\n continue\n\n future = exe.submit(TransCapCosts.run,\n self._cost_fpath,\n sc_point.copy(deep=True),\n sc_features, capacity_class,\n radius=sc_radius,\n xmission_config=self._config,\n barrier_mult=barrier_mult,\n min_line_length=self._min_line_len,\n save_paths=save_paths,\n simplify_geo=simplify_geo)\n futures.append(future)\n\n num_jobs += 1\n if num_jobs <= max_workers:\n time.sleep(mp_delay)\n\n logger.debug('Completed kicking off {} jobs for {} workers.'\n .format(num_jobs, max_workers))\n for i, future in enumerate(as_completed(futures), start=1):\n sc_costs = future.result()\n if sc_costs is not None:\n least_costs.append(sc_costs)\n\n logger.info('SC point {} of {} complete!'\n .format(i, len(futures)))\n log_mem(logger)\n\n return least_costs", "title": "" }, { "docid": "bc0e142c7461eb7c05fd22e897720409", "score": "0.5273677", "text": "def distibute_three_processors():\n\n\n myid = par.rank()\n par.numprocs = par.size()\n\n if not par.numprocs == 3: return\n\n #print par.numprocs\n\n par.barrier()\n\n if myid == 0:\n\n points, vertices, boundary = rectangular_cross(2,2)\n\n domain = Domain(points, vertices, boundary)\n\n\n domain.set_quantity('elevation', topography) # Use function for elevation\n domain.set_quantity('friction', 0.0) # Constant friction\n domain.set_quantity('stage', expression='elevation') # Dry initial stage\n domain.set_quantity('xmomentum', expression='friction + 2.0') #\n domain.set_quantity('ymomentum', ycoord) #\n\n #----------------------------------------------------------------------------------\n # Test pmesh_divide_metis\n #----------------------------------------------------------------------------------\n nodes, triangles, boundary, triangles_per_proc, quantities = pmesh_divide_metis(domain,par.numprocs)\n\n assert_(num.allclose(nodes,points))\n\n true_vertices = [[0, 9, 1], [3, 9, 0], [4, 9, 3], [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5], [3, 11, 4], [6, 11, 3], [7, 11, 6], [4, 11, 7], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8]]\n\n true_triangles = [[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8], [0, 9, 1], [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5], [3, 9, 0], [3, 11, 4], [6, 11, 3], [7, 11, 6], [4, 11, 7]]\n\n assert_(num.allclose(vertices,true_vertices))\n assert_(num.allclose(triangles,true_triangles))\n\n assert_(num.allclose(triangles_per_proc,[5,6,5]))\n\n\n #----------------------------------------------------------------------------------\n # Test build_submesh\n #----------------------------------------------------------------------------------\n submesh = build_submesh(nodes, triangles, boundary, quantities, triangles_per_proc)\n\n\n assert_(num.allclose(submesh['full_nodes'][0],[[3.0, 0.5, 0.0], [4.0, 0.5, 0.5], [5.0, 0.5, 1.0], [7.0, 1.0, 0.5], [8.0, 1.0, 1.0], [9.0, 0.25, 0.25], [12.0, 0.75, 0.75]]))\n assert_(num.allclose(submesh['full_nodes'][1],[[0.0, 0.0, 0.0], [1.0, 0.0, 0.5], [2.0, 0.0, 1.0], [4.0, 0.5, 0.5], [5.0, 0.5, 1.0], [9.0, 0.25, 0.25], [10.0, 0.25, 0.75]]))\n assert_(num.allclose(submesh['full_nodes'][2],[[0.0, 0.0, 0.0], [3.0, 0.5, 0.0], [4.0, 0.5, 0.5], [6.0, 1.0, 0.0], [7.0, 1.0, 0.5], [9.0, 0.25, 0.25], [11.0, 0.75, 
0.25]]))\n\n\n assert_(num.allclose(submesh['ghost_nodes'][0],[[0.0, 0.0, 0.0], [1.0, 0.0, 0.5], [2.0, 0.0, 1.0], [6.0, 1.0, 0.0], [10.0, 0.25, 0.75], [11.0, 0.75, 0.25]]))\n assert_(num.allclose(submesh['ghost_nodes'][1],[[3.0, 0.5, 0.0], [7.0, 1.0, 0.5], [8.0, 1.0, 1.0], [11.0, 0.75, 0.25], [12.0, 0.75, 0.75]]))\n assert_(num.allclose(submesh['ghost_nodes'][2],[[1.0, 0.0, 0.5], [5.0, 0.5, 1.0], [8.0, 1.0, 1.0], [12.0, 0.75, 0.75]]))\n\n\n\n true_full_triangles = [num.array([[ 4, 9, 3],\n [ 4, 12, 5],\n [ 7, 12, 4],\n [ 8, 12, 7],\n [ 5, 12, 8]]),\n num.array([[ 0, 9, 1],\n [ 1, 9, 4],\n [ 1, 10, 2],\n [ 4, 10, 1],\n [ 5, 10, 4],\n [ 2, 10, 5]]),\n num.array([[ 3, 9, 0],\n [ 3, 11, 4],\n [ 6, 11, 3],\n [ 7, 11, 6],\n [ 4, 11, 7]])]\n\n\n assert_(num.allclose(submesh['full_triangles'][0],true_full_triangles[0]))\n assert_(num.allclose(submesh['full_triangles'][1],true_full_triangles[1]))\n assert_(num.allclose(submesh['full_triangles'][2],true_full_triangles[2]))\n\n true_ghost_triangles = [num.array([[ 5, 0, 9, 1],\n [ 6, 1, 9, 4],\n [ 8, 4, 10, 1],\n [ 9, 5, 10, 4],\n [10, 2, 10, 5],\n [11, 3, 9, 0],\n [12, 3, 11, 4],\n [13, 6, 11, 3],\n [14, 7, 11, 6],\n [15, 4, 11, 7]]),\n num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 4, 5, 12, 8],\n [11, 3, 9, 0],\n [12, 3, 11, 4]]),\n num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 3, 8, 12, 7],\n [ 5, 0, 9, 1],\n [ 6, 1, 9, 4]])]\n\n\n\n assert_(num.allclose(submesh['ghost_triangles'][0],true_ghost_triangles[0]))\n assert_(num.allclose(submesh['ghost_triangles'][1],true_ghost_triangles[1]))\n assert_(num.allclose(submesh['ghost_triangles'][2],true_ghost_triangles[2]))\n\n true_full_commun = [{0: [1, 2], 1: [1, 2], 2: [1, 2], 3: [2], 4: [1]}, {5: [0, 2], 6: [0, 2], 7: [], 8: [0], 9: [0], 10: [0]}, {11: [0, 1], 12: [0, 1], 13: [0], 14: [0], 15: [0]}]\n\n assert_(true_full_commun == submesh['full_commun'])\n\n\n true_ghost_commun = [num.array([[ 5, 1],\n [ 6, 1],\n [ 8, 1],\n [ 9, 1],\n [10, 1],\n [11, 2],\n [12, 2],\n [13, 2],\n [14, 2],\n [15, 2]]),\n num.array([[ 0, 0],\n [ 1, 0],\n [ 2, 0],\n [ 4, 0],\n [11, 2],\n [12, 2]]),\n num.array([[0, 0],\n [1, 0],\n [2, 0],\n [3, 0],\n [5, 1],\n [6, 1]])]\n\n assert_(num.allclose(submesh['ghost_commun'][0],true_ghost_commun[0]))\n assert_(num.allclose(submesh['ghost_commun'][1],true_ghost_commun[1]))\n assert_(num.allclose(submesh['ghost_commun'][2],true_ghost_commun[2]))\n\n\n\n par.barrier()\n #--------------------------------\n # Now do the comunnication part\n #--------------------------------\n\n\n if myid == 0:\n #----------------------------------------------------------------------------------\n # Test send_submesh\n #----------------------------------------------------------------------------------\n for p in range(1, par.numprocs):\n send_submesh(submesh, triangles_per_proc, p, verbose=False)\n\n #----------------------------------------------------------------------------------\n # Test extract_submesh\n #----------------------------------------------------------------------------------\n points, vertices, boundary, quantities, \\\n ghost_recv_dict, full_send_dict, tri_map, node_map, ghost_layer_width =\\\n extract_submesh(submesh, triangles_per_proc)\n\n\n true_points = [[0.5, 0.0], [0.5, 0.5], [0.5, 1.0], [1.0, 0.5], [1.0, 1.0], [0.25, 0.25], [0.75, 0.75], [0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [1.0, 0.0], [0.25, 0.75], [0.75, 0.25]]\n\n true_vertices = [[1, 5, 0], [1, 6, 2], [3, 6, 1], [4, 6, 3], [2, 6, 4], [7, 5, 8], [8, 5, 1], [1, 11, 8], [2, 11, 1], [9, 
11, 2], [0, 5, 7], [0, 12, 1], [10, 12, 0], [3, 12, 10], [1, 12, 3]]\n\n\n true_ghost_recv = {1: [num.array([5, 6, 7, 8, 9]), num.array([ 5, 6, 8, 9, 10])], 2: [num.array([10, 11, 12, 13, 14]), num.array([11, 12, 13, 14, 15])]}\n\n\n true_full_send = {1: [num.array([0, 1, 2, 4]), num.array([0, 1, 2, 4])], 2: [num.array([0, 1, 2, 3]), num.array([0, 1, 2, 3])]}\n\n assert_(num.allclose(ghost_layer_width, 2))\n assert_(num.allclose(points, true_points))\n assert_(num.allclose(vertices, true_vertices))\n assert_(num.allclose(ghost_recv_dict[1],true_ghost_recv[1]))\n assert_(num.allclose(ghost_recv_dict[2],true_ghost_recv[2]))\n assert_(num.allclose(full_send_dict[1],true_full_send[1]))\n assert_(num.allclose(full_send_dict[2],true_full_send[2]))\n\n #print triangles_per_proc\n\n else:\n #----------------------------------------------------------------------------------\n # Test rec_submesh\n #----------------------------------------------------------------------------------\n points, vertices, boundary, quantities, \\\n ghost_recv_dict, full_send_dict, \\\n no_full_nodes, no_full_trigs, tri_map, node_map, ghost_layer_width = \\\n rec_submesh(0, verbose=False)\n\n if myid == 1:\n\n\n true_points = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.5], [0.5, 1.0], [0.25, 0.25], [0.25, 0.75], [0.5, 0.0], [1.0, 0.5], [1.0, 1.0], [0.75, 0.25], [0.75, 0.75]]\n\n true_vertices = [[0, 5, 1], [1, 5, 3], [1, 6, 2], [3, 6, 1], [4, 6, 3], [2, 6, 4], [3, 5, 7], [3, 11, 4], [8, 11, 3], [4, 11, 9], [7, 5, 0], [7, 10, 3]]\n\n true_ghost_recv = {0: [num.array([6, 7, 8, 9]), num.array([0, 1, 2, 4])], 2: [num.array([10, 11]), num.array([11, 12])]}\n\n true_full_send = {0: [num.array([0, 1, 3, 4, 5]), num.array([ 5, 6, 8, 9, 10])], 2: [num.array([0, 1]), num.array([5, 6])]}\n\n true_tri_map = num.array([ 6, 7, 8, -1, 9, 0, 1, 2, 3, 4, 5, 10, 11])\n\n true_node_map = num.array([ 0, 1, 2, 7, 3, 4, -1, 8, 9, 5, 6, 10, 11])\n\n assert_(num.allclose(ghost_layer_width, 2))\n assert_(num.allclose(tri_map, true_tri_map))\n assert_(num.allclose(node_map, true_node_map))\n assert_(num.allclose(points, true_points))\n assert_(num.allclose(vertices, true_vertices))\n assert_(num.allclose(ghost_recv_dict[0],true_ghost_recv[0]))\n assert_(num.allclose(ghost_recv_dict[2],true_ghost_recv[2]))\n assert_(num.allclose(full_send_dict[0],true_full_send[0]))\n assert_(num.allclose(full_send_dict[2],true_full_send[2]))\n\n\n if myid == 2:\n\n true_points = [[0.0, 0.0], [0.5, 0.0], [0.5, 0.5], [1.0, 0.0], [1.0, 0.5], [0.25, 0.25], [0.75, 0.25], [0.0, 0.5], [0.5, 1.0], [1.0, 1.0], [0.75, 0.75]]\n\n true_vertices = [[1, 5, 0], [1, 6, 2], [3, 6, 1], [4, 6, 3], [2, 6, 4], [2, 5, 1], [2, 10, 8], [4, 10, 2], [9, 10, 4], [0, 5, 7], [7, 5, 2]]\n\n true_ghost_recv = {0: [num.array([5, 6, 7, 8]), num.array([0, 1, 2, 3])], 1: [num.array([ 9, 10]), num.array([5, 6])]}\n\n true_full_send = {0: [num.array([0, 1, 2, 3, 4]), num.array([11, 12, 13, 14, 15])], 1: [num.array([0, 1]), num.array([11, 12])]}\n\n true_tri_map = num.array([5, 6, 7, 8, -1, 9, 10, -1, -1, -1, -1, 0, 1, 2, 3, 4, -1])\n\n true_node_map = num.array([ 0, 7, -1, 1, 2, 8 , 3, 4, 9, 5, -1, 6, 10])\n\n assert_(num.allclose(ghost_layer_width, 2))\n assert_(num.allclose(tri_map, true_tri_map))\n assert_(num.allclose(node_map, true_node_map))\n assert_(num.allclose(points, true_points))\n assert_(num.allclose(vertices, true_vertices))\n assert_(num.allclose(ghost_recv_dict[0],true_ghost_recv[0]))\n assert_(num.allclose(ghost_recv_dict[1],true_ghost_recv[1]))\n 
assert_(num.allclose(full_send_dict[0],true_full_send[0]))\n assert_(num.allclose(full_send_dict[1],true_full_send[1]))", "title": "" }, { "docid": "8fd686439f76ebb679cf803ec1f5f402", "score": "0.527245", "text": "def test31():\n\n # CASE: Scattering!\n # What triggers scattering?\n # How does a workflow know to scatter, and on what values?\n # STEP 1: The CWL specification must know that it must scatter\n # STEP 2: The inputs must have one or more entries for the scatterable value\n\n pass", "title": "" }, { "docid": "c6274b436c01d244ffbb4069ef8e110f", "score": "0.52720636", "text": "def receive(self, data, loc, **kwargs):\n raise NotImplementedError(\"Parallelizer is an abstract base class\")", "title": "" }, { "docid": "a3c95fd7c13c7b778263783c8c618771", "score": "0.5247113", "text": "def process(self):\n # init Queue\n manager = mp.Manager()\n q = manager.Queue(maxsize=self.n_workers * 2)\n # init listener\n writer = mp.Process(target=self.save_data, args=[q])\n writer.start()\n # init pool\n pool = mp.Pool(self.n_workers)\n # init SMILES generator\n data = self._get_data()\n pb = tqdm(data, total=self.len(), desc=\"Load tasks: \")\n # main loop\n if not self.atom_feat_format:\n worker = self.create_graph\n else:\n worker = self._create_contextpred_graph\n for i, smi in enumerate(pb):\n pool.apply_async(worker, args=[smi, i, q])\n # finish the tasks\n pool.close()\n pool.join()\n q.put(\"END\")\n writer.join()", "title": "" }, { "docid": "3d9a83dfab3f75516568d91c902bf2d6", "score": "0.5246472", "text": "def _process_catalogue(single_halo_method,\n labels: List[str],\n concurrent_threading: bool = False,\n no_multithreading: bool = False) -> pd.DataFrame:\n # Print the CLI arguments that are parsed in the script\n\n if xlargs.refresh:\n _zooms_register = zooms_register\n else:\n _zooms_register = []\n for keyword in xlargs.keywords:\n for zoom in zooms_register:\n if keyword in zoom.run_name and zoom not in _zooms_register:\n _zooms_register.append(zoom)\n\n _name_list = [zoom.run_name for zoom in _zooms_register]\n\n if len(_zooms_register) == 1:\n print(\"Analysing one object only. 
Not using multiprocessing features.\")\n results = [single_halo_method(_zooms_register[0])]\n\n else:\n\n if no_multithreading:\n print(f\"Running with no multithreading.\\nAnalysing {len(_zooms_register):d} zooms serially.\")\n results = []\n for i, zoom in enumerate(_zooms_register):\n print(f\"({i + 1}/{len(_zooms_register)}) Processing: {zoom.run_name}\")\n results.append(\n single_halo_method(zoom)\n )\n\n else:\n\n print(\"Running with multithreading.\")\n num_threads = len(_zooms_register) if len(_zooms_register) < cpu_count() else cpu_count()\n print(f\"Analysis of {len(_zooms_register):d} zooms mapped onto {num_threads:d} CPUs.\")\n\n threading_engine = Pool(num_threads)\n if concurrent_threading:\n threading_engine = ProcessPoolExecutor(max_workers=num_threads)\n\n try:\n # The results of the multiprocessing Pool are returned in the same order as inputs\n with threading_engine as pool:\n results = list(tqdm(\n pool.imap(single_halo_method, iter(_zooms_register)),\n total=len(_zooms_register),\n disable=xlargs.quiet\n ))\n except Exception as error:\n print((\n f\"The analysis stopped due to the error\\n{error}\\n\"\n \"Please use a different multiprocessing pool or run the code serially.\"\n ))\n raise error\n\n # Recast output into a Pandas dataframe for further manipulation\n results = pd.DataFrame(list(results), columns=labels)\n results.insert(0, 'Run_name', pd.Series(_name_list, dtype=str))\n if xlargs.debug:\n print(\n f\"z = {calibration_zooms.redshift_from_index(xlargs.redshift_index):.2f}\"\n \"\\nOutput dataframe (head only):\\n\",\n results.head()\n )\n\n return results", "title": "" }, { "docid": "c9b713b50fc518133599e8e5538803ce", "score": "0.52392393", "text": "def send(self, data, loc, **kwargs):\n raise NotImplementedError(\"Parallelizer is an abstract base class\")", "title": "" }, { "docid": "4bcdbda6f2170594c6a7ab5de20a03e6", "score": "0.5238542", "text": "def run_all_experiments(n_cores = 3):\n seeds = [41,42,43]\n pool = Pool(n_cores)\n pool.map(multi_mcpg, seeds)\n pool.close()\n\n make_figures_mcpg()", "title": "" }, { "docid": "73409c2b9c8ad8bbe189466ab3371975", "score": "0.5236708", "text": "def _prepare_call(self, name, type_, parallel, *args):\n if type_ == 'initial':\n training_windows = args[0]\n testing_windows = args[1]\n args = list(args)\n args.pop(0)\n args.pop(0)\n\n for train in training_windows:\n for testing in testing_windows:\n test = int(train*testing)\n yield [self._get_class(), self._get_dict(), name, [train] + [test] + args]\n else:\n #divide data to allow faster run\n data_init = args[0]\n length = data_init.get_length()\n window = int(length/self.scaling) + 1 \n args = list(args)\n args.pop(0)\n for scale in range(0, min(self.scaling, length)):\n data_temp = data_init.slice((scale*window), (scale + 1)*window)\n if not args:\n yield [self._get_class(), self._get_dict(), name, [data_temp] + [parallel]]\n else:\n yield [self._get_class(), self._get_dict(), name, [data_temp] + list(args) + [parallel]]\n if type_ == 'sentiment':\n self.data = data_init", "title": "" }, { "docid": "06b0e7f4d784234dde9b1fbd45708622", "score": "0.5234817", "text": "def multi_cluster():\n pass", "title": "" }, { "docid": "4d3c10a409c0066d619a1addebaa7abc", "score": "0.5223614", "text": "def map(self, func, data, input_shape=None, output_shape=None, **kwargs):\n sub_data = self.scatter(data, shape=input_shape, **kwargs)\n res = func(sub_data)\n return self.gather(res, shape=output_shape, **kwargs)", "title": "" }, { "docid": "f375ec6fec49b0a431cbcec5905ca6d3", 
"score": "0.52178365", "text": "def start(self):\n for task_type, addresses in self._cluster_spec.items():\n for task_id, _ in enumerate(addresses):\n p = multi_process_lib.Process(\n target=self._proc_func_wrapper,\n args=(task_type, task_id) + self._args,\n kwargs=self._kwargs)\n p.start()\n self._processes.append(p)", "title": "" }, { "docid": "4fb326265014b11edecd85b5e4e94525", "score": "0.5214037", "text": "def pair_plot_scatter(axes, data_x: List[np.array], data_y: List[np.array]) -> None:\n colors = ['red', 'yellow', 'blue', 'green']\n for idx, _ in enumerate(data_x):\n axes.scatter(data_x[idx], data_y[idx], s=1, color=colors[idx], alpha=0.5)", "title": "" }, { "docid": "0bee6545bf70c6352f66ceb772b4b176", "score": "0.5203503", "text": "def test_reduce_scatter_process_group_size(self):\n test_fn = functools.partial(self._test_reduce_scatter_process_group_size, config={})\n spawn_and_init(test_fn, world_sizes=[2])", "title": "" }, { "docid": "62f5d4413dec18bdd186c08e5243ce5a", "score": "0.5187359", "text": "def parallelize(self):\n # Create a pool of workers to execute processes\n pool = mp.Pool(processes=self.args.nproc)\n\n # Map (service, tasks), applies function to each partition\n self.wrong_findings = pool.map(self.process_wiki, self.partitions)\n pool.close()\n pool.join()", "title": "" }, { "docid": "620ec3686cb777e93ad0e2d47a18dd0a", "score": "0.51723224", "text": "def generate_scatter_plot(self):\n\n # make Scatter plots\n regex_data = go.Scatter(\n x=list(self.regex.keys()),\n y=list(self.regex.values()),\n name='REGEX',\n line=dict(\n color=('rgb(205, 12, 24)'),\n width=4\n )\n )\n\n pypdf2_data = go.Scatter(\n x=list(self.pypdf2.keys()),\n y=list(self.pypdf2.values()),\n name='PYPDF2',\n line=dict(\n color=('rgb(22, 96, 167)'),\n width=4\n )\n )\n\n pdfrw_data = go.Scatter(\n x=list(self.pdfrw.keys()),\n y=list(self.pdfrw.values()),\n name='PDFRW',\n line=dict(\n color=('rgb(102, 204, 0)'),\n width=4,\n )\n )\n\n pdfquery_data = go.Scatter(\n x=list(self.pdfquery.keys()),\n y=list(self.pdfquery.values()),\n name='PDFQUERY',\n line=dict(\n color=('rgb(178, 102, 255)'),\n width=4,\n )\n )\n\n tika_data = go.Scatter(\n x=list(self.tika.keys()),\n y=list(self.tika.values()),\n name='TIKA',\n line=dict(\n color=('rgb(255, 255, 0)'),\n width=4,\n )\n )\n\n pdfminer_data = go.Scatter(\n x=list(self.pdfminer.keys()),\n y=list(self.pdfminer.values()),\n name='PDFMINER',\n line=dict(\n color=('rgb(204, 0, 102)'),\n width=4,\n )\n )\n\n data = [\n regex_data,\n pypdf2_data,\n pdfrw_data,\n pdfquery_data,\n tika_data,\n pdfminer_data\n ]\n\n layout = self._make_layout()\n\n fig = go.Figure(\n data=data,\n layout=layout\n )\n\n opy.plot(\n fig,\n filename='./plots/pdfs_performance_scatter.html'\n )", "title": "" }, { "docid": "ffc701fdfe0cf8d8529f96d8edd487f9", "score": "0.5137441", "text": "def _parallel_python_execution(self):\n\n # Code updated to use the multiprocessing package by MDS, 7/24/19\n\n if self.verbose:\n print('\\nPerforming parallel execution of the Python model.\\n')\n\n import multiprocessing as mp\n import UQpy.Utilities as Utilities\n\n sample = []\n pool = mp.Pool(processes=self.nsim)\n\n for i in range(self.nsim):\n sample.append([self.model_script, self.model_object_name, self.samples[i]])\n\n results = pool.starmap(Utilities._run_parallel_python, sample)\n\n for i in range(self.nsim):\n if self.model_is_class:\n self.qoi_list[i] = results[i].qoi\n else:\n self.qoi_list[i] = results[i]", "title": "" }, { "docid": "71363a6a0924a4191518e7ef2d525b2b", "score": 
"0.5121697", "text": "def operate(self):\n x_train, y_train, x_test, y_test = self.split_data()\n proc_data = self.process_emails(x_train)\n clf = self.train_svm_classifier(proc_data, y_train)\n print(self.predict(x_test, y_test, clf))", "title": "" }, { "docid": "bdae5eb7aa5601faa98f7243bcec8884", "score": "0.5116007", "text": "def simulate_batch_par(self):", "title": "" }, { "docid": "749680ba4e90f8709a1d8e54585e5a39", "score": "0.5113983", "text": "def main():\n global data\n gpool = pool.Pool(size=5)\n _data = data\n result = sess.execute('select code from city_code where p_code <> 0').fetchall()\n sess.commit()\n gt = []\n gt.append(gpool.spawn(writeAssist, compInfo))\n for _id in result:\n print(_id)\n _data['c'] = _id[0]\n gt.append(gpool.spawn(search_keyword, _data.copy(), headers()))\n gevent.joinall(gt)", "title": "" }, { "docid": "e145ba4b8ab67cbddc6e3ff8527ab750", "score": "0.51073766", "text": "def run_all(plot_only=False):\n if not plot_only:\n run_all_datasets()\n\n compute_performance()\n plot_performance()", "title": "" }, { "docid": "7e565a76b192d20f008ae02e1c8d170c", "score": "0.5103895", "text": "def generateDataParallel(self):\n num_threads = cpu_count()\n num_entries_per_thread = int(\n (self.num_entries + num_threads - 1) / num_threads\n )\n thread_index = [i for i in range(0, num_threads)]\n\n # making sure we end up having as many fragments as the user asked for\n num_balanced_entries = [\n num_entries_per_thread for _ in range(num_threads)\n ]\n if self.num_entries != num_entries_per_thread * num_threads:\n last_threads_portion = (\n self.num_entries - num_entries_per_thread * (num_threads - 1)\n )\n num_balanced_entries[-1] = last_threads_portion\n\n arguments = zip(thread_index, num_balanced_entries)\n\n with Pool(num_threads) as pool:\n pool.starmap(self.generateData, arguments)", "title": "" }, { "docid": "0b007963aef20ebbba64e7c98e21ccb3", "score": "0.508525", "text": "def preprocess(self, parallel, X, y, dir):\n preprocessing = _expand_instance_list(self.evaluator.preprocessing,\n self.evaluator.indexer)\n\n parallel(delayed(fit_trans)(dir=dir,\n case=case,\n inst=instance_list,\n X=X,\n y=y,\n idx=tri,\n name=None)\n for case, tri, _, instance_list in preprocessing)\n\n self.evaluator.preprocessing_ = \\\n [(tup[0], pickle_load(os.path.join(dir, '%s__t' % tup[0])))\n for tup in preprocessing]", "title": "" }, { "docid": "11de6932c0c27a0ff619d68b00aad536", "score": "0.5080301", "text": "def parallel_main() -> None:\n runs = 4\n simulations_for_architecture = 1\n pool = multiprocessing.Pool()\n for i in range(runs):\n pool.apply_async(random_search_main, args=(i, simulations_for_architecture,))\n pool.close()\n pool.join()", "title": "" }, { "docid": "889afb468e90d68332083cc5f489de1f", "score": "0.50791824", "text": "def split_among_processors(data, samples, ipyclient, noreverse, force, preview):\n ## make output folder for clusters \n data.dirs.clusts = os.path.join(\n os.path.realpath(data.paramsdict[\"working_directory\"]),\n data.name+\"_\"+\"clust_\"+str(data.paramsdict[\"clust_threshold\"]))\n if not os.path.exists(data.dirs.clusts):\n os.makedirs(data.dirs.clusts)\n\n ## If ref mapping, init samples and make the refmapping output directory.\n if not data.paramsdict[\"assembly_method\"] == \"denovo\":\n ## make output directory for read mapping process\n data.dirs.refmapping = os.path.join(\n os.path.realpath(data.paramsdict[\"working_directory\"]),\n data.name+\"_refmapping\")\n if not os.path.exists(data.dirs.refmapping):\n 
os.makedirs(data.dirs.refmapping)\n\n ## Create threaded_view of engines by grouping only ids that are threaded\n ## load_balanced is not the right thing to use here, since we need to know\n ## which engines are being used for each job\n\n ## multi-node threading\n #threaded = get_threaded_view(ipyclient)\n #threaded_view = ipyclient.load_balanced_view(\n # targets=[i[0] for i in threaded])\n #tpproc = [len(i) for i in threaded][0]\n\n try:\n if preview:\n ## Truncate the input files and temporarily swap out the values for\n ## sample.files.edits\n for sample in samples:\n sample.files[\"edits_preview_bak\"] = sample.files.edits\n sample.files.edits = preview_truncate_fq(data, \n sample.files.edits_preview_bak)\n\n ## single node threading\n tpp = 2\n threaded_view = ipyclient.load_balanced_view(\n targets=ipyclient.ids[::tpp])\n tpproc = len(threaded_view)\n\n ## Initialize the mapped and unmapped file paths per sample\n if not data.paramsdict[\"assembly_method\"] == \"denovo\":\n for sample in samples:\n sample = refmap_init(data, sample)\n\n ## FILL LIST ARGS for funcs mapreads and clustall\n submitted_args = []\n for sample in samples:\n #if not align_only:\n submitted_args.append([data, sample, noreverse, tpproc])\n \n ## MAP READS if reference sequence is specified\n if not data.paramsdict[\"assembly_method\"] == \"denovo\":\n results = threaded_view.map(mapreads, submitted_args)\n results.get()\n \n ## DENOVO CLUSTER returns 0/1 of whether clust.gz built without fail\n ## for samples in the order in which they were submitted\n results = threaded_view.map(clustall, submitted_args)\n results = results.get()\n \n ## record that sample is clustered but not yet aligned\n for success, sample in zip(results, samples):\n if success:\n sample.stats.state = 2.5\n \n ## TODO: should it still look for REFSEQ reads if it had no utemp hits?\n \n ## REFSEQ reads (not denovo or denovo_only) pull in alignments from mapped \n ## bam files and write them to the clust.gz files to fold them back into \n ## the pipeline.\n if \"denovo\" not in data.paramsdict[\"assembly_method\"]: \n for sample in samples:\n finalize_aligned_reads(data, sample, ipyclient)\n \n ## call ipp for muscle aligning only if the Sample passed clust/mapping\n for sample in samples:\n if sample.stats.state == 2.5:\n LOGGER.info(\"muscle aligning\") \n multi_muscle_align(data, sample, ipyclient)\n ## run sample cleanup \n sample_cleanup(data, sample)\n \n ## run data cleanup\n data_cleanup(data, samples)\n\n except Exception as inst:\n LOGGER.warn(inst)\n raise\n\n finally:\n ## For preview/refmap restore original sample.files.edits paths and \n ## clean up the tmp files.\n\n ## If we did refmapping return the samples.files.edits to their original\n ## condition. Have to do this before restoring preview files because\n ## the refmap edits backup will be a backup of the preview truncated \n ## files. 
The order of these two functions matters.\n if \"denovo\" not in data.paramsdict[\"assembly_method\"]: \n for sample in samples:\n refmap_cleanup(data, sample)\n\n if preview:\n for sample in samples:\n try:\n ## If edits and edits_preview_bak are the same then \n ## something borked so don't delete any files\n if sample.files.edits == sample.files.edits_preview_bak:\n sample.files.pop(\"edits_preview_bak\", None)\n continue\n\n for tmpf in sample.files.edits[0]:\n if os.path.exists(tmpf):\n os.remove(tmpf)\n\n ## Restore original paths to full fastq files\n sample.files.edits = sample.files.edits_preview_bak\n ## Remove the tmp file reference. The second arg defines \n ## what to return if the key doesn't exist.\n sample.files.pop(\"edits_preview_bak\", None)\n except Exception:\n pass", "title": "" }, { "docid": "788c668aaa1d2af5975e7efcfdffc0f8", "score": "0.50720626", "text": "def process_tick(self):\n for p in self.pvds:\n self.readstts(self.san.providers[p])", "title": "" }, { "docid": "0ae54a933b3425037c2c8164d0e2a5fa", "score": "0.5064539", "text": "def process_tick(self):\n for p in self.pvds:\n e=self.readstts(self.san.providers[p])", "title": "" }, { "docid": "2c6c85e7e03080153f1017480a067778", "score": "0.5060664", "text": "def setup(self):\n comm = self.comm\n rank = comm.rank\n\n # NOTE: evenly_distrib_idxs is a helper function to split the array\n # up as evenly as possible\n sizes, offsets = evenly_distrib_idxs(comm.size, self.options['size'])\n local_size, local_offset = sizes[rank], offsets[rank]\n\n start = local_offset\n end = local_offset + local_size\n\n self.add_input('x', val=np.zeros(local_size, float),\n src_indices=np.arange(start, end, dtype=int))\n self.add_output('y', val=np.zeros(local_size, float))", "title": "" }, { "docid": "123c465fbd65a715212a7300e2a6e5e6", "score": "0.5058733", "text": "def dispatcher(coordinate_system, parallel):\n dispatchers = {\n \"cartesian\": {\n True: point_mass_cartesian_parallel,\n False: point_mass_cartesian_serial,\n },\n \"spherical\": {\n True: point_mass_spherical_parallel,\n False: point_mass_spherical_serial,\n },\n }\n return dispatchers[coordinate_system][parallel]", "title": "" }, { "docid": "4fe7e08a5df0932bcbb4f99d361f98ec", "score": "0.50556284", "text": "def task_cluster_by_enriched_shapemers():\n for levels_type in discrete_levels_type:\n for lflank, rflank in flank_configs:\n for cycle in cycles:\n promiscuous = '%s/promiscuous_%s_cycle%d.l%d.r%d.csv' % (top_results_dir, levels_type, cycle, lflank, rflank)\n infile = '%s/table_enriched_%s_cycle%d.l%d.r%d.csv' % (top_results_dir, levels_type, cycle, lflank, rflank)\n heatmap = '%s/fig_heatmap_shapemers_%s_cycle%d_%s.pdf' % (top_results_dir, levels_type, cycle, fg_type)\n pca_csv = '%s/pca_shapemers_%s.cycle%d.l%d.r%d.csv' % (top_results_dir, levels_type, cycle, lflank, rflank)\n pca_pdf = '%s/fig_pca_shapemers_%s_cycle%d_%s.pdf' % (top_results_dir, levels_type, cycle, fg_type)\n yield {\n 'name' : pca_csv,\n 'actions' : [\"results_scripts/cluster_by_shapemers.py %s %s %s %s\" % (infile, promiscuous, heatmap, pca_csv)],\n 'file_dep' : [infile, promiscuous],\n 'targets' : [heatmap, pca_csv],\n 'clean' : True,\n }\n yield {\n 'name' : pca_pdf,\n 'actions' : [\"results_scripts/plot_pca.R %s %s\" % (pca_csv, pca_pdf)],\n 'file_dep' : [pca_csv],\n 'targets' : [pca_pdf],\n 'clean' : True,\n }", "title": "" }, { "docid": "e2d0fcc7bb415f38d225dcc5059a9a3c", "score": "0.50540173", "text": "def prediction_metrics_pool(tuple_var_list, X_data, Y_data, average_par='macro'):\n 
p = multiprocessing.Pool(10)\n result = p.map(partial(prediction_metrics, X_data=X_data, Y_data=Y_data, average_par=average_par), tuple_var_list)\n p.close()\n p.join()\n p.terminate()\n return result", "title": "" }, { "docid": "86a2a1a377db1a57ab24c2e928a04d6b", "score": "0.5047788", "text": "def process(self):\n start_time = time()\n self.manager.initialize()\n mp_method = self.manager.params.integration.mp.method\n mp_nproc = min(len(self.manager), self.manager.params.integration.mp.nproc)\n mp_njobs = self.manager.params.integration.mp.njobs\n if (\n mp_nproc > 1 and platform.system() == \"Windows\"\n ): # platform.system() forks which is bad for MPI, so don't use it unless nproc > 1\n logger.warning(\n \"Multiprocessing is not available on windows. Setting nproc = 1\\n\"\n )\n mp_nproc = 1\n assert mp_nproc > 0, \"Invalid number of processors\"\n logger.info(self.manager.summary())\n logger.info(\" Using %s with %d parallel job(s)\\n\", mp_method, mp_nproc)\n if mp_nproc > 1:\n\n def process_output(result):\n rehandle_cached_records(result[1])\n self.manager.accumulate(result[0])\n result[0].reflections = None\n result[0].data = None\n\n def execute_task(task):\n log.config_simple_cached()\n result = task()\n handlers = logging.getLogger(\"dials\").handlers\n assert len(handlers) == 1, \"Invalid number of logging handlers\"\n return result, handlers[0].records\n\n multi_node_parallel_map(\n func=execute_task,\n iterable=list(self.manager.tasks()),\n njobs=mp_njobs,\n nproc=mp_nproc,\n callback=process_output,\n cluster_method=mp_method,\n preserve_order=True,\n )\n else:\n for task in self.manager.tasks():\n self.manager.accumulate(task())\n self.manager.finalize()\n end_time = time()\n self.manager.time.user_time = end_time - start_time\n result = self.manager.result()\n return result, self.manager.time", "title": "" }, { "docid": "104c8c917e475bd34d97dd2c69c24f50", "score": "0.5045842", "text": "def Parallel(*plist):\n _parallel(plist, True)", "title": "" }, { "docid": "e72d0b761b520dcdb129ce4e18d60258", "score": "0.50423056", "text": "def parallelize(self, *dags):\n raise NotImplemented()", "title": "" }, { "docid": "6f35a764862273029a12a788944d829a", "score": "0.5039765", "text": "def train(self, data_x_df, data_y_df):\n metrics = [MeanAbsoluteError()]\n\n data = pd.concat([data_x_df, data_y_df], axis=1)\n data.to_csv('sample_dataset.csv', index=False)\n cao_mapping = {\n \"context\": data_x_df.columns.values.tolist(),\n \"actions\": [],\n \"outcomes\": data_y_df.columns.values.tolist()\n }\n with open('sample_cao_mapping.json', 'w') as f:\n json.dump(cao_mapping, f)\n\n for predictor_name, predictor in self.availaible_predictors.items():\n print(f\"Evaluating Predictor: \", predictor_name)\n executor = Executor(\n predictor, data, cao_mapping, {}, {}, metrics, \"\", {}\n )\n executor.execute()", "title": "" }, { "docid": "cbf058992f6118aa6726cc9e1fa31c90", "score": "0.50393915", "text": "def __init_data(self):\n #Create worker vectors (owned + ghosts)\n self.my_x_worker = Epetra.Vector(self.my_worker_map)\n self.my_y_worker = Epetra.Vector(self.my_worker_map)\n self.my_z_worker = Epetra.Vector(self.my_worker_map)\n self.my_area_worker = Epetra.Vector(self.my_worker_map)\n #Import the needed components for local operations\n self.my_x_worker.Import(self.my_x, self.worker_importer, Epetra.Insert)\n self.my_y_worker.Import(self.my_y, self.worker_importer, Epetra.Insert)\n self.my_z_worker.Import(self.my_z, self.worker_importer, Epetra.Insert)\n 
self.my_area_worker.Import(self.my_area, self.worker_importer, Epetra.Insert)\n if False:\n my_SymSource_worker = Epetra.Vector(self.my_worker_map)\n my_SymSource_worker.Import(self.my_SymSource, self.worker_importer, Epetra.Insert)\n self.my_xSym_owned = np.where(self.my_x<0.0)\n self.my_ySym_owned = np.where(self.my_y<0.0)\n self.my_zSym_owned = np.where(self.my_z<0.0)\n print self.rank,\"shape my_xSym_owned\",np.shape(self.my_xSym_owned[0])\n self.my_allSym_owned = np.unique(np.concatenate(\n (self.my_xSym_owned[0],self.my_ySym_owned[0],self.my_zSym_owned[0])))\n self.my_xSym_worker = np.where(self.my_x_worker<0.0)\n self.my_ySym_worker = np.where(self.my_y_worker<0.0)\n self.my_zSym_worker = np.where(self.my_z_worker<0.0)\n self.my_SymSource_local = [self.my_worker_map.LID(np.int(source)) for source in my_SymSource_worker]\n #Convert the global node ids in the family array to local ids\n self.my_families_local = np.array([self.my_worker_map.LID(i)\n for i in self.my_families.flatten()])\n #Mask local family array\n self.my_families_local.shape = (len(self.my_families),-1)\n self.my_families_local = ma.masked_equal(self.my_families_local, -1)\n self.my_families_local.harden_mask()\n\n #Create matrix that will take real node displacements and spit out displacements\n #for real and virtual nodes\n sourcetmp = self.my_vsources_local[:]\n desttmp = np.empty_like(sourcetmp)\n desttmp[:]=np.arange(self.numWorkers,self.numWorkers+len(desttmp))[:,None]\n sourcetmp=ma.masked_equal(sourcetmp,-1)\n desttmp = ma.array(desttmp,mask=sourcetmp.mask)\n wtmp = ma.array(self.my_vweights,mask=sourcetmp.mask)\n\n builderRow = np.concatenate(\n (np.arange(self.numWorkers),desttmp.compressed()))\n builderCol = np.concatenate(\n (np.arange(self.numWorkers),sourcetmp.compressed()))\n builderDat = np.concatenate(\n (np.ones((self.numWorkers)),wtmp.compressed()))\n\n self.augmentbuilder=scipy.sparse.csr_matrix(\n (builderDat,(builderRow,builderCol)),\n shape=(len(self.my_vsources_local)+self.numWorkers,self.numWorkers))\n\n #Convert the global node ids in the ofamily array to local ids\n self.my_ofamilies_local=self.my_ofamilies[:]\n for row,pairs in enumerate(self.my_ofamilies):\n for col,GID in enumerate(pairs):\n if GID == -1:\n self.my_ofamilies_local[row,col]=-1\n elif GID < self.__global_number_of_nodes:\n LID=self.my_worker_map.LID(GID)\n self.my_ofamilies_local[row,col]=LID\n else:\n LID=self.balanced_vpt_map.LID(GID-self.__global_number_of_nodes)\n self.my_ofamilies_local[row,col]=(self.numWorkers\n + LID)\n self.my_ofamilies_local = ma.masked_equal(self.my_ofamilies_local, -1)\n self.my_ofamilies_local.harden_mask()\n\n #create augmented position/area vectors\n my_x_augmented = np.concatenate((self.my_x_worker[:],self.my_vnodes[:,0]))\n my_y_augmented = np.concatenate((self.my_y_worker[:],self.my_vnodes[:,1]))\n my_z_augmented = np.concatenate((self.my_z_worker[:],self.my_vnodes[:,2]))\n self.my_area_augmented = np.concatenate(\n (self.my_area_worker[:],np.zeros_like(self.my_vnodes[:,2])))\n self.my_augmented_reference = np.transpose(\n [my_x_augmented,my_y_augmented,my_z_augmented])\n\n\n #use family and ofamily data to make bond and pair lists\n famsizes = np.array([ (row > -1).sum()\n for row in self.my_families_local], dtype=np.int32)\n xplength = famsizes.sum()\n\n self.radius = Epetra.Vector(self.balanced_map)\n self.radius.PutScalar(self.grid_spacing)\n\n self.my_bond_x_local = np.empty([xplength], dtype=np.int32)\n self.my_bond_p_local = np.empty([xplength], dtype=np.int32)\n\n\n counter = 0\n 
for x,fam in enumerate(self.my_families_local):\n for p in fam.compressed():\n self.my_bond_x_local[counter] = x\n self.my_bond_p_local[counter] = p\n counter = counter + 1\n\n my_ref_bonds = np.transpose(\n [my_x_augmented[self.my_bond_p_local] -\n my_x_augmented[self.my_bond_x_local],\n my_y_augmented[self.my_bond_p_local] -\n my_y_augmented[self.my_bond_x_local],\n my_z_augmented[self.my_bond_p_local] -\n my_z_augmented[self.my_bond_x_local]])\n\n self.my_ref_bondLengths = np.sqrt(np.sum(my_ref_bonds*my_ref_bonds,axis=-1))\n\n\n ofamsizes = np.array([ (row > -1).sum()\n for row in self.my_ofamilies_local], dtype=np.int32)\n xpqlength = (ofamsizes.sum())/2\n\n self.my_bondpair_x_local = np.empty([xpqlength], dtype=np.intc)\n self.my_bondpair_p_local = np.empty([xpqlength], dtype=np.intc)\n self.my_bondpair_q_local = np.empty([xpqlength], dtype=np.intc)\n\n counter = 0\n for x,ofamily in enumerate(self.my_ofamilies_local):\n for p,q in zip(ofamily.compressed()[::2],ofamily.compressed()[1::2]):\n self.my_bondpair_x_local[counter] = x\n self.my_bondpair_p_local[counter] = p\n self.my_bondpair_q_local[counter] = q\n counter = counter + 1\n\n #put pairs with virtual nodes at end of list\n virtualsort = self.my_bondpair_q_local.argsort()\n self.sort=virtualsort\n self.unsort = virtualsort.argsort()\n self.my_bondpair_x_local = self.my_bondpair_x_local[virtualsort]\n self.my_bondpair_p_local = self.my_bondpair_p_local[virtualsort]\n self.my_bondpair_q_local = self.my_bondpair_q_local[virtualsort]\n self.numRealPairs = np.where(self.my_bondpair_q_local>=self.numWorkers,0.0,1.0).sum().astype(np.intc)\n self.my_realpair_q_local = np.array(self.my_bondpair_q_local[:self.numRealPairs],copy=True)\n self.my_virtpair_q_local = np.array(self.my_bondpair_q_local[self.numRealPairs:],copy=True)\n if not self.is_beam:\n self.my_vsources_A_local= self.my_vsources_local[:,0]\n self.my_vweights_A = self.my_vweights[:,0]\n self.my_vsources_B_local= self.my_vsources_local[:,1]\n self.my_vweights_B = self.my_vweights[:,1]\n self.my_vsources_C_local= self.my_vsources_local[:,2]\n self.my_vweights_C = self.my_vweights[:,2]\n self.my_vpt_pivot_local = self.my_bondpair_x_local[self.numRealPairs:]\n\n #vnodes not in the plane defined by their supports have an offset\n self.my_vpt_offset=self.my_vnodes-(\n self.my_vweights_A[:,None]*self.my_augmented_reference[self.my_vsources_A_local]+\n self.my_vweights_B[:,None]*self.my_augmented_reference[self.my_vsources_B_local]+\n self.my_vweights_C[:,None]*self.my_augmented_reference[self.my_vsources_C_local]\n )\n\n my_bondpair_ref_P = np.transpose(\n [my_x_augmented[self.my_bondpair_p_local] -\n my_x_augmented[self.my_bondpair_x_local],\n my_y_augmented[self.my_bondpair_p_local] -\n my_y_augmented[self.my_bondpair_x_local],\n my_z_augmented[self.my_bondpair_p_local] -\n my_z_augmented[self.my_bondpair_x_local]])\n\n my_bondpair_ref_Q = np.transpose(\n [my_x_augmented[self.my_bondpair_q_local] -\n my_x_augmented[self.my_bondpair_x_local],\n my_y_augmented[self.my_bondpair_q_local] -\n my_y_augmented[self.my_bondpair_x_local],\n my_z_augmented[self.my_bondpair_q_local] -\n my_z_augmented[self.my_bondpair_x_local]])\n\n #Create lists of bond indices for each pair for energy/failure coupling\n self.bond1=np.empty((xpqlength),dtype=np.intc)\n self.bond2=np.empty((self.numRealPairs),dtype=np.intc)\n if self.material != \"elastic\":\n for i in range(xpqlength):\n x=self.my_bondpair_x_local[i]\n p=self.my_bondpair_p_local[i]\n q=self.my_bondpair_q_local[i]\n 
bondindex=np.nonzero(np.logical_and(self.my_bond_x_local==x,self.my_bond_p_local==p))\n assert len(bondindex)==1\n self.bond1[i]=bondindex[0]\n for i in range(self.numRealPairs):\n x=self.my_bondpair_x_local[i]\n p=self.my_bondpair_p_local[i]\n q=self.my_bondpair_q_local[i]\n bondindex=np.nonzero(np.logical_and(self.my_bond_x_local==x,self.my_bond_p_local==q))\n assert len(bondindex)==1\n self.bond2[i]=bondindex[0]\n\n #Compute bond extension and bond-pair bending coefficients\n #Bond coefficients\n my_bondLengths = np.sqrt(np.sum(my_ref_bonds*my_ref_bonds,axis=-1))\n my_bond_x_area = self.my_area_augmented[self.my_bond_x_local]\n my_bond_p_area = self.my_area_augmented[self.my_bond_p_local]\n\n my_P_bondLengths = np.sqrt(np.sum(my_bondpair_ref_P*my_bondpair_ref_P,axis=-1))\n my_x_area = self.my_area_augmented[self.my_bondpair_x_local]\n my_pq_area = (self.my_area_augmented[self.my_bondpair_p_local]+\n self.my_area_augmented[self.my_bondpair_q_local])\n\n if self.is_beam: \n #Beam Coefficients\n ext_weights = my_bondLengths*my_bond_p_area\n if self.rank==0:print \"Weighted for linear elasticity using PA-HHB\"\n ext_weights = my_bond_p_area/my_bondLengths\n ext_m_partials = ext_weights*my_bondLengths*my_bondLengths\n ext_m_total = np.bincount(self.my_bond_x_local,ext_m_partials)\n ext_m_dist = ext_m_total[self.my_bond_x_local]\n\n self.my_extension_alpha = np.divide(\n self.ext_modulus*my_bond_x_area,ext_m_dist)\n self.my_extension_stiffness = ext_weights*self.my_extension_alpha\n\n self.iso_ext_coefficients = (2.0*ext_weights*my_bondLengths/ext_m_dist)\n self.my_iso_ext_stiffness = (self.iso_ext_modulus*self.iso_ext_coefficients*\n (self.grid_spacing**2.0))\n\n #Bond-pair coefficients\n my_x_area = self.my_area_augmented[self.my_bondpair_x_local]\n my_pq_area = (self.my_area_augmented[self.my_bondpair_p_local]+\n self.my_area_augmented[self.my_bondpair_q_local])\n\n my_P_bondLengths = np.sqrt(np.sum(my_bondpair_ref_P*my_bondpair_ref_P,axis=-1))\n widths = np.ones_like(my_P_bondLengths)\n widthweights = my_P_bondLengths*my_pq_area\n if self.rank==0:print \"Weighted for linear elasticity only\"\n widthweights = widths*my_pq_area/my_P_bondLengths\n\n my_m_partials = my_P_bondLengths*my_P_bondLengths*widthweights\n my_m_total = np.bincount(self.my_bondpair_x_local,my_m_partials)\n if self.rank==0:\n print \"bend_m max\", my_m_total.max()\n print \"bend_m min\", my_m_total.min()\n my_m_dist = my_m_total[self.my_bondpair_x_local]\n\n my_pair_alpha = np.divide((my_x_area)*self.c,my_m_dist)\n self.my_pair_stiffness = np.multiply(widthweights,my_pair_alpha)\n\n iso_m_partials = widthweights\n iso_m_total = 2.0*np.bincount(self.my_bondpair_x_local,iso_m_partials)\n iso_m_dist = iso_m_total[self.my_bondpair_x_local]\n\n self.iso_pair_coefficients = (2.0*widthweights/\n (my_P_bondLengths*my_P_bondLengths*iso_m_dist))\n self.my_isoBend_stiffness = (self.iso_pair_coefficients*\n self.isoBendMod*(my_x_area))\n else:\n #Plate Coefficients\n ext_weights = my_bondLengths*my_bond_p_area\n if self.PAHHB:\n if self.rank==0:print \"Weighted for linear elasticity using PA-HHB\"\n bond_partial_areas = np.where(\n my_bondLengths<self.horizon-0.5*self.grid_spacing,\n 1.0,0.5+(self.horizon-my_bondLengths)/self.grid_spacing)\n my_bond_p_area = my_bond_p_area*bond_partial_areas\n else:\n if self.rank==0:print \"Weighted for linear elasticity\"\n ext_weights = my_bond_p_area\n ext_m_partials = ext_weights*my_bondLengths*my_bondLengths\n ext_m_total = np.bincount(self.my_bond_x_local,ext_m_partials)\n ext_m_dist = 
ext_m_total[self.my_bond_x_local]\n self.my_extension_alpha = np.divide(\n self.ext_modulus*my_bond_x_area,ext_m_dist)\n self.my_extension_stiffness = ext_weights*self.my_extension_alpha\n self.iso_ext_coefficients = (2.0*ext_weights*my_bondLengths/ext_m_dist)\n self.my_iso_ext_stiffness = (self.iso_ext_modulus*self.iso_ext_coefficients*\n (self.grid_spacing**2.0))\n #self.my_iso_ext_stiffness = (self.iso_ext_modulus*self.iso_ext_coefficients*\n #my_bond_x_area)\n #Bond-pair coefficients\n my_x_area = self.my_area_augmented[self.my_bondpair_x_local]\n my_pq_area = (self.my_area_augmented[self.my_bondpair_p_local]+\n self.my_area_augmented[self.my_bondpair_q_local])\n\n my_P_bondLengths = np.sqrt(np.sum(my_bondpair_ref_P*my_bondpair_ref_P,axis=-1))\n if self.PAHHB:\n pair_partial_areas = np.where(\n my_P_bondLengths<self.horizon-0.5*self.grid_spacing,\n 1.0,0.5+(self.horizon-my_P_bondLengths)/self.grid_spacing)\n my_pq_area = my_pq_area*pair_partial_areas\n widths = np.ones_like(my_P_bondLengths)\n widthweights = my_P_bondLengths*my_pq_area\n #if self.rank==0:print \"Weighted for linear elasticity only\"\n widthweights = widths*my_pq_area\n\n my_m_partials = my_P_bondLengths*my_P_bondLengths*widthweights\n my_m_total = np.bincount(self.my_bondpair_x_local,my_m_partials)\n my_m_dist = my_m_total[self.my_bondpair_x_local]\n my_pair_alpha = np.divide(16.0*(my_x_area)*self.c/3.0,my_m_dist)\n self.my_pair_stiffness = np.multiply(widthweights,my_pair_alpha)/2.0\n #divide by two because each pair gets applied to both p and q\n iso_m_partials = widthweights\n iso_m_total = 2.0*np.bincount(self.my_bondpair_x_local,iso_m_partials)\n iso_m_dist = iso_m_total[self.my_bondpair_x_local]\n self.iso_pair_coefficients = (2.0*widthweights/\n (my_P_bondLengths*my_P_bondLengths*iso_m_dist))\n self.my_isoBend_stiffness = (self.iso_pair_coefficients*\n self.isoBendMod*(my_x_area))\n self.pair_critical_energy = self.pair_crit_energy_density*my_x_area*my_pq_area\n \n #Form maps to bring all bondhealth/pairhealth to PID 0 and back\n num_bonds_local=len(my_bondLengths)\n all_num_bonds=self.__comm.GatherAll(num_bonds_local)\n my_first_global_index=np.add.accumulate(all_num_bonds)\n total_num_bonds=np.sum(all_num_bonds)\n \n if self.rank==0:\n unbalanced_num=total_num_bonds\n balanced_GEs=np.arange(my_first_global_index[self.rank-1],dtype=np.int32)\n else:\n unbalanced_num=0\n balanced_GEs=np.arange(my_first_global_index[self.rank-1],my_first_global_index[self.rank-1],dtype=np.int32)\n \n self.bondmap_unbalanced=Epetra.Map(total_num_bonds,unbalanced_num,0,self.__comm)\n self.bondmap_balanced=Epetra.Map(total_num_bonds,balanced_GEs,0,self.__comm)\n \n self.bondimporter = Epetra.Import(self.bondmap_balanced,self.bondmap_unbalanced)\n self.bondexporter = Epetra.Export(self.bondmap_balanced,self.bondmap_unbalanced)\n \n self.bondhealth_unbalanced=Epetra.Vector(self.bondmap_unbalanced)\n self.bondhealth_balanced=Epetra.Vector(self.bondmap_balanced)\n self.my_bond_health=np.ones_like(my_bondLengths)\n \n num_pairs_local=len(my_P_bondLengths)\n all_num_pairs=self.__comm.GatherAll(num_pairs_local)\n my_first_global_index=np.add.accumulate(all_num_pairs)\n total_num_pairs=np.sum(all_num_pairs)\n \n if self.rank==0:\n unbalanced_num=total_num_pairs\n balanced_GEs=np.arange(my_first_global_index[self.rank],dtype=np.int32)\n else:\n unbalanced_num=0\n balanced_GEs=np.arange(my_first_global_index[self.rank-1],my_first_global_index[self.rank],dtype=np.int32)\n \n 
self.pairmap_unbalanced=Epetra.Map(total_num_pairs,unbalanced_num,0,self.__comm)\n self.pairmap_balanced=Epetra.Map(total_num_pairs,balanced_GEs,0,self.__comm)\n \n self.pairimporter = Epetra.Import(self.pairmap_balanced,self.pairmap_unbalanced)\n self.pairexporter = Epetra.Export(self.pairmap_balanced,self.pairmap_unbalanced)\n \n self.pairhealth_unbalanced=Epetra.Vector(self.pairmap_unbalanced)\n self.pairhealth_balanced=Epetra.Vector(self.pairmap_balanced)\n self.my_pair_health=np.ones_like(my_P_bondLengths)\n \n self.my_pair_health = np.ones_like(my_P_bondLengths,dtype=np.float_)\n self.my_num_pairs = np.bincount(self.my_bondpair_x_local,\n minlength = len(self.my_families))\n \n with np.errstate(invalid='ignore'):\n self.my_node_health = np.divide(\n np.bincount(self.my_bondpair_x_local,weights=self.my_pair_health,\n minlength = len(self.my_families)),self.my_num_pairs)\n \n self.my_bondpair_plasticAngle = np.zeros([xpqlength])\n\n self.my_pair_p_vectors=np.zeros((xpqlength,3),dtype=np.float_)\n self.my_pair_q_vectors=np.zeros((xpqlength,3),dtype=np.float_)\n self.my_pair_p_lengths=np.zeros((xpqlength),dtype=np.float_)\n self.my_pair_q_lengths=np.zeros((xpqlength),dtype=np.float_)\n self.my_iso_bending=np.zeros((self.my_num_owned,3),dtype=np.float_)\n self.my_bond_vectors=np.zeros((xplength,3),dtype=np.float_)\n self.my_bond_lengths=np.zeros((xplength),dtype=np.float_)\n self.my_bond_energies=np.zeros((xplength),dtype=np.float_)\n self.my_bond_stretches=np.zeros((xplength),dtype=np.float_)\n self.my_iso_extension=np.zeros((self.my_num_owned),dtype=np.float_)\n self.my_HpqEff=np.zeros((xpqlength,3),dtype=np.float_)\n self.my_f_tmp = np.zeros((self.numWorkers,3),dtype=np.float_)\n self.my_ref_position = np.transpose(\n [self.my_x_worker[:],self.my_y_worker[:],self.my_z_worker[:]])\n\n self.num_broken=0\n\n #The new load balanced map\n balanced_map = self.get_balanced_map()\n\n\n #Create distributed vectors (owned only)\n self.my_force_x = Epetra.Vector(balanced_map)\n self.my_force_y = Epetra.Vector(balanced_map)\n self.my_force_z = Epetra.Vector(balanced_map)\n self.my_u = Epetra.Vector(self.balanced_dof_map)\n\n #Create distributed worker vectors (owned + ghosts)\n self.my_force_x_worker = Epetra.Vector(self.my_worker_map)\n self.my_fx_worker = self.my_force_x_worker.ExtractView()\n self.my_force_y_worker = Epetra.Vector(self.my_worker_map)\n self.my_fy_worker = self.my_force_y_worker.ExtractView()\n self.my_force_z_worker = Epetra.Vector(self.my_worker_map)\n self.my_fz_worker = self.my_force_z_worker.ExtractView()\n self.my_u_worker = Epetra.Vector(self.dof_worker_map,True)\n self.uReshape=self.my_u_worker.ExtractView()\n self.uReshape.shape=(-1,3)\n self.my_u_worker.shape=(-1,3)\n self.xReshape=Epetra.Vector(self.dof_worker_map,self.my_ref_position)\n\n #Create vectors for temporary use\n self.ux_tmp = Epetra.Vector(self.balanced_map)\n self.uy_tmp = Epetra.Vector(self.balanced_map)\n self.uz_tmp = Epetra.Vector(self.balanced_map)\n\n self.counter=0\n return", "title": "" }, { "docid": "1ce3f9fbc30a6340510f939678cfa665", "score": "0.50357", "text": "def communicate(topology):\n for vid, links in topology.items(): # iteritems -> items\n # get data about platoon leader\n leader_data = get_par(links[\"leader\"], cc.PAR_SPEED_AND_ACCELERATION)\n (l_v, l_a, l_u, l_x, l_y, l_t) = cc.unpack(leader_data)\n leader_data = cc.pack(l_v, l_u, l_x, l_y, l_t)\n # get data about front vehicle\n front_data = get_par(links[\"front\"], cc.PAR_SPEED_AND_ACCELERATION)\n (f_v, f_a, f_u, f_x, f_y, f_t) = 
cc.unpack(front_data)\n front_data = cc.pack(f_v, f_u, f_x, f_y, f_t)\n # pass leader and front vehicle data to CACC\n set_par(vid, cc.PAR_LEADER_SPEED_AND_ACCELERATION, leader_data)\n set_par(vid, cc.PAR_PRECEDING_SPEED_AND_ACCELERATION, front_data)\n # compute GPS distance and pass it to the fake CACC\n f_d = get_distance(vid, links[\"front\"])\n set_par(vid, cc.PAR_LEADER_FAKE_DATA, cc.pack(l_v, l_u))\n set_par(vid, cc.PAR_FRONT_FAKE_DATA, cc.pack(f_v, f_u, f_d))", "title": "" }, { "docid": "c7ac011216fbdcb8a8cec5ca926acdcd", "score": "0.5035242", "text": "def _output_parallel(self, index):\n self._output_serial(index)", "title": "" }, { "docid": "7f30777b11256f3cac74c6b122e66b96", "score": "0.5032703", "text": "def _data_parallel_master(self, intermediates):\n intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())\n to_reduce = [i[1][:2] for i in intermediates]\n to_reduce = [j for i in to_reduce for j in i]\n target_gpus = [i[1].sum.get_device() for i in intermediates]\n sum_size = sum([i[1].sum_size for i in intermediates])\n sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)\n mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)\n broadcasted = Broadcast.apply(target_gpus, mean, inv_std)\n outputs = []\n for i, rec in enumerate(intermediates):\n outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))\n return outputs", "title": "" }, { "docid": "46560410dc726431c5595c2fcf73bf56", "score": "0.5026786", "text": "def plot_multiscatter(self, to_plot, axe):\n x_coef, y_coef = self.add_scatter_image(to_plot, axe)\n opacity = self.get_opacity(to_plot)\n for i, data in enumerate(to_plot[\"data\"]):\n x_data = [d*x_coef for d in data[0]]\n y_data = [d*y_coef for d in data[1]]\n marker = self.get_marker(to_plot, i)\n size = self.get_marker_size(to_plot, i)\n if \"marker_color\" in to_plot:\n color = self.get_marker_color(to_plot, i)\n axe.scatter(x_data, y_data, alpha=opacity, marker=marker, s=size, c=color)\n else:\n axe.scatter(x_data, y_data, alpha=opacity, marker=marker, s=size)", "title": "" }, { "docid": "f577e4c2262b0a7cf9cec89ee71a6830", "score": "0.5026226", "text": "def computeStatisticsParallel(self, phi0s, params, scale, cpu, model_name):\n \n print(\"Precomputing Statistics using %d Processes...\",cpu)\n npx = len(phi0s) # Number of pixels in an image \n dim = int(N/2)\n k = int(2*np.ceil(scale * self.m_psf_rad ) + 2) # Width of a patch, just for readability\n \n # Create arrays needed for storage\n # PSF Template\n h_template = self.modelFunction(k,model_name, params)\n h_mask = createCircularMask(h_template.shape,radius = self.m_psf_rad*scale)\n h = np.zeros((self.m_height,self.m_width,self.m_p_size*scale**2)) # The off axis PSF at each point\n\n # Store for each image pixel, for each temporal frame an image\n # for patches: for each time, we need to store a column of patches\n patches = []\n patch = np.zeros((self.m_nFrames,self.m_p_size*scale**2)) # 2d selection of pixels around a given point\n\n # the mean of a temporal column of patches at each pixel\n m = np.zeros((self.m_height*self.m_width*self.m_p_size*scale**2)) \n m_C = np.ctypeslib.as_array(m)\n #N*N*self.p_size*scale**2\n\n # the inverse covariance matrix at each point\n Cinv = np.zeros((self.m_height*self.m_width*self.m_p_size*self.m_p_size*scale**2)) \n Cinv_C = np.ctypeslib.as_array(Cinv)\n #N*N*self.p_size*self.p_size*scale**2\n\n for p0 in phi0s:\n if scale!=1:\n h[p0[0]][p0[1]] = resizeImage(h_template,scale)[h_mask]\n else:\n h[p0[0]][p0[1]] = 
h_template[h_mask]\n\n \n # *** Parallel Processing ***\n start = time.time()\n #print(arglist[0])\n # Create a pool\n #shared_m = sharedctypes.RawArray('d', m_C)\n #shared_m = sharedctypes.Array(ctypes.c_double,m_C,lock = False)\n #shared_Cinv = sharedctypes.RawArray('d',Cinv_C)\n #shared_Cinv = sharedctypes.Array(ctypes.c_double,Cinv_C,lock = False)\n # pynpoint processing/limits.py \n #queue = Queue(100000000)\n\n #patches = [np.copy(np.array(self.get_patch(p0, k, self.mask))) for p0 in phi0s]\n arglist = [np.copy(np.array(self.getPatch(p0, k, self.m_mask))) for p0 in phi0s]\n '''\n jobs = []\n result = []\n for apatch in patches:\n process = Process(target = pixel_calc,\n args = (apatch,\n T,\n self.p_size,\n queue))\n jobs.append(process)\n \n for count,job in enumerate(jobs):\n job.start()\n #print(job,count)\n #print(\"started\")\n if (count+1)%cpu == 0:\n for njob in jobs[(count+1-cpu):(count+1)]:\n njob.join(timeout = 0.5)\n elif (count+1) == len(jobs) and (count+1)%cpu != 0:\n for njob in jobs[count + 1 - (count + 1)%cpu:]:\n njob.join()\n '''\n p = Pool(processes = cpu)\n data = p.map(self.pixelCalc, arglist, chunksize = int(npx/16))\n p.close()\n p.join()", "title": "" }, { "docid": "8b3a15c84caee34ed00ecc546c29834a", "score": "0.502158", "text": "def scatter_dataset_no_comm(dataset, comm, shuffle=False, seed=0):\n\n if hasattr(comm, 'mpi_comm'):\n comm = comm.mpi_comm\n assert hasattr(comm, 'send')\n assert hasattr(comm, 'recv')\n\n order = None\n n_total_samples = len(dataset)\n if shuffle is not None:\n order = numpy.random.RandomState(seed).permutation(\n n_total_samples)\n\n n_sub_samples = (n_total_samples + comm.size - 1) // comm.size\n\n b = n_total_samples * comm.rank // comm.size\n e = b + n_sub_samples\n\n return chainer.datasets.SubDataset(dataset, b, e, order)", "title": "" }, { "docid": "58e5310a2ee47406763668d2fe0d3e45", "score": "0.50174993", "text": "def _send_procs_to_workers(self, updatehash=False, graph=None):", "title": "" }, { "docid": "285029811091a86db3cc2359c48de283", "score": "0.50174683", "text": "def plot_nodes_over_data_scattermatrix(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov):\n \n import pandas as pd\n from pandas.tools.plotting import scatter_matrix\n\n idim = X.shape[1]\n odim = Y.shape[1]\n numplots = idim + odim\n\n # e_nodes, p_nodes = hebbsom_get_map_nodes(mdl, idim, odim)\n \n dfcols = []\n dfcols += [\"e_%d\" % i for i in range(idim)]\n dfcols += [\"p_%d\" % i for i in range(odim)]\n\n # X_plus_e_nodes = np.vstack((X, e_nodes))\n # Y_plus_p_nodes = np.vstack((Y, p_nodes))\n\n # df = pd.DataFrame(np.hstack((X_plus_e_nodes, Y_plus_p_nodes)), columns=dfcols)\n df = pd.DataFrame(np.hstack((X, Y)), columns=dfcols)\n sm = scatter_matrix(df, alpha=0.2, figsize=(5,5), diagonal=\"hist\")\n print(\"sm = %s\" % (sm))\n # loop over i/o components\n idims = range(idim)\n odims = range(idim, idim+odim)\n\n \n for i in range(numplots):\n for j in range(numplots):\n if i != j and i in idims and j in idims:\n # center = np.array()\n # x1, x2 = gmm.gauss_ellipse_2d(centroids[i], ccov[i])\n \n sm[i,j].plot(e_nodes[:,j], e_nodes[:,i], \"ro\", alpha=0.5, markersize=8)\n if i != j and i in odims and j in odims:\n sm[i,j].plot(p_nodes[:,j-idim], p_nodes[:,i-idim], \"ro\", alpha=0.5, markersize=8)\n \n # if i != j and i in idims and j in odims:\n # sm[i,j].plot(p_nodes[:,j-idim], e_nodes[:,i], \"go\", alpha=0.5, markersize=8)\n # if i != j and i in odims and j in idims:\n # sm[i,j].plot(e_nodes[:,j], p_nodes[:,i-idim], \"go\", alpha=0.5, 
markersize=8)\n\n # get figure reference from axis and show\n fig2 = sm[0,0].get_figure()\n fig2.suptitle(\"Predictions over data scattermatrix (%s)\" % (mdl.__class__.__name__))\n fig2.show()", "title": "" }, { "docid": "c0799816c4dc94288c5843a0bbea964a", "score": "0.5016212", "text": "def main(num_processes: int = 1) -> None:\n\n FULL_RESULT_PATH = f\"optslope_rubisco/results.csv\"\n FIGURE_PATH = f\"optslope_rubisco/heatmap.pdf\"\n\n wt_model = read_sbml_model(read_text(optslope_rubisco, optslope_rubisco.WILDTYPE_MODEL))\n\n dfs = []\n print(f\"Calculating slopes for up to 2 knockouts, and \"\n f\"for {len(optslope_rubisco.CARBON_SOURCES_LIST)} carbon source combinations\")\n\n for carbon_sources in tqdm(optslope_rubisco.CARBON_SOURCES_LIST,\n total=len(optslope_rubisco.CARBON_SOURCES_LIST),\n desc=\"Carbon Sources\"):\n df = calculate_slope_multi(\n wt_model=wt_model,\n carbon_sources=carbon_sources,\n single_knockouts=optslope_rubisco.SINGLE_KOS,\n target_reaction=optslope_rubisco.TARGET_REACTION,\n max_knockouts=2,\n num_processes=num_processes,\n chunksize=100)\n\n dfs.append(df)\n\n result_df = pd.concat(dfs)\n result_df = result_df.round(3)\n\n # write all the slopes to a CSV file\n with open(FULL_RESULT_PATH, 'w') as fp:\n result_df.to_csv(fp)\n\n # the knockouts are given as 2-tuples, we first need to convert them to 2\n # columns of string values\n N = len(optslope_rubisco.SINGLE_KOS)\n data_mat = np.zeros((3*N, 3*N)) * np.nan\n for row in result_df.itertuples():\n if len(row.knockouts) == 0 or len(row.carbon_sources) == 0:\n continue\n\n i0 = optslope_rubisco.SINGLE_KOS.index(row.knockouts[0])\n if len(row.knockouts) == 1:\n i1 = i0\n else:\n i1 = optslope_rubisco.SINGLE_KOS.index(row.knockouts[1])\n\n i2 = optslope_rubisco.CARBON_SOURCES_LIST.index(row.carbon_sources)\n\n x = 3 * i0 + i2 // 3\n y = 3 * i1 + i2 % 3\n data_mat[x, y] = row.slope\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 10))\n\n # make a colormap which assigns a red color to values around 0,\n # and uses Viridis for all positive values\n cmap = ListedColormap([[150 / 256, 10 / 256, 50 / 256, 1]] +\n cm.get_cmap('viridis', 50).colors.tolist())\n\n g = sns.heatmap(data_mat.T, vmin=-0.5, vmax=35, cmap=cmap,\n cbar_kws={'label': 'slope'}, ax=ax)\n g.set_facecolor('darkgrey')\n ax.set_xticks(np.arange(1.5, 3*N, 3))\n ax.set_yticks(np.arange(1.5, 3*N, 3))\n ax.set_xticklabels(optslope_rubisco.SINGLE_KOS, rotation=90, ha='center', fontsize=12)\n ax.set_yticklabels(optslope_rubisco.SINGLE_KOS, rotation=0, va='center', fontsize=12)\n\n ax.text(data_mat.shape[0] * 0.6 + 8, data_mat.shape[0] * 0.02,\n \"Carbon Sources\", fontsize=12, va='center', ha='center')\n for i, cs in enumerate(optslope_rubisco.CARBON_SOURCES_LIST):\n y = i // 3\n x = 2 - (i % 3)\n ax.text(data_mat.shape[0] * 0.6 + 8 * x,\n data_mat.shape[0] * 0.1 + 8 * y,\n cs[0], fontsize=12, va='center', ha='center')\n\n fig.tight_layout()\n sys.stderr.write(f\"Writing heatmap figure to {FIGURE_PATH}\\n\")\n fig.savefig(FIGURE_PATH)", "title": "" }, { "docid": "7e197c703992ec4302e7b5df6f3e3547", "score": "0.50126487", "text": "def multiprocess_parallel_activities(data: pd.DataFrame, job_num: int):\n\tprint_time('parallel activities')\n\tactivity_name = data['name'].unique()\n\n\t# compute for all activities\n\tfor a in activity_name:\n\t\tprint_time('calculate parallel activities for activity %s' % a)\n\n\t\t# get subset\n\t\tsub_data = data.loc[data['name'] == a]\n\n\t\tsteps = get_steps(sub_data, job_num)\n\t\tprint(steps)\n\n\t\tjobs = []\n\t\tout_q = 
Queue()\n\n\t\t# set time interval (=theta)\n\t\tdelta = datetime.timedelta(days=1)\n\n\t\t# start all jobs\n\t\tfor idx, r in enumerate(steps):\n\t\t\tp = Process(target=parallel_activities, args=(\n\t\t\t\tsub_data, delta,\n\t\t\t\tidx + 1,\n\t\t\t\tr, out_q))\n\t\t\tjobs.append(p)\n\t\t\tp.start()\n\n\t\t# collect results\n\t\tres = {}\n\t\tfor i in range(len(steps)):\n\t\t\tres.update(out_q.get())\n\n\t\t# collect processes\n\t\tfor job in jobs:\n\t\t\tjob.join()\n\n\t\t# update DataFrame\n\t\tfor k, v in res.items():\n\t\t\tdata.at[k, 'parallel_activities'] = v", "title": "" }, { "docid": "46707f67b56e727e9a8eb814496df1c8", "score": "0.4991829", "text": "def test_parallelization(self):\n self.do_parallel_download(Constants.load_links(8), serialize=False)", "title": "" }, { "docid": "2eb93e38d78b1ec609437d593dad4c59", "score": "0.4991328", "text": "def evaluate_in_parallel(eval_suite_parallel, individuals, gen):\n\n\t# init global states\n\t# while len(results) > 0:\n\t# \tresults.pop()\n\t# while len(idle_devices) > 0:\n\t# \tidle_devices.pop()\n\n\t# 1. get idle devices\n\t# idle_devices.extend(emulator.get_devices())\n\n\t# 2. assign tasks to devices\n\tprint('length individuals: ' + repr(len(individuals)))\n\tpool = mp.Pool(processes=1)\n\tfor i in range(0, len(individuals)):\n\t\t# while len(idle_devices) == 0:\n\t\t# \ttime.sleep(0.5)\n\t# eval_suite_parallel(individuals[0],0,gen)\n\t\tpool.apply_async(eval_suite_parallel, args=(individuals[i], gen, i),\n\t\t\t\t\t\tcallback=process_results)\n\n\tprint (\"### evaluate_in_parallel is waiting for all processes to finish ... \")\n\t# should wait for all processes to finish\n\tpool.close()\n\tpool.join()\n\n\tprint (\"### ... evaluate_in_parallel finished\")\n\t# assign results\n\twhile len(results) > 0:\n\t\ti, fitness = results.pop(0)\n\t\t# print (i, fitness)\n\t\tindividuals[i][\"fitness\"][\"values\"] = fitness\n\t\tindividuals[i][\"fitness\"][\"valid\"] = True", "title": "" }, { "docid": "c207da05741a720e08e0839a7df76997", "score": "0.4987711", "text": "def starmap(self, function, data, extra_args=None, extra_kwargs=None, **kwargs):\n\n raise NotImplementedError(\"Parallelizer is an abstract base class\")", "title": "" }, { "docid": "97843290f270207e5fe6723b14531e5a", "score": "0.4985", "text": "def gather_array_data(all_array_data, comm):\n\n array_names = all_array_data.keys()\n\n # gather the data from all processors\n collected_data = comm.gather(all_array_data, root=0)\n\n if comm.Get_rank() == 0:\n all_array_data = {}\n size = comm.Get_size()\n\n # concatenate the arrays\n for array_name in array_names:\n array_data = {}\n all_array_data[array_name] = array_data\n\n _props = collected_data[0][array_name].keys()\n for prop in _props:\n data = [collected_data[pid][array_name][prop]\n for pid in range(size)]\n prop_arr = numpy.concatenate(data)\n array_data[prop] = prop_arr\n return all_array_data", "title": "" }, { "docid": "67a582c2c9625527b49ea0dd47ec2c19", "score": "0.4983959", "text": "def main():\n generate_data()\n measure_tips()\n plot_result()", "title": "" }, { "docid": "266dee6725a29829b2f955cf38e1c20b", "score": "0.49830735", "text": "def __call__(self, trainer):\n with chainer.using_config('train', False), \\\n chainer.using_config('enable_backprop', False):\n predictions = self._model.predict(self._inputs, self._order)\n\n for i in range(self._order + 1):\n pred_send = predictions[i].data\n if self._comm.Get_rank() == 0:\n self._comm.Gatherv(pred_send, self._predictions[i], root=0)\n self._plot(trainer,\n 
self._coefficients[i] * self._predictions[i],\n self._coefficients[i] * self._labels[i],\n self._properties[i], self._units[i])\n else:\n self._comm.Gatherv(pred_send, None, root=0)\n\n plt.close('all')", "title": "" }, { "docid": "a2d77424db844d103b193fca29315b77", "score": "0.49742383", "text": "def scatter(self, sendobj, root=0):\n\n # Check if obj can be scattered as buffer objects\n use_buffer = use_buffer_meth(sendobj, root)\n\n # If provided object uses a buffer\n if use_buffer:\n # Sender prepares for scattering\n if(self._rank == root):\n # Raise error if length of axis is not divisible by size\n if len(sendobj) % self._size: # pragma: no cover\n raise e13.ShapeError(\"Input argument 'sendobj' cannot \"\n \"be divided evenly over the \"\n \"available number of MPI ranks!\")\n\n # Determine shape of scattered object\n buff_shape = list(sendobj.shape)\n buff_shape[0] //= self._size\n\n # Initialize empty buffer array\n recvobj = np.empty(\n *comm.bcast([buff_shape, sendobj.dtype], root=root))\n\n # Scatter NumPy array\n comm.Scatter(sendobj, recvobj, root=root)\n\n # Receivers receive the array\n else:\n # Initialize empty buffer array\n recvobj = np.empty(*comm.bcast(None, root=root))\n\n # Receive scattered NumPy array\n comm.Scatter(None, recvobj, root=root)\n\n # Remove single dimensional entries from recvobj\n recvobj = recvobj.squeeze()\n\n # If not, scatter obj the normal way\n else:\n recvobj = comm.scatter(sendobj, root=root)\n\n # Return recvobj\n return(recvobj)", "title": "" }, { "docid": "5176f797abf204b6f05bf68a4154e267", "score": "0.4971189", "text": "def pipeline(self):\n # Read data\n print('Loading data')\n self.__load_data_csv()\n\n # Generate datasets\n self.__load_data_as_tensors()\n\n # Fit the model\n self.__fit()\n\n # Predict\n self.__predict()", "title": "" }, { "docid": "463a870755802646326e143007ca4a1b", "score": "0.49693045", "text": "def spawn_threads():\n command_str = \"(rm -r _logs)\"\n subprocess.run(command_str, shell=True)\n start_time = time.time()\n reserved_gpus = check_available_gpus()\n reserved_gpus.pop(0)\n reserved_gpus.pop(0)\n print(Colors.OKGREEN, \"GPUs\", reserved_gpus, \"are available.\", Colors.ENDC)\n p = mp.Pool(len(reserved_gpus))\n for i in range(len(reserved_gpus)+1):\n queue.put(optimizer.ask())\n p.map(tt, reserved_gpus)\n p.close()\n print(\"All Threads finished.\")\n sorted_sets = sorted(list(zip(optimizer.yi, optimizer.Xi)), key=lambda tup: tup[0])\n print(\"BEST SET:\", sorted_sets[0])\n print(\"STARTING TESTING\")\n test_args = map_val_to_param_batch(sorted_sets[0][1], reserved_gpus[0])\n test_args.training = False\n avg_test_accuracy = setup_thread_environment(test_args)\n print(\"Test accuracy:\", avg_test_accuracy)\n end_time = time.time()\n time_elapsed = end_time - start_time\n print(\"It took\", time_elapsed, \"seconds.\")\n pickle.dump(sorted_sets, open(\"_logs/optimizer_points.pkl\", \"wb\"))\n try:\n file_path = \"_logs/dl_optimizer_result.txt\"\n label_file = open(file_path, \"w\")\n label_file.write(\"Best setup found:\\n\")\n label_file.write(str(sorted_sets[0]))\n label_file.write(\"\\nTime to process: \")\n label_file.write(str(end_time - start_time))\n label_file.write(\"\\nTest Accuracy: \")\n label_file.write(str(avg_test_accuracy))\n finally:\n label_file.close()", "title": "" }, { "docid": "87253e118ec295e9727f9528dd81068c", "score": "0.49641848", "text": "def _run_single_processing(self,\r\n star_reshape: np.ndarray,\r\n im_shape: Tuple[int, int, int],\r\n indices: np.ndarray) -> None:\r\n 
print(\"SINGLE PROCESSING!!!!!!!!!!!!!!!\")\r\n start_time = time.time()\r\n\r\n for i, pca_number in enumerate(self.m_components):\r\n progress(i, len(self.m_components), 'Creating residuals...', start_time)\r\n parang = -1.*self.m_star_in_port.get_attribute('PARANG') + self.m_extra_rot\r\n\r\n residuals, res_rot = psf_subtraction(images=star_reshape,\r\n angles=parang,\r\n ref_data=self.ref_data,\r\n pca_number=int(pca_number),\r\n pca_sklearn=self.m_pca,\r\n im_shape=im_shape,\r\n indices=indices)\r\n\r\n hist = f'max PC number = {np.amax(self.m_components)}'\r\n\r\n # 1.) derotated residuals\r\n if self.m_res_arr_out_ports is not None:\r\n print('derotate')\r\n self.m_res_arr_out_ports[pca_number].set_all(res_rot)\r\n self.m_res_arr_out_ports[pca_number].copy_attributes(self.m_star_in_port)\r\n self.m_res_arr_out_ports[pca_number].add_history('PcaPsfSubtractionModule', hist)\r\n\r\n # 2.) mean residuals\r\n if self.m_res_mean_out_port is not None:\r\n print('mean')\r\n stack = combine_residuals(method='mean', res_rot=res_rot)\r\n self.m_res_mean_out_port.append(stack, data_dim=3)\r\n\r\n # 3.) median residuals\r\n if self.m_res_median_out_port is not None:\r\n print('stack')\r\n stack = combine_residuals(method='median', res_rot=res_rot)\r\n self.m_res_median_out_port.append(stack, data_dim=3)\r\n\r\n # 4.) noise-weighted residuals\r\n if self.m_res_weighted_out_port is not None:\r\n print('noise-weighted')\r\n stack = combine_residuals(method='weighted',\r\n res_rot=res_rot,\r\n residuals=residuals,\r\n angles=parang)\r\n\r\n self.m_res_weighted_out_port.append(stack, data_dim=3)\r\n\r\n # 5.) clipped mean residuals\r\n if self.m_res_rot_mean_clip_out_port is not None:\r\n print('clipped')\r\n stack = combine_residuals(method='clipped', res_rot=res_rot)\r\n self.m_res_rot_mean_clip_out_port.append(stack, data_dim=3)", "title": "" }, { "docid": "1aa4f89f5c930a18328534641590e2cd", "score": "0.49614656", "text": "def execute_pipelines(args):\n\n # Setup some parameter values for find_event_pipeline().\n if args.cadence == \"complex\":\n complex_cadence = True\n if len(args.source_name) < 1:\n print(\"\\n*** plotSETI: Complex cadence requires a source_name.\")\n sys.exit(RETURN_ERROR)\n\n else:\n complex_cadence = False\n if args.cadence == \"on\":\n first_file = \"ON\"\n else:\n first_file = \"OFF\"\n h5_dir = os.path.abspath(args.h5_dir) + \"/\"\n dat_dir = os.path.abspath(args.dat_dir) + \"/\"\n out_dir = os.path.abspath(args.out_dir) + \"/\"\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n if args.plot_offset:\n offset=\"auto\"\n else:\n offset=0\n\n # Establish output pathnames,\n path_csvf = out_dir + NAME_CSVF\n clean_event_stuff(out_dir)\n\n # Make the h5 and dat lists.\n # Default to auto-generation?\n if args.h5dat_lists is None:\n SZ_user_list = 0\n else:\n SZ_user_list = len(args.h5dat_lists)\n if args.debug:\n print(f\"DEBUG h5dats_list: #{SZ_user_list} {args.h5dat_lists}\")\n if SZ_user_list == 0: # Default to auto-generation.\n path_h5_list = out_dir + NAME_H5_LIST\n path_dat_list = out_dir + NAME_DAT_LIST\n number_in_cadence = make_lists(h5_dir, path_h5_list, dat_dir, path_dat_list)\n if number_in_cadence == 0:\n return RETURN_ERROR\n else: # User-specified lists\n if SZ_user_list != 2:\n print(f\"\\n*** plotSETI: h5dat_lists had {SZ_user_list} elements; must be 2 (one for h5 and one for dat)!\")\n return RETURN_ERROR\n if args.h5dat_lists[0] is None or args.h5dat_lists[1] is None:\n print(f\"\\n*** plotSETI: h5dat_lists had {SZ_user_list} elements; must be 2 (one 
for h5 and one for dat)!\")\n return RETURN_ERROR\n # Check the list of h5 files.\n path_h5_list = args.h5dat_lists[0]\n if not os.path.exists(path_h5_list):\n print(f\"\\n*** plotSETI: File {path_h5_list} does not exist!\")\n return RETURN_ERROR\n N_h5 = count_text_lines(path_h5_list)\n print(f\"plotSETI: Found {N_h5} h5 files.\")\n # Check the list of dat files.\n path_dat_list = args.h5dat_lists[1]\n if not os.path.exists(path_dat_list):\n print(f\"\\n*** plotSETI: File {path_dat_list} does not exist!\")\n return RETURN_ERROR\n N_dat = count_text_lines(path_dat_list)\n print(f\"plotSETI: Found {N_dat} dat files.\")\n # Make sure that the lists are of the same size.\n if N_h5 != N_dat:\n print(\"\\n*** plotSETI: Count of dat files must = count of h5 files!\")\n return RETURN_ERROR\n number_in_cadence = N_h5\n\n # Run find_event_pipeline()\n if complex_cadence:\n df_check = find_event_pipeline(path_dat_list,\n path_h5_list,\n filter_threshold = args.filter_threshold,\n number_in_cadence = number_in_cadence,\n on_source_complex_cadence=args.source_name,\n sortby_tstart=True,\n check_zero_drift=False,\n SNR_cut=args.snr_threshold,\n min_drift_rate=args.min_drift_rate,\n max_drift_rate=args.max_drift_rate,\n user_validation=False,\n csv_name=path_csvf,\n saving=True)\n else: # not a complex cadence\n df_check = find_event_pipeline(path_dat_list,\n path_h5_list,\n filter_threshold = args.filter_threshold,\n number_in_cadence = number_in_cadence,\n on_source_complex_cadence=False,\n on_off_first=first_file,\n sortby_tstart=True,\n check_zero_drift=False,\n SNR_cut=args.snr_threshold,\n min_drift_rate=args.min_drift_rate,\n max_drift_rate=args.max_drift_rate,\n user_validation=False,\n csv_name=path_csvf,\n saving=True)\n\n if df_check is None:\n print(\"\\n*** plotSETI: No events produced in find_event_pipeline()!\")\n return RETURN_ERROR\n\n # Make the plots for all of the HDF5/DAT file pairs in batch mode.\n matplotlib.use(\"agg\", force=True)\n plot_event_pipeline(path_csvf,\n path_h5_list,\n plot_dir=out_dir,\n filter_spec=args.filter_threshold,\n offset=offset,\n user_validation=False)\n\n print(f\"\\nplotSETI: Plots are stored in directory {out_dir}.\")\n\n return RETURN_NORMAL", "title": "" } ]
b863464a6d76e353b40be6333e3a7837
Returns a string of text in a filename format
[ { "docid": "0cfbf84790b3da0475cef545c5bfff4b", "score": "0.7140004", "text": "def FileNameFormatter(strText):\n import string\n\n # Strip text of punctuation\n for c in string.punctuation:\n strText = strText.replace(c, \"\")\n # Strip WhiteSpaces from FileName\n if \" \" in strText:\n strText = strText.replace(\" \", \"_\") + \".txt\"\n return strText", "title": "" } ]
[ { "docid": "9654ec3c00778fe5a356bffd25ba6a86", "score": "0.7665484", "text": "def get_file(self, text):\n\n\t\treturn text.strip().replace('_', '/')", "title": "" }, { "docid": "b3c3595ed9340ebfde71fc54b3d4e7f2", "score": "0.7471361", "text": "def filename():\n return ''", "title": "" }, { "docid": "f177f9eb9a1e1ccd3ffb0f3d378bdadb", "score": "0.73278856", "text": "def get_filename(self, text):\n return \"\".join([c for c in text.replace(\" \", \"-\") if c.isalnum() or c == \"-\"][:30])", "title": "" }, { "docid": "39937f0b92ab9b48dd2260b216cf0f23", "score": "0.7308444", "text": "def filename(self) -> str:", "title": "" }, { "docid": "984e88acc48f9ba37a825738e6b27533", "score": "0.7257224", "text": "def file_name(video_name):\r\n\r\n video_name = str(video_name)\r\n text_video = str(video_name[13:-4]) + \".txt\"\r\n\r\n return text_video", "title": "" }, { "docid": "470590ceed62d0e116e14837df532509", "score": "0.7183373", "text": "def txt_filename(self):\n return self.base_name + self.config['general']['file-suffix'] + '.txt'", "title": "" }, { "docid": "ade2e0f87f4e8daf7099045d3622a5af", "score": "0.7126048", "text": "def txt_filename(self) -> str:\n if self.txt is not None:\n return self.filename.replace(\".bin\", \".txt\")", "title": "" }, { "docid": "9c6a6bc245ab4dd4873c00a336f86c62", "score": "0.7101541", "text": "def convert_to_filename(text):\n import unicodedata\n normalized = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode()\n valid_chars = f'-_.() {string.ascii_letters}{string.digits}'\n filename = ''.join(c for c in normalized if c in valid_chars)\n filename = filename.replace(' ', '_') # Replace spaces with underscores\n return filename", "title": "" }, { "docid": "79729143bdd56c29f52c913d8ba80368", "score": "0.70931315", "text": "def extract_file_name(self):\n\n path = ntpath.basename(self.path)\n head, tail = ntpath.split(path)\n filename = tail.replace(\".txt\", \"\")\n\n return filename", "title": "" }, { "docid": "6cefc5e3ccdb97809762a91c7fa1b22e", "score": "0.70490056", "text": "def filename_from_string(text):\n text = text.lower()\n valid_chars = \"-_.\" + string.ascii_letters + string.digits\n return ''.join(c for c in text if c in valid_chars)", "title": "" }, { "docid": "7f1aa17824a1b71901e2063b9a1f2671", "score": "0.6988529", "text": "def get_filename(self) -> str:\n ...", "title": "" }, { "docid": "95fc49d7e464ee5a1920b6ff6bd79cdd", "score": "0.69760257", "text": "def file_str(f_name: str) -> str:\n with open(f_name, 'r') as f:\n return ''.join(f.readlines())", "title": "" }, { "docid": "2d092763820947a41e7e05b69b1ce4ac", "score": "0.69716275", "text": "def get_filename(self, msg):\r\n msg = msg.split()\r\n del msg[0]\r\n filename = \"\"\r\n for i in msg:\r\n filename += f\" {i}\"\r\n return filename.strip()", "title": "" }, { "docid": "1304664186eb8b13e38326f7084eaeb5", "score": "0.69584686", "text": "def format_file_name(artist: Text, song: Text) -> Text:\n return artist + \"_\" + song + \".txt\"", "title": "" }, { "docid": "3667461ef9ff3b4821a20a9cae3d230d", "score": "0.6942993", "text": "def filename(self):\n filename = self.title()\n # Strip anything that isn't alphanumeric or spaces\n filename = re.sub('[^\\w\\s]+', '_', filename)\n # Collapse spaces\n filename = re.sub('\\s+', ' ', filename)\n return filename", "title": "" }, { "docid": "3237894f66a34c7bcbfb370f86779e3d", "score": "0.69147205", "text": "def FileName(self) -> str:", "title": "" }, { "docid": "3237894f66a34c7bcbfb370f86779e3d", "score": "0.69147205", "text": "def 
FileName(self) -> str:", "title": "" }, { "docid": "406242610564fd7b7b8e3b6b188403e4", "score": "0.6910153", "text": "def make_file_name(found_song: object) -> str:\n\n def remove_space_slash(st):\n st = st.replace('/', '-')\n st = st.replace(' ', '_')\n return st\n\n art = remove_space_slash(found_song.artist)\n title = remove_space_slash(found_song.title)\n return f'{title}-{art}.txt'", "title": "" }, { "docid": "2df4b2bb2ff61e81188860107c4d344f", "score": "0.6904657", "text": "def _filename(self) -> str:", "title": "" }, { "docid": "64498753144b4ec5e4a3dd4a93f2f127", "score": "0.68744636", "text": "def _get_text(fname):\n pass", "title": "" }, { "docid": "9dc3cf420e26e603ab5a553716ab2022", "score": "0.6843803", "text": "def GetFilename(self):", "title": "" }, { "docid": "9dc3cf420e26e603ab5a553716ab2022", "score": "0.6843803", "text": "def GetFilename(self):", "title": "" }, { "docid": "ae72bb039600184e7342ffd8cbfa8ee5", "score": "0.6770153", "text": "def GetFileName(self):", "title": "" }, { "docid": "bf9e03eb7a796fb80e8c72ff3d8aa36c", "score": "0.67528254", "text": "def filename(self) -> str:\n title = FILENAME_REGEX.sub(\"_\", self.title.lower())\n return f\"{title}{constants.FILE_SUFFIX}\"", "title": "" }, { "docid": "c8504ce779c4b6c7ceb1c0492c0ec5fb", "score": "0.6751053", "text": "def file_name(self):\n return \"\".join(self.file_name_tuple)", "title": "" }, { "docid": "792653262e85ed155e2351f47f04fc48", "score": "0.6742423", "text": "def format_filename(s):\n import string\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # replace spaces with underscores\n return filename", "title": "" }, { "docid": "8140ca816f29d6db269552dd9fa595ea", "score": "0.673824", "text": "def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filename.\n return filename", "title": "" }, { "docid": "beef93b183ebb9071ccae48ffb1cfe58", "score": "0.67344147", "text": "def getFileName(self) -> unicode:\n ...", "title": "" }, { "docid": "a8c18150d4d23618e6734c09e655c248", "score": "0.67282546", "text": "def get_text(filename: Path) -> str:\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n return f.read()", "title": "" }, { "docid": "855f9f23a322b2910a0a1afdf37f83c9", "score": "0.67143565", "text": "def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ','_') # I don't like spaces in filenames.\n return filename", "title": "" }, { "docid": "855f9f23a322b2910a0a1afdf37f83c9", "score": "0.67143565", "text": "def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ','_') # I don't like spaces in filenames.\n return filename", "title": "" }, { "docid": "855f9f23a322b2910a0a1afdf37f83c9", "score": "0.67143565", "text": "def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ','_') # I don't like spaces in filenames.\n return filename", "title": "" }, { "docid": "6000fef3aa800dce0c26d0512ac8687f", "score": "0.6707387", "text": "def getFileText( filename 
):\n\ttry:\n\t\tfile = open( filename, 'r' )\n\t\tfileText = file.read()\n\t\tfile.close()\n\t\treturn fileText\n\texcept IOError:\n\t\tprint >> sys.stderr, ( 'The file ' + filename + ' does not exist, an empty string will be returned.' )\n\t\treturn ''", "title": "" }, { "docid": "7cfb33a28553bb7208ec1c0f2f18a705", "score": "0.66875386", "text": "def format_filename(s: str):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ','_') # I don't like spaces in filenames.\n return filename", "title": "" }, { "docid": "a805786177f4641a6e46e7d2753d91b2", "score": "0.6680221", "text": "def get_filename(self):\n\n sBack = \"\"\n oErr = ErrHandle()\n try:\n # Check and/or create the appropriate directory for the user\n dir = os.path.abspath(os.path.join( WRITABLE_DIR, \"../folia\", \"twitter\"))\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n # Check for row-number\n if not self.row is None:\n dir = os.path.abspath(os.path.join(dir, \"row{:04}\".format(self.row)))\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n # Combine into a filename\n if self.row is None:\n sBack = os.path.abspath(os.path.join(dir, \"tw_{}\".format(self.coordinate)))\n else:\n sBack = os.path.abspath(os.path.join(dir, \"tw_{:04}_{}\".format(self.row, self.coordinate)))\n # NOTE: \n # the calling program should append e.g. \".txt\" or what is needed\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"get_filename\")\n\n return sBack", "title": "" }, { "docid": "67a8b4c5f04027fb232b1464e455eb1c", "score": "0.66739887", "text": "def format_filename(s):\n \n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ','_') # I don't like spaces in filenames.\n return filename", "title": "" }, { "docid": "ee846aa5d5ca25afa0ba3016d8f3396d", "score": "0.66733915", "text": "def format_filename(name):\n import string\n\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in name if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename", "title": "" }, { "docid": "ad37e346c4a54de5e30c81dabbd4f338", "score": "0.6664718", "text": "def format_filename(s):\r\n valid_chars = \"-_() %s%s\" % (string.ascii_letters, string.digits)\r\n filename = ''.join(c for c in s if c in valid_chars)\r\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\r\n return filename", "title": "" }, { "docid": "d2dab4df7da43e7f235141fe251da3f2", "score": "0.6661445", "text": "def getFileStr(self):\n fname, mask = QFileDialog.getOpenFileName(self, \\\n \"Get a File String.\", self.currdir, \\\n \"All Files(*.*);;Images(*.jpg *.JPG *.jpeg *.JPEG *.jpe \"\n + \"*.JPE *.png *.PNG *.gif *.GIF *.tiff *.TIFF *.tga *.TGA\"\n + \"*.bmp *.BMP *.xpm *.XPM *.xcf *.XCF *.ico *.ICO);;\"\n + \"Web Pages(*.html *htm *.HTML *.htm *.php *.PHP);;\" \\\n + \"Stylesheets(*.css *.CSS);;\" \\\n + \"Text Files (*.txt *.text *.TXT *.TEXT);;\")\n if (fname):\n return fname\n else:\n return None", "title": "" }, { "docid": "f27c1178fcf7cfcb9a64cd898742c4dc", "score": "0.6661044", "text": "def _get_filename(filepath):\n return str(Path(filepath).name)", "title": "" }, { "docid": "8c7443e8097288edab6e961e8aeb9d04", "score": "0.6630858", "text": "def parse_for_text_file(name):\n return \"{} {}\".format(remove_extension(name), parse_name(name))", "title": "" }, 
{ "docid": "1de31181373323fe9d86e7bb2a173aed", "score": "0.6596556", "text": "def get_filename(self):\n ext = ''\n msgClass = self.message_class()\n if msgClass:\n ext = msgClass.extension()\n src = self.get_source_info()\n if src and src.get('name'):\n src_name = src['name']\n result = os.path.splitext(src_name)[0] + ext\n else:\n result = str(int(time.time())) + ext\n return result", "title": "" }, { "docid": "260d5be5c02a0b8f512b8e0b8747ea09", "score": "0.65861154", "text": "def get_text_file_content(file_name) -> str:\n if isinstance(file_name, str):\n file_name = Path(file_name)\n with file_name.open(\"r\", encoding=\"utf-8\") as file_pointer:\n return file_pointer.read()", "title": "" }, { "docid": "8b140503ec0bcb43de8128c2e5d8a281", "score": "0.6578483", "text": "def get_file_printing(self):\n r = self.s.get(self.base_address + '/api/job')\n if r.status_code != 200:\n raise Exception(\"Error: {code} - {content}\".format(code=r.status_code, content=r.content.decode('utf-8')))\n\n data = r.content.decode('utf-8').split('\\n')\n for line in data:\n if 'name' in line:\n # check if null\n if 'null' in line:\n raise Exception('Error reading filename being printed')\n else:\n return line[line.find(':')+1:line.find(',')].replace('\"', '').strip()\n return ''", "title": "" }, { "docid": "b1d12fe34e3f5e0705d46875ac1ee531", "score": "0.65723854", "text": "def get_filename(file_path):\n return os.path.basename(file_path)", "title": "" }, { "docid": "9f98288e5b95f3fe36adbefaaae8deeb", "score": "0.6555228", "text": "def fetch_template_text(self):\r\n if self.args.rename():\r\n return self.args.input_file()\r\n else:\r\n file = open(self.args.input_file(), 'r')\r\n file_text = file.read()\r\n file.close()\r\n return file_text", "title": "" }, { "docid": "85674102d61a530d81c760f9fdd39bf6", "score": "0.6552386", "text": "def getFullFileName(self):\n\t\tself.fileNumber=self.getFileNumber();\n\t\t\n\t\tfileName=self.filePrefix + \"%04.0f\" %(self.fileNumber) + self.fileFormat[-4:];\n\t\treturn os.path.join(self.filePath, fileName);", "title": "" }, { "docid": "ea48949f4e26270f0630e88161ef962d", "score": "0.6547691", "text": "def get_filename():\n #date_str = maturitydate_from_timestamp(today)\n return '_'.join(['ABSA_COMM_TDB_TRD']) + '.DAT'", "title": "" }, { "docid": "0d30840bf6bb6bc761400a48a98e7fa3", "score": "0.6542504", "text": "def fileName(self):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # return the basename of the full file path if the file is specified as a path {{{\n if (isinstance(self.file_, str)):\n return os.path.basename(self.file_)\n # }}}\n # otherwise return a generic string {{{\n else:\n return \"<DATA>\"\n # }}}\n # }}}", "title": "" }, { "docid": "954288d2c09a8c4c05df3031456fedc3", "score": "0.65384203", "text": "def filename(self):\n return ''", "title": "" }, { "docid": "b2778a2ef0e10c83db3e4ef8191e7b84", "score": "0.65331036", "text": "def filename(self, path):\n if str(path) in self._data:\n return self._data[str(path)][2]\n return \"\"", "title": "" }, { "docid": "c9d3949aad4d203025b92046cb6adb88", "score": "0.6519206", "text": "def get_filename_body(self, lesson_soup):\n filename_body = lesson_soup.title.text\n\n # Sanitize filename. 
It avoids `OSError: [Errno 22]` while file writing\n # and some potentially problematic characters in filenames\n invalid_chars = '#%&\\/?:*\"<>{|}\\t'\n for char in invalid_chars:\n filename_body = filename_body.replace(char, '')\n\n return filename_body", "title": "" }, { "docid": "14ba69f9e640c47686d23af028c88d6a", "score": "0.6497784", "text": "def get_file_string(file_path):\n with open(file_path) as file:\n return file.read()", "title": "" }, { "docid": "0b13004ece9e32bfb66318679cd71445", "score": "0.6492559", "text": "def getfilename(self):\n return str", "title": "" }, { "docid": "808856958339a50f73847de8eb4c09cc", "score": "0.647806", "text": "def run_text_file(run):\n assert isinstance(run, BPRunBase)\n return \"%s.txt\" % run.get_file_name()", "title": "" }, { "docid": "24bb2cb431a454db532dcbe6e491d77a", "score": "0.6470369", "text": "def filename(self):\n params = list(self._internal.keys())\n params.sort()\n fname = self.model + \"_\"\n for p in params[:-1]:\n fname += (\"%s_%.3f_\" % (self.sanitize(p),\n self._internal[p]))\n fname += (\"%s_%.3f\" % (self.sanitize(params[-1]),\n self._internal[params[-1]]))\n fname += \".txt\"\n return fname", "title": "" }, { "docid": "e234d7b7c19975681efba6ffd4be029f", "score": "0.64669", "text": "def file(filename):\n return str(open(filename, 'r').read())", "title": "" }, { "docid": "cf9462bfbfe1b517ee332b234e6bd6df", "score": "0.6458126", "text": "def extract_filename_tag(file_path: str) -> str:\n\n return Path(file_path).resolve().stem", "title": "" }, { "docid": "ce654c9ac34644351cd53b81c2ef2305", "score": "0.6451068", "text": "def get_filename(self):\n # The id is the unique part of the filename.\n id = str(datetime.datetime.now())\n # Format the id a bit (remove whitspace and special characters)\n id = id.replace(' ', '').replace(':', '').replace('.', '')\n filename = os.path.join(self.output_dir, id) + \".\" + self.format\n return filename", "title": "" }, { "docid": "bf909eaaafa09a658d9fd7b26f232209", "score": "0.6449798", "text": "def _make_filename_from_filepath(self, path):\n return os.path.split(path)[1]", "title": "" }, { "docid": "988ed64a2b6ebcf2d24637a1ad294d4f", "score": "0.644611", "text": "def getFileName():\n name = bpy.path.display_name_from_filepath(bpy.data.filepath)\n if name == \"\":\n return None\n return name", "title": "" }, { "docid": "a5b3c89f89db406ea0092b516f52e3d1", "score": "0.6442713", "text": "def generate_filename(\r\n anime_name_short,anime_name,\r\n song_name,theme_type,\r\n filetype='webm'\r\n):\r\n if Opts.Download.filename:\r\n filename = Opts.Download.filename\r\n else:\r\n filename = f\"%A %t (%S).%e\"\r\n translate = {\r\n '%':'%',\r\n 'a':anime_name_short,\r\n 'A':anime_name,\r\n 't':theme_type,\r\n 's':song_name.replace(' ','_'),\r\n 'S':song_name,\r\n 'e':filetype\r\n }\r\n out = ''\r\n i = 0\r\n while i < len(filename):\r\n if filename[i] == '%':\r\n out += translate[filename[i+1]]\r\n i += 2\r\n else:\r\n out += filename[i]\r\n i += 1\r\n return out", "title": "" }, { "docid": "aaf324c138ff28d1841d64f52677fe97", "score": "0.64105505", "text": "def get_filename(self):\n return \".\".join([\n self.varname,\n self.scenario,\n self.driving_model,\n \"RegCM4\",\n \"1hr\",\n \"NAM-11\",\n \"raw\",\n \"nc\"\n ])", "title": "" }, { "docid": "bb1f765de38d6110be6650d3ba1463a9", "score": "0.6401786", "text": "def filetostring(filename: str) -> str:\n with open(filename, 'r') as file:\n return file.read()", "title": "" }, { "docid": "49a7f299ab094f987dd8b3c62659a9da", "score": "0.6400903", "text": "def 
to_filename(name):\r\n return name.replace('-','_')", "title": "" }, { "docid": "35dc009d057b4b0a610194c36d67232d", "score": "0.6394188", "text": "def get_filename (directory, post, keep_names=False):\n return os.sep.join ((\n directory, post.board, str(post.thread),\n (post.image.filename if keep_names else str(post.image.tim)) + \\\n post.image.ext\n ))", "title": "" }, { "docid": "a8a5b2351aeb16477d28e8592478744c", "score": "0.63911164", "text": "def SourceFilename(self): \n\n SrcPath = self.Note[\"SourceFile\"]\n # convert the path to just a simple file (without extension)\n # remove everything except the file name (including the extension)\n fileWithExt = pGenUtil.getFileFromPath(SrcPath)\n # return everything before the extension\n return os.path.splitext(fileWithExt)[0]", "title": "" }, { "docid": "d35ceed9e9ad7df95835d77c4d46c35c", "score": "0.63833505", "text": "def filename(start: Any, end: Any) -> Text:\n date = datetime.strftime(start, \"%Y-%m-%d\")\n time_start = datetime.strftime(start, \"%H:%M:%S\")\n time_end = datetime.strftime(end, \"%H:%M:%S\")\n return \"__\".join([date, time_start, time_end])", "title": "" }, { "docid": "04cb54dbd4cb47510e17a1126bc1cd1b", "score": "0.637942", "text": "def _filename(self, tag):\n name = str(self.counter).zfill(6) + \":\"\n name += re.sub(r\"\\s\", \"\", tag.lower())\n name += self.suffix + \".p\"\n return os.path.join(self.file_path, name)", "title": "" }, { "docid": "db7dcf62cab32b82b2124b874ce983b4", "score": "0.637577", "text": "def get_filename(filename):\n # add postfix and extension\n name_base = filename + strftime('_%y%m%d%H%M')\n file_path = Path(name_base + FILE_EXT)\n\n # check file exist and add version postfix if needed\n i = 0\n while file_path.exists():\n i += 1\n file_path = Path(name_base + '_{}'.format(i) + FILE_EXT)\n\n return file_path.name", "title": "" }, { "docid": "2a8e26db7748e6ef1fc9914115c4a029", "score": "0.6373613", "text": "def get_file_text(path):\n file_io = open(path, \"r\")\n text = file_io.read()\n file_io.close()\n return text", "title": "" }, { "docid": "2a8e26db7748e6ef1fc9914115c4a029", "score": "0.6373613", "text": "def get_file_text(path):\n file_io = open(path, \"r\")\n text = file_io.read()\n file_io.close()\n return text", "title": "" }, { "docid": "411c748f700288ccdc4c40170d04d57a", "score": "0.63723373", "text": "def filename(x):\r\n return split(x)[1]", "title": "" }, { "docid": "82eba882b17ceccf9b3f161846ad7360", "score": "0.6367155", "text": "def get_title(filename):\n return filename[3:].replace(\"_\", \" \")", "title": "" }, { "docid": "50e6426ba8e27eb59be34fba45b3df3f", "score": "0.6364247", "text": "def __build_file_name(self):\r\n index = 'title.' + self.__lang\r\n title = self.__config['COVER'][index]\r\n # Remove lateral black spaces, capital letters and replace rest of black spaces with underscores\r\n title = title.strip().lower().replace(' ', '_')\r\n # Add date to PDF file name\r\n title = title + '{}'.format(datetime.now().date())\r\n\r\n # If title has dots, remove then. Need the dot for the extension\r\n if '.' 
in title:\r\n title = re.sub('\\..*', '', title)\r\n\r\n # Return the output directory (without last '/')/file_name.pdf\r\n return directory_helper.process_directory(self.__output_directory) + '/' + title + '.pdf'", "title": "" }, { "docid": "7ce42cfde2a2cb0b448cdd7f31b4870a", "score": "0.6361237", "text": "def filename(self):\n return str(os.path.basename(self.path))", "title": "" }, { "docid": "97c70334658cca0f0412ebd5de716c3f", "score": "0.6360178", "text": "def get_filename() -> str:\n parser = ArgumentParser(description='Build a decision tree from data stored in a text file.')\n parser.add_argument('filename',\n metavar='filename',\n type=str,\n help='Data in the format described in specification.pdf')\n args = parser.parse_args()\n filename = vars(args)['filename']\n return filename", "title": "" }, { "docid": "e152ad25b02ce134457f26ed8e4864f6", "score": "0.6351904", "text": "def get_file_name(self, *path_elements, **kw):\n\n split=kw.get('split',None)\n extra=kw.get('extra',None)\n ext=kw.get('ext','fits')\n\n dir=self.get_file_dir(*path_elements)\n\n name=[copy.copy(self._run)] + list(path_elements)\n\n if split is not None:\n if len(split) != 2:\n raise ValueError(\"split should be [beg,end]\")\n\n name.append('%06d' % split[0])\n name.append('%06d' % split[1])\n\n if extra is not None:\n name.append(extra)\n\n name='-'.join(name)\n name='%s.%s' % (name, ext)\n\n return os.path.join(dir, name)", "title": "" }, { "docid": "87d55691dc6277fb1787df3142f7adf2", "score": "0.63502085", "text": "def _create_name(self) -> str:\n fname = self.fh.name.split(\"/\")[-1]\n return re.sub(\"[^\\w.]\", \"\", str(fname))", "title": "" }, { "docid": "e49d54151425a68e7aba0d6459cb292f", "score": "0.63457507", "text": "def _get_text(file_name: str):\n with open(file_name, 'r') as file:\n text = file.read()\n return text", "title": "" }, { "docid": "edc3d0a6c31bc1adeec4b2eac33c8d0b", "score": "0.63337356", "text": "def get_text(filename:str) -> str:\n f = codecs.open(filename, encoding='latin-1', mode='r')\n s = f.read()\n f.close()\n return s", "title": "" }, { "docid": "5c93e51ecb0b6ff8f0d29aabb99d96f7", "score": "0.6324948", "text": "def get_filename(file_path, with_extension=False):\n basename = os.path.basename(file_path)\n\n if with_extension:\n filename = basename\n else:\n filename = os.path.splitext(basename)[0]\n\n return filename", "title": "" }, { "docid": "c4e6e0557448c57673c02d1e66f1e241", "score": "0.63238937", "text": "def getFileName(filepath):\r\n return os.path.splitext(os.path.basename(filepath))[0]", "title": "" }, { "docid": "e1007de372edb4b4d7d60c759826ff96", "score": "0.6318899", "text": "def to_file_name(name: str) -> str:\n filename = \"\"\n for char in name:\n if char in '/:.\\\\':\n filename += \"_\"\n else:\n filename += char\n return filename", "title": "" }, { "docid": "61079361a031badbed54bb144e84d1b8", "score": "0.6318021", "text": "def text_to_fpath(text):\n EXTENSION = \".rem\"\n MAX_LEN = 10 # Not including extension\n\n pattern = re.compile(r'[\\W_]+', re.UNICODE) # Restrict to alphanumeric\n alphanum = pattern.sub(\"\", text)\n shortened = alphanum[:MAX_LEN] # Restrict length\n if len(shortened) < 1 or shortened.isdigit(): # If the reminder is something silly like \"@@@@@\"\n if len(text) > 0:\n shortened = hashlib.sha1(text.encode(\"utf8\", 'replace')).hexdigest()[:MAX_LEN]\n else:\n shortened = \"noname\"\n\n fname = shortened + EXTENSION\n fpath = REMIND_DIR.joinpath(fname) # Create path\n\n # We should add a number to the end of the filename if it already 
exists\n num_rems = 0\n WIDTH = 3 # Digits\n while fpath.exists():\n fname = shortened[:MAX_LEN-WIDTH] + str(num_rems).zfill(WIDTH) + EXTENSION\n fpath = REMIND_DIR.joinpath(fname)\n num_rems += 1\n\n return fpath", "title": "" }, { "docid": "e51f73dd24b8ca9b2d773e2485fd5b42", "score": "0.6313276", "text": "def get_filename(path):\n return os.path.splitext(get_file(path))[0]", "title": "" }, { "docid": "1dd816eb9ba2926830a09f367d0d9f2d", "score": "0.63100487", "text": "def file_name(self) -> str:\n return str(self.file.name)", "title": "" }, { "docid": "06e23e00a979027e3025cf0105b67835", "score": "0.63067704", "text": "def get_txt_path():\n (_, week, year) = FedorestdHandler.get_date()\n path = BASE_TXT_PATH.format(year, week)\n\n #if not os.path.exists(path):\n FedorestdHandler.create_txt()\n \n return path", "title": "" }, { "docid": "ff1cff757cb3792ff5bea86535cfdc7b", "score": "0.6304762", "text": "def __get_file_path(self):\n filename = None\n try:\n name = self.web_dom.select(\".enlaceTitulo\")[0].get_text()\n filename = str(name).replace(\"Aplicaciones.Fuentes.\", \"\")\n except Exception as e:\n print(\"Error al capturar nombre de fichero\", e)\n finally:\n return filename", "title": "" }, { "docid": "d177e4dec0861ab8a532eca63ef767aa", "score": "0.63004977", "text": "def get_php_file(self, text):\n\n\t\treturn self.get_file(text) + '.php'", "title": "" }, { "docid": "4a6e694afb785f695a12c231e7e1543a", "score": "0.6298715", "text": "def filename(path: str):\n return os.path.splitext(File.basename(path))[0]", "title": "" }, { "docid": "5583d0239100c74d351d6d929333535e", "score": "0.6291035", "text": "def txt_source_text(self, item):\n with open(\n os.path.join(FILES_STORE, item[\"files\"][0][\"path\"]), encoding=\"ISO-8859-1\"\n ) as f:\n return f.read()", "title": "" }, { "docid": "a507dc62f0f35f629b19d72fb82cbc8b", "score": "0.6289213", "text": "def format_to_fname(s):\n\n valid_chars = \"_ %s%s\" % (string.ascii_letters, string.digits)\n fname = ''.join(c for c in s if c in valid_chars)\n fname = fname.replace(' ', '_')\n return fname", "title": "" }, { "docid": "62714d468d8219ac90669395f5e9ee28", "score": "0.62855196", "text": "def fname(path):\n filename = os.path.basename(path).split('.')[0]\n return filename", "title": "" }, { "docid": "b56d897f688ff9aa7b05e317303ed223", "score": "0.62845343", "text": "def filename(x):\n return path.splitext(path.basename(x))[0]", "title": "" }, { "docid": "f89cdd63d82ee13d1c5bf03aae2fdc0d", "score": "0.62782794", "text": "def get_file_name(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[0]", "title": "" }, { "docid": "ef5cdc6c747b1111c387e15ed9ffefc3", "score": "0.6272007", "text": "def fileName(self):", "title": "" }, { "docid": "ddeeec27d932bead29390cf0ccb46785", "score": "0.6270834", "text": "def get_filename(self):\r\n # TODO: Perhaps we can let file names change when files are replaced?\r\n if not self.file:\r\n return None\r\n filename = self.file.name.split(\"/\")[-1]\r\n return \".\".join(filename.split(\".\")[:-1])", "title": "" }, { "docid": "2722c3d4b7a088fd747652ed447667cd", "score": "0.62666404", "text": "def file_name(movie_file_number, subtitles_path=CONFIG.subtitles_path):\n tmp_name = zip(movie_file_number, movie_file_number[::-1])\n file_path = [subtitles_path.replace(\"_popular\", \"\")]\n file_name = []\n\n for i, (s1, s2) in enumerate(tmp_name):\n file_name.append(s1)\n if i < 4:\n file_path.insert(0, s2 + \"/\")\n\n return \"\".join(file_path[::-1]) + \"\".join(file_name) + \".gz\"", "title": "" }, { 
"docid": "38094d55b2f93351481d1d1db6f90439", "score": "0.6263185", "text": "def format_file_name(metadata) -> str:\n author = format_author_name(first_author(metadata.authors))\n return f'{author} - {metadata.title}'", "title": "" } ]
7a67b4d613ccae500306736feb8cf311
Performs a batch normalization using a standard set of parameters.
[ { "docid": "d222ee00d2451852c5d10f9346db41f4", "score": "0.7165613", "text": "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "title": "" } ]
[ { "docid": "583df9d602bfa25f63378da156b52475", "score": "0.7308336", "text": "def normalize_batch(x: torch.Tensor) -> torch.Tensor:\n f = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n for b in range(x.shape[0]):\n f(x[b])\n\n return x", "title": "" }, { "docid": "9fe2060a254e89a5c3e437e589ee9bbd", "score": "0.7182276", "text": "def batch_normalization(inputs,\n axis=-1,\n momentum=0.99,\n epsilon=1e-3,\n center=True,\n scale=True,\n beta_initializer=init_ops.zeros_initializer(),\n gamma_initializer=init_ops.ones_initializer(),\n moving_mean_initializer=init_ops.zeros_initializer(),\n moving_variance_initializer=init_ops.ones_initializer(),\n beta_regularizer=None,\n gamma_regularizer=None,\n training=False,\n trainable=True,\n name=None,\n reuse=None):\n layer = BatchNormalization(\n axis=axis,\n momentum=momentum,\n epsilon=epsilon,\n center=center,\n scale=scale,\n beta_initializer=beta_initializer,\n gamma_initializer=gamma_initializer,\n moving_mean_initializer=moving_mean_initializer,\n moving_variance_initializer=moving_variance_initializer,\n beta_regularizer=beta_regularizer,\n gamma_regularizer=gamma_regularizer,\n trainable=trainable,\n name=name,\n _reuse=reuse,\n _scope=name)\n return layer.apply(inputs, training=training)", "title": "" }, { "docid": "1d208547b7dc84c4947e2f1b054eddc7", "score": "0.713501", "text": "def test_batch_normalization(self):\n\t\tmodel = Model(BatchNormalization())\n\n\t\tdata = np.array([[ 0.5, 0.7, 0.6 ],\n\t\t\t [ 0.5, 0.4, 0.6 ],\n\t\t\t [ 0.4, 0.8, 0.9 ]], dtype=np.float32)\n\n\t\tmean = np.mean(data, axis=0)\n\t\tvariance = np.sum((data - mean)**2, axis=0) / (data.shape[0]-1)\n\t\tnormalized = (data - mean)/np.sqrt(variance + 1e-5)\n\n\t\tparameter = prediction.Parameter(); parameter.batchSize = 3\n\t\targs = {'rebuild': {'data_dims': [3, 3], 'parameter': parameter}}\n\n\t\tnet = DAALNet().build(model, trainable=False, **args)\n\t\tinitialize_weights(net.model, 0, np.ones_like(mean), np.zeros_like(variance), False)\n\t\tinitialize_input(net.model, 0, variance, forward.populationVariance, False)\n\t\tinitialize_input(net.model, 0, mean, forward.populationMean, False)\n\n\t\twith net.predict(data, rebuild=False) as predictions:\n\t\t\tassert_allclose(predictions, normalized, rtol=1e-5)", "title": "" }, { "docid": "a1d5453c819367715f7714e80c211edf", "score": "0.70719165", "text": "def batch_normalization(x,\n mean,\n variance,\n offset,\n scale,\n variance_epsilon,\n name=None):\n with ops.name_scope(name, \"batchnorm\", [x, mean, variance, scale, offset]):\n inv = math_ops.rsqrt(variance + variance_epsilon)\n if scale is not None:\n inv *= scale\n # Note: tensorflow/contrib/quantize/python/fold_batch_norms.py depends on\n # the precise order of ops that are generated by the expression below.\n return x * math_ops.cast(inv, x.dtype) + math_ops.cast(\n offset - mean * inv if offset is not None else -mean * inv, x.dtype)", "title": "" }, { "docid": "5cc5a3fdff92083125d48bd78cfdc25a", "score": "0.7050859", "text": "def batch_normalization(inputs, gamma, beta, mean, std,\n mode='low_mem'):\n if mode == 'low_mem':\n elm_bn = theano.tensor.elemwise.Elemwise(scalar_op=BNComposite(dtype=inputs.dtype))\n rval = elm_bn(inputs, mean, std, gamma, beta)\n elif mode == 'high_mem':\n rval = (inputs - mean) * (gamma / std) + beta\n else:\n raise ValueError(\n 'mode must be either \"low_mem\", \"high_mem\"')\n return rval", "title": "" }, { "docid": "9131d044798883ef4bf42c1d532352ef", "score": "0.7048123", "text": "def batch_norm(inputs, 
is_training):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n #\n return tf.layers.batch_normalization(\n inputs=inputs,\n axis=3,\n momentum=_BATCH_NORM_DECAY,\n epsilon=_BATCH_NORM_EPSILON,\n center=True,\n scale=True,\n training=is_training,\n fused=True\n )", "title": "" }, { "docid": "b79fe83e80b770e5e58115dae1134b4e", "score": "0.70229006", "text": "def batch_norm(inputs, training, data_format, name=None):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, name=name, fused=_USE_FUSED_BN)", "title": "" }, { "docid": "80417fd4d07082b46c4f6424a53fdeb6", "score": "0.69252026", "text": "def batch_normalize(x, epsilon=1e-5):\n with tf.variable_scope('batch_norm'):\n mean, variance = tf.nn.moments(x, axes=[0, 1, 2])\n\n scale = tf.get_variable('bn_scale',\n shape=[x.get_shape().as_list()[-1]],\n initializer=tf.ones_initializer())\n offset = tf.get_variable('bn_bias',\n shape=[x.get_shape().as_list()[-1]],\n initializer=tf.zeros_initializer())\n normalized = tf.nn.batch_normalization(x=x, mean=mean,\n variance=variance, offset=offset,\n scale=scale, variance_epsilon=epsilon)\n return normalized", "title": "" }, { "docid": "c5aaaa1eab83b9d3b8dcbf5b2ced4371", "score": "0.6857159", "text": "def batch_normalize(x, epsilon=1e-5):\n # Before activation\n with tf.variable_scope('batch_norm'):\n mean, variance = tf.nn.moments(x, axes=[0, 1, 2])\n\n scale = tf.get_variable('bn_scale',\n shape=[x.get_shape().as_list()[-1]],\n initializer=tf.ones_initializer())\n offset = tf.get_variable('bn_bias',\n shape=[x.get_shape().as_list()[-1]],\n initializer=tf.zeros_initializer())\n normalized = tf.nn.batch_normalization(x=x,\n mean=mean,\n variance=variance,\n offset=offset,\n scale=scale,\n variance_epsilon=epsilon)\n return normalized", "title": "" }, { "docid": "8077fcf6a1aa3a005085908accaecbde", "score": "0.6826585", "text": "def bo_batch_norm(self,x, is_training, momentum=0.9, epsilon=0.00001):\n x = tf.layers.batch_normalization(x, momentum=momentum, epsilon=epsilon,training=is_training)\n return x", "title": "" }, { "docid": "da67ba7fc18c83536c40d69550c2f640", "score": "0.6765572", "text": "def batch_norm(self,x, is_training, momentum=0.9, epsilon=0.00001):\n x = tf.layers.batch_normalization(x, momentum=momentum, epsilon=epsilon ,training=is_training)\n return x", "title": "" }, { "docid": "793e2b3127e10f35c9bf403f31c85df3", "score": "0.6765566", "text": "def batch_norm(inputs: tf.Tensor, training: Union[bool, tf.Tensor]) -> tf.Tensor:\n return tf.layers.batch_normalization(\n inputs=inputs, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, fused=True, training=training\n )", "title": "" }, { "docid": "5c373941c4c771d136e7fab4f88214ff", "score": "0.6743877", "text": "def batch_norm_resnet(inputs, training, data_format):\n # We set fused=True for a significant performance boost. 
See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "title": "" }, { "docid": "a99db0d7e48c3759273943cacddb5018", "score": "0.6730554", "text": "def normalize_parameters(model, config, **kwargs):\n\n from snntoolbox.parsing.utils import get_inbound_layers_with_params\n\n print(\"Normalizing parameters...\")\n\n norm_dir = kwargs[str('path')] if 'path' in kwargs else \\\n os.path.join(config.get('paths', 'log_dir_of_current_run'),\n 'normalization')\n\n activ_dir = os.path.join(norm_dir, 'activations')\n if not os.path.exists(activ_dir):\n os.makedirs(activ_dir)\n # Store original weights for later plotting\n if not os.path.isfile(os.path.join(activ_dir, 'weights.npz')):\n weights = {}\n for layer in model.layers:\n w = layer.get_weights()\n if len(w) > 0:\n weights[layer.name] = w[0]\n np.savez_compressed(os.path.join(activ_dir, 'weights.npz'), **weights)\n\n batch_size = config.getint('simulation', 'batch_size')\n\n # Either load scale factors from disk, or get normalization data set to\n # calculate them.\n x_norm = None\n if 'scale_facs' in kwargs:\n scale_facs = kwargs[str('scale_facs')]\n elif 'x_norm' in kwargs or 'dataflow' in kwargs:\n if 'x_norm' in kwargs:\n x_norm = kwargs[str('x_norm')]\n elif 'dataflow' in kwargs:\n x_norm = []\n dataflow = kwargs[str('dataflow')]\n num_samples_norm = config.getint('normalization', 'num_samples',\n fallback='')\n if num_samples_norm == '':\n num_samples_norm = len(dataflow) * dataflow.batch_size\n while len(x_norm) * batch_size < num_samples_norm:\n x = dataflow.next()\n if isinstance(x, tuple): # Remove class label if present.\n x = x[0]\n x_norm.append(x)\n x_norm = np.concatenate(x_norm)\n print(\"Using {} samples for normalization.\".format(len(x_norm)))\n sizes = [\n len(x_norm) * np.array(layer.output_shape[1:]).prod() * 32 /\n (8 * 1e9) for layer in model.layers if len(layer.weights) > 0]\n size_str = ['{:.2f}'.format(s) for s in sizes]\n print(\"INFO: Need {} GB for layer activations.\\n\".format(size_str) +\n \"May have to reduce size of data set used for normalization.\")\n scale_facs = OrderedDict({model.layers[0].name: 1})\n else:\n import warnings\n warnings.warn(\"Scale factors or normalization data set could not be \"\n \"loaded. Proceeding without normalization.\",\n RuntimeWarning)\n return\n\n # If scale factors have not been computed in a previous run, do so now.\n if len(scale_facs) == 1:\n i = 0\n sparsity = []\n for layer in model.layers:\n # Skip if layer has no parameters\n if len(layer.weights) == 0:\n continue\n\n activations = try_reload_activations(layer, model, x_norm,\n batch_size, activ_dir)\n nonzero_activations = activations[np.nonzero(activations)]\n sparsity.append(1 - nonzero_activations.size / activations.size)\n del activations\n perc = get_percentile(config, i)\n scale_facs[layer.name] = get_scale_fac(nonzero_activations, perc)\n print(\"Scale factor: {:.2f}.\".format(scale_facs[layer.name]))\n # Since we have calculated output activations here, check at this\n # point if the output is mostly negative, in which case we should\n # stick to softmax. 
Otherwise ReLU is preferred.\n # Todo: Determine the input to the activation by replacing the\n # combined output layer by two distinct layers ``Dense`` and\n # ``Activation``!\n # if layer.activation == 'softmax' and settings['softmax_to_relu']:\n # softmax_inputs = ...\n # if np.median(softmax_inputs) < 0:\n # print(\"WARNING: You allowed the toolbox to replace \"\n # \"softmax by ReLU activations. However, more than \"\n # \"half of the activations are negative, which \"\n # \"could reduce accuracy. Consider setting \"\n # \"settings['softmax_to_relu'] = False.\")\n # settings['softmax_to_relu'] = False\n i += 1\n # Write scale factors to disk\n filepath = os.path.join(norm_dir, config.get('normalization',\n 'percentile') + '.json')\n from snntoolbox.utils.utils import confirm_overwrite\n if config.get('output', 'overwrite') or confirm_overwrite(filepath):\n with open(filepath, str('w')) as f:\n json.dump(scale_facs, f)\n np.savez_compressed(os.path.join(norm_dir, 'activations', 'sparsity'),\n sparsity=sparsity)\n\n # Apply scale factors to normalize the parameters.\n for layer in model.layers:\n # Skip if layer has no parameters\n if len(layer.weights) == 0:\n continue\n\n # Scale parameters\n parameters = layer.get_weights()\n if layer.activation.__name__ == 'softmax':\n # When using a certain percentile or even the max, the scaling\n # factor can be extremely low in case of many output classes\n # (e.g. 0.01 for ImageNet). This amplifies weights and biases\n # greatly. But large biases cause large offsets in the beginning\n # of the simulation (spike input absent).\n scale_fac = 1.0\n print(\"Using scale factor {:.2f} for softmax layer.\".format(\n scale_fac))\n else:\n scale_fac = scale_facs[layer.name]\n inbound = get_inbound_layers_with_params(layer)\n if len(inbound) == 0: # Input layer\n parameters_norm = [\n parameters[0] * scale_facs[model.layers[0].name] / scale_fac,\n parameters[1] / scale_fac]\n elif len(inbound) == 1:\n parameters_norm = [\n parameters[0] * scale_facs[inbound[0].name] / scale_fac,\n parameters[1] / scale_fac]\n else:\n # In case of this layer receiving input from several layers, we can\n # apply scale factor to bias as usual, but need to rescale weights\n # according to their respective input.\n parameters_norm = [parameters[0], parameters[1] / scale_fac]\n if parameters[0].ndim == 4:\n # In conv layers, just need to split up along channel dim.\n offset = 0 # Index offset at input filter dimension\n for inb in inbound:\n f_out = inb.filters # Num output features of inbound layer\n f_in = range(offset, offset + f_out)\n parameters_norm[0][:, :, f_in, :] *= \\\n scale_facs[inb.name] / scale_fac\n offset += f_out\n else:\n # Fully-connected layers need more consideration, because they\n # could receive input from several conv layers that are\n # concatenated and then flattened. The neuron position in the\n # flattened layer depend on the image_data_format.\n raise NotImplementedError\n\n # Check if the layer happens to be Sparse\n # if the layer is sparse, add the mask to the list of parameters\n if len(parameters) == 3:\n parameters_norm.append(parameters[-1])\n # Update model with modified parameters\n layer.set_weights(parameters_norm)\n\n # Plot distributions of weights and activations before and after norm.\n if 'normalization_activations' in eval(config.get('output', 'plot_vars')):\n from snntoolbox.simulation.plotting import plot_hist\n from snntoolbox.simulation.plotting import plot_max_activ_hist\n\n # All layers in one plot. 
Assumes model.get_weights() returns\n # [w, b, w, b, ...].\n # from snntoolbox.simulation.plotting import plot_weight_distribution\n # plot_weight_distribution(norm_dir, model)\n\n print(\"Plotting distributions of weights and activations before and \"\n \"after normalizing...\")\n\n # Load original parsed model to get parameters before normalization\n weights = np.load(os.path.join(activ_dir, 'weights.npz'))\n for idx, layer in enumerate(model.layers):\n # Skip if layer has no parameters\n if len(layer.weights) == 0:\n continue\n\n label = str(idx) + layer.__class__.__name__ \\\n if config.getboolean('output', 'use_simple_labels') \\\n else layer.name\n parameters = weights[layer.name]\n parameters_norm = layer.get_weights()[0]\n weight_dict = {'weights': parameters.flatten(),\n 'weights_norm': parameters_norm.flatten()}\n plot_hist(weight_dict, 'Weight', label, norm_dir)\n\n # Load activations of model before normalization\n activations = try_reload_activations(layer, model, x_norm,\n batch_size, activ_dir)\n\n if activations is None or x_norm is None:\n continue\n\n # Compute activations with modified parameters\n nonzero_activations = activations[np.nonzero(activations)]\n activations_norm = get_activations_layer(model.input, layer.output,\n x_norm, batch_size)\n activation_dict = {'Activations': nonzero_activations,\n 'Activations_norm':\n activations_norm[np.nonzero(activations_norm)]}\n scale_fac = scale_facs[layer.name]\n plot_hist(activation_dict, 'Activation', label, norm_dir,\n scale_fac)\n ax = tuple(np.arange(len(layer.output_shape))[1:])\n plot_max_activ_hist(\n {'Activations_max': np.max(activations, axis=ax)},\n 'Maximum Activation', label, norm_dir, scale_fac)\n print('')", "title": "" }, { "docid": "3df4ef9e60e5a1aaad420059df1d7299", "score": "0.6728605", "text": "def SyncBatchNorm(*args, **kwargs):\n if paddle.get_device() == 'cpu':\n return nn.BatchNorm2D(*args, **kwargs)\n else:\n return nn.SyncBatchNorm(*args, **kwargs)", "title": "" }, { "docid": "3df4ef9e60e5a1aaad420059df1d7299", "score": "0.6728605", "text": "def SyncBatchNorm(*args, **kwargs):\n if paddle.get_device() == 'cpu':\n return nn.BatchNorm2D(*args, **kwargs)\n else:\n return nn.SyncBatchNorm(*args, **kwargs)", "title": "" }, { "docid": "b7bffd97205da85f0c8f60c88536cf6c", "score": "0.67089415", "text": "def batch_norm(self, input_layer=None, decay=0.999, scale=False,\n epsilon=0.001):\n if input_layer is None:\n input_layer = self.top_layer\n else:\n self.top_size = None\n name = 'batchnorm' + str(self.counts['batchnorm'])\n self.counts['batchnorm'] += 1\n\n center = True\n with tf.variable_scope(name) as scope:\n if self.use_tf_layers:\n layer_obj = normalization_layers.BatchNormalization(\n momentum=decay,\n scale=scale,\n epsilon=epsilon,\n fused=True,\n axis=_data_format_to_channel_axis[self.data_format],\n # We pass this 'scope' argument for compatibility with checkpoints\n # created with the contrib version of batch norm. 
tf_cnn_benchmarks\n # used to use the contrib version.\n _scope=scope,\n center=center,\n name=scope.name)\n bn = layer_obj.apply(input_layer, training=self.phase_train)\n else:\n bn = self._batch_norm_without_layers(input_layer, decay, scale, epsilon)\n self.top_layer = bn\n self.top_size = bn.shape[3] if self.data_format == 'NHWC' else bn.shape[1]\n self.top_size = int(self.top_size)\n mlperf.logger.log_batch_norm(\n input_tensor=input_layer, output_tensor=bn, momentum=decay,\n epsilon=epsilon, center=center, scale=scale, training=self.phase_train)\n return bn", "title": "" }, { "docid": "d2b4a71a3318a5309b61824ac94e41b6", "score": "0.6686015", "text": "def batch_norm(self, x, center=True, scope=None):\n #print('cnn.py bn')\n with tf.variable_scope(scope, 'bn', custom_getter=self.custom_getter) as sc:\n y = tf.layers.batch_normalization(x,\n axis=-1 if self.data_format == 'channels_last' else 1,\n momentum=0.997,\n center=center,\n scale=True,\n epsilon=1e-5,\n training=self.train_mode,\n fused=self.train_mode,\n name=sc)\n\n return y", "title": "" }, { "docid": "e4ace42aa8f34492ff273607976f5d01", "score": "0.6682334", "text": "def batch_norm(X, gamma, beta, momentum = 0.9, eps = 1e-5, scope_name = '', is_training = True, debug = False):\n global _BN_MOVING_MEANS, _BN_MOVING_VARS\n\n #########################\n # the usual batch norm transformation\n #########################\n\n if len(X.shape) not in (2, 4):\n raise ValueError('the input data shape should be one of:\\n' + 'dense: (batch size, # of features)\\n' + '2d conv: (batch size, # of features, height, width)')\n\n # dense\n if len(X.shape) == 2:\n # mini-batch mean\n mean = nd.mean(X, axis=0)\n # mini-batch variance\n variance = nd.mean((X - mean) ** 2, axis=0)\n # normalize\n if is_training:\n # while training, we normalize the data using its mean and variance\n X_hat = (X - mean) * 1.0 / nd.sqrt(variance + eps)\n else:\n # while testing, we normalize the data using the pre-computed mean and variance\n X_hat = (X - _BN_MOVING_MEANS[scope_name]) *1.0 / nd.sqrt(_BN_MOVING_VARS[scope_name] + eps)\n # scale and shift\n out = gamma * X_hat + beta\n\n # 2d conv\n elif len(X.shape) == 4:\n # extract the dimensions\n N, C, H, W = X.shape\n # mini-batch mean\n mean = nd.mean(X, axis=(0,2,3))\n # mini-batch variance\n variance = nd.mean((X - mean.reshape((1, C, 1, 1))) ** 2, axis=(0, 2, 3))\n # normalize\n X_hat = (X - mean.reshape((1, C, 1, 1))) * 1.0 / nd.sqrt(variance.reshape((1, C, 1, 1)) + eps)\n if is_training:\n # while training, we normalize the data using its mean and variance\n X_hat = (X - mean.reshape((1, C, 1, 1))) * 1.0 / nd.sqrt(variance.reshape((1, C, 1, 1)) + eps)\n else:\n # while testing, we normalize the data using the pre-computed mean and variance\n X_hat = (X - _BN_MOVING_MEANS[scope_name].reshape((1, C, 1, 1))) * 1.0 \\\n / nd.sqrt(_BN_MOVING_VARS[scope_name].reshape((1, C, 1, 1)) + eps)\n # scale and shift\n out = gamma.reshape((1, C, 1, 1)) * X_hat + beta.reshape((1, C, 1, 1))\n\n #########################\n # to keep the moving statistics\n #########################\n\n # init the attributes\n try: # to access them\n _BN_MOVING_MEANS, _BN_MOVING_VARS\n except: # error, create them\n _BN_MOVING_MEANS, _BN_MOVING_VARS = {}, {}\n\n # store the moving statistics by their scope_names, inplace\n if scope_name not in _BN_MOVING_MEANS:\n _BN_MOVING_MEANS[scope_name] = mean\n else:\n _BN_MOVING_MEANS[scope_name] = _BN_MOVING_MEANS[scope_name] * momentum + mean * (1.0 - momentum)\n if scope_name not in _BN_MOVING_VARS:\n 
_BN_MOVING_VARS[scope_name] = variance\n else:\n _BN_MOVING_VARS[scope_name] = _BN_MOVING_VARS[scope_name] * momentum + variance * (1.0 - momentum)\n\n #########################\n # debug info\n #########################\n if debug:\n print('== info start ==')\n print('scope_name = {}'.format(scope_name))\n print('mean = {}'.format(mean))\n print('var = {}'.format(variance))\n print('_BN_MOVING_MEANS = {}'.format(_BN_MOVING_MEANS[scope_name]))\n print('_BN_MOVING_VARS = {}'.format(_BN_MOVING_VARS[scope_name]))\n print('output = {}'.format(out))\n print('== info end ==')\n\n #########################\n # return\n #########################\n return out", "title": "" }, { "docid": "a027cf96ea0ddf5047af183aaaf5bfb3", "score": "0.66557914", "text": "def BatchNormalizationTester(map_rank=1,\n init_scale=1,\n init_bias=0,\n normalization_time_constant=5000,\n blend_time_constant=0,\n epsilon=0.00001,\n use_cntk_engine=True,\n norm_shape=(),\n init_mean=None,\n init_variance=None,\n name=''):\n # parameters bound to this Function\n scale = parameter(shape=norm_shape, init=init_scale, name='scale')\n bias = parameter(shape=norm_shape, init=init_bias, name='bias')\n run_mean = constant(shape=norm_shape, value=init_mean,\n name='aggregate_mean')\n run_variance = constant(\n shape=norm_shape, value=init_variance, name='aggregate_variance')\n run_count = constant(0, shape=(), name='aggregate_count')\n\n # expression\n def batch_normalize(x):\n return batch_normalization(x, scale, bias, run_mean, run_variance, running_count=run_count,\n spatial=map_rank == 1, normalization_time_constant=normalization_time_constant,\n blend_time_constant=blend_time_constant, epsilon=epsilon,\n use_cudnn_engine=not use_cntk_engine)\n\n return batch_normalize", "title": "" }, { "docid": "faa4819c19828f52e3056582c390e7ef", "score": "0.6624344", "text": "def batch_normalization(x, phase_train, out_size):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[out_size]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[out_size]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "title": "" }, { "docid": "ebfc1936be360ae0f1fe54fe5f65069a", "score": "0.65854925", "text": "def batch_normalization(\n inputs: remote_blob_util.BlobDef,\n axis: int = -1,\n momentum: float = 0.99,\n epsilon: float = 0.001,\n center: bool = True,\n scale: bool = True,\n beta_initializer: Optional[op_conf_util.InitializerConf] = None,\n gamma_initializer: Optional[op_conf_util.InitializerConf] = None,\n beta_regularizer: Optional[op_conf_util.RegularizerConf] = None,\n gamma_regularizer: Optional[op_conf_util.RegularizerConf] = None,\n moving_mean_initializer: Optional[op_conf_util.InitializerConf] = None,\n moving_variance_initializer: Optional[op_conf_util.InitializerConf] = None,\n trainable: bool = True,\n training: bool = True,\n name: str = \"BatchNorm\",\n) -> remote_blob_util.BlobDef:\n if axis < 0:\n axis += len(inputs.shape)\n assert axis >= 0 and axis < len(inputs.shape)\n\n params_shape = 
[inputs.shape[axis]]\n # Float32 required to avoid precision-loss when using fp16 input/output\n params_dtype = flow.float32 if inputs.dtype == flow.float16 else inputs.dtype\n\n if not flow.current_global_function_desc().IsTrainable() or not trainable:\n training = False\n\n beta, gamma, moving_mean, moving_variance = _get_batch_normalization_variables(\n name,\n center,\n scale,\n params_shape,\n params_dtype,\n trainable,\n beta_initializer,\n beta_regularizer,\n gamma_initializer,\n gamma_regularizer,\n moving_mean_initializer,\n moving_variance_initializer,\n )\n\n if flow.current_scope().device_parallel_desc_symbol.device_tag == \"cpu\":\n if training:\n reduce_axis = []\n for dim in range(len(inputs.shape)):\n if dim != axis:\n reduce_axis.append(dim)\n mean, variance = flow.nn.moments(inputs, reduce_axis, keepdims=False)\n\n def update_moving(moving, this_batch):\n moving_identity = flow.identity(moving)\n flow.assign(\n moving, momentum * moving_identity + (1 - momentum) * this_batch\n )\n\n update_moving(moving_mean, mean)\n update_moving(moving_variance, variance)\n\n return flow.nn.batch_normalization(\n x=inputs,\n mean=mean,\n variance=variance,\n offset=beta,\n scale=gamma,\n variance_epsilon=epsilon,\n axis=axis,\n name=name,\n )\n else:\n mean = moving_mean\n variance = moving_variance\n return flow.nn.batch_normalization(\n x=inputs,\n mean=mean,\n variance=variance,\n offset=beta,\n scale=gamma,\n variance_epsilon=epsilon,\n axis=axis,\n name=name,\n )\n else:\n builder = (\n flow.user_op_builder(name)\n .Op(\"normalization\")\n .Input(\"x\", [inputs])\n .Input(\"moving_mean\", [moving_mean])\n .Input(\"moving_variance\", [moving_variance])\n .Input(\"gamma\", [gamma])\n .Input(\"beta\", [beta])\n .Output(\"y\")\n .Attr(\"axis\", axis)\n .Attr(\"epsilon\", epsilon)\n .Attr(\"training\", training)\n .Attr(\"momentum\", momentum)\n )\n if trainable and training:\n builder = builder.Output(\"mean\").Output(\"inv_variance\")\n\n return builder.Build().InferAndTryRun().RemoteBlobList()[0]", "title": "" }, { "docid": "c25bda49e5e4a764a1fafcdf26d95022", "score": "0.6574302", "text": "def _batch_norm(self, x, phase_train, beta, gamma):\n with tf.variable_scope('bn'):\n# beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n# name='beta', trainable=True)\n# gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n# name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n \n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n \n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "title": "" }, { "docid": "f5de305675fe21b0996c8e66681d2f73", "score": "0.6563682", "text": "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #############################################################################\n # TODO: Implement the training-time forward pass for batch normalization. 
#\n # Use minibatch statistics to compute the mean and variance, use these #\n # statistics to normalize the incoming data, and scale and shift the #\n # normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates that #\n # you need for the backward pass should be stored in the cache variable. #\n # #\n # You should also use your computed sample mean and variance together with #\n # the momentum variable to update the running mean and running variance, #\n # storing your result in the running_mean and running_var variables. #\n #############################################################################\n pass\n #print('x shape ', x.shape)\n\t\n sample_mean = x.mean(axis=0); \n #print('sample_mean shape ', sample_mean.shape)\n xu = x-sample_mean\n #print('xu shape ', xu.shape)\n xu_squared = xu**2\n\t\n sample_variance = xu_squared.mean(axis=0)\n #print('sample_variance shape = ', sample_variance.shape)\n #print('sample_variance = ', sample_variance)\n\t\n num = xu; \n eps_array = -1*eps*np.ones(sample_variance.shape)\n var_eps = sample_variance+eps_array\n #print('var_eps shape = ', var_eps.shape)\n #print('var_eps = ', var_eps)\n\t\n sqrt_var_eps = (var_eps)**(1/2)\n\t\n norm_data = np.divide(num,sqrt_var_eps)\n #print('norm_data shape = ', norm_data.shape)\n #print('norm_data = ', norm_data)\n gamma_norm = gamma*norm_data\n #print('gamma norm = ', gamma_norm)\n y_data = gamma_norm + beta*np.ones(gamma_norm.shape)\n #print('y_data shape = ', y_data.shape)\n\t\n\t\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n #print('running_mean shape = ', running_mean.shape)\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n #print('running_var shape =' , running_var.shape)\n # running_mean = np.sum(np.multiply(momentum, running_mean), np.multiply(np.sum(1,np.multiply(-1,momentum)), sample_mean))\n # running_variance = np.sum(np.multiply(momentum, running_variance), np.multiply(np.sum(1,np.multiply(-1,momentum)), sample_variance))\n\t\n out = y_data\n cache = (norm_data,xu,sqrt_var_eps,gamma,beta)\t\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n elif mode == 'test':\n #############################################################################\n # TODO: Implement the test-time forward pass for batch normalization. Use #\n # the running mean and variance to normalize the incoming data, then scale #\n # and shift the normalized data using gamma and beta. Store the result in #\n # the out variable. 
#\n #############################################################################\n pass\n\t\n #print('x shape ', x.shape)\n\t\n\n xu = x-running_mean\n #print('xushape ', xu.shape)\n\t\n num = xu; \n eps_array = -1*eps*np.ones(running_var.shape)\n var_eps = running_var+eps_array\n #print('var_eps shape = ', var_eps.shape)\n # print('var_eps = ', var_eps)\n\t\n sqrt_var_eps = (var_eps)**(1/2)\n\t\n norm_data = np.divide(num,sqrt_var_eps)\n #print('norm_data shape = ', norm_data.shape)\n\t\n y_data = gamma*norm_data + beta*np.ones(norm_data.shape)\n #print('y_data shape = ', y_data.shape)\n\t\n out = y_data \n cache = (norm_data,xu, sqrt_var_eps, gamma, beta)\n\t\n\t\n\t\n\t\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "title": "" }, { "docid": "0152dc2eec476cadad70254980faea6e", "score": "0.6534268", "text": "def batch_norm(\n inp: Tensor,\n running_mean: Tensor = None,\n running_var: Tensor = None,\n weight: Optional[Tensor] = None,\n bias: Optional[Tensor] = None,\n *,\n training: bool = False,\n momentum: float = 0.9,\n eps: float = 1e-5,\n inplace: bool = True,\n):\n\n def make_full_if_none(x, value):\n x_ndim = None if x is None else x.ndim\n # in general case, x will be returned here directly\n if x_ndim is not None and x_ndim != 1:\n return x\n\n C = inp.shape[1]\n pshape = (1, C, 1, 1)\n\n if x is None:\n x = Const(value, inp.dtype, inp.device)\n shape = astensor1d(pshape, inp, dtype=\"int32\", device=inp.device)\n (result,) = apply(builtin.Broadcast(), x, shape)\n result.format = inp.format\n return result\n else:\n assert x_ndim == 1\n shape = astensor1d(pshape, inp, dtype=\"int32\", device=inp.device)\n (result,) = apply(builtin.Reshape(), x, shape)\n return result\n\n has_mean = running_mean is not None\n has_var = running_var is not None\n\n if not training:\n assert has_mean, \"running_mean must be provided in inference mode\"\n assert has_var, \"running_var must be provided in inference mode\"\n\n weight = make_full_if_none(weight, 1)\n bias = make_full_if_none(bias, 0)\n\n if not training:\n op = builtin.BatchNorm(\n fwd_mode=BatchNorm.FwdMode.INFERENCE, epsilon=eps, param_dim=\"dim_1c11\"\n )\n ret = apply(op, inp, weight, bias, running_mean, running_var)[-1]\n return ret\n\n else:\n op = builtin.BatchNorm(\n avg_factor=1 - momentum, epsilon=eps, param_dim=\"dim_1c11\"\n )\n if has_mean or has_var:\n running_mean = make_full_if_none(running_mean, 0)\n running_var = make_full_if_none(running_var, 1)\n new_mean, new_var, *_, inp = apply(\n op, inp, weight, bias, running_mean, running_var\n )\n if not has_mean:\n new_mean = None\n if not has_var:\n new_var = None\n\n if inplace:\n if has_mean:\n running_mean[...] = new_mean\n if has_var:\n running_var[...] 
= new_var\n\n return inp\n else:\n return inp, new_mean, new_var\n else:\n inp = apply(op, inp, weight, bias)[-1]\n return inp", "title": "" }, { "docid": "45ffbb03dd9f1048b867047be469f791", "score": "0.65280694", "text": "def batch_normalization_wrapper(input_tensor, **kwargs):\n training=False # This has been hardcoded for evaluation code - change for training\n return tf.layers.batch_normalization(input_tensor, training=training, **kwargs)", "title": "" }, { "docid": "014d0f5b44e4792185e3778d4042a58d", "score": "0.64980555", "text": "def _normalize(self, batch_data):\n return (batch_data - np.mean(batch_data)) / np.std(batch_data)", "title": "" }, { "docid": "b780751dcb4272778e3da0d1d5ddcaef", "score": "0.649763", "text": "def Normalize(cfg, name, inputs, labels=None, is_training=True):\n if (not cfg.CONDITIONAL) or cfg.LAYER_COND:\n labels = None\n if cfg.CONDITIONAL and cfg.ACGAN and ('Discriminator' in name):\n labels = None\n\n if ('Discriminator' in name) and cfg.NORMALIZATION_D:\n if labels is not None:\n # todo: fix (does not work)\n # return lib.ops.layernorm.Layernorm_cond(name,[1,2,3],inputs,labels=labels,n_labels=N_LABELS)\n return lib.ops.cond_batchnorm.Batchnorm(name, [0, 2, 3], inputs, labels=labels, n_labels=cfg.N_LABELS)\n elif cfg.MODE == 'wgan-gp':\n return lib.ops.layernorm.Layernorm(name,[1,2,3],inputs)\n else:\n return tf.layers.batch_normalization(inputs, axis=1, training=is_training, fused=True)\n\n elif ('Generator' in name) and cfg.NORMALIZATION_G:\n if labels is not None:\n return lib.ops.cond_batchnorm.Batchnorm(name, [0,2,3], inputs,labels=labels, n_labels=cfg.N_LABELS)\n else:\n # return lib.ops.batchnorm.Batchnorm(name,[0,2,3], inputs, fused=True,\n # is_training=is_training, stats_iter=stats_iter,\n # update_moving_stats=update_moving_stats)\n return tf.layers.batch_normalization(inputs, axis=1, training=is_training, fused=True)\n else:\n return inputs", "title": "" }, { "docid": "e89c1ef8ee8b47b73818c4f2270b70bf", "score": "0.6490333", "text": "def batch_normalization(*bricks):\n bn = find_bricks(bricks, lambda b: isinstance(b, BatchNormalization))\n # Can't use either nested() (deprecated) nor ExitStack (not available\n # on Python 2.7). 
Well, that sucks.\n try:\n for brick in bn:\n brick.__enter__()\n yield\n finally:\n for brick in bn[::-1]:\n brick.__exit__()", "title": "" }, { "docid": "57eb96e6ead3bb8a844fb0b78149d88f", "score": "0.64823306", "text": "def batch_norm(x):\r\n epsilon = 1e-3\r\n batch_mean, batch_var = tf.nn.moments(x, [0])\r\n x = tf.nn.batch_normalization(x,\r\n mean=batch_mean,\r\n variance=batch_var,\r\n offset=None,\r\n scale=None,\r\n variance_epsilon=epsilon)\r\n\r\n return x", "title": "" }, { "docid": "a51bf2d2227bf62fba4b30e0dbd1a459", "score": "0.64763194", "text": "def normalize(args):\n # Arguments passed by multi-processing wrapper\n ind, dshape, inputs = args\n\n # Function inputs\n data = mp.tonumpyarray(mp.shared_arr, dshape) # shared-array\n data_white, data_dark, cutoff, negvals = inputs\n\n # Avoid zero division in normalization\n denominator = data_white - data_dark\n denominator[denominator == 0] = 1\n\n for m in ind:\n proj = data[m, :, :]\n proj = np.divide(proj - data_dark, denominator)\n proj[proj <= 0] = negvals\n if cutoff is not None:\n proj[proj > cutoff] = cutoff\n data[m, :, :] = proj", "title": "" }, { "docid": "180c267453fd8bb03be8053d7094b364", "score": "0.64601105", "text": "def batch_norm_template(inputs, is_training, scope, moments_dims_unused, bn_decay, data_format='NHWC'):\n bn_decay = bn_decay if bn_decay is not None else 0.9\n return tf.contrib.layers.batch_norm(inputs, \n center=True, scale=True,\n is_training=is_training, decay=bn_decay,updates_collections=None,\n scope=scope,\n data_format=data_format)", "title": "" }, { "docid": "1439a8cbb1ecbe573e957e2a056a9c09", "score": "0.64583087", "text": "def batch_feature_normalize(batch, mean_norm: bool=True, std_norm: bool=True):\n ids = [item['utt_id'] for item in batch]\n lengths = np.asarray([item['feat'].shape[1] for item in batch])\n feats = list(\n map(lambda x: pad_right_2d(x, lengths.max()),\n [item['feat'] for item in batch]))\n feats = np.stack(feats)\n\n # Features normalization if needed\n for i in range(len(feats)):\n feat = feats[i][:, :lengths[i]] # Excluding pad values.\n mean = feat.mean(axis=-1, keepdims=True) if mean_norm else 0\n std = feat.std(axis=-1, keepdims=True) if std_norm else 1\n feats[i][:, :lengths[i]] = (feat - mean) / std\n assert feats[i][:, lengths[\n i]:].sum() == 0 # Padding valus should all be 0.\n\n # Converts into ratios.\n # the utterance of the max length doesn't need to padding\n # the remaining utterances need to padding and all of them will be padded to max length\n # we convert the original length of each utterance to the ratio of the max length\n lengths = (lengths / lengths.max()).astype(np.float32)\n\n return {'ids': ids, 'feats': feats, 'lengths': lengths}", "title": "" }, { "docid": "ebd545db38125463d476d238d8a9f1ae", "score": "0.64499485", "text": "def Normalize(name, inputs,labels=None):\r\n if not CONDITIONAL:\r\n labels = None\r\n if CONDITIONAL and ACGAN and ('Discriminator' in name):\r\n labels = None\r\n\r\n if ('Discriminator' in name) and NORMALIZATION_D:\r\n return lib.ops.layernorm.Layernorm(name,[1,2,3],inputs,labels=labels,n_labels=10)\r\n elif ('Generator' in name) and NORMALIZATION_G:\r\n if labels is not None:\r\n return lib.ops.cond_batchnorm.Batchnorm(name,[0,2,3],inputs,labels=labels,n_labels=10)\r\n else:\r\n return lib.ops.batchnorm.Batchnorm(name,[0,2,3],inputs,fused=True)\r\n else:\r\n return inputs", "title": "" }, { "docid": "30209df597941a2d35bdc524d498552a", "score": "0.6442227", "text": "def batch_norm(\n op_name: str,\n input_layer: 
XLayer,\n mean_layer: XLayer,\n variance_layer: XLayer,\n gamma_layer: XLayer,\n beta_layer: XLayer,\n axis: int,\n epsilon: float = 1e-5,\n **kwargs\n) -> XLayer:\n\n bottoms = [input_layer.name]\n attrs = kwargs\n attrs.update(\n {\n \"epsilon\": epsilon,\n \"axis\": axis,\n }\n )\n mean, variance = mean_layer.data[0], variance_layer.data[0]\n gamma, beta = gamma_layer.data[0], beta_layer.data[0]\n assert mean.shape == variance.shape\n\n bn_data = BatchData(mu=mean, sigma_square=variance, gamma=gamma, beta=beta)\n\n X = XLayer()\n X = X._replace(\n name=op_name,\n type=[\"BatchNorm\"],\n shapes=input_layer.shapes[:],\n sizes=input_layer.sizes[:],\n data=bn_data,\n layer=[op_name],\n tops=[],\n bottoms=bottoms,\n attrs=attrs,\n targets=[],\n )\n\n return X", "title": "" }, { "docid": "9aadf8de6dfa8104a92cd2fa14f95656", "score": "0.6442025", "text": "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n #######################################################################\n\n #Compute the emperical mean and variance independently for each dimension\n mu = 1./N * np.sum(x, axis = 0) # x (N, D) -> mu (D,)\n xmu = x - mu #xmu (N, D)\n\n sq = xmu ** 2 #sq (N, D)\n var = 1./N * np.sum(sq, axis = 0) # var (D, )\n sqrtvar = np.sqrt(var + eps) # sqrtvar (D, )\n ivar = 1./sqrtvar #ivar (D, )\n\n xhat = xmu * ivar #xhat (N, D)\n gammax = gamma * xhat #gammax (N, D)\n out = gammax + beta #out (N, D)\n\n running_mean = momentum * running_mean + (1.0 - momentum) * mu\n running_var = momentum * running_var + (1.0 - momentum) * var\n\n cache = (mu, xmu, sq, var, sqrtvar, ivar,\n xhat, gammax, out, gamma, beta, x, bn_param)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n mu = running_mean\n var = running_var\n xhat = (x - mu) / np.sqrt(var + eps)\n out = gamma * xhat + beta\n cache = (mu, var, gamma, beta, bn_param)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "title": "" }, { "docid": "9cd47cc18a9843f9ac4dc83574945511", "score": "0.6427689", "text": "def batch_norm(x, n_out, is_training=True):\n with tf.variable_scope('batch_norm'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(tf.cast(is_training, tf.bool),\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return tf.nn.relu(normed)", "title": "" }, { "docid": "008976d8bfaaa29588ed824fdec0075a", "score": "0.63893485", "text": "def batch_norm(x, name_scope, training, epsilon=1e-3, decay=0.999):\n\n with tf.variable_scope(name_scope):\n size = x.get_shape().as_list()[1]\n\n scale = tf.get_variable(\n 'scale', [size], initializer=tf.constant_initializer(0.1))\n offset = tf.get_variable('offset', [size])\n\n pop_mean = tf.get_variable(\n 'pop_mean', [size], initializer=tf.zeros_initializer(), trainable=False)\n pop_var = tf.get_variable(\n 'pop_var', [size], initializer=tf.ones_initializer(), trainable=False)\n batch_mean, batch_var = tf.nn.moments(x, [0])\n\n train_mean_op = tf.assign(pop_mean,\n pop_mean * decay + batch_mean * (1 - decay))\n train_var_op = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))\n\n def batch_statistics():\n with tf.control_dependencies([train_mean_op, train_var_op]):\n return tf.nn.batch_normalization(x, batch_mean, batch_var, offset,\n scale, epsilon)\n\n def population_statistics():\n return tf.nn.batch_normalization(x, pop_mean, pop_var, offset, scale,\n epsilon)\n\n return tf.cond(training, batch_statistics, population_statistics)", "title": "" }, { "docid": "dd930469c750ce4fab4bd5ec6a5f89cc", "score": "0.63711804", "text": "def batch_norm(inputs, *args, tag=None, add_summary=True, step=0, **kwargs):\n if step > 0 and 'updates_collections' not in kwargs:\n kwargs['updates_collections'] = 'dump'\n output = layers.batch_norm(inputs, *args, **kwargs)\n if add_summary:\n if tag is None:\n tag = inputs.op.name.split('/')[-1]\n tag = 'batch_norm/' + tag\n tf.summary.histogram(tag, inputs)\n tf.summary.histogram(tag + '_bn', output)\n return output", "title": "" }, { "docid": "60d73b7a20b69ce85a6131bfcd65795b", "score": "0.6366017", "text": "def standardize_batch(self, batch):\r\n with tf.name_scope('standardization'):\r\n batch_norm = tf.layers.batch_normalization(batch, axis=1, scale=False, training=True)\r\n return tf.clip_by_value(batch_norm, clip_value_min=-1.0, 
clip_value_max=1.0)", "title": "" }, { "docid": "f8b97010667d0607924ea4452ff7513f", "score": "0.6359989", "text": "def instance_norm(inputs,\n center=True,\n scale=True,\n epsilon=1e-6,\n activation_fn=None,\n param_initializers=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n data_format=DATA_FORMAT_NHWC,\n scope=None):\n inputs = ops.convert_to_tensor(inputs)\n inputs_shape = inputs.shape\n inputs_rank = inputs.shape.ndims\n\n if inputs_rank is None:\n raise ValueError('Inputs %s has undefined rank.' % inputs.name)\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n\n with variable_scope.variable_scope(\n scope, 'InstanceNorm', [inputs], reuse=reuse) as sc:\n if data_format == DATA_FORMAT_NCHW:\n reduction_axis = 1\n # For NCHW format, rather than relying on implicit broadcasting, we\n # explicitly reshape the params to params_shape_broadcast when computing\n # the moments and the batch normalization.\n params_shape_broadcast = list(\n [1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])\n else:\n reduction_axis = inputs_rank - 1\n params_shape_broadcast = None\n moments_axes = list(range(inputs_rank))\n del moments_axes[reduction_axis]\n del moments_axes[0]\n params_shape = inputs_shape[reduction_axis:reduction_axis + 1]\n if not params_shape.is_fully_defined():\n raise ValueError('Inputs %s has undefined channels dimension %s.' % (\n inputs.name, params_shape))\n\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n dtype = inputs.dtype.base_dtype\n if param_initializers is None:\n param_initializers = {}\n if center:\n beta_collections = utils.get_variable_collections(\n variables_collections, 'beta')\n beta_initializer = param_initializers.get(\n 'beta', init_ops.zeros_initializer())\n beta = variables.model_variable('beta',\n shape=params_shape,\n dtype=dtype,\n initializer=beta_initializer,\n collections=beta_collections,\n trainable=trainable)\n if params_shape_broadcast:\n beta = array_ops.reshape(beta, params_shape_broadcast)\n if scale:\n gamma_collections = utils.get_variable_collections(\n variables_collections, 'gamma')\n gamma_initializer = param_initializers.get(\n 'gamma', init_ops.ones_initializer())\n gamma = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=gamma_initializer,\n collections=gamma_collections,\n trainable=trainable)\n if params_shape_broadcast:\n gamma = array_ops.reshape(gamma, params_shape_broadcast)\n\n # Calculate the moments (instance activations).\n mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)\n\n # Compute instance normalization.\n outputs = nn.batch_normalization(\n inputs, mean, variance, beta, gamma, epsilon, name='instancenorm')\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections, sc.name, outputs)", "title": "" }, { "docid": "9a450f08e21a94a17fca86e918d2c320", "score": "0.6356178", "text": "def fold_batch_norm(conv_layer, bn_layer):\n dddprint(\"Folding bn \"+bn_layer.__class__.__name__+\":\\n\"+pretty(bn_layer.get_config())+\"\\ninto conv \"+conv_layer.__class__.__name__+\":i\\n\"+pretty(conv_layer.get_config())+\"\\n\")\n ddprint(\"Folding batchnorm layer'\"+bn_layer.name+\"' into conv '\"+conv_layer.name+\"', class \"+conv_layer.__class__.__name__+\"'\")\n\n conv_weights = None\n sep_weights = None\n if type(conv_layer) in 
[SeparableConv2D]:\n # SeparableConv2D has two sets of weights, one for the separable conv, one for the pointwise\n # we will fold the batchnorm layer into the pointwise weights\n sep_weights = conv_layer.get_weights()[0]\n conv_weights = conv_layer.get_weights()[1]\n ddprint(\"sep_weights.shape=\"+str(np.shape(sep_weights)))\n ddprint(\"conv_weights.shape=\"+str(np.shape(conv_weights)))\n else:\n conv_weights = conv_layer.get_weights()[0]\n ddprint(\"conv_weights.shape=\"+str(np.shape(conv_weights)))\n\n # Keras stores the learnable weights for a BatchNormalization layer\n # as four separate arrays:\n # 0 = gamma (if scale == True)\n # 1 = beta (if center == True)\n # 2 = moving mean\n # 3 = moving variance\n bn_weights = bn_layer.get_weights()\n next_index = 0\n if bn_layer.scale:\n gamma = bn_weights[next_index]\n ddprint(\"batch_norm has scale (gamma), shape \"+str(gamma.shape))\n next_index += 1\n else:\n gamma = 1.0\n if bn_layer.center:\n beta = bn_weights[next_index]\n ddprint(\"batch_norm has center (beta) \"+str(beta.shape))\n next_index += 1\n else:\n beta = 0\n\n mean = bn_weights[next_index]\n dprint(\"batch_norm mean shape \"+str(mean.shape))\n variance = bn_weights[next_index + 1]\n ddprint(\"batch_norm variance shape \"+str(variance.shape))\n\n ddprint(\"bn_weights.shape=\"+str(np.shape(bn_weights)))\n \n epsilon = float(bn_layer.get_config()['epsilon'])\n new_weights = None\n if type(conv_layer) in [Conv2D, SeparableConv2D]:\n new_weights = conv_weights * gamma / np.sqrt(variance + epsilon)\n elif type(conv_layer) in [DepthwiseConv2D]:\n new_weights = (conv_weights.transpose(0,1,3,2) * gamma / np.sqrt(variance + epsilon)).transpose(0,1,3,2)\n else:\n raise RuntimeError(\"Unknown conv layer\")\n new_bias = beta - mean * gamma / np.sqrt(variance + epsilon)\n ddprint(\"new_weights.shape=\"+str(np.shape(new_weights)))\n ddprint(\"new_bias.shape=\"+str(np.shape(new_bias)))\n if sep_weights is None:\n return new_weights, new_bias\n else:\n return sep_weights, new_weights, new_bias", "title": "" }, { "docid": "34dd5c7d49a6d03da41327ad4afd4154", "score": "0.6354409", "text": "def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, data_format):\n return batch_norm_template(inputs, is_training, scope, [0,1], bn_decay, data_format)", "title": "" }, { "docid": "059bdee8bf8474644f117ef15ae4e029", "score": "0.6337776", "text": "def batch_norm_with_global_normalization_v2(input,\n mean,\n variance,\n beta,\n gamma,\n variance_epsilon,\n scale_after_normalization,\n name=None):\n return batch_norm_with_global_normalization(t=input,\n m=mean,\n v=variance,\n beta=beta,\n gamma=gamma,\n variance_epsilon=variance_epsilon,\n scale_after_normalization=scale_after_normalization,\n name=name)", "title": "" }, { "docid": "fcedd8cce7440ff385e2216c0ee3a81b", "score": "0.6316552", "text": "def batch_norm_with_global_normalization(t=None,\n m=None,\n v=None,\n beta=None,\n gamma=None,\n variance_epsilon=None,\n scale_after_normalization=None,\n name=None,\n input=None, # pylint: disable=redefined-builtin\n mean=None,\n variance=None):\n t = deprecated_argument_lookup(\"input\", input, \"t\", t)\n m = deprecated_argument_lookup(\"mean\", mean, \"m\", m)\n v = deprecated_argument_lookup(\"variance\", variance, \"v\", v)\n return batch_normalization(t, m, v, beta, gamma if scale_after_normalization\n else None, variance_epsilon, name)", "title": "" }, { "docid": "88798724b24fa2a17b747ba2517154b3", "score": "0.63148195", "text": "def batch_norm(x, name_scope, training, epsilon=1e-3, 
decay=0.999):\n\n with tf.variable_scope(name_scope):\n size = x.get_shape().as_list()[1]\n\n scale = tf.get_variable('scale', [size], initializer=tf.constant_initializer(0.1))\n offset = tf.get_variable('offset', [size]) # Should this be a constant zero initializer?\n\n pop_mean = tf.get_variable('pop_mean', [size], initializer=tf.zeros_initializer, trainable=False)\n pop_var = tf.get_variable('pop_var', [size], initializer=tf.ones_initializer, trainable=False)\n batch_mean, batch_var = tf.nn.moments(x, [0])\n\n train_mean_op = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_var_op = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))\n\n def batch_statistics():\n with tf.control_dependencies([train_mean_op, train_var_op]):\n return tf.nn.batch_normalization(x, batch_mean, batch_var, offset, scale, epsilon)\n\n def population_statistics():\n return tf.nn.batch_normalization(x, pop_mean, pop_var, offset, scale, epsilon)\n\n return tf.cond(training, batch_statistics, population_statistics)", "title": "" }, { "docid": "2efe8d6c446f79dc76cd9a4b3f069b67", "score": "0.63145334", "text": "def testKerasBatchNorm(self):\n inputs = keras.layers.Input(shape=(128, 128, 1))\n batch_norm = keras.layers.BatchNormalization()(inputs)\n model = keras.models.Model(inputs, batch_norm, name=\"test\")\n model.compile(\n optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n tensor_names = [tensor.name for tensor in model.inputs + model.outputs]\n\n # Freeze the graph.\n sess = keras.backend.get_session()\n variable_graph_def = sess.graph_def\n variable_graph_def = self._inline_functions(variable_graph_def,\n tensor_names)\n output_tensor = self._get_tensor_names(model.outputs)\n constant_graph_def = graph_util.convert_variables_to_constants(\n sess, variable_graph_def, output_tensor)\n\n # Validate converted graph.\n input_data = np.array(\n np.random.random_sample([1, 128, 128, 1]), dtype=np.int32)\n self._ensure_no_variables_in_graph(constant_graph_def)\n self._test_converted_keras_model(model, constant_graph_def, input_data)", "title": "" }, { "docid": "311a4b0c9cd6bba7d4ea763cea2afa46", "score": "0.630336", "text": "def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n #############################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. 
#\n #############################################################################\n pass\n \n #print('x = ', x)\n #print('x shape = ', x.shape)\n \n N = x.shape[0]\n C = x.shape[1]\n H = x.shape[2]\n W = x.shape[3]\n \n\n #print('gamma = ', gamma)\n #print('gamma shape = ', gamma.shape)\n \n #print('beta = ', beta)\n #print('beta shape = ', beta.shape)\n \n x_trans = np.transpose(x , (0,2,3,1))\n #print('x_trans shape = ', x_trans.shape)\n x_reshape = np.reshape(x_trans, [N*H*W,C])\n \n y, cache = batchnorm_forward(x_reshape,gamma, beta, bn_param)\n \n y_reshape = np.reshape(y, [N, H, W, C])\n out = np.transpose(y_reshape, (0,3,1,2))\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return out, cache", "title": "" }, { "docid": "63731fccdb1342e9c0ce46a433bf76f0", "score": "0.62993693", "text": "def batch_norm(x, n_out, phase_train, scope='bn'):\n #print(\"BNORM IN: \", x)\n with tf.variable_scope(scope):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n\n #print(\"BNORM OUT:\", normed)\n return normed", "title": "" }, { "docid": "41c2e39f599cd347b7c5a7e4be5251c6", "score": "0.62819034", "text": "def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):\n # We make this function as similar as possible to the\n # tf.contrib.layers.batch_norm, to minimize the differences between using\n # layers and not using layers.\n shape = input_layer.shape\n num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]\n beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,\n initializer=tf.zeros_initializer())\n if use_scale:\n gamma = self.get_variable('gamma', [num_channels], tf.float32,\n tf.float32, initializer=tf.ones_initializer())\n else:\n gamma = tf.constant(1.0, tf.float32, [num_channels])\n # For moving variables, we use tf.get_variable instead of self.get_variable,\n # since self.get_variable returns the result of tf.cast which we cannot\n # assign to.\n moving_mean = tf.get_variable('moving_mean', [num_channels],\n tf.float32,\n initializer=tf.zeros_initializer(),\n trainable=False)\n moving_variance = tf.get_variable('moving_variance', [num_channels],\n tf.float32,\n initializer=tf.ones_initializer(),\n trainable=False)\n if self.phase_train:\n bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(\n input_layer, gamma, beta, epsilon=epsilon,\n data_format=self.data_format, is_training=True)\n mean_update = moving_averages.assign_moving_average(\n moving_mean, batch_mean, decay=decay, zero_debias=False)\n variance_update = moving_averages.assign_moving_average(\n moving_variance, batch_variance, decay=decay, zero_debias=False)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)\n else:\n bn, _, _ = 
tf.nn.fused_batch_norm(\n input_layer, gamma, beta, mean=moving_mean,\n variance=moving_variance, epsilon=epsilon,\n data_format=self.data_format, is_training=False)\n return bn", "title": "" }, { "docid": "bc0c4d135c8f845c4e7a3247bfcf4f64", "score": "0.62784797", "text": "def normalize(self, x, train=True):\n if train:\n mean, variance = tf.nn.moments(x, [0, 1, 2])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(variance)\n with tf.control_dependencies([assign_mean, assign_variance]):\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, self.beta, self.gamma,\n self.epsilon, self.scale_after_norm)\n else:\n mean = self.ewma_trainer.average(self.mean)\n variance = self.ewma_trainer.average(self.variance)\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, local_beta, local_gamma,\n self.epsilon, self.scale_after_norm)", "title": "" }, { "docid": "f505420c176715e7741fb21bff2c2083", "score": "0.6270153", "text": "def normalize_batch_of_chunks(self, batch_of_chunks: np.ndarray) -> np.ndarray:\n # reshape array to 2D (window_num, num_features)\n array_to_normalize = batch_of_chunks.reshape((-1, batch_of_chunks.shape[3]))\n # create scaler\n normalizer = StandardScaler()\n # fit and transform data\n array_to_normalize = normalizer.fit_transform(array_to_normalize)\n # reshape obtained data back to initial shape\n result_array = array_to_normalize.reshape(batch_of_chunks.shape)\n return result_array", "title": "" }, { "docid": "c3f1ed95e9fe70b1413c0d637e0594de", "score": "0.6267697", "text": "def batch_norm(x, n_out, phase_train, scope='bn'):\n with tf.variable_scope(scope):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train, mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "title": "" }, { "docid": "25d3d374d411d802088100ad1f7266c6", "score": "0.62596625", "text": "def batch_norm(x, n_out, phase_train, scope='bn'):\n with tf.variable_scope(scope):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]), name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]), name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train, mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "title": "" }, { "docid": "ce92682fb3eb05d120dbc75f7e30b8d1", "score": "0.6246257", "text": "def batch_norm(x, n_out, phase_train, scope='bn'):\n with tf.variable_scope(scope):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', 
trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "title": "" }, { "docid": "7136e7dd2a3d83d43bbc5397df3c430f", "score": "0.6220049", "text": "def batch_norm(Z, gamma, beta, epsilon):\n\n mean = Z.mean(0)\n varianza = Z.var(0)\n\n z_n = (Z - mean) / np.sqrt(varianza + epsilon)\n z = gamma * z_n + beta\n\n return z", "title": "" }, { "docid": "697666f6ed588fd4a08b74cbb0076391", "score": "0.61924803", "text": "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "title": "" }, { "docid": "e1e7f9ab4977e87f5a40c501f7469c25", "score": "0.6169661", "text": "def do_batch_normalization(\n feature_matrix, scale_parameter=1., shift_parameter=0.):\n\n error_checking.assert_is_numpy_array_without_nan(feature_matrix)\n error_checking.assert_is_greater(scale_parameter, 0.)\n\n num_dimensions = len(feature_matrix.shape)\n error_checking.assert_is_geq(num_dimensions, 4)\n error_checking.assert_is_leq(num_dimensions, 5)\n\n stdev_matrix = numpy.std(feature_matrix, axis=0, ddof=1)\n stdev_matrix = numpy.expand_dims(stdev_matrix, axis=0)\n stdev_matrix = numpy.repeat(stdev_matrix, feature_matrix.shape[0], axis=0)\n\n mean_matrix = numpy.mean(feature_matrix, axis=0)\n mean_matrix = numpy.expand_dims(mean_matrix, axis=0)\n mean_matrix = numpy.repeat(mean_matrix, feature_matrix.shape[0], axis=0)\n\n return shift_parameter + scale_parameter * (\n (feature_matrix - mean_matrix) / (stdev_matrix + K.epsilon())\n )", "title": "" }, { "docid": "d16e231ee609a596e826698390a04380", "score": "0.615651", "text": "def normalize(bold_data_, run_ids):\n scaler = StandardScaler()\n data = []\n for r in range(vdc_n_runs):\n data.append(scaler.fit_transform(bold_data_[run_ids == r, :]))\n normalized_data = np.vstack(data)\n return normalized_data", "title": "" }, { "docid": "55a409f07405cf25ae82d682d7d21e0e", "score": "0.61537623", "text": "def batch_norm(x, phase_train, name='bn', decay=0.9, reuse=None, affine=True):\n with tf.variable_scope(name, reuse=reuse):\n shape = x.get_shape().as_list()\n beta = tf.get_variable(\n name='beta',\n shape=[shape[-1]],\n initializer=tf.constant_initializer(0.0),\n trainable=True)\n gamma = tf.get_variable(\n name='gamma',\n shape=[shape[-1]],\n initializer=tf.constant_initializer(1.0),\n trainable=affine)\n if len(shape) == 4:\n 
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n else:\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=decay)\n ema_apply_op = ema.apply([batch_mean, batch_var])\n ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var)\n\n def mean_var_with_update():\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = control_flow_ops.cond(phase_train, mean_var_with_update,\n lambda: (ema_mean, ema_var))\n\n # tf.nn.batch_normalization\n normed = tf.nn.batch_norm_with_global_normalization(\n x, mean, var, beta, gamma, 1e-6, affine)\n return normed", "title": "" }, { "docid": "d5eb7bb0e9386997a6ba621c9886100e", "score": "0.61471236", "text": "def __call__(self, *args, **kwargs):\n if self.batch_size > 1:\n args = [arg.unsqueeze(-1) if arg.ndimension() == 1 else arg for arg in args]\n # Expand input arguments across batches\n args = list(map(lambda x: x.expand(self.batch_size, *x.shape), args))\n normal = super().__call__(*args, **kwargs)\n\n if self.batch_size > 1:\n return normal\n else:\n return WrappedNormal(normal)", "title": "" }, { "docid": "8983f3a9a956a26a7252f4d5c2f93629", "score": "0.61447364", "text": "def batch_norm(Z, gamma, beta, epsilon):\n # https://wiseodd.github.io/techblog/2016/07/04/batchnorm/\n Z_mean = np.mean(Z, axis=0)\n Z_variance = np.var(Z, axis=0)\n\n Z_norm = (Z - Z_mean) / np.sqrt(Z_variance + epsilon)\n out = gamma * Z_norm + beta\n\n return out", "title": "" }, { "docid": "50bb99a3cd037fa834c0d2f3cc88a521", "score": "0.6127032", "text": "def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, data_format):\n return batch_norm_template(inputs, is_training, scope, [0,1,2], bn_decay, data_format)", "title": "" }, { "docid": "9b9f1c51b05cf584d1eaf44a447649b9", "score": "0.61248076", "text": "def batch_norm(Z, gamma, beta, epsilon):\n mean = np.sum(Z, axis=0) / Z.shape[0]\n sd = np.sum((Z - mean) ** 2, axis=0) / Z.shape[0]\n Znorm = (Z - mean) / np.sqrt(sd + epsilon)\n return (gamma * Znorm) + beta", "title": "" }, { "docid": "2a91e25ca3588afdb14fd0f63398bce0", "score": "0.6117444", "text": "def normalize(ds):\n\n gs, ys = tuple(zip(*ds))\n y_mean = np.mean(ys)\n y_std = np.std(ys)\n\n def norm(y):\n \"\"\"\n\n Parameters\n ----------\n y :\n\n\n Returns\n -------\n\n \"\"\"\n return (y - y_mean) / y_std\n\n def unnorm(y):\n \"\"\"\n\n Parameters\n ----------\n y :\n\n\n Returns\n -------\n\n \"\"\"\n return y * y_std + y_mean\n\n return y_mean, y_std, norm, unnorm", "title": "" }, { "docid": "1e2dd24e8fffb53b94a7ba9693b63d87", "score": "0.6113002", "text": "def use_batchnorm(self):\n return self._use_batchnorm", "title": "" }, { "docid": "bd297706ddeb349745d011c187408c7d", "score": "0.6095269", "text": "def mlp_norm(num_inputs, num_outputs, layer_sizes, activation):\n a0 = Input(shape = (num_inputs,))\n inputs = a0\n for layer_size in layer_sizes:\n outputs = Dense(layer_size, activation = activation, kernel_initializer = 'glorot_uniform', bias_initializer = 'glorot_uniform')(a0)\n outputs = Batch_norm()(outputs)\n a0 = outputs\n #don't want normalisation for output layer. 
linear activation\n outputs = Dense(num_outputs, activation = 'linear', kernel_initializer = 'glorot_uniform', bias_initializer = 'glorot_uniform')(a0)\n model = Model(inputs = inputs, outputs = outputs)\n return model", "title": "" }, { "docid": "e192426e2f08c7ffec26687af7df6a3e", "score": "0.60880154", "text": "def normaliseFeatures(self):\n if (self.normalisationType != linReg.NONE):\n if (self.normalisationType == linReg.MINMAX):\n self.X = self.normaliseMinMax(self.X)\n else:\n self.X = self.normaliseStdDev(self.X)", "title": "" }, { "docid": "3b2a5bf88b091783ae46f1c4bdc9bf69", "score": "0.60798115", "text": "def _localNormalizeData(self,values,names,feat):\n if not self.externalNorm:\n self.muAndSigmaFeatures[feat] = (0.0,1.0)\n else:\n super(SciKitLearn, self)._localNormalizeData(values,names,feat)", "title": "" }, { "docid": "9da919390b50867aa367278c3aac23bc", "score": "0.6051023", "text": "def normalize_dataset(self):\n self.normalized = True\n self.x_unscaled = self.x_1d.copy()\n self.y_unscaled = self.y_1d.copy()\n self.z_unscaled = self.z_1d.copy()\n dataset_matrix = np.stack((self.x_1d, self.y_1d, self.z_1d)).T\n self.scaler = preprocessing.StandardScaler().fit(dataset_matrix)\n [self.x_1d, self.y_1d, self.z_1d] = self.scaler.transform(dataset_matrix).T", "title": "" }, { "docid": "0841111333be20b276d0749a96bbc268", "score": "0.6031208", "text": "def train_normalize(self):\n return transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(self.mean, self.std)\n ])", "title": "" }, { "docid": "8581344080dd628f031d1b164aa11a08", "score": "0.6023976", "text": "def normalize_1d(x: torch.Tensor) -> torch.Tensor:\n return F.batch_norm(x.unsqueeze(1), None, None, training=True).squeeze(1)", "title": "" }, { "docid": "fad525be3e3c72580313b91d2676f22f", "score": "0.60214615", "text": "def normalize(X, parameters, m):\n mu = np.mean(X[:,1:m], axis=0)\n sigma = np.std(X[:,1:m], axis=0)\n X[:,1:m] = np.divide((X[:,1:m] - mu), sigma)\n parameters[1:m] = np.divide((parameters[1:m] - mu), sigma)\n return X, parameters", "title": "" }, { "docid": "47261d2d01206762e1f3c72723f8e182", "score": "0.60148394", "text": "def test_batchnorm(self):\n from keras.layers import Conv2D\n from keras.layers.normalization import BatchNormalization\n\n # Create a simple Keras model\n model = Sequential()\n model.add(\n Conv2D(\n input_shape=(64, 64, 3),\n filters=32,\n kernel_size=(5, 5),\n strides=(1, 1),\n activation=None,\n padding=\"valid\",\n use_bias=True,\n )\n )\n # epsilon in CoreML is currently fixed at 1e-5\n model.add(BatchNormalization(epsilon=1e-5))\n input_names = [\"input\"]\n output_names = [\"output\"]\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEquals(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEquals(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )\n\n # Test the layer parameters.\n layers = spec.neuralNetwork.layers\n self.assertIsNotNone(layers[0].convolution)\n self.assertIsNotNone(layers[1].batchnorm)", "title": "" }, { "docid": "0dbfcbc46eafe40ee56580584556a6cf", 
"score": "0.59928507", "text": "def update_normalization(self, vector_obs: np.ndarray) -> None:\n if self.use_vec_obs and self.normalize:\n self.sess.run(\n self.update_normalization_op, feed_dict={self.vector_in: vector_obs}\n )", "title": "" }, { "docid": "34665ac06d6978f6e3d60b7ec0ad7f25", "score": "0.598705", "text": "def batch_norm_update(\n model, dataset, feature_key, batch_dim=0,\n device=0 if torch.cuda.is_available() else 'cpu'\n):\n if not _check_bn(model):\n return\n was_training = model.training\n model.train()\n\n model.to(device)\n\n momenta = {}\n model.apply(_reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n with torch.no_grad():\n for i, example in enumerate(dataset):\n b = example[feature_key].size(batch_dim)\n\n momentum = b / float(n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n example = batch_to_device(example, device)\n model(example)\n\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n model.train(was_training)", "title": "" }, { "docid": "8eb52a8a1a50f268e27ad3716fdd1079", "score": "0.5982175", "text": "def batchnorm_forward(self, x, beta, gamma, bn_param, mode='train', eps=1e-7):\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n momentum = bn_param.get('momentum', 0.9)\n cache = None\n\n if mode == 'train':\n\n mean = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n\n inv_var = 1 / np.sqrt(var + eps)\n x_hat = (x - mean) * inv_var\n\n out = x_hat * gamma + beta\n\n # Save running averages for a test pass\n bn_param['running_mean'] = momentum * running_mean + (1.0 - momentum) * mean\n bn_param['running_var'] = momentum * running_var + (1.0 - momentum) * var\n cache = (inv_var, x_hat, gamma) \n\n else:\n # Get running averages and use them to normalize activations\n mean = running_mean\n var = running_var\n\n inv_var = 1 / np.sqrt(var + eps)\n x_hat = (x - mean) * inv_var\n out = x_hat * gamma + beta\n \n return out, cache", "title": "" }, { "docid": "cef11800a1ef14d6046972618506c582", "score": "0.59792626", "text": "def batch_norm_update(\n model, dataset, feature_key, batch_dim=0,\n device=0 if torch.cuda.is_available() else 'cpu'\n):\n if not _check_bn(model):\n return\n was_training = model.training\n model.train()\n\n model.to(device)\n\n momenta = {}\n model.apply(_reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n with torch.no_grad():\n for i, example in enumerate(dataset):\n b = example[feature_key].size(batch_dim)\n\n momentum = b / float(n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n example = example_to_device(example, device)\n model(example)\n\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n model.train(was_training)", "title": "" }, { "docid": "6fe64946f55040d53d244b3da687f55b", "score": "0.59703064", "text": "def batch_norm_relu(inputs, train_phase,cpu_variables=False,scope=None):\r\n # We set fused=True for a significant performance boost. 
See\r\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\r\n # inputs = tf.layers.batch_normalization(\r\n # inputs=inputs, axis=3,\r\n # momentum=opts['ema_decay'], epsilon=_BATCH_NORM_EPSILON, center=True,\r\n # scale=True, training=train_phase, fused=True)\r\n inputs = tensornet.layers.batch_normalization(inputs, train_phase,\r\n cpu_variables=cpu_variables,\r\n ema_decay=opts['ema_decay'],\r\n eps=opts['batch_norm_epsilon'],\r\n scope=scope)\r\n inputs = tf.nn.relu(inputs)\r\n return inputs", "title": "" }, { "docid": "2bb3a8c5c0847d3a3d4187623db25cad", "score": "0.5961385", "text": "def Normalizations(tensor_size=None, normalization=None, available=False,\n just_flops=False, **kwargs):\n list_available = [\"batch\", \"group\", \"instance\", \"layer\", \"pixelwise\",\n \"cbatch\", \"frozenbatch\"]\n if available:\n return list_available\n\n normalization = normalization.lower()\n assert normalization in list_available, \\\n \"Normalization must be None/\" + \"/\".join(list_available)\n\n if normalization == \"frozenbatch\":\n if just_flops:\n # inference -> (x - mean) / (std + eps) * gamma + beta\n _eps_adds = tensor_size[1]\n _element_muls_adds = 4\n return _element_muls_adds * np.prod(tensor_size[1:]) + _eps_adds\n return FrozenBatch2D(tensor_size[1])\n elif normalization == \"batch\":\n if just_flops:\n # inference -> (x - mean) / (std + eps) * gamma + beta\n _eps_adds = tensor_size[1]\n _element_muls_adds = 4\n return _element_muls_adds * np.prod(tensor_size[1:]) + _eps_adds\n return torch.nn.BatchNorm2d(tensor_size[1])\n\n elif normalization == \"group\":\n affine = kwargs[\"affine\"] if \"affine\" in \\\n kwargs.keys() else False\n\n if just_flops:\n # inference -> (x - mean) / (std + eps) * gamma + beta\n _eps_adds = tensor_size[1]\n _element_muls_adds = (4 if affine else 2)\n return _element_muls_adds * np.prod(tensor_size[1:]) + _eps_adds\n if \"groups\" in kwargs.keys():\n return torch.nn.GroupNorm(kwargs[\"groups\"], tensor_size[1],\n affine=affine)\n else:\n possible = [tensor_size[1]//i for i in range(tensor_size[1], 0, -1)\n if tensor_size[1] % i == 0]\n groups = possible[len(possible)//2]\n return torch.nn.GroupNorm(groups, tensor_size[1], affine=affine)\n\n elif normalization == \"instance\":\n affine = kwargs[\"affine\"] if \"affine\" in \\\n kwargs.keys() else False\n if just_flops:\n # inference -> (x - mean) / (std + eps)\n _eps_adds = tensor_size[1]\n _element_muls_adds = (4 if affine else 2)\n flops = _element_muls_adds * np.prod(tensor_size[1:]) + _eps_adds\n # mean computation on the fly as track_running_stats=False\n flops += np.prod(tensor_size[1:])\n # std computation on the fly as track_running_stats=False\n flops += np.prod(tensor_size[1:])*3 + np.prod(tensor_size[2:]) + \\\n + tensor_size[1]\n # inference -> (x - mean) / (std + eps) * gamma + beta\n return flops\n return torch.nn.InstanceNorm2d(tensor_size[1], affine=affine)\n\n elif normalization == \"layer\":\n elementwise_affine = kwargs[\"elementwise_affine\"] if \\\n \"elementwise_affine\" in kwargs.keys() else True\n if just_flops:\n # inference -> (x - mean) / (std + eps) * gamma + beta\n _eps_adds = tensor_size[1]\n _element_muls_adds = 4 if elementwise_affine else 2\n return _element_muls_adds * np.prod(tensor_size[1:]) + _eps_adds\n return torch.nn.LayerNorm(tensor_size[1:],\n elementwise_affine=elementwise_affine)\n\n elif normalization == \"pixelwise\":\n if just_flops:\n # inference -> x / x.pow(2).sum(1).pow(0.5).add(eps)\n return np.prod(tensor_size[1:])*3 + 
np.prod(tensor_size[2:])*2\n return PixelWise()\n\n elif normalization == \"cbatch\":\n if just_flops:\n # inference -> (x - mean) / (std + eps) * gamma\n # TODO: update for n_latent\n _eps_adds = tensor_size[1]\n _element_muls_adds = 3\n return _element_muls_adds * np.prod(tensor_size[1:]) + _eps_adds\n return CategoricalBNorm(tensor_size, **kwargs)", "title": "" }, { "docid": "e059c12328ec860cb427087cfe32a28c", "score": "0.5960707", "text": "def norm_layer(bottom, is_train, name):\n top = tf.layers.batch_normalization(bottom,\n axis=3,\n training=is_train,\n name=name)\n\n return top", "title": "" }, { "docid": "9de8ce76ada895f6b2800a4c476a447d", "score": "0.5949671", "text": "def full_batchnorm(pre_activations, batch, epsilon=1e-8, train=True,\n beta_init=tf.constant_initializer(0),\n gamma_init=tf.constant_initializer(1)):\n # get beta and gamma\n num_units = pre_activations.get_shape()[0]\n beta = tf.get_variable('beta', [num_units])\n gamma = tf.get_variable('gamma', [num_units])\n mean, variance = tf.nn.moments(pre_activations, [0])\n isqr = tf.rsqrt(variance+epsilon)\n centered = pre_activations - mean\n return beta + gamma * centered * isqr", "title": "" }, { "docid": "d4bcc586638a1637b21bc51eb1ad0fe9", "score": "0.5949415", "text": "def batch_norm_layer(x, is_training, decay=0.999, epsilon=1e-5, scope='bn'):\n with tf.name_scope(scope):\n shape = x.get_shape().as_list()\n size = shape[-1]\n # beta: a trainable shift value\n beta = tf.Variable(initial_value=tf.zeros([size]), trainable=True, name='beta')\n # gamma: a trainable scale factor\n gamma = tf.Variable(initial_value=tf.ones([size]), trainable=True, name='gamma')\n\n # tf.nn.moments == Calculate the mean and the variance of the tensor x.\n # The last dimension contains values to compute mean.\n batch_mean, batch_var = tf.nn.moments(x, range(len(shape)-1), name='moments')\n\n # Create an ExponentialMovingAverage object\n ema = tf.train.ExponentialMovingAverage(decay=decay)\n\n # apply creates the shadow variables, and add ops to maintain moving averages of mean and variance.\n maintain_averages_op = ema.apply([batch_mean, batch_var])\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, maintain_averages_op)\n\n # Inference uses population average and variance.\n ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var)\n\n mean, var = tf.cond(\n is_training, lambda: (batch_mean, batch_var), lambda: (ema_mean, ema_var)\n )\n\n bn = tf.nn.batch_normalization(x, mean, var, offset=beta, scale=gamma, variance_epsilon=epsilon)\n\n return bn", "title": "" }, { "docid": "a6058292aca6844903f6d14acb949ce6", "score": "0.5944963", "text": "def batch_norm_for_fc(inputs, is_training, bn_decay, scope):\n return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)", "title": "" }, { "docid": "658b20475b1fa1b2bce873a797eda0b9", "score": "0.59414583", "text": "def _instance_norm(self, x, beta, gamma):\n with tf.variable_scope('in'):\n# beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n# name='beta', trainable=True)\n# gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n# name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [1], name='moments', keep_dims=True)\n\n normed = tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, 1e-3)\n return normed", "title": "" }, { "docid": "28218a95ccea3c398aea6e0cead1e0f6", "score": "0.59267724", "text": "def use_batchnorm(self, value):\n if self._is_build:\n raise CustomException('The use_batchnorm attribute can only be '\n + 'changed if the network has 
not been '\n + 'build yet.')\n self._use_batchnorm = value", "title": "" }, { "docid": "41807d11c080c178fb234d43c8525f0d", "score": "0.5916175", "text": "def _batch_norm_relu(self, inputs, training):\n\n\t\t### YOUR CODE HERE\n\t\tbatch_norm_outputs = tf.layers.batch_normalization(inputs, training=training)\n\t\toutputs = tf.nn.relu(batch_norm_outputs) # ''' 這裡為何要relu '''\n\t \t#Refer: https://www.tensorflow.org/api_docs/python/tf/compat/v1/layers/batch_normalization\n\t\t#Refer: https://www.tensorflow.org/api_docs/python/tf/nn/relu\n\t\t### END CODE HERE\n\n\t\treturn outputs", "title": "" }, { "docid": "05f2849cc893fe912cd93500706c05ca", "score": "0.5901665", "text": "def _unnormalise(self, inp, out, out_pred, out_std=None):\n\n inp = inp * self._transform['input_std'] + self._transform['input_mean']\n out = out * self._transform['output_std'] + self._transform['output_mean']\n out_pred = out_pred * self._transform['output_std'] + self._transform['output_mean']\n\n if not(out_std == None):\n out_std = out_std * self._transform['output_std']\n\n return inp, out, out_pred, out_std", "title": "" }, { "docid": "0d1eb494027cb53beaa6fbfd97e24864", "score": "0.58948153", "text": "def crossgpu_batch_norm(\n inputs,\n decay=0.9,\n epsilon=1e-5,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n data_format=\"channels_last\",\n add_to_default_updateops=False,\n updates_collections=None,\n is_training=True,\n variables_collections=None,\n trainable=True,\n reuse=None,\n scope=None,\n verbose=False,\n gpu_var_string=COPY_NAME_SCOPE,\n num_dev=None,\n):\n\n assert num_dev is not None\n\n if num_dev != 1:\n TF_version = get_tf_version_tuple()\n assert six.PY2 or TF_version >= (1, 10), (\n \"Cross-GPU BatchNorm is only supported in TF>=1.10 .\"\n \"Upgrade TF or apply this patch manually: https://github.com/tensorflow/tensorflow/pull/20360\"\n )\n\n if TF_version <= (1, 12):\n try:\n from tensorflow.contrib.nccl.python.ops.nccl_ops import (\n _validate_and_load_nccl_so,\n )\n except Exception:\n pass\n else:\n _validate_and_load_nccl_so()\n\n from tensorflow.contrib.nccl.ops import gen_nccl_ops\n else:\n from tensorflow.python.ops import gen_nccl_ops\n\n inp_shp = inputs.get_shape().as_list()\n inp_rank = len(inp_shp)\n if inp_rank == 4: # conv layer\n if data_format == \"channels_last\":\n red_axises = [0, 1, 2]\n num_outputs = inp_shp[-1]\n fused_data_format = \"NHWC\"\n elif data_format == \"channels_first\":\n red_axises = [0, 2, 3]\n num_outputs = inp_shp[1]\n fused_data_format = \"NCHW\"\n else:\n raise ValueError\n elif inp_rank == 2: # fc layer\n red_axises = [0]\n num_outputs = inp_shp[-1]\n else:\n raise ValueError\n\n if (updates_collections is None) and (add_to_default_updateops):\n updates_collections = tf.GraphKeys.UPDATE_OPS\n\n if scope is None:\n scope = \"CrossGPUBatchNorm\"\n\n with tf.variable_scope(scope, reuse=reuse):\n\n gamma = tf.get_variable(\n name=\"gamma\",\n shape=[num_outputs],\n dtype=tf.float32,\n initializer=gamma_initializer,\n trainable=trainable,\n collections=variables_collections,\n )\n\n if verbose:\n gamma = tf.Print(gamma, [gamma], \"gamma\")\n\n beta = tf.get_variable(\n name=\"beta\",\n shape=[num_outputs],\n dtype=tf.float32,\n initializer=beta_initializer,\n trainable=trainable,\n collections=variables_collections,\n )\n\n if verbose:\n beta = tf.Print(beta, [beta], \"beta\")\n\n moving_mean = tf.get_variable(\n 
name=\"moving_mean\",\n shape=[num_outputs],\n dtype=tf.float32,\n initializer=moving_mean_initializer,\n trainable=False,\n collections=variables_collections,\n )\n\n moving_var = tf.get_variable(\n name=\"moving_variance\",\n shape=[num_outputs],\n dtype=tf.float32,\n initializer=moving_variance_initializer,\n trainable=False,\n collections=variables_collections,\n )\n\n if is_training and trainable:\n\n if num_dev == 1:\n mean, var = tf.nn.moments(inputs, axes=red_axises)\n else:\n multi_device_gpu_var_string = (\n \"/+\" + gpu_var_string + \"[0-\" + str(num_dev - 1) + \"]\"\n )\n shared_name = re.sub(\n multi_device_gpu_var_string, \"\", tf.get_variable_scope().name\n )\n batch_mean = tf.reduce_mean(inputs, axis=red_axises)\n\n if verbose:\n batch_mean = tf.Print(batch_mean, [batch_mean], \"input_mean\")\n\n batch_mean_square = tf.reduce_mean(tf.square(inputs), axis=red_axises)\n\n if verbose:\n batch_mean_square = tf.Print(\n batch_mean_square, [batch_mean_square], \"input_mean_square\"\n )\n\n batch_mean = gen_nccl_ops.nccl_all_reduce(\n input=batch_mean,\n reduction=\"sum\",\n num_devices=num_dev,\n shared_name=shared_name + \"_NCCL_mean\",\n ) * (1.0 / num_dev)\n batch_mean_square = gen_nccl_ops.nccl_all_reduce(\n input=batch_mean_square,\n reduction=\"sum\",\n num_devices=num_dev,\n shared_name=shared_name + \"_NCCL_mean_square\",\n ) * (1.0 / num_dev)\n\n if verbose:\n batch_mean = tf.Print(batch_mean, [batch_mean], \"NCCL_mean\")\n\n mean = batch_mean\n var = tf.nn.relu(\n batch_mean_square - tf.square(batch_mean)\n ) # passing through a relu to prevent small negative values\n\n if verbose:\n var = tf.Print(var, [var], \"NCCL_var\")\n\n outputs = tf.nn.batch_normalization(\n inputs,\n mean=mean,\n variance=var,\n offset=beta,\n scale=gamma,\n variance_epsilon=epsilon,\n )\n\n # each gpu will have a copy of the same moving_mean and moving_var variable, which only gets updated once mean and var have been computed across all gpus\n # this way, when tfutils saves the variables (which it only saves the ones on gpu 0) it will save the correct moving_mean and moving_var\n update_moving_mean_op = tf.assign(\n moving_mean, moving_mean * decay + mean * (1 - decay)\n )\n update_moving_var_op = tf.assign(\n moving_var, moving_var * decay + var * (1 - decay)\n )\n\n if updates_collections is None:\n with tf.control_dependencies(\n [update_moving_mean_op, update_moving_var_op]\n ):\n outputs = tf.identity(outputs)\n else:\n tf.add_to_collections(updates_collections, update_moving_mean_op)\n tf.add_to_collections(updates_collections, update_moving_var_op)\n outputs = tf.identity(outputs)\n else:\n if inp_rank == 4: # fused batch norm only supports convolutional layer outputs\n outputs, _, _ = tf.nn.fused_batch_norm(\n inputs,\n scale=gamma,\n offset=beta,\n mean=moving_mean,\n variance=moving_var,\n epsilon=epsilon,\n data_format=fused_data_format,\n is_training=False,\n )\n elif inp_rank == 2:\n outputs = tf.nn.batch_normalization(\n inputs,\n scale=gamma,\n offset=beta,\n mean=moving_mean,\n variance=moving_var,\n variance_epsilon=epsilon,\n )\n\n else:\n raise ValueError\n\n return outputs", "title": "" }, { "docid": "eb5d573b7f91ad565e47db3ecab0c9a3", "score": "0.5884563", "text": "def create_batch_norm_layer(prev, n, activation):\n kernel = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n layer = tf.layers.Dense(units=n, kernel_initializer=kernel)(prev)\n mean, var = tf.nn.moments(layer, axes=[0])\n beta = tf.Variable(tf.zeros([n]))\n gamma = tf.Variable(tf.ones([n]))\n 
epsilon = 1e-8\n z = tf.nn.batch_normalization(layer,\n mean,\n var,\n beta,\n gamma,\n epsilon)\n return activation(z)", "title": "" }, { "docid": "53b10192fd36f0d53394043b0df51f05", "score": "0.58834475", "text": "def _norm(input, is_training, norm='instance'):\n if norm == 'instance':\n return _instance_norm(input)\n elif norm == 'batch':\n return _batch_norm(input, is_training)\n else:\n return input", "title": "" }, { "docid": "58b00025f5c1e520f7ea9aa518cf3227", "score": "0.5855901", "text": "def unnormalize(tensor, mean, std):\n for t, m, s in zip(tensor, mean, std):\n t.mul_(s).add_(m)\n return tensor", "title": "" }, { "docid": "3a1a8020770448f159c2d931070315e1", "score": "0.5853549", "text": "def split_normalize(X, y, random_state):\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=random_state)\n\n\n sc = StandardScaler() # normalize data\n sc.fit(X_train, y_train)\n\n X_train, X_test = sc.transform(X_train), sc.transform(X_test)\n\n return X_train, y_train, X_test, y_test", "title": "" }, { "docid": "b33b2bce9661f2c5153da2076b2e750c", "score": "0.58508795", "text": "def __normalization(norm_type, n_out, dim):\n if dim == 1:\n return __norm1d(norm_type, n_out)\n elif dim == 2:\n return __norm2d(norm_type, n_out)\n else:\n return NotImplementedError(f\"norm generator for dim={dim} is not implemented\")", "title": "" }, { "docid": "62573c7ef431c313555951787bb82d8e", "score": "0.585019", "text": "def batch_non_norm_fwdprop(self,*argv):\n self.norm_score = self.unnorm_score", "title": "" }, { "docid": "a57b029dd88a297cd910d374dfd6c5f1", "score": "0.5840954", "text": "def contrib_layer_norm(inputs,\n center=True,\n scale=True,\n activation_fn=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n begin_norm_axis=1,\n begin_params_axis=-1,\n scope=None):\n from tensorflow.contrib.framework.python.ops import variables\n from tensorflow.contrib.layers.python.layers import utils\n from tensorflow.python.framework import ops\n from tensorflow.python.ops import init_ops\n from tensorflow.python.ops import nn\n from tensorflow.python.ops import variable_scope\n with variable_scope.variable_scope(\n scope, 'layer_norm', [inputs], reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n #graph_utils.add_dict_to_collection({\"inputs1\": inputs}, \"SAVE_TENSOR\")\n inputs_shape = inputs.shape\n inputs_rank = inputs_shape.ndims\n if inputs_rank is None:\n raise ValueError('Inputs %s has undefined rank.' 
% inputs.name)\n dtype = inputs.dtype.base_dtype\n if begin_norm_axis < 0:\n begin_norm_axis = inputs_rank + begin_norm_axis\n if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank:\n raise ValueError('begin_params_axis (%d) and begin_norm_axis (%d) '\n 'must be < rank(inputs) (%d)' %\n (begin_params_axis, begin_norm_axis, inputs_rank))\n params_shape = inputs_shape[begin_params_axis:]\n if not params_shape.is_fully_defined():\n raise ValueError(\n 'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' %\n (inputs.name, begin_params_axis, inputs_shape))\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n if center:\n beta_collections = utils.get_variable_collections(variables_collections,\n 'beta')\n beta = variables.model_variable(\n 'beta',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.zeros_initializer(),\n collections=beta_collections,\n trainable=trainable)\n if scale:\n gamma_collections = utils.get_variable_collections(\n variables_collections, 'gamma')\n gamma = variables.model_variable(\n 'gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.ones_initializer(),\n collections=gamma_collections,\n trainable=trainable)\n \n gamma = tf.cast(gamma, tf.float32)\n beta = tf.cast(beta, tf.float32)\n inputs = tf.cast(inputs, tf.float32)\n\n graph_utils.add_dict_to_collection({tf.get_variable_scope().name + \"_input\": inputs}, \"SAVE_TENSOR\")\n graph_utils.add_dict_to_collection({tf.get_variable_scope().name + \"_gamma\": gamma}, \"SAVE_TENSOR\")\n graph_utils.add_dict_to_collection({tf.get_variable_scope().name + \"_beta\": beta}, \"SAVE_TENSOR\")\n\n # Calculate the moments on the last axis (layer activations).\n norm_axes = list(range(begin_norm_axis, inputs_rank))\n mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)\n\n graph_utils.add_dict_to_collection({tf.get_variable_scope().name + \"_mean\": mean}, \"SAVE_TENSOR\")\n graph_utils.add_dict_to_collection({tf.get_variable_scope().name + \"_variance\": variance}, \"SAVE_TENSOR\")\n\n #graph_utils.add_dict_to_collection({\"mean\": mean}, \"SAVE_TENSOR\")\n #graph_utils.add_dict_to_collection({\"variance\": variance}, \"SAVE_TENSOR\")\n # Compute layer normalization using the batch_normalization function.\n variance_epsilon = 1e-6 #if dtype != tf.float16 else 1e-3\n\n #gamma = tf.reshape(gamma, [1, 1, inputs_shape[-1]])\n #beta = tf.reshape(beta, [1, 1, inputs_shape[-1]])\n \n outputs = nn.batch_normalization(\n inputs,\n mean,\n variance,\n offset=beta,\n scale=gamma,\n variance_epsilon=variance_epsilon,\n name=\"batchnorm_nofusion\")\n \n outputs = tf.cast(outputs, dtype)\n\n graph_utils.add_dict_to_collection({tf.get_variable_scope().name + \"_output\": outputs}, \"SAVE_TENSOR\")\n\n outputs.set_shape(inputs_shape)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections, sc.name, outputs)", "title": "" }, { "docid": "b2ad783dbe5f480e34f68ff409510530", "score": "0.58232313", "text": "def get_batch_normalization_updates(training_graph, allow_duplicates=False):\n from toolz import isdistinct\n from functools import partial\n from blocks.roles import OUTPUT\n from blocks.filter import VariableFilter, get_application_call\n var_filter = VariableFilter(bricks=[BatchNormalization], roles=[OUTPUT])\n all_app_calls = map(get_application_call, var_filter(training_graph))\n train_app_calls = _training_mode_application_calls(all_app_calls)\n if len(train_app_calls) == 0:\n 
raise ValueError(\"no training mode BatchNormalization \"\n \"applications found in graph\")\n bricks = [c.application.brick for c in train_app_calls]\n\n if not allow_duplicates and not isdistinct(bricks):\n raise ValueError('multiple applications of the same '\n 'BatchNormalization brick; pass allow_duplicates '\n '= True to override this check')\n\n def extract_pair(brick_attribute, metadata_key, app_call):\n return (getattr(app_call.application.brick, brick_attribute),\n app_call.metadata[metadata_key])\n\n mean_pair = partial(extract_pair, 'population_mean', 'offset')\n stdev_pair = partial(extract_pair, 'population_stdev', 'divisor')\n return sum([[mean_pair(a), stdev_pair(a)] for a in train_app_calls], [])", "title": "" }, { "docid": "a24e3155fdeb3b11377ccffac5f26249", "score": "0.58120847", "text": "def image_normalizer(img, params, type='enhance_contrast'):\n if type == 'enhance_contrast': # Enhance contrast of entire image\n # The Sentinel-2 data has 15 significant bits, but normally maxes out between 10000-20000.\n # Here we clip and normalize to value between 0 and 1\n img_norm = np.clip(img, 0, params.norm_threshold)\n img_norm = img_norm / params.norm_threshold\n\n elif type == 'running_normalization': # Normalize each band of each incoming image based on that image\n # Based on stretch_n function found at https://www.kaggle.com/drn01z3/end-to-end-baseline-with-u-net-keras\n min_value = 0\n max_value = 1\n\n lower_percent = 0.02 # Used to discard lower bound outliers\n higher_percent = 0.98 # Used to discard upper bound outliers\n\n bands = img.shape[2]\n img_norm = np.zeros_like(img)\n\n for i in range(bands):\n c = np.percentile(img[:, :, i], lower_percent)\n d = np.percentile(img[:, :, i], higher_percent)\n t = min_value + (img[:, :, i] - c) * (max_value - min_value) / (d - c)\n t[t < min_value] = min_value\n t[t > max_value] = max_value\n img_norm[:, :, i] = t\n\n elif type == 'landsat8_biome_normalization': # Normalize each band of each incoming image based on Landsat8 Biome\n # Standard deviations used for standardization\n std_devs = 4\n\n # Normalizes to zero mean and half standard deviation (find values in 'jhj_InspectLandsat8Data' notebook)\n img_norm = np.zeros_like(img)\n for i, b in enumerate(params.bands):\n if b == 1:\n img_norm[:, :, i] = (img[:, :, i] - 4654) / (std_devs * 1370)\n elif b == 2:\n img_norm[:, :, i] = (img[:, :, i] - 4435) / (std_devs * 1414)\n elif b == 3:\n img_norm[:, :, i] = (img[:, :, i] - 4013) / (std_devs * 1385)\n elif b == 4:\n img_norm[:, :, i] = (img[:, :, i] - 4112) / (std_devs * 1488)\n elif b == 5:\n img_norm[:, :, i] = (img[:, :, i] - 4776) / (std_devs * 1522)\n elif b == 6:\n img_norm[:, :, i] = (img[:, :, i] - 2371) / (std_devs * 998)\n elif b == 7:\n img_norm[:, :, i] = (img[:, :, i] - 1906) / (std_devs * 821)\n elif b == 8:\n img_norm[:, :, i] = (img[:, :, i] - 18253) / (std_devs * 4975)\n elif b == 9:\n img_norm[:, :, i] = (img[:, :, i] - 380) / (std_devs * 292)\n elif b == 10:\n img_norm[:, :, i] = (img[:, :, i] - 19090) / (std_devs * 2561)\n elif b == 11:\n img_norm[:, :, i] = (img[:, :, i] - 17607) / (std_devs * 2119)\n\n return img_norm", "title": "" }, { "docid": "aaf232861c1f5e98d9ab463a64b079f1", "score": "0.5805777", "text": "def normalizationlayer(x,is_train,norm_type = None,scope = None):\n with tf.name_scope(scope + norm_type): #with的意思就是接下来的操作都做一遍\n if norm_type == None:\n output = x\n elif norm_type == 'batch':\n output = tf.contrib.layers.batch_norm(x,center = True,scale = True,is_training = is_train)\n #center 
--> 如果True,有beta偏移量,反之亦然\n            #scale --> 如果为True,则乘以gama,反之亦然;当下一层是线性时,由于缩放可以下一层完成,所以可以禁用,即center和scale都设为False\n            #is_training --> 图层是否处于训练模式。在训练模式下,它将累积转入的统计量moving_mean和moving_variance,使用给定的指数移动平均值delay。\n            #当它不是训练模式时,那么它将使用的数值moving_mean和moving_variance\n            #训练时,需要更新moving_mean和moving_variance(均值和方差)\n        return output", "title": "" } ]
184ed9fcd6e5ef4e35599ae0f89801a1
Split a list into a list containing multiple lists of size
[ { "docid": "0c68170fe1f6aa41880bd752a7ad06b8", "score": "0.0", "text": "def thread_group(total_items, size):\n per_thread = round(len(total_items) / int(size))\n groups = []\n group = []\n\n for item in total_items:\n group.append(item)\n if len(group) >= per_thread:\n groups.append(group)\n group = []\n \n if len(group) > 0: \n groups.append(group)\n\n return groups", "title": "" } ]
[ { "docid": "3ba0007ac8998de32b42301187625b06", "score": "0.7920258", "text": "def get_chunks(input_list, num_chunks):\n\n chunked = np.array_split(input_list, num_chunks)\n return [x.tolist() for x in chunked]", "title": "" }, { "docid": "88c3da6b0e968a4c0f0d53cd0f8dedf7", "score": "0.7864793", "text": "def chunkify(list_, num_chunks):\n chunk_size = int(math.ceil(len(list_) / num_chunks))\n return [list_[i : i + chunk_size] for i in range(0, len(list_), chunk_size)]", "title": "" }, { "docid": "941040f93c042ccd2b79ed9274ba987e", "score": "0.76747143", "text": "def split_into_chunks(alist: List, nchunks: int) -> List[List]:\n\n def chunks(alist, nchunks):\n d, r = divmod(len(alist), nchunks)\n for i in range(nchunks):\n si = (d + 1) * (i if i < r else r) + d * (0 if i < r else i - r)\n ei = si + (d + 1 if i < r else d)\n yield alist[si:ei]\n\n return list(chunks(alist, nchunks))", "title": "" }, { "docid": "87e9cdf7476ea3e2a52e595bd0069683", "score": "0.7638533", "text": "def split_list(alist, wanted_parts=1):\n\n length = len(alist)\n return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]\n for i in range(wanted_parts) ]", "title": "" }, { "docid": "3b9c8c7a249fabe97a152f59de898b05", "score": "0.7580311", "text": "async def split_list(arr, size=100):\n\n arrs = []\n\n while len(arr) > size:\n pice = arr[:size]\n arrs.append(pice)\n arr = arr[size:]\n\n arrs.append(arr)\n return arrs", "title": "" }, { "docid": "431e2c5f0908b3e7d31301374caffee7", "score": "0.75785124", "text": "def split_list(alist: List[any], chunkSize: int) -> Iterator[any]:\n n = max(1, chunkSize)\n return (alist[i:i+n] for i in range(0, len(alist), n))", "title": "" }, { "docid": "f3b3c85e6e2cf17c1a23305e841f1830", "score": "0.7551916", "text": "def _create_chunks_from_list(lst, n):\n chunks = []\n for i in range(0, len(lst), n):\n chunks.append(lst[i : i + n])\n return chunks", "title": "" }, { "docid": "dc524079813a6f216b68a6b7b6654f7c", "score": "0.75238013", "text": "def break_list_to_sub_list(self,full_list, chunk_size = 45):\n if chunk_size < 1:\n chunk_size = 1\n return [full_list[i:i + chunk_size] for i in range(0, len(full_list), chunk_size)]", "title": "" }, { "docid": "c0f59a32b152ba4d35e090ced6824d4a", "score": "0.7493115", "text": "def split_list_into_nested_chunks(lst: List,\n elements_in_first_chunk: int,\n elements_in_chunk: int,\n max_elements_in_sub_chunk: int) -> List[List[List]]:\n if elements_in_chunk == 0 or max_elements_in_sub_chunk == 0:\n raise ValueError(\"There can not be zero elements in a chunk or subchunk\")\n\n elements_left = len(lst)\n chunk_sizes = [distribute_into_chunks(min(elements_left, elements_in_first_chunk), max_elements_in_sub_chunk)]\n elements_left -= elements_in_first_chunk\n next_chunk_length = min(elements_left, elements_in_chunk)\n while next_chunk_length > 0:\n chunk_sizes.append(distribute_into_chunks(next_chunk_length, max_elements_in_sub_chunk))\n elements_left -= next_chunk_length\n next_chunk_length = min(elements_left, elements_in_chunk)\n\n chunks = []\n current_start = 0\n for subchunk_sizes in chunk_sizes:\n subchunks = []\n for i in subchunk_sizes:\n subchunks.append(lst[current_start:current_start+i])\n current_start += i\n chunks.append(subchunks)\n\n if not (len(chunks[0]) > 0 or len(chunks) == 1):\n raise AssertionError()\n\n return chunks", "title": "" }, { "docid": "c15114a4a41324780e64a4f855be96fd", "score": "0.74778986", "text": "def chunked_list(lst, size):\n # Quick 'short-circuit' for a list less than size\n if len(lst) < size:\n 
return [lst]\n\n n_chunks = ceil(len(lst) / size)\n\n # We're returning a list containing lots of sublists\n # rather than yielding items as a generator\n # This is because we use a tqdm progress bar around this\n # function, and that needs to know the number of sublists\n # to be able to show a proper progress bar\n result = []\n\n for i in range(n_chunks):\n result.append(lst[i * size : (i + 1) * size])\n\n return result", "title": "" }, { "docid": "3b0ec1ba1aa910b6028d5dc8073130b0", "score": "0.74365264", "text": "def chunks(li, size):\n chunks = [li[i:i+size] for i in range(0, len(li), size)]\n\n return chunks", "title": "" }, { "docid": "57e072a381c13096cba3b990547e93d7", "score": "0.74201685", "text": "def split_chunks(l, chunk_sizes):\n chunks = []\n i = 0\n for s in chunk_sizes:\n chunks.append(l[i:i+s])\n i += s\n\n return chunks", "title": "" }, { "docid": "921e8a595c840f679ab80a495db57b0a", "score": "0.7414099", "text": "def __chunks(cutting_list: list, chunk_size: int) -> list:\n for i in range(0, len(cutting_list), chunk_size):\n yield cutting_list[i:i + chunk_size]", "title": "" }, { "docid": "305b6a1b6790090d548fec13377290c7", "score": "0.7412102", "text": "def split_list(a_list: List[Any], segment_size: int):\n for i in range(0, len(a_list), segment_size):\n yield a_list[i : i + segment_size]", "title": "" }, { "docid": "e1b8b29f0676c3a3ddc546bc2ba6f6c5", "score": "0.73750186", "text": "def chunks(lst, n):\n return np.array_split(lst, len(lst) / n)", "title": "" }, { "docid": "758d733b728cf6d802af5b15611e10df", "score": "0.73636377", "text": "def split(it, size = 2):\n return [it[i:i + size] for i in xrange(0, len(it), size)]", "title": "" }, { "docid": "2af22a7a3e7ec45537fd7f611dfbfa6f", "score": "0.736333", "text": "def chunks(l: list, n: int) -> list:\n return [l[i:i + n] for i in range(0, len(l), n)]", "title": "" }, { "docid": "2af22a7a3e7ec45537fd7f611dfbfa6f", "score": "0.736333", "text": "def chunks(l: list, n: int) -> list:\n return [l[i:i + n] for i in range(0, len(l), n)]", "title": "" }, { "docid": "a1c1e8a2adb51097c86da5b534e4b23b", "score": "0.7339924", "text": "def split_list(list_: list, n: int) -> list:\n #pylint: disable=invalid-name\n\n k, m = divmod(len(list_), n)\n return [\n list_[i * k + min(i, m):(i + 1) * k + min(i + 1, m)]\n for i in range(n)\n ]", "title": "" }, { "docid": "446afaded1c5b2502d67078dde89d68e", "score": "0.7337117", "text": "def split_in_n_lists(lst, n):\n if n <= 0: # must never happen\n sys.stderr.write(\"Error! 
Method split_in_n_lists called with n == 0\\n\")\n sys.exit(1)\n lst_len = len(lst)\n if n >= lst_len:\n # pigeonhole principle in work\n return [[x, ] for x in lst]\n ret = [] # list of lists\n sublist_len = lst_len / float(n)\n last = 0.0\n while last < len(lst):\n sublist = lst[int(last): int(last + sublist_len)]\n ret.append(sublist)\n last += sublist_len\n return ret", "title": "" }, { "docid": "825bd15a08b26c8613e0022ac6c740cd", "score": "0.7324186", "text": "def chunks(list_, size):\n for i in range(0, len(list_), size):\n yield list_[i:i + size]", "title": "" }, { "docid": "963d1660f2a21e378f3c268ac18b2ec5", "score": "0.7319891", "text": "def _split_list(self, iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return list(itertools.izip_longest(*args, fillvalue=fillvalue))", "title": "" }, { "docid": "908191c37b00e21010f907dfc8cb0594", "score": "0.731868", "text": "def _list_to_chunks(self, lst, chunk_length):\n for i in xrange(0, len(lst), chunk_length):\n yield lst[i:i + chunk_length]", "title": "" }, { "docid": "cabcab50addaffd4ea22b029a349f247", "score": "0.7249488", "text": "def list_in_chunks(orig_list, chunk_dim):\n return [orig_list[i:i + chunk_dim] for i in range(0, len(orig_list), chunk_dim)]", "title": "" }, { "docid": "0f0d5260150eea730761c9ecc1fb6df1", "score": "0.7220823", "text": "def split_list(a, n):\n part_len = len(a) / n\n parts = []\n for i in range(n):\n start_ind = i * part_len\n end_ind = (i + 1) * part_len\n if i == n - 1:\n parts.append(a[start_ind:])\n else:\n parts.append(a[start_ind:end_ind])\n return parts", "title": "" }, { "docid": "50c7dc082c2f906f1392a016b060cd6e", "score": "0.71938175", "text": "def split_list(a, n):\n if IS_PY3:\n xrange = range\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in xrange(n))", "title": "" }, { "docid": "72bafd99331ce7a07bc13c230bdcc6e9", "score": "0.71744436", "text": "def chunks(elements, chunk_size) -> []:\n chunk_size = max(1, chunk_size)\n return (elements[i:i + chunk_size] for i in range(0, len(elements), chunk_size))", "title": "" }, { "docid": "7c3177ce9e4af43a9ad0b8cafc61acfb", "score": "0.71392304", "text": "def split_in_chunks(l,n):\n size = len(l) \n size_chunks = size/n\n residue = size%n\n splitted = []\n i = 0\n j = 0\n while n > 0:\n r = 0\n if residue != 0:\n r = 1\n residue = residue - 1\n j = i + (size_chunks + r)\n splitted.append(l[i:j])\n n -= 1\n i = j\n return splitted", "title": "" }, { "docid": "cf8d7609a99d664d055287a2051b04f4", "score": "0.71363294", "text": "def chunk(lst, n):\n output = list()\n for i in range(0, len(lst), n):\n sublst = lst[i: i+n]\n output.append(sublst)\n return output", "title": "" }, { "docid": "532e764507efb807cf94547d5bce747b", "score": "0.71287626", "text": "def get_chunks(input_lst, desired_size=20):\n for i in range(0, len(input_lst), desired_size):\n yield input_lst[i : i + desired_size] # noqa: E203", "title": "" }, { "docid": "ec0eeaddbd2e234ae031644c8b7161aa", "score": "0.7120271", "text": "def _split_into_segments(input_list):\n max_n = 100 # as of 23.02.2021\n\n # n cuts divide a list into n+1 segments (math.floor(len(song_uris) / max_n) =: number_of_cuts)\n n_of_segments = math.floor(len(input_list) / max_n) + 1\n list_of_lists = []\n\n # i is element of interval [0, n_of_segments) => i always < n_of_segments\n for i in range(n_of_segments):\n if i < n_of_segments - 1:\n list_of_lists.append(input_list[i * max_n: (i + 1) * max_n])\n\n # the last segment can contain < max_n songs\n else:\n 
list_of_lists.append(input_list[i * max_n: (i * max_n) + len(input_list) % max_n])\n\n return list_of_lists", "title": "" }, { "docid": "34c526ace35d33f5540db3a402f463cc", "score": "0.7081201", "text": "def batchList (\n originalList,\n size\n ):\n\n batches = []\n lenOriginal = len(originalList)\n i = 0\n j = 0\n while j < lenOriginal:\n i = j\n j = j + size\n batches.append (originalList[i:j])\n return batches", "title": "" }, { "docid": "4292a4208732ff873032782e1af17fcc", "score": "0.70676243", "text": "def divide_list_into_similar_length_lists(List):\n \n width = sum(len(x) for x in List)\n width = math.sqrt(width)\n\n stack = list(List)\n lists = []\n remaining = sum(map(len,stack))\n while remaining>width:\n new = stack.pop(0)\n size = len(new)\n line = [new]\n while size<width:\n new = stack.pop(0)\n size+=len(new)\n line+=[new]\n lists.append(line)\n remaining = sum(map(len,stack))\n if stack:\n lists.append(stack)\n\n return lists", "title": "" }, { "docid": "8b8617f3aeab755f70e5e9f84e0b5fa3", "score": "0.70667315", "text": "def split_on_chunks(lst, num):\n for i in range(0, len(lst), num):\n yield lst[i:i + num]", "title": "" }, { "docid": "b119510140939e5c0882e546cde412ef", "score": "0.7065722", "text": "def divide_into_chunks(l: List, n: int):\n for i in range(0, len(l), n):\n yield l[i : i + n]", "title": "" }, { "docid": "e669940d03e75358361deaa68ecec1ba", "score": "0.70634997", "text": "def partition(ls, size):\n return [ls[i:i+size] for i in range(0, len(ls), size)]", "title": "" }, { "docid": "d0b6ba07db1b1974efc6761db79b5c0a", "score": "0.7048526", "text": "def SplitList(data, sizes = []):\n assert(all([ s >= 0 for s in sizes ]))\n if len(sizes) == 0:\n return data\n if sum(sizes) < len(data):\n sizes = list(sizes)\n sizes.append(len(data) - sum(sizes))\n out = list()\n last = 0\n for s in sizes:\n out.append(data[last : last+s])\n last += s\n return out", "title": "" }, { "docid": "3e52b3e93ef439daabcc8c8ad6ea0ebf", "score": "0.70271647", "text": "def split_by_chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]", "title": "" }, { "docid": "598b92df93ec2f557d5f5683c3d0c30c", "score": "0.70113766", "text": "def _split_into_chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i+n]", "title": "" }, { "docid": "ae4c2c6e1b359fc1294a129ecd5e261d", "score": "0.69919854", "text": "def chunk_list(ls, n):\n for i in range(0, n):\n yield ls[i::n]", "title": "" }, { "docid": "fe1a7f1642afda6032b087cd4d801005", "score": "0.69903475", "text": "def chunks(l: List[str], n: int) -> List[List[str]]:\n\n for i in range(0, len(l), n):\n yield l[i:i + n]", "title": "" }, { "docid": "0387ef9cb495b6daf821b0bbce43f571", "score": "0.69367313", "text": "def chunks(l, n):\n return [l[i:i + n] for i in range(0, len(l), n)]", "title": "" }, { "docid": "0387ef9cb495b6daf821b0bbce43f571", "score": "0.69367313", "text": "def chunks(l, n):\n return [l[i:i + n] for i in range(0, len(l), n)]", "title": "" }, { "docid": "237e6363373dd10438431c9ad69a6f8b", "score": "0.6934385", "text": "def _chunks(self, lst: List, n: int) -> List:\n for i in range(0, len(lst), n):\n yield lst[i:i + n]", "title": "" }, { "docid": "237e6363373dd10438431c9ad69a6f8b", "score": "0.6934385", "text": "def _chunks(self, lst: List, n: int) -> List:\n for i in range(0, len(lst), n):\n yield lst[i:i + n]", "title": "" }, { "docid": "f3d97f80c6808be40599b88220eba5bb", "score": "0.6921365", "text": "def list_divider(list_, chunks):\n\n return [list_[i:i + chunks] for i in range(0, len(list_), chunks)]", 
"title": "" }, { "docid": "b5d7354acaab3956d0a454bfcc59304b", "score": "0.6909167", "text": "def chunks(l, n):\n out = []\n newn = int(len(l) / n)\n for i in xrange(0, n-1):\n out.append(l[i*newn:i*newn+newn])\n \n out.append(l[n*newn-newn:])\n \n return out", "title": "" }, { "docid": "d4a50d60eae0300a9bd177959ee0d98e", "score": "0.689241", "text": "def make_sublist(split_list):\n split_list = make_list(split_list)\n # adgerdir\n new_list = []\n tomurListi = []\n \n \n for i in range(len(split_list)):\n for j in range(len(split_list-1)):\n new_lis.append(split_list[i:j])\n sub_lists = sorted(new_list)\n return sub_lists", "title": "" }, { "docid": "f2932914926c0a9d41f96be983b22e0a", "score": "0.68511", "text": "def split_into(iterable, sizes):\n # convert the iterable argument into an iterator so its contents can\n # be consumed by islice in case it is a generator\n it = iter(iterable)\n\n for size in sizes:\n if size is None:\n yield list(it)\n return\n else:\n yield list(islice(it, size))", "title": "" }, { "docid": "562353dacb7e8b31edbf76ea055fb5b8", "score": "0.6847553", "text": "def splitSequence(iterable, size):\n it = iter(iterable)\n item = list(itertools.islice(it, size))\n while item:\n yield item\n item = list(itertools.islice(it, size))", "title": "" }, { "docid": "f8470250dbdaf4f750ad7cb236393934", "score": "0.6844929", "text": "def chunks(lst, n):\r\n for i in range(0, len(lst), n):\r\n yield list(lst[i:i + n])", "title": "" }, { "docid": "f170ab9bd97899d4b18a76a56b5c3c91", "score": "0.683193", "text": "def chunker(seq: Iterable, size: int) -> List[Iterable]:\n return [seq[pos : pos + size] for pos in range(0, len(seq), size)]", "title": "" }, { "docid": "8a7907074fcad72f487bb33f32c3a719", "score": "0.6830311", "text": "def chunks(the_list, size):\n for i in range(0, len(the_list), size):\n yield the_list[i:i + size]", "title": "" }, { "docid": "37be5f47cbc24168722a944493ed20e0", "score": "0.682242", "text": "def chunkify(items, chunk_len):\n return [items[i:i+chunk_len] for i in range(0, len(items), chunk_len)]", "title": "" }, { "docid": "6ee0950b1c363a4e5f8a4d6e1564d2f5", "score": "0.68212676", "text": "def partition_list(inlist, blocksize):\n # Make sure we are dealing with integers:\n blocksize = int(round(blocksize, 0))\n return [inlist[i:i + blocksize] for i in range(0, len(inlist), blocksize)]", "title": "" }, { "docid": "ead65bd928bdf8843b8275a8ebf03a6f", "score": "0.6819066", "text": "def chunks(l, n):\n chunks_list = []\n for i in range(0, len(l), n):\n chunks_list.append(l[i:i + n])\n return chunks_list", "title": "" }, { "docid": "ee2bdf5930779e3cdc2fe524b1dd8062", "score": "0.68183523", "text": "def split_to_chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "title": "" }, { "docid": "bae480b266168fe859775569f388ea3a", "score": "0.6815947", "text": "def partition(elements: List[T], group_size: int) -> Iterable[List[T]]:\n for i in range(0, len(elements) + 1 - group_size):\n yield elements[i:(i + group_size)]", "title": "" }, { "docid": "cda94c37da20aa1567d399a73308feb6", "score": "0.6807064", "text": "def packed_to_list(x: torch.Tensor, split_size: Union[list, int]):\n # pyre-fixme[16]: `Tensor` has no attribute `split`.\n return x.split(split_size, dim=0)", "title": "" }, { "docid": "025a63515a5b348e76e0599da82515c9", "score": "0.6790569", "text": "def chunk(l, n):\n if len(l) == 1:\n return [l]\n if n == 0:\n n = 1\n return [l[x:x+n] for x in range(0, len(l), n)]", "title": "" }, { "docid": "73d34127b610b511def6d8caccc73e8c", "score": 
"0.67766833", "text": "def chunkLists(userList, chunkSize):\n chunkList = []\n\n # x goes from start of list to end in groups of chunkSize\n for x in range(0, len(userList), chunkSize):\n # Eg. If chunkSize = 6:\n # first loop: start = 0 and end = 6\n # second loop: start = 6 and end = 12\n # third loop: start = 12 end = 18 etc.\n chunk=userList[x:x+chunkSize]\n chunkList.append(chunk)\n return chunkList", "title": "" }, { "docid": "d82c60114dad270f6bf2d4d9d9dfc796", "score": "0.6772949", "text": "def split(self, split_sizes: list):\n if not all(isinstance(x, int) for x in split_sizes):\n raise ValueError('Value of split_sizes must be a list of integers.')\n meshlist = []\n curi = 0\n for i in split_sizes:\n meshlist.append(self[curi:curi + i])\n curi += i\n return meshlist", "title": "" }, { "docid": "f36d62641ff115426523bd6c5c9c4d2e", "score": "0.67723674", "text": "def splitblocks(lst, limit):\r\n res = []\r\n start = 0\r\n while start < len(lst):\r\n res.append(lst[start:start + limit])\r\n start += limit\r\n return res", "title": "" }, { "docid": "4132681d46cca8852254b555ac296746", "score": "0.67585164", "text": "def get_chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]", "title": "" }, { "docid": "42ff1a7b3cf3804a66b794b2118a07e3", "score": "0.6748555", "text": "def group(lst, n):\n return zip(*[lst[i::n] for i in range(n)])", "title": "" }, { "docid": "42ff1a7b3cf3804a66b794b2118a07e3", "score": "0.6748555", "text": "def group(lst, n):\n return zip(*[lst[i::n] for i in range(n)])", "title": "" }, { "docid": "a3a2ef2e8d3eb759abe3c4313ef29835", "score": "0.67334723", "text": "def padded_to_list(x: torch.Tensor, split_size: Union[Sequence[int], Sequence[Sequence[int]], None]=None):\n x_list = list(x.unbind(0))\n if split_size is None:\n return x_list\n N = len(split_size)\n if x.shape[0] != N:\n raise ValueError('Split size must be of same length as inputs first dimension')\n for i in range(N):\n if isinstance(split_size[i], int):\n x_list[i] = x_list[i][:split_size[i]]\n else:\n slices = tuple(slice(0, s) for s in split_size[i])\n x_list[i] = x_list[i][slices]\n return x_list", "title": "" }, { "docid": "a0dccfd760a6c56309d995a6a8860ca6", "score": "0.6732317", "text": "def chunk_list(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "title": "" }, { "docid": "53da543bbf5464c970de2e5575d432c2", "score": "0.6731187", "text": "def split(data, chunk_size):\n return (data[i:i + chunk_size] for i in range(0, len(data), chunk_size))", "title": "" }, { "docid": "7d182aaf7ffbdb02e30466e28bbdd5b7", "score": "0.67275196", "text": "def divide_list(big_list, num_groups):\n seed = 10\n random.seed(seed)\n random.shuffle(big_list)\n return [big_list[i::num_groups] for i in xrange(num_groups)]", "title": "" }, { "docid": "2ff921224598460506f0c1ea17e60cdb", "score": "0.67172474", "text": "def split_into_groups(groupSize, list_in):\n # Generator\n return (list_in[i:i + groupSize] for i in range(0, len(list_in), groupSize))\n\n # List\n # return [list_in[i:i + groupSize] for i in range(0, len(list_in), groupSize)]\n\n # Set\n # return {list_in[i:i + groupSize] for i in range(0, len(list_in), groupSize)}\n\n # Dico\n # return {list_in[i:i + groupSize]: i for i in range(0, len(list_in), groupSize)}", "title": "" }, { "docid": "2615c5d7a19b4f6a95efc3d6a1bb807e", "score": "0.6708067", "text": "def chunks(lis, chunk_size):\n for pos in range(0, len(lis), chunk_size):\n yield lis[pos:pos+chunk_size]", "title": "" }, { "docid": "bb1265b8b803308d1cd4c7ac64ce6267", "score": 
"0.6707903", "text": "def chunk(input_list: list, chunk_size: int):\n for i in range(0, len(input_list), chunk_size):\n yield input_list[i:i + chunk_size]", "title": "" }, { "docid": "f2e9305891d5ead9a0699d77bdb6925b", "score": "0.6695087", "text": "def divide_list(full_list, N):\n partitioned_list = list()\n if(len(full_list) % N == 0):\n one_slice = len(full_list) / N\n for i in range(0, len(full_list), one_slice):\n partitioned_list.append(full_list[i : i + one_slice])\n else:\n one_slice = (len(full_list) - (len(full_list) % (N - 1))) / (N - 1)\n loop_last = (len(full_list) - (len(full_list) % (N - 1)))\n for i in range(0, loop_last, one_slice):\n partitioned_list.append(full_list[i : i + one_slice])\n partitioned_list.append(full_list[loop_last : len(full_list)])\n return partitioned_list", "title": "" }, { "docid": "7a86031a20a12e01458969845588ac3d", "score": "0.6690608", "text": "def helper_chunk_list(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "title": "" }, { "docid": "54d9d758ea9a24a125ff99df2fa09636", "score": "0.6682217", "text": "def chunks(input_list: List[Any], num: int) -> Iterable[Any]:\n for i in range(0, len(input_list), num):\n yield input_list[i : i + num]", "title": "" }, { "docid": "32de5430f00cb279ae39aeadb086c56d", "score": "0.6682181", "text": "def split(img_list, job_size=96):\n df_img = _well_site_table(img_list)\n grouped_list = _group_images(df_img)\n return [chunk for chunk in chunks(grouped_list, job_size)]", "title": "" }, { "docid": "32de5430f00cb279ae39aeadb086c56d", "score": "0.6682181", "text": "def split(img_list, job_size=96):\n df_img = _well_site_table(img_list)\n grouped_list = _group_images(df_img)\n return [chunk for chunk in chunks(grouped_list, job_size)]", "title": "" }, { "docid": "55203efd8eead5c0c14557cec266a6b1", "score": "0.6678547", "text": "def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]", "title": "" }, { "docid": "d8d70980ca23f53bed8cdd6914b35f7c", "score": "0.6675725", "text": "def list_chunk_gen(lst, size=1000):\n n = max(1, size)\n return (lst[k:k+n] for k in range(0, len(lst), n))", "title": "" }, { "docid": "c853dd3e460098a9fa24b2d93a57f78b", "score": "0.66728485", "text": "def chunks(size: int, iterable: Iterable):\n if size == 0: return list(iterable)\n elif size > ilen(iterable): raise ValueError\n ls = list(iterable)\n return [ls[i:i + size] for i in range(0, ilen(iterable), size)]", "title": "" }, { "docid": "f42135a79229f3cf26b80479a8456fe7", "score": "0.66720825", "text": "def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]", "title": "" }, { "docid": "f42135a79229f3cf26b80479a8456fe7", "score": "0.66720825", "text": "def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]", "title": "" }, { "docid": "d8807915b3b4470e2b1f01cfc02316e7", "score": "0.6668224", "text": "def chunk(l, n):\n if n == 0: return []\n for i in range(0, len(l), n):\n yield l[i:i + n]", "title": "" }, { "docid": "2b263a5d2087b55c4263c4473983d60b", "score": "0.6654714", "text": "def chunks(lst, n):\n try:\n length = len(lst)\n except TypeError:\n length = lst.shape[0]\n for i in range(0, length, n):\n yield lst[i:i + n]", "title": "" }, { "docid": "f7cfb12e68a41375b0f48bd6d254cba5", "score": "0.66498834", "text": "def group(lst, n):\n return zip(*[itertools.islice(lst, i, None, n) for i in range(n)])", "title": "" }, { "docid": "bd7a659654c8464f95bdc27c88ce5582", "score": "0.6643912", "text": "def chunks(l : list, n : int):\n for i in range(0, n):\n yield 
l[i::n]", "title": "" }, { "docid": "0b8d121379312946de211dfc9256af3e", "score": "0.6636952", "text": "def chunks(original_list, chunk_size):\n for i in range(0, len(original_list), chunk_size):\n yield original_list[i:i + chunk_size]", "title": "" }, { "docid": "ce5005020659d6634b25423b7c16209a", "score": "0.6611229", "text": "def chunks(list, number_of_chunks):\n for i in range(0, len(list), number_of_chunks):\n yield list[i : i + number_of_chunks]", "title": "" }, { "docid": "2663836da9ce6447a2b441a31e13541b", "score": "0.6595506", "text": "def split_list(self, l, n):\n\n for i in range(0, len(l), n):\n if len(l[i:i+n]) == n:\n yield l[i:i+n]", "title": "" }, { "docid": "5279cc121ca7007f50c48da44f3844a8", "score": "0.6595066", "text": "def split(linked_list):\n pass", "title": "" }, { "docid": "cc7eb564f607f235460d1206238b7b2e", "score": "0.6594369", "text": "def generate_chunks(input_list, chunk_size):\n for i in range(0, len(input_list), chunk_size):\n yield input_list[i:i + chunk_size]", "title": "" }, { "docid": "b4954b17668b1b00bdadd8523a461726", "score": "0.6591829", "text": "def split_list(l, k):\n n = len(l)\n\n d = n // k\n r = n % k\n\n offset = 0\n for i in range(k):\n if i < r:\n size = d + 1\n else:\n size = d\n\n yield l[offset:offset+size]\n offset += size", "title": "" }, { "docid": "8d70bd95bfd08df984d64ce687fe7e59", "score": "0.65867275", "text": "def split_list(sequence, lengths):\n if len(sequence) != sum(lengths):\n raise ValueError(\"Sub-list lengths don't sum to length of the full list.\")\n\n idx = 0\n\n for length in lengths:\n yield sequence[idx : idx + length]\n idx += length", "title": "" }, { "docid": "ea4df6ed9bd1bc1f77b71f3d4fe50ee4", "score": "0.65843266", "text": "def _group_list(self, mylist):\r\n return [list(v) for i, v in itertools.groupby(mylist, lambda x: x[:4])]", "title": "" }, { "docid": "3791b0a32f01fbc94698ae7977ebb07a", "score": "0.65817106", "text": "def splitMaxSized(l, batchMaxSize):\n batchCount = 1\n if batchMaxSize is not None and batchMaxSize > 0:\n batchCount = math.ceil(len(l) / batchMaxSize)\n return split(l, batchCount)", "title": "" }, { "docid": "ab37ea4522f48e0b938eecc65147da78", "score": "0.6564898", "text": "def rescale_list(input_list, size):\n #import pdb;pdb.set_trace()\n try:\n assert len(input_list) >= size\n except:\n return []\n\n # Get the number to skip between iterations.\n skip = len(input_list) // size\n\n # Build our new output.\n output = [input_list[i] for i in range(0, len(input_list), skip)]\n\n # Cut off the last one if needed.\n return output[:size]", "title": "" }, { "docid": "addaa0ac14ffb6992a3499621155cc50", "score": "0.6550218", "text": "def split(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "title": "" }, { "docid": "75820bd8b3bb8576707a4745795cb213", "score": "0.6549265", "text": "def chunks(list_like, job_size):\n for i in range(0, len(list_like), job_size):\n yield list_like[i:i+job_size]", "title": "" }, { "docid": "75820bd8b3bb8576707a4745795cb213", "score": "0.6549265", "text": "def chunks(list_like, job_size):\n for i in range(0, len(list_like), job_size):\n yield list_like[i:i+job_size]", "title": "" }, { "docid": "fefbba3cd1d0f3d59a6bbf80ab6f6917", "score": "0.65486324", "text": "def split_in_batches(a_list, batch_size=500):\n for i in range(0, len(a_list), batch_size):\n yield a_list[i:i + batch_size]", "title": "" } ]
acb254396889db07f767e23ddd7235d3
Get LED strip brightness
[ { "docid": "db77cea0e3afdc2fa5b26d136885a699", "score": "0.68747914", "text": "def getBrightness(self) -> int:\n return self._brightness", "title": "" } ]
[ { "docid": "6d2a1e1f20aa611fb3ae4a3d6f103186", "score": "0.7632183", "text": "def brightness(self) -> int:\n pass", "title": "" }, { "docid": "944a8dc948b504cffde835cb0abc5795", "score": "0.7624959", "text": "def brightness(self):\n return libratbag.ratbag_led_get_brightness(self._led)", "title": "" }, { "docid": "4a497265e7cd9f1ff912a443c79c0b8b", "score": "0.75040007", "text": "def get_color_brightness(red, green, blue):\n return (red * 299 + green * 587 + blue * 114) / 1000", "title": "" }, { "docid": "3485ade88ea5a006908561de984785c0", "score": "0.73944825", "text": "def getBrightness(self) -> int:\n raise NotImplementedError", "title": "" }, { "docid": "9ef1edef1c0cc3d6c7456724a59c168f", "score": "0.7393049", "text": "def get_brightness(self):\n attrs = self.get_attrs()\n if attrs:\n return attrs[\"brightness\"]", "title": "" }, { "docid": "4ed448fa0dccbec898d1d524340a82bf", "score": "0.7319831", "text": "def brightness(self):\n return self._matrix.brightness", "title": "" }, { "docid": "76a9c99b037a49d125f33e2f8018551f", "score": "0.7317367", "text": "def brightness(self):\n return self.get_attr_int('brightness')", "title": "" }, { "docid": "45590733ba4723971b5f994a2d1c88da", "score": "0.7298433", "text": "def brightness(self) -> int:\n if self._mode == MODE_WHITE:\n return self.white_value\n return cast(int, self._bulb.brightness)", "title": "" }, { "docid": "7ae55ea7b0975c2e53d823979ea9c349", "score": "0.72874486", "text": "def brightness(self) -> int:\n return self._api.state", "title": "" }, { "docid": "135b19ca94cbd7e33578d4f767b59a34", "score": "0.7256109", "text": "def get_brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "71d1737c2c45c73af7f97dbb44e2fe8c", "score": "0.7215527", "text": "def brightness(self):\n return self._brightness", "title": "" }, { "docid": "29811815f386388b0f904146b4369dae", "score": "0.7098214", "text": "def get_brightness(self):\n ret_val = self._get_brightness()\n return ret_val", "title": "" }, { "docid": "ff9e942d04f701d6a2fc87720911c47c", "score": "0.6998933", "text": "def brightness(self) -> int:\n return 
self._brightness", "title": "" }, { "docid": "cafd420551b95f59a531587cf0110120", "score": "0.69729394", "text": "def brightness():\n\treturn random.randint(5, 100)", "title": "" }, { "docid": "1087667cda18f558ef637fbaabcdb935", "score": "0.69464725", "text": "def brightness(self) -> float:\n return self._brightness", "title": "" }, { "docid": "0885044309e47c56d7afa674a5e1de76", "score": "0.6911251", "text": "def _calculate_brightness(color):\n return int(math.sqrt(color.red() ** 2 * .241 + \\\n color.green() ** 2 * .691 + \\\n color.blue() ** 2 * .068))", "title": "" }, { "docid": "c05e955a41478f8b070cd4792f82de98", "score": "0.6903321", "text": "def brightness(self):\n brightness = 0\n for state in self._light_states():\n if not 'brightness' in state.attributes:\n return None\n brightness += state.attributes.get('brightness')\n brightness = brightness / float(len(self._entity_ids))\n return brightness", "title": "" }, { "docid": "c8a33878836fe197b2ad0df3a0ef24a4", "score": "0.68510777", "text": "def brightness(self) -> int | None:\n if self._device.is_dimmable and self._device.has_brightness:\n brightness = int(self._device.brightness)\n # Abode returns 100 during device initialization and device refresh\n # Convert Abode brightness (0-99) to Home Assistant brightness (0-255)\n return 255 if brightness == 100 else ceil(brightness * 255 / 99.0)\n return None", "title": "" }, { "docid": "58c4f45e2439514cc9f84ea52078b6c4", "score": "0.6840126", "text": "def brightness(self) -> Optional[int]:\n return self._state_brightness", "title": "" }, { "docid": "0f4adbfa437ed19c9458a7c9a74c7a40", "score": "0.67933553", "text": "def brightness(self) -> Optional[int]:\n return self._brightness", "title": "" }, { "docid": "9731241da9debf41d1cb06399f2ae0ea", "score": "0.67169166", "text": "def color(self) -> str:\n return self.state[\"Action\"][\"LEDStrip\"]", "title": "" }, { "docid": "62e10b47adff054b99ee2d0b52538c1c", "score": "0.6648977", "text": "def brightness(r,g,b):\n return sqrt(pow(r,2) * .241 + pow(g,2) * .691 + pow(b,2) * .068 ) / 255", "title": "" }, { "docid": "ce8c5c658f2872ec047192681ad2f7aa", "score": "0.66256696", "text": "def brightness(self, pixel):\n return (pixel[0] * 299 + pixel[1] * 587 + pixel[2] * 114) / 1000", "title": "" }, { "docid": "69477f0d5db3c71bae81bb69b288a1e6", "score": "0.6595123", "text": "def lightIntensity(self):\n \n pass", "title": "" }, { "docid": "288c6f8e6413f24bd282f4ee7c7722eb", "score": "0.65774924", "text": "def get_backlight(self):\n\n cmd, val = self.__send(\"BLT\")\n\n if cmd != \"BLT\" or val not in BACKLIGHT__VALUES:\n raise UnidenUnexpectedResponseError\n\n return val", "title": "" }, { "docid": "bc92b33ab2c7f771e351a71021b02037", "score": "0.65343595", "text": "def colour_hsv(self):\n hexvalue = self.status()[self.DPS][self.DPS_INDEX_COLOUR]\n return BulbDevice._hexvalue_to_hsv(hexvalue)", "title": "" }, { "docid": "c78668a6702d586f4cb3f71ed9157f99", "score": "0.65139586", "text": "def brightness(self, value):\n try:\n value = float(value)\n except Exception:\n return\n \n if value < 0:\n value = 0\n if value > 1:\n value = 1\n self.mda.set_brightness(value)", "title": "" }, { "docid": "b854a569f6033d1c160cf8d6ef141834", "score": "0.6510059", "text": "def get_led(self, color, led):\n color_text = ['red', 'green']\n led_text = ['left', 'right']\n path = self.path + 'ev3:' + color_text[color] + ':' + led_text[led]\n return int(self.read(path + '/brightness'))", "title": "" }, { "docid": "a086fef0b1b25a528d4f0d553f461d7d", "score": "0.6377531", "text": 
"def _get_brightness (self):\n\n try:\n return int (open (self._config.file).read ().strip ())\n except (IOError, ValueError):\n return False", "title": "" }, { "docid": "53a503cddb49fc2c99d6c76ba0fe0fb0", "score": "0.63658774", "text": "def brightness_value(self, value):\n self.brightness_value_now = value\n print('Brightness: ', value)\n self.update()", "title": "" }, { "docid": "a8cf8fec2231315c420d95a041f5b40b", "score": "0.6349952", "text": "def get_light_color(self):\n return self.red_light, self.green_light, self.blue_light", "title": "" }, { "docid": "7ad951aca1391bd2b3fefd4968cf132c", "score": "0.6333632", "text": "def get_status_led(npixels = None, *, brightness = None):\n\tglobal pixels\n\n\tif npixels is None:\n\t\tnpixels = get_npixels()\n\n\tif hasattr(board,\"NEOPIXEL\"):\n\t\t\"\"\"\n\t\tFor a board that has a neopixel (eg: QT PY M0)\n\t\t\"\"\"\n\t\timport neopixel\n\t\tpixels = neopixel.NeoPixel(board.NEOPIXEL, npixels)\n\telif hasattr(board,\"APA102_SCK\"):\n\t\t\"\"\"\n\t\tFor a board that has a APA102 (eg: UnexpectedMaker Feather S2, Trinket M0)\n\t\t\"\"\"\n\t\timport adafruit_dotstar\n\t\tpixels = adafruit_dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, npixels)\n\telif hasattr(board,\"DOTSTAR_CLOCK\"):\n\t\t\"\"\"\n\t\tFor a board that has DOTSTAR pins (eg: FunHouse)\n\t\t\"\"\"\n\t\timport adafruit_dotstar\n\t\tpixels = adafruit_dotstar.DotStar(board.DOTSTAR_CLOCK, board.DOTSTAR_DATA, npixels)\n\telse:\n\t\traise OSError(\"No hardware pixel identified\")\n\n\n\tif hasattr(board,'LDO2'):\n\t\t\"\"\"\n\t\tEnable LDO2 on the Feather S2 so we can use the status LED\n\t\t\"\"\"\n\t\tfrom digitalio import DigitalInOut\n\t\tldo2 = DigitalInOut(board.LDO2)\n\t\tldo2.switch_to_output()\n\t\tldo2.value = True\n\t\tkeep_pins[\"ldo2\"] = ldo2\n\t\ttime.sleep(0.035)\n\n\n\tif hasattr(board,\"NEOPIXEL_POWER\"):\n\t\t\"\"\"\n\t\tSome boards have a NEOPIXEL_POWER pin\n\t\t\"\"\"\n\t\tfrom digitalio import DigitalInOut\n\t\tneopower = DigitalInOut(board.NEOPIXEL_POWER)\n\t\tneopower.switch_to_output()\n\t\tkeep_pins[\"neopower\"] = neopower\n\t\tif \"MagTag\" in machine:\n\t\t\t# On MagTag, pull it down\n\t\t\tneopower.value = False\n\t\telif \"TinyS2\" in machine:\n\t\t\t# On TinyS2, pull it up\n\t\t\tneopower.value = True\n\t\telse:\n\t\t\t# Assume up by default maybe ?\n\t\t\tneopower.value = True\n\n\tif brightness is not None:\n\t\tpixels.brightness = brightness\n\n\treturn pixels", "title": "" }, { "docid": "33cd7a398f15d04d6496b017573a7773", "score": "0.63247114", "text": "def get_led_red_right(self):\n return self.get_led(0, 1)", "title": "" }, { "docid": "82a8e2dbf92b617de49fd4091b16f090", "score": "0.6263252", "text": "def sense_light_dark():\n print('Starting sense_light() function')\n ok()\n while True:\n light_level = light_sensor.value / 256\n light_level = 256 - light_level\n up_to = int(light_level / 256 * 10)\n for i in range(up_to):\n leds[i] = wheel(i * 25)\n for i in range(up_to, 10):\n leds[i] = (0, 0, 0)\n leds.show()\n time.sleep(0.1)", "title": "" }, { "docid": "4979b7c39aeed78a7b28eaa4a05a7a32", "score": "0.62586725", "text": "def calculate_brightness(brightness_hsv):\n\n result = 0\n\n # Brightness value of hsv image was divided to 3 ranges.\n # 0 - very brightness color\n # 1 - average brightness color\n # 2 - dark color.\n if 0 <= brightness_hsv <= 84:\n result = 2\n elif 85 <= brightness_hsv <= 170:\n result = 1\n elif 171 <= brightness_hsv <= 255:\n result = 0\n\n return result", "title": "" }, { "docid": "121ae0584e0b7bbe15df4e99985cfa2a", 
"score": "0.6238268", "text": "def LEDreadable(postbrightBG, postbrightFG):\n LEDreadconvertBG = Color(postbrightBG[0], postbrightBG[1], postbrightBG[2])\n LEDreadconvertFG = Color(postbrightFG[0], postbrightFG[1], postbrightFG[2])\n return LEDreadconvertBG, LEDreadconvertFG;", "title": "" }, { "docid": "a441a85721e3f072b15cf90930ba49dd", "score": "0.6231188", "text": "def colour_rgb(self):\n hexvalue = self.status()[self.DPS][self.DPS_INDEX_COLOUR]\n return BulbDevice._hexvalue_to_rgb(hexvalue)", "title": "" }, { "docid": "3113550f7566b8cca1c0d2a92b0ba014", "score": "0.62138146", "text": "def get_led_green_right(self):\n return self.get_led(1, 1)", "title": "" }, { "docid": "a726b0a0a2c4ad97eb1c8fc2c4563668", "score": "0.6203854", "text": "def led_line(interval, brightness, led, r, g, b, current_status):\n if led == 0 or led == 7:\n clear()\n set_pixel(led, r, g, b, (brightness / 20.0))\n show()\n custom_sleep(interval, current_status)\n\n elif led == 1 or led == 6:\n clear()\n set_pixel(led, r, g, b, (brightness / 10.0))\n show()\n custom_sleep(interval, current_status)\n\n elif led == 2 or led == 5:\n clear()\n set_pixel(led, r, g, b, (brightness / 5.0))\n show()\n custom_sleep(interval, current_status)\n\n elif led == 3 or led == 4:\n clear()\n set_pixel(led, r, g, b, brightness)\n show()\n custom_sleep(interval, current_status)", "title": "" }, { "docid": "866af263723b5d945be58f3165abfd08", "score": "0.6201627", "text": "def get_status_led(self):\n raise NotImplementedError", "title": "" }, { "docid": "866af263723b5d945be58f3165abfd08", "score": "0.6201627", "text": "def get_status_led(self):\n raise NotImplementedError", "title": "" }, { "docid": "55ab1fe8ae0be62d27d69a56c8da72d4", "score": "0.61412174", "text": "def measure_darkspectrum(self):\n self.dark_array = self.spectro.intensities()", "title": "" }, { "docid": "8d26910c34f22a54433a555685a00b48", "score": "0.61283195", "text": "def update(self):\n\n rgb = self._gateway.send(\"get_rgb\")[0]\n\n self._color = int_to_rgb(rgb)\n self._brightness = int(int_to_brightness(rgb) / 100 * 255)\n self._state = self._brightness > 0", "title": "" }, { "docid": "d608f0432bad831ed3350571dde5f927", "score": "0.6112745", "text": "def getbrightness(alarmtime_hour, alarmtime_min, alarmduration):\n alarmtime_rawmin = alarmtime_hour*60 + alarmtime_min\n nowtime_hour = datetime.datetime.now().hour\n nowtime_min = datetime.datetime.now().minute\n nowtime_sec = datetime.datetime.now().second\n nowtime_rawmin = nowtime_hour*60 + nowtime_min\n sunrisestart_rawmin = alarmtime_rawmin - alarmduration\n progress = 0\n\n# various brightness settings\n displaymax = 1 # brightest the display will be\n displaymin = 0 # dimmest it will be\n FGBGoffset = .25 # foreground and background difference\n avgperiod = 15 # number of brightness readings to take running avg over\n minvalue = 150 # set this to light conditions you want min brightness\n maxvalue = 600 # set this to where you want max\n\n# photoresitor SPI read from MCP3008 ADC chip\n# hardware SPI configuration:\n SPI_PORT = 0\n SPI_DEVICE = 0\n# mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))\n# value = mcp.read_adc(0)\n value = 500\n# take running avg, covery into 0 -> 1 brightness\n if len(runavg) < avgperiod:\n for f in range(avgperiod - len(runavg)):\n runavg.append(value) # gets running avg up to necessary length\n runavg.pop(0)\n avgvalue = sum(runavg) / len(runavg)\n brightnessFG = displaymax*(avgvalue - minvalue) / maxvalue\n\n# edgelord cases\n if brightnessFG > displaymax:\n 
brigtnessFG = displaymax\n brightnessBG = brightnessFG - FGBGoffset\n if brightnessBG < displaymin:\n brightnessBG = displaymin\n\n# here the LORD preprescribes brightness for the sunrise duration and 15min post sunrise\n if (nowtime_rawmin >= sunrisestart_rawmin) and (nowtime_rawmin <= alarmtime_rawmin):\n progress = ((nowtime_rawmin - sunrisestart_rawmin)*60 + nowtime_sec) / (alarmduration*60) #seconds elapsed since alarm started / total seconds in alarm\n brightnessBG = (displaymax - FGBGoffset)*progress\n brightnessFG = FGBGoffset + brightnessBG\n if (nowtime_rawmin > alarmtime_rawmin) and (nowtime_rawmin < (alarmtime_rawmin + 15)):\n progress = ((nowtime_rawmin - sunrisestart_rawmin)*60 + nowtime_sec) / (alarmduration*60) #seconds elapsed since alarm started / total seconds in alarm\n brightnessBG = displaymax - FGBGoffset\n brightnessFG = displaymax\n return brightnessBG, brightnessFG;", "title": "" }, { "docid": "a3cfb3f92091ed001b905c3e29e89a86", "score": "0.60940677", "text": "def check_light_dark(value, intensity=110):\n if isinstance(value, str) and value.startswith('#'):\n value = hex_to_rgb(value)\n if (value[0]*0.299 + value[1]*0.587 + value[2]*0.114) > intensity:\n return 'black'\n return 'white'", "title": "" }, { "docid": "3c11a4cef23a484642f56b1422274c1e", "score": "0.6073936", "text": "def changeBrightness(self, img, value):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n final_hsv = cv2.merge((h, s, v))\n img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return img", "title": "" }, { "docid": "73225ce49f66d5baa8a9c2c0b7726e39", "score": "0.6056815", "text": "def rgb_color(self):\n for state in self._light_states():\n if not 'rgb_color' in state.attributes:\n return None\n #return the first value we get since merging color values does not make sense\n return state.attributes.get('rgb_color')", "title": "" }, { "docid": "8a698041d4ddfbb9dfd060545c7e707d", "score": "0.6045772", "text": "def color(self):\n c = libratbag.ratbag_led_get_color(self._led)\n return (c.red, c.green, c.blue)", "title": "" }, { "docid": "2ac068204720293e40aa065b210c0632", "score": "0.6040752", "text": "def getHardwareColor(self):\n \n pass", "title": "" }, { "docid": "92c090da429b2b762a4381f782ca56d5", "score": "0.60397387", "text": "def change_brightness(source, event):\n if event == lv.EVENT.VALUE_CHANGED:\n ui.brightness = source.get_value()\n screen.set_screen_brightness(source.get_value())\n slider_label.set_text(str(source.get_value()))", "title": "" }, { "docid": "0a2aa1a0f8e7c54941a8ec31e1be771c", "score": "0.60302556", "text": "def single_led(current_led, r, g, b, brightness):\n if current_led < LED_MIN or current_led > LED_MAX:\n # Check to see if selected light actually exists on Blinkt\n return False\n set_pixel(current_led, r, g, b, brightness)", "title": "" }, { "docid": "5644da3f21744a166544fce387019794", "score": "0.60167056", "text": "def lightness(self) -> float:\n return statistics.mean(self.rgb)", "title": "" }, { "docid": "9573b9d8c426d492968c387452d76d33", "score": "0.601093", "text": "def led(self) -> bool:\n return self.data[\"light\"]", "title": "" }, { "docid": "0900eeaa2ef2adad8cf9d2e9b6e14f09", "score": "0.5968329", "text": "def get_brightness(video):\n brightness = []\n cap = cv2.VideoCapture(video)\n try:\n while True:\n # Capture frame-by-frame\n _, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Display the 
resulting frame\n mean = cv2.mean(gray)\n brightness.append(mean[0])\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except cv2.error:\n pass\n\n # When everything done, release the capture\n cap.release()\n brig_geom = round(statistics.geometric_mean([x + 1 for x in brightness]), 1)\n\n return brig_geom", "title": "" }, { "docid": "d38530a6e00d01edd8fb731cae67dd45", "score": "0.5952463", "text": "def applybrightness(prebrightBG, prebrightFG, brightnessBG, brightnessFG):\n postbrightBG = [int(255* i * brightnessBG) for i in prebrightBG]\n postbrightFG = [int(255* j * brightnessFG) for j in prebrightFG]\n return postbrightBG, postbrightFG;", "title": "" }, { "docid": "d04e772017cbfc357d2a3db9da2dbada", "score": "0.5950748", "text": "def max_brightness(self):\n return self.get_attr_int('max_brightness')", "title": "" }, { "docid": "3c69e1b85919916bb443534cfa64569f", "score": "0.59444016", "text": "def get_light_status(self) -> bool:\n \n return self.hwi.get_light_gpio()", "title": "" }, { "docid": "ce544823da83ea477184a57e4a26f53d", "score": "0.5936033", "text": "def get_led(self, m, index=0):\n\n # First, try to get a simple LED.\n try:\n return self.request(\"led\", index)\n except ResourceError:\n pass\n\n # Next, try to get an RGB LED, if the platform has one.\n # If we find one, we'll grab only one leg of it, and turn the others off.\n try:\n rgb_led = self.request(\"rgb_led\", index)\n m.d.comb += [\n rgb_led.r.eq(0),\n rgb_led.b.eq(0)\n ]\n return rgb_led.g\n except ResourceError:\n pass\n\n\n # Finally, if we've failed to get an LED entirely, return a NullPin stand-in.\n return NullPin()", "title": "" }, { "docid": "70dfbee23bd9fa5e270d36cd8c15b462", "score": "0.59344417", "text": "def getblue(self):\r\n\r\n return (self.RGB & BLUE_MASK) >> BLUE_SHIFT", "title": "" }, { "docid": "72ad513fb15eb242b0590c85ab760cbc", "score": "0.5930789", "text": "def supported_features(self) -> int:\n return SUPPORT_BRIGHTNESS", "title": "" }, { "docid": "84ee00351dca28071b79a151862b3ec0", "score": "0.59303385", "text": "def _update_leds(self):\n \"\"\"Must be in Safe or Full mode\"\"\"\n cmd = struct.pack(\">BBBB\", 0x8b, self.led_bits.int, self._power_color, self._power_intensity)\n self.ser.write(cmd)", "title": "" }, { "docid": "12c1bb1a72a7a0ddbb67c93005830a8f", "score": "0.58977515", "text": "def _avg_bright(colour):\n r, g, b = colour\n return (r + g + b) / 3", "title": "" }, { "docid": "670ee41f52f89094fde6dd4804f3a3b8", "score": "0.58909386", "text": "def set_rgb(color, brightness = 0):\r\n red_pwm.duty(int(color['r'] * brightness / 100) * 4)\r\n green_pwm.duty(int(color['g'] * brightness / 100) * 4)\r\n blue_pwm.duty(int(color['b'] * brightness / 100) * 4)", "title": "" }, { "docid": "32abc6ff209c892cc52526cd1dc70c11", "score": "0.58817565", "text": "def get_led_red_left(self):\n return self.get_led(0, 0)", "title": "" }, { "docid": "45d19803b46c0117b50fd128ae06b32e", "score": "0.58814937", "text": "def backlight(self):\n\n return self.device.command('BLT')", "title": "" }, { "docid": "415d3f4479fe2b6ec3d315a882684e1d", "score": "0.58761626", "text": "def brightness_decrease(wait=0.01, step=1):\n for j in range(int(256 // step)):\n for i in range(LightsController.pixels.count()):\n r, g, b = LightsController.pixels.get_pixel_rgb(i)\n r = int(max(0, r - step))\n g = int(max(0, g - step))\n b = int(max(0, b - step))\n # we don't need to check the is_rbg flag here because this decreases from the current values\n LightsController.pixels.set_pixel_rgb(i, r, g, b)\n 
LightsController.pixels.show()\n # if we have reached black, then we are done\n if r == 0 and g == 0 and b == 0:\n break\n if wait > 0:\n time.sleep(wait)", "title": "" }, { "docid": "898b7d96a0b78dee3faa399f7f2ee3df", "score": "0.5869625", "text": "def leds(self):\n return self._leds", "title": "" }, { "docid": "a6ba55897c8b8397b9edd47eae022250", "score": "0.586642", "text": "def brightness_cycle():\n brightness_value = cycle_sequence([1, 0.8, 0.6, 0.4, 0.2])\n while True:\n # pylint: disable=stop-iteration-return\n led.brightness = next(brightness_value)\n yield", "title": "" }, { "docid": "71e5e90e4965a136176311f058b87662", "score": "0.58455455", "text": "def brightness(self, brightness):\n libratbag.ratbag_led_set_brightness(self._led, brightness)", "title": "" }, { "docid": "3bc10019dfb08f14fff1a5969d60b261", "score": "0.5840906", "text": "def measure_white(self, mode=\"auto\"):\r\n log.info(\"Measuring WHITE variability to find DARK threshold...\")\r\n if mode == \"auto\":\r\n # Run the firmware routine to find the dark threshold\r\n self.ser.write(b\"D\")\r\n time.sleep(3)\r\n threshold = int.from_bytes(self.ser.read(2), byteorder=\"little\", signed=True)\r\n self.auto_dark = threshold\r\n log.info(f\"Auto DARK threshold value: {threshold}\")\r\n elif mode == \"manual\":\r\n arr = self.read_sensor(20000)\r\n if len(arr) != 20000:\r\n log.warning(\"Manual DARK threshold value could not be determined.\")\r\n threshold = None\r\n else:\r\n threshold = self._calc_threshold(arr, dark=True)\r\n self.manual_dark = threshold\r\n log.info(f\"Manual DARK threshold value: {threshold}\")\r\n return arr, threshold", "title": "" }, { "docid": "340dc6059b51ef8216ac3345dc053aa4", "score": "0.58365417", "text": "def measure_black(self, mode=\"auto\"):\r\n log.info(\"Measuring BLACK variability to find LIGHT threshold...\")\r\n if mode == \"auto\":\r\n # Run the firmware routine to find the light threshold\r\n self.ser.write(b\"L\")\r\n time.sleep(3)\r\n threshold = int.from_bytes(self.ser.read(2), byteorder=\"little\", signed=True)\r\n self.auto_light = threshold\r\n log.info(f\"Auto LIGHT threshold value: {threshold}\")\r\n elif mode == \"manual\":\r\n arr = self.read_sensor(20000)\r\n if len(arr) != 20000:\r\n log.warning(\"Manual LIGHT threshold value could not be determined.\")\r\n threshold = None\r\n else:\r\n threshold = self._calc_threshold(arr, light=True)\r\n self.manual_light = threshold\r\n log.info(f\"Manual LIGHT threshold value: {threshold}\")\r\n return arr, threshold", "title": "" }, { "docid": "aaf34b7cc00b318b71dedd55e2ca6dd9", "score": "0.58254725", "text": "def set_brightness(brightness_int):\n\n global brightness\n if(brightness_int < 0): brightness = 0\n elif(brightness_int > 15): brightness = 15\n else:brightness = brightness_int", "title": "" }, { "docid": "eb404df760f42fd38f2ec938660d4de4", "score": "0.5800836", "text": "def aug_brightness(image):\n\n image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n image1 = np.array(image1, dtype=np.float64)\n random_bright = .5 + np.random.uniform()\n image1[:, :, 2] = image1[:, :, 2] * random_bright\n image1[:, :, 2][image1[:, :, 2] > 255] = 255\n image1 = np.array(image1, dtype=np.uint8)\n image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)\n return image1", "title": "" }, { "docid": "28919827b3f70112ba60587fe747d90a", "score": "0.57970065", "text": "def getGlowIntensity(self):\n \n pass", "title": "" }, { "docid": "5ecfa6163c9c56b485317d0f76eff759", "score": "0.57944", "text": "def set_brightness(self, brightness):\n\n if \"brightness\" 
not in self.supported_features:\n errstr = (\n \"Current device \"\n + self.device_name\n + \" does not support brightness changes.\"\n )\n raise Exception(errstr)\n\n data = self.set_attributes({\"brightness\": brightness})\n return data", "title": "" }, { "docid": "2cf4a21de3697387fc99667fe76d59f2", "score": "0.5792764", "text": "def rgb_blink(self):\n\n GPIO.output(self.red_pin, int(self.__color_code[0]))\n GPIO.output(self.green_pin, int(self.__color_code[1]))\n GPIO.output(self.blue_pin, int(self.__color_code[2]))\n time.sleep(0.1)\n GPIO.output(self.red_pin, True)\n GPIO.output(self.green_pin, True)\n GPIO.output(self.blue_pin, True)", "title": "" }, { "docid": "612810c9c5f9f347237677a1ff1c3310", "score": "0.578979", "text": "def brightness_pct(self):\n return float(self.brightness) / self.max_brightness", "title": "" }, { "docid": "834d5bd68c68aa7324eb0d68d57cd6f9", "score": "0.57844466", "text": "def bright_color():\n values = [int(x*255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1)]\n color = discord.Color.from_rgb(*values)\n return color", "title": "" }, { "docid": "5aed5d32eb057a23e292070670ffefdd", "score": "0.5775921", "text": "def read_light(addr=DEVICE):\n data = BUS.read_i2c_block_data(addr, ONE_TIME_HIGH_RES_MODE_1)\n return convert_to_number(data)", "title": "" }, { "docid": "a338eaaa7222ee008e0f3df01404c4e7", "score": "0.57728827", "text": "def SetBrightness(self, level):\r\n pass", "title": "" }, { "docid": "4d19beee53aa8e0b70afc1b930a53d6a", "score": "0.57714206", "text": "def _bright_callback(self, topic_data):\n brightness_pct = int(topic_data)*100//255\n self.brightness_pct = brightness_pct", "title": "" }, { "docid": "c6a3c9b7f955315d47caaa10cba07320", "score": "0.57545096", "text": "def get_reflected_light_intensity(self):\r\n return self._low_level_color_sensor.get_reflected_light_intensity()", "title": "" }, { "docid": "76200212a946dd8e8ab7c5484ee2a079", "score": "0.5753856", "text": "def getcolor(self):\r\n\r\n return self.RGB", "title": "" }, { "docid": "2c41ec175ca3a09ae77368ffde15f79f", "score": "0.57430136", "text": "def test_max_brightness_int(self):\n led = inputs.LED(None, PATH, NAME)\n max_brightness = led.max_brightness()\n self.assertEqual(max_brightness, 2)", "title": "" }, { "docid": "2eb0e0588cc48035a5db4602d93b6de9", "score": "0.57384205", "text": "def increase_brightness(img, value=30):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n final_hsv = cv2.merge((h, s, v))\n img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return img", "title": "" }, { "docid": "480ac72b717a1016fb1e4ab6eaf88d54", "score": "0.5732356", "text": "def colorizer_light(self,x, y):\r\n r = min(1, 1 - y / 3)\r\n g = min(1, 1 + y / 3)\r\n b = 1 / 4 + x / 16\r\n return (r, g, b)", "title": "" }, { "docid": "3428184a484a5d05ccd023c30defb0a8", "score": "0.57115126", "text": "def rgb(self) -> List[bool]:\n return self.i2c_switches.read_switches()[4:]", "title": "" }, { "docid": "462574c18f059cb8784b2c5ff88b1ad0", "score": "0.5705725", "text": "def readReflect(self):\n if self.colorSensor is not None:\n if self.colorSensor.mode != \"COL-REFLECT\":\n self.colorSensor.mode = \"COL-REFLECT\"\n return self.colorSensor.reflected_light_intensity\n else:\n print(\"There is no color sensor set up\")", "title": "" }, { "docid": "4673409e1d386cd2253001b85730b2b2", "score": "0.56974816", "text": "def setLEDBrightness(self, led, offset, brightness):\n self.pwm.setPWM(3 * led + offset, brightness << 
4, 4095)", "title": "" }, { "docid": "08e563a859b9bcfe7864e9f6423c5f8a", "score": "0.56867677", "text": "def brightness(img, value, max_value, bias=0):\n value = _float_parameter(value, max_value) + bias\n return PIL.ImageEnhance.Brightness(img).enhance(value), value", "title": "" } ]
47e732ddee436c9c5b80be0e25da1794
Parse the next inline element in subject, advancing subject position and adding the result to 'inlines'.
[ { "docid": "719ebba8497de2ee4cbbf01758c86eaa", "score": "0.69148296", "text": "def parse_inline(self, inlines):\n c = self.peek()\n# print 'C', repr(c)\n# print 'POS', self.pos\n# print 'SUB', repr(self.subject)\n# pprint([i.dump() for i in inlines])\n r = None\n if c == '\\n':\n r = self.parse_newline(inlines)\n elif c == '\\\\':\n r = self.parse_escaped(inlines)\n elif c == '`':\n r = self.parse_backticks(inlines)\n elif c == '*' or c == '_':\n r = self.parse_emphasis(inlines)\n elif c == '[':\n r = self.parse_link(inlines)\n elif c == '!':\n r = self.parse_image(inlines)\n elif c == '<':\n r = self.parse_autolink(inlines) or self.parse_html_tag(inlines)\n elif c == '&':\n r = self.parse_entity(inlines)\n\n if not r:\n r = self.parse_string(inlines)\n\n# print 'AfterSUB', repr(self.subject)\n# pprint([i.dump() for i in inlines])\n\n return r", "title": "" } ]
[ { "docid": "3abf27bb73f04c924719f7f6a70c8946", "score": "0.57719964", "text": "def inlines(self):\n for inline in range(self.startInline, self.endInline+1, self.stepInline):\n yield inline", "title": "" }, { "docid": "6621a5f0a1ef41c160ed721707835a08", "score": "0.54631", "text": "def parse_inline(self): # type: () -> None\n if self.inline_children:\n self.children = parser.parse_inline(self.children) # type: ignore\n elif isinstance(getattr(self, \"children\", None), list):\n for child in self.children:\n if isinstance(child, BlockElement):\n child.parse_inline()", "title": "" }, { "docid": "31fb7a390f52482112b802aff152ce5c", "score": "0.5295551", "text": "def parse(self, s, refmap):\n self.subject = s\n self.pos = 0\n self.refmap = refmap or {}\n inlines = []\n while self.parse_inline(inlines):\n# pprint([i.dump() for i in inlines])\n pass\n return inlines", "title": "" }, { "docid": "4c4689e35a95504c8a17a6a30f71c6aa", "score": "0.5292471", "text": "def separate_elements(el_inline):\n\tif count_special(el_inline) < 2:\n\t\treturn [el_inline] \n\telse:\n\t\tend = [] \n\t\twhile count_special(el_inline)>1:\t\n\t\t\tindex = 0\n\t\t\tresult = []\n\t\t\twhile \"src\" not in el_inline[index] and \"href\" not in el_inline[index]:\n\t\t\t\tresult += [el_inline[index]]\n\t\t\t\tindex += 1 \n\t\t\tresult += [el_inline[index]]\n\t\t\tend.append(result)\n\t\t\tel_inline = el_inline[index+1:]\n\t\tend.append(el_inline)\n\t\treturn end", "title": "" }, { "docid": "ba447a2c6d85512ccca3174310cec749", "score": "0.5240676", "text": "def parse_emphasis(self, inlines):\n startpos = self.pos\n c = None\n first_close = 0\n nxt = self.peek()\n\n if nxt == '*' or nxt == '_':\n c = nxt\n else:\n return 0\n\n # Get opening delimiters.\n res = self.scan_delims(c)\n self.pos += res['numdelims']\n\n # We provisionally add a literal string. 
If we match appropriate\n # closing delimiters, we'll change this to Strong or Emph.\n inlines.append(Inline(t='Str', c=self.subject[self.pos - res['numdelims']:self.pos]))\n\n # Record the position of this opening delimiter\n delimpos = len(inlines) - 1\n\n if not res['can_open'] or res['numdelims'] == 0:\n return 0\n\n first_close_delims = 0\n\n if res['numdelims'] == 1:\n while True:\n res = self.scan_delims(c)\n# pprint([i.dump() for i in inlines])\n# print 'RES', pformat(res)\n if res['numdelims'] >= 1 and res['can_close']:\n self.pos += 1\n # Convert the inline at delimpos, currently a string with the delim,\n # into an Emph whose contents are the succeeding inlines.\n inlines[delimpos].t = 'Emph'\n inlines[delimpos].c = inlines[delimpos + 1:]\n splice(inlines, delimpos + 1)\n break\n else:\n if self.parse_inline(inlines) == 0:\n break\n# print 'END'\n# pprint([i.dump() for i in inlines])\n return self.pos - startpos\n\n elif res['numdelims'] == 2: # We started with ** or __\n while True:\n res = self.scan_delims(c)\n if res['numdelims'] >= 2 and res['can_close']:\n self.pos += 2\n inlines[delimpos].t = 'Strong'\n inlines[delimpos].c = inlines[delimpos + 1:]\n splice(inlines, delimpos + 1)\n break\n else:\n if self.parse_inline(inlines) == 0:\n break\n\n return self.pos - startpos\n\n\n elif res['numdelims'] == 3:\n while True:\n res = self.scan_delims(c)\n# print 'RES', res\n# print 'FCD', first_close_delims\n if res['numdelims'] >= 1 and res['numdelims'] <= 3 and res['can_close'] and res['numdelims'] != first_close_delims:\n if first_close_delims == 1 and res['numdelims'] > 2:\n res['numdelims'] = 2\n elif first_close_delims == 2:\n res['numdelims'] = 1\n elif res['numdelims'] == 3:\n # If we opened with ***, then we interpret *** as ** followed by *\n # giving us <strong><em>\n res['numdelims'] = 1\n\n self.pos += res['numdelims']\n\n if first_close > 0: # If we've already passed the first closer.\n inlines[delimpos].t = 'Strong' if first_close_delims == 1 else 'Emph'\n inlines[delimpos].c = [Inline(\n t='Emph' if first_close_delims == 1 else 'Strong',\n c=inlines[delimpos + 1:first_close],\n )]\n inlines[delimpos].c.extend(inlines[first_close + 1:])\n splice(inlines, delimpos + 1)\n break\n\n else:\n # This is the first closer for now, add literal string.\n # We'll change this when we hit the second closer.\n inlines.append(Inline(t='Str', c=self.subject[self.pos - res['numdelims']:self.pos]))\n# print 'Emph IL'\n# pprint([i.dump() for i in inlines])\n first_close = len(inlines) - 1\n first_close_delims = res['numdelims']\n else:\n if self.parse_inline(inlines) == 0:\n break\n\n return self.pos - startpos\n\n else:\n return 1\n\n return 0", "title": "" }, { "docid": "5fbd65e664bdad44712fe552f229ed9d", "score": "0.51074517", "text": "def process_inlines(self, block):\n if block.t in ['Paragraph', 'SetextHeader', 'ATXHeader']:\n block.inline_content = self.inlineParser.parse(block.string_content.strip(), self.refmap)\n block.string_content = ''\n else:\n pass\n\n if block.children:\n for child in block.children:\n self.process_inlines(child)", "title": "" }, { "docid": "a0aafd42b8444d6e59e8e44c64e18adf", "score": "0.5080264", "text": "def parse_link(self, inlines):\n# print 'PARSING LINK'\n startpos = self.pos\n\n n = self.parse_link_label()\n if n == 0:\n return 0\n\n rawlabel = self.subject[startpos:startpos + n]\n# print 'RAWL', rawlabel\n # If we got this far, we've parse a label.\n # Try to parse an explicit link: [label](url \"title\")\n if self.peek() == '(':\n# print 
'PEEK'\n self.pos += 1\n if self.spnl():\n dest = self.parse_link_destination()\n# print 'DEST', dest\n if dest is not None and self.spnl():\n # Make sure there's a space before the title\n match = re.search(r'^\\s', self.subject[self.pos - 1])\n if match:\n title = self.parse_link_title() or ''\n else:\n title = ''\n if self.spnl() and self.match(re.compile(r'^\\)')):\n inlines.append(\n Inline(\n t='Link',\n destination=dest,\n title=title,\n label=parse_raw_label(rawlabel),\n )\n )\n return self.pos - startpos\n\n self.pos = startpos\n return 0\n\n # If we're here, it wasn't an explicit link. Try to parse a reference link.\n # first, see if there's another label\n savepos = self.pos\n self.spnl()\n beforelabel = self.pos\n n = self.parse_link_label()\n if n == 2:\n # Empty second label.\n reflabel = rawlabel\n elif n > 0:\n reflabel = self.subject[beforelabel:beforelabel + n]\n else:\n self.pos = savepos\n reflabel = rawlabel\n\n # Lookup rawlabel in refmap\n link = self.refmap.get(normalize_reference(reflabel))\n if link:\n inlines.append(\n Inline(\n t='Link',\n destination=link.destination,\n title=link.title,\n label=parse_raw_label(rawlabel),\n )\n )\n return self.pos - startpos\n\n else:\n self.pos = startpos\n return 0\n\n # Nothing worked, rewind\n self.pos = startpos\n return 0", "title": "" }, { "docid": "3c5da20be0ef4169ebc7ba5bbd2205b2", "score": "0.49279112", "text": "def parse_escaped(self, inlines):\n subj = self.subject\n pos = self.pos\n if subj[pos] == '\\\\':\n if pos < len(subj) - 1:\n if subj[pos + 1] == '\\n':\n inlines.append(Inline(t='Hardbreak'))\n self.pos = self.pos + 2\n return 2\n elif RE_ESCAPABLE.search(subj[pos + 1]):\n inlines.append(Inline(t='Str', c=subj[pos + 1]))\n self.pos = self.pos + 2\n return 2\n\n self.pos += 1\n inlines.append(Inline(t='Str', c='\\\\'))\n return 1\n\n else:\n return 0", "title": "" }, { "docid": "9a28aebfb939c4857480c8508d7a2417", "score": "0.48500213", "text": "def _parse_inline_image(self, data, disallowed):\n attrs = []\n token = None\n while data.peek(1) and token != b'ID':\n token = self._get_next_token(data, disallowed=disallowed)\n if not token: continue\n attrs.append(self._process_token(data,token,BlackHole, True))\n yield PdfDict({attrs[i]:attrs[i+1] for i in range(0,len(attrs)-1,2)})\n data.read(1)\n img = io.BytesIO()\n buf = bytes(2)\n while buf != b'EI':\n buf = buf[1:]+data.read(1)\n img.write(buf[1:])\n yield PdfRawData(img.getvalue()[:-2]) # This is such an ugly hack\n yield PdfRaw(b'EI')", "title": "" }, { "docid": "3663e159b5fa7911129bd7f583e5f46b", "score": "0.48249736", "text": "def test_linebreak_in_nested_tags(self):\n expected = (\"this is line 1 \\r\\nitalicized this is bold and italics\\r\\nno italics last line\")\n els = self.dfxp.get_subtitles()\n self.assertEqual(expected, \n self.dfxp.get_content_with_markup(els[7], \n mappings=SRTGenerator.MAPPINGS))", "title": "" }, { "docid": "8b2bd3319984c612fac51a511763e30a", "score": "0.47704455", "text": "def inline_crlines(self):\n for inline, crline in product(\n range(self.startInline, self.endInline+1, self.stepInline),\n range(self.startCrline, self.endCrline+1, self.stepCrline)):\n yield (inline, crline)", "title": "" }, { "docid": "acd33e73ad66ebe4999f78153e084e20", "score": "0.475356", "text": "def iternext(seq):\n return iter(seq).next", "title": "" }, { "docid": "83c6d1c69278c77698fc1b20ce95205c", "score": "0.46693477", "text": "def addInlineBox(self, box):\n\n # Split a pre inline box at line breaks, and line breaks only\n if box.pre:\n if 
len(self.childBoxes) < 1:\n self.addLine()\n lines = box.getLines()\n for line in lines[:-1]:\n self.childBoxes[-1].addChildBox(line)\n self.addLine()\n self.childBoxes[-1].addChildBox(lines[-1])\n return\n\n nextbox = box\n while nextbox:\n box = nextbox\n nextbox = None\n\n width = box.fullWidth()\n height = box.height\n\n # TODO: put float handling code in here\n\n if self._remaining_width <= 0:\n self.addLine()\n \n if width > self._remaining_width:\n boxes = box.split(self._remaining_width)\n box = boxes[0]\n\n width = box.fullWidth()\n\n #continue if the box was split\n if len(boxes) == 2:\n nextbox = boxes[1]\n\n # if the returned box doesn't fit on the current line and there\n # are already elements on the current line, add a new line\n if width > self._remaining_width and \\\n len(self.childBoxes[-1].childBoxes) > 0:\n self.addLine()\n \n self.childBoxes[-1].addChildBox(box)\n self._remaining_width -= width", "title": "" }, { "docid": "34ae4795ebfd8b19dbea17511b86aa96", "score": "0.46257067", "text": "def next(self, part):\n return next(part)", "title": "" }, { "docid": "bec6051ca31e67ea815441a3c0cde5d6", "score": "0.46196488", "text": "def inline_elements_xpath(self, inline_elements_xpath):\n\n self._inline_elements_xpath = inline_elements_xpath", "title": "" }, { "docid": "ce98ae71d1c9e429e83989aa55119e01", "score": "0.45980158", "text": "def iternext(seq):\n return iter(seq).__next__", "title": "" }, { "docid": "b3c5fd5a6b7f9fdd8f87dbb1ade2f1b2", "score": "0.4588808", "text": "def next_in_chain(self):\n pass", "title": "" }, { "docid": "af943f5296143c2daa86da7a0a84cb16", "score": "0.4579555", "text": "def _parse(self):\n self._parsed = [] # [element, ...]\n title = []\n author = []\n\n lines = iter(self._lines)\n for lineno, line in lines:\n if isinstance(line, QuoteBlock):\n self._parsed.append(line)\n continue\n\n if line in ('\\f', '\\f\\f'):\n self._parsed.append(\n Control([(lineno, line)], self._footnotes, self._filename)\n )\n continue\n\n if Section.RE.match(line):\n self._parsed.append(\n Section([(lineno, line)], self._footnotes, self._filename)\n )\n continue\n\n if ItemEnum.RE.match(line):\n lines_withno = [(lineno, line)]\n for lineno, line in lines:\n if not line.strip():\n break\n lines_withno.append((lineno, line))\n\n self._parsed.append(\n ItemEnum(lines_withno, self._footnotes, self._filename)\n )\n continue\n\n if Table.RE.match(line):\n lines_withno = [(lineno, line)]\n for lineno, line in lines:\n if not line.strip():\n break\n lines_withno.append((lineno, line))\n\n self._parsed.append(\n Table(lines_withno, self._footnotes, self._filename)\n )\n continue\n\n if CodeBlock.RE.fullmatch(line):\n m = CodeBlock.RE.fullmatch(line)\n if m is None:\n raise MarkdownSyntaxError(\n 'ill-formed code block',\n (self._filename, lineno, 0, len(line), line)\n )\n\n pre_line = m.group('PREFIX')\n lines_withno = []\n for lineno, line in lines:\n if line == pre_line:\n break\n lines_withno.append((lineno, line))\n\n self._parsed.append(\n CodeBlock(\n lines_withno, self._footnotes, self._filename,\n name=m.group('NAME')\n )\n )\n continue\n\n if Title.RE.match(line):\n title.append((lineno, line))\n continue\n\n if Author.RE.match(line):\n author.append((lineno, line))\n continue\n\n if line.startswith(RawText.PAT):\n pre_line = line\n lines_withno = []\n for lineno, line in lines:\n if line == pre_line:\n break\n lines_withno.append((lineno, line))\n\n self._parsed.append(\n RawText(lines_withno, self._footnotes, self._filename)\n )\n continue\n\n if 
line.startswith(ShellBlock.PAT):\n pre_line = line\n lines_withno = []\n for lineno, line in lines:\n if line.startswith(ShellBlock.PAT):\n break\n\n lines_withno.append((lineno, line))\n\n self._parsed.append(\n ShellBlock(\n lines_withno, self._footnotes, self._filename,\n style=pre_line.split(':')[1:]\n )\n )\n continue\n\n if line.startswith(AlignedMath.OPENPAT):\n lines_withno = []\n for lineno, line in lines:\n if line.startswith(AlignedMath.CLOSEPAT):\n break\n\n lines_withno.append((lineno, line))\n\n self._parsed.append(\n AlignedMath(lines_withno, None, self._filename)\n )\n continue\n\n # Add here if new elements are implemented\n\n if line.startswith('#'):\n raise MarkdownSyntaxError(\n 'unknown directive',\n (self._filename, lineno, 0, len(line), line)\n )\n\n self._parsed.append(\n Text([(lineno, line)], self._footnotes, self._filename)\n )\n\n self._title = (\n Title(title, self._footnotes, self._filename) if title else None\n )\n self._author = (\n Author(author, self._footnotes, self._filename) if author else None\n )\n if not [\n t._lines_withno for t in self._parsed\n if len(t._lines_withno) > 1 or t._lines_withno[0][1].strip()\n ] and self._title is None:\n print(\n MarkdownSyntaxWarning(\n 'empty input file',\n (self._filename, None, None, None, None)\n ).diagnose(), file=sys.stderr\n )", "title": "" }, { "docid": "228eac70ae31a76b8a77fc394a713293", "score": "0.45482254", "text": "def __myia_next__(self):\n return self.seq[self.idx], SequenceIterator(self.idx + 1, self.seq)", "title": "" }, { "docid": "6ebcd70e9b9a58720edfd29fc6ce0a9d", "score": "0.45469713", "text": "def emline(self):\n\n return self._return_extension('emline')", "title": "" }, { "docid": "fc648b09a5cf28085856503f5a5440a0", "score": "0.45463932", "text": "def change_xlf_inline_tag_to_i_tag(self):\r\n\r\n for tag in self.inline_tag_list:\r\n repatter = re.compile(r'<{0} id=\"(.*?)\">'.format(tag))\r\n self.string = repatter.sub('<i id=\"\\\\1\">', self.string)\r\n repatter = re.compile(r'</{0}>'.format(tag))\r\n self.string = repatter.sub('</i>', self.string)", "title": "" }, { "docid": "8889a1db5e2d3238ffc9a5af23dd8d6a", "score": "0.45217973", "text": "def next(self):\n line = self._lookahead\n if not line:\n return None\n assert line[0]==\">\", line\n header = line.rstrip().strip('>')\n line = self.handle.readline()\n lines=[]\n while line:\n if line[0] == \">\": break\n else :\n lines.append(line.rstrip())\n line = self.handle.readline()\n self._lookahead = line\n return (header,''.join(lines))", "title": "" }, { "docid": "c556467be2fd6360cd97bd6f585d7e63", "score": "0.4505479", "text": "def render_inline(self, inline):\n attrs = None\n if inline.t == 'Str':\n return self.escape(inline.c)\n elif inline.t == 'Softbreak':\n return self.softbreak\n elif inline.t == 'Hardbreak':\n return self.in_tags('br', [], \"\", True) + '\\n'\n elif inline.t == 'Emph':\n return self.in_tags('em', [], self.render_inlines(inline.c))\n elif inline.t == 'Strong':\n return self.in_tags('strong', [], self.render_inlines(inline.c))\n elif inline.t == 'Html':\n return inline.c\n elif inline.t == 'Entity':\n return inline.c\n elif inline.t == 'Link':\n attrs = [['href', self.url_escape(inline.destination, True)]]\n if inline.title:\n attrs.append(['title', self.escape(inline.title, True)])\n return self.in_tags('a', attrs, self.render_inlines(inline.label))\n elif inline.t == 'Image':\n attrs = [\n ['src', self.escape(inline.destination, True)],\n ['alt', self.escape(self.render_inlines(inline.label))],\n ]\n if 
inline.title:\n attrs.append(['title', self.escape(inline.title, True)])\n return self.in_tags('img', attrs, \"\", True)\n elif inline.t == 'Code':\n return self.in_tags('code', [], self.escape(inline.c))\n else:\n logger.warning('Unknown inline type: {}'.format(inline.t))\n return ''", "title": "" }, { "docid": "63ff9c38dd10966f009c9a236e6d1d29", "score": "0.4489199", "text": "def convertEnumerate(self):\n level_enumerate = 0\n level_item = 0\n new_lines = []\n\n for line in self.lines:\n if r\"\\begin{enumerate}\" in line:\n level_enumerate = level_enumerate + 1\n if level_enumerate == 2:\n line = r\"\"\"<ol type =\"a\" >\"\"\"\n else:\n line = r\"\"\"<ol >\"\"\"\n elif r\"\\end{enumerate}\" in line:\n level_enumerate = level_enumerate - 1\n line = r\"\"\"</li></ol>\"\"\"\n elif r\"\\item\" in line:\n if level_item == 0:\n line = line.replace(r\"\\item\", \"<li>\")\n level_item = level_item + 1\n else:\n line = line.replace(r\"\\item\", \"</li><li>\")\n level_item = level_item - 1\n new_lines.append(line)\n self.lines = new_lines", "title": "" }, { "docid": "08a444da40945d3742fcdebd140d86a6", "score": "0.44862032", "text": "def find_next_sibling_line(element: Tag, tag_type: str) -> int:\n nxt_sib = element.find_next_sibling(tag_type)\n return float(\"inf\") if nxt_sib is None else nxt_sib.sourceline", "title": "" }, { "docid": "caad01680f30e2f823a8295290646b7a", "score": "0.44615152", "text": "def render_inlines(self, inlines):\n result = []\n for inline in inlines:\n result.append(self.render_inline(inline))\n return ''.join(result)", "title": "" }, { "docid": "dd2df4aa4b5bce2492076735907d4190", "score": "0.4457181", "text": "def resolve_linebreaks(self, page):\r\n nextLine = None\r\n for lineId, line in enumerate(page['lines']):\r\n \r\n # ====================================\r\n # Check if this line is not empty or already done\r\n if len(line['words']) == 1 and line['words'][0]['data_type'] == \"empty\":\r\n continue # This line is empty!\r\n \r\n # ====================================\r\n # Check if there is a next line\r\n try:\r\n nextLine = page['lines'][lineId+1]\r\n except:\r\n continue # This line is the last line on the page.\r\n\r\n # ====================================\r\n # Now that we are sure that there is a nextLine,\r\n # check if the nextLine is not empty and get the \r\n # first word of the nextLine:\r\n if len(nextLine['words']) == 0:\r\n continue # nextLine is empty\r\n elif nextLine['words'][0]['data_type'] == \"empty\":\r\n continue # first word of nextLine is empty\r\n else:\r\n nextWord = nextLine['words'][0]\r\n\r\n # ====================================\r\n # Now that we are sure that there is a nextword\r\n # inspect the last word of this line:\r\n\r\n thisWord = line['words'][-1]\r\n\r\n # -----------------------------------\r\n # Last word is PUNCTUATION:\r\n if thisWord['data_type'] == \"punctuation\":\r\n # take the penultimate word\r\n # If it's a hyphenated word: join it with the next word!\r\n if line['words'][-1]['data'] == \"-\" or line['words'][-1]['data'] == \"=\":\r\n # cut off the \"-\" or \"=\", i.e. 
the last word of this line\r\n line['words'].pop()\r\n # join the two words\r\n line = self.join_words(True, \r\n line, -1,\r\n nextLine,\r\n \"hyphenated\")\r\n continue\r\n # If it's a punctuation but not \"-/=\" --> ends of a part of speech\r\n else: \r\n # do NOT join the two words\r\n line = self.join_words(False,\r\n line, -2,\r\n nextLine,\r\n \"ends a part of speech\")\r\n continue\r\n # -----------------------------------\r\n # Last word is a NORMAL WORD!\r\n elif thisWord['data_type'] == \"word\":\r\n # If the first letter of the nextword is uppercase:\r\n if nextWord['data'][0].isupper(): # nextword is a proper name\r\n line = self.join_words(False,\r\n line, -1,\r\n nextLine,\r\n \"capitalized\")\r\n continue\r\n else:\r\n line = self.join_words(True,\r\n line, -1,\r\n nextLine,\r\n \"try joining\")\r\n\r\n else: # It's something else, e.g. \"unreadable\".\r\n print(f\"CLEANER: INFO: Leaving outLeaving out {line['identifier']}: unknown data_type = {thisWord['data_type']}.\")\r\n continue \r\n\r\n return page", "title": "" }, { "docid": "ae68d3bfc8379fac392259d99412d901", "score": "0.4448285", "text": "def test_italics_after_linebreak(self):\n expected = (\"this is the first line\\r\\nmulti-line\\r\\n\"\n \"italicized second and third\")\n els = self.dfxp.get_subtitles()\n self.assertEqual(expected, \n self.dfxp.get_content_with_markup(els[3], \n mappings=SRTGenerator.MAPPINGS))", "title": "" }, { "docid": "467aa5d5b886f4f20ed0c2178e3ab430", "score": "0.44460878", "text": "def get_next_result(lines, start):\n\n result = {}\n l = lines[start + 2]\n l = l[l.find(\"<a\"):]\n result['url'] = l[l.find('https'): l.find('\">')]\n\n start += 4\n\n while lines[start].strip() != TITLE_TAG:\n start += 1\n\n title = lines[start + 1].strip()\n title = title.replace('<span class=\"search-hit mathjax\">', '')\n title = title.replace('</span>', '')\n result['title'] = title\n\n authors, start = get_authors(lines, start + 5) # orig: add 8\n\n while not lines[start].strip().startswith(ABSTRACT_TAG):\n start += 1\n abstract = lines[start + 1]\n abstract = abstract.replace('<span class=\"search-hit mathjax\">', '')\n abstract = abstract.replace('</span>', '')\n result['abstract'] = abstract\n\n result['authors'] = authors\n\n while not lines[start].strip().startswith(DATE_TAG):\n start += 1\n\n idx = lines[start].find('</span> ')\n end = lines[start][idx:].find(';')\n\n result['date'] = lines[start][idx + 8: idx + end]\n\n return result, start", "title": "" }, { "docid": "6ecb1ec8f7b7c49d087861421319bc54", "score": "0.4439747", "text": "def parselines(self):\n inp = []\n\n for inp, tasks in get_sents(self.fpath, self.parse_morph_):\n while len(inp) > self.max_sent_len:\n inp_ = inp[:self.max_sent_len]\n tasks_ = {}\n for task in tasks:\n tasks_[task] = tasks[task][:self.max_sent_len]\n yield inp_, tasks_\n inp = inp[self.max_sent_len:]\n for task in tasks:\n tasks[task] = tasks[task][self.max_sent_len:]\n yield inp, tasks", "title": "" }, { "docid": "1a3f2af8105fd8838b99afefedabbc3e", "score": "0.44317877", "text": "def test_italics_before_linebreak(self):\n expected = (\"italicized\\r\\nno italics last line\")\n els = self.dfxp.get_subtitles()\n self.assertEqual(expected, \n self.dfxp.get_content_with_markup(els[4], \n mappings=SRTGenerator.MAPPINGS))", "title": "" }, { "docid": "25337900fcc9ce5c7d2a365d103f7384", "score": "0.4422695", "text": "def parse_image(self, inlines):\n if self.match(re.compile(r'^!')):\n n = self.parse_link(inlines)\n if n == 0:\n inlines.append(Inline(t='Str', c='!'))\n 
return 1\n elif inlines and inlines[-1] and inlines[-1].t == 'Link':\n inlines[-1].t = 'Image'\n return n + 1\n else:\n raise ParseError(\"Shouldn't happen: parsing Image.\")\n\n else:\n return 0", "title": "" }, { "docid": "6187067f57fabbbc234aa0befde1560b", "score": "0.43990967", "text": "def __readLexerAssociations(self):\n while not self.atEnd():\n self.readNext()\n if self.isEndElement() and self.name() == \"LexerAssociations\":\n break\n \n if self.isStartElement():\n if self.name() == \"LexerAssociation\":\n pattern = self.attribute(\"pattern\", \"\")\n lexer = self.attribute(\"lexer\")\n if pattern:\n self.project.pdata[\"LEXERASSOCS\"][pattern] = lexer\n else:\n self.raiseUnexpectedStartTag(self.name())", "title": "" }, { "docid": "fce30ae0d6f519d8410ba5c7a3df54ef", "score": "0.4385171", "text": "def next_token(self, token):\n if isinstance(token, AtxHeadingMarkdownToken):\n self.__in_atx_heading = token.remove_trailing_count\n self.__is_left_in_error = False\n elif isinstance(token, EndMarkdownToken):\n if token.type_name == MarkdownToken.token_paragraph:\n self.__in_atx_heading = False\n elif token.type_name == MarkdownToken.token_atx_heading:\n if self.__is_left_in_error or len(token.extra_end_data) > 1:\n self.report_next_token_error(token)\n elif isinstance(token, TextMarkdownToken):\n if self.__in_atx_heading and len(token.extracted_whitespace) > 1:\n self.__is_left_in_error = True", "title": "" }, { "docid": "e5b32d683357bc2e2a278df18145f3c6", "score": "0.4379602", "text": "def next(self):\n self.line = self.current_file.readline()\n # print repr(self.line)\n if not self.line:\n self.__next_file_()\n return self.next()\n self.fnr += 1\n self.nr += 1\n self.line = self.line.decode(encoding=self.encoding,\n errors=self.errors).rstrip()\n # If the line read should be skipped, print this line and read the next\n # one.\n if self.skip(self.line) or self.skip_xml(self.line):\n self.print_func(self.line)\n return self.next()\n else:\n return self.line", "title": "" }, { "docid": "49f28e9b29b49abcdc2eba926b9bed52", "score": "0.43758473", "text": "def inline_blocks(self):\n yield from self._inline_blocks", "title": "" }, { "docid": "4b413bd2e2c8854232a43c38fea4244d", "score": "0.4372104", "text": "def next(self, part):\n if part == self._ending:\n return None\n else:\n return next(part)", "title": "" }, { "docid": "707b56c6ca7e09eb9beab4ea78fc1ead", "score": "0.43705195", "text": "def next(self):\n if self.prevline == None:\n line = self.fp_iter.next()\n if line.startswith('\\t'):\n # Bad! 
The output shouldn't start with a \n # partial line\n raise ValueError(\"PBS output contained bad data.\")\n self.prevline = line\n return self.next()\n if self.done:\n raise StopIteration()\n try:\n line = self.fp_iter.next()\n if line.startswith('\\t'):\n self.prevline = self.prevline[:-1] + line[1:-1]\n return self.next()\n else:\n old_line = self.prevline\n self.prevline = line\n return old_line\n except StopIteration:\n self.done = True\n return self.prevline", "title": "" }, { "docid": "2da5781a4ea27cc17099dbc61fd8e894", "score": "0.4369372", "text": "def parse_captions(captions):\n lines = captions.decode('utf-8').split('\\n')\n\n while len(lines) > 0:\n txt = []\n try:\n num = int(lines.pop(0))\n except ValueError:\n continue\n\n st, et = lines.pop(0).replace(',', '.').strip().split(' --> ', 1)\n\n while True:\n ll = lines.pop(0).strip()\n if ll:\n txt.append(ll)\n else:\n break\n yield {\n 'id': num,\n 'start': st,\n 'end': et,\n 'text': txt\n }", "title": "" }, { "docid": "7b7d33190f514b619e207587d5a1503d", "score": "0.43582523", "text": "def _through_sect(prev_lab, next_lab):\n return [tokens.Paragraph.make(prev_lab[:2] + [str(i)])\n for i in range(int(prev_lab[-1]) + 1, int(next_lab[-1]))]", "title": "" }, { "docid": "a0962e8fcf0a02215a4cdf07e52e4c31", "score": "0.434532", "text": "def next(self):\n\n try:\n if self.current_token is not None:\n self.previous_tokens.append(self.current_token)\n self.current_token = next(self.tokenizer)\n except StopIteration:\n self.current_token = MathToken(EOF, None)", "title": "" }, { "docid": "9460d90cbf109def258c056c8d6b1c77", "score": "0.4342013", "text": "def tokenize(self, state: StateInline) -> None:\n ok = False\n rules = self.ruler.getRules(\"\")\n end = state.posMax\n maxNesting = state.md.options[\"maxNesting\"]\n\n while state.pos < end:\n # Try all possible rules.\n # On success, rule should:\n #\n # - update `state.pos`\n # - update `state.tokens`\n # - return true\n\n if state.level < maxNesting:\n for rule in rules:\n ok = rule(state, False)\n if ok:\n break\n\n if ok:\n if state.pos >= end:\n break\n continue\n\n state.pending += state.src[state.pos]\n state.pos += 1\n\n if state.pending:\n state.pushPending()", "title": "" }, { "docid": "16d9ee86b22a168d034bd3478ef6353c", "score": "0.4337421", "text": "def add_line_of_content(self):\n self.element_path_pos[len(self.element_path)] += 1", "title": "" }, { "docid": "87bf28c18e64c8f1dd3524a07159fa2f", "score": "0.43195367", "text": "def parse_from_kai_penn(text: str, current_row: int = -1):\n re_str: \"_sre.SRE_Match\" = re.compile(\n r\"^([_\\d\\w\\-・++=?]*?)(?:-([0-9]+))?(?:;({[^\\s{}]+}|\\*.*\\*|\\*))?$\"\n )\n\n current_items: \"_sre.SRE_Match\" = re_str.match(text)\n\n # Label\n current_label = Object_with_Row_Column(\n content = current_items.group(1) or \"\",\n row = current_row,\n column = current_items.span(1)[0]\n )\n\n # ICHed\n current_ICHed = Object_with_Row_Column(\n content = int(current_items.group(2) or 0),\n row = current_row,\n column = current_items.span(2)[0]\n )\n\n # sort info\n current_sort_info = Object_with_Row_Column(\n content = current_items.group(3) or \"\",\n row = current_row,\n column = current_items.span(3)[0]\n )\n\n # Constitute a label compex\n return Label_Complex_with_Pos(\n label = current_label, \n ICHed = current_ICHed,\n sort_info = current_sort_info\n )\n # ===END===", "title": "" }, { "docid": "c726a36da08d27a917486c365671cda8", "score": "0.4317466", "text": "def _next(self, message=None):\n if self.current:\n self.current.next()", 
"title": "" }, { "docid": "ea28bb2f052e0fb737943c0bab2b898a", "score": "0.43098027", "text": "def inline_elements_plain(self, inline_elements_plain):\n\n self._inline_elements_plain = inline_elements_plain", "title": "" }, { "docid": "9afd4261ed42d7e8eacad91b49fc5810", "score": "0.43071684", "text": "def parse_autolink(self, inlines):\n dest = None\n m = self.match(re.compile(r'^<([a-zA-Z0-9.!#$%&\\'*+\\\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)>'))\n if m:\n dest = m[1:-1]\n inlines.append(\n Inline(\n t='Link',\n label=[Inline(t='Str', c=dest)],\n destination='mailto:' + dest,\n title='',\n )\n )\n return len(m)\n\n else:\n keys = [\n 'coap', 'doi', 'javascript', 'aaa', 'aaas', 'about', 'acap',\n 'cap', 'cid', 'crid', 'data', 'dav', 'dict', 'dns', 'file',\n 'ftp', 'geo', 'go', 'gopher', 'h323', 'http', 'https', 'iax',\n 'icap', 'im', 'imap', 'info', 'ipp', 'iris', 'iris.beep',\n 'iris.xpc', 'iris.xpcs', 'iris.lwz', 'ldap', 'mailto', 'mid',\n 'msrp', 'msrps', 'mtqp', 'mupdate', 'news', 'nfs', 'ni',\n 'nih', 'nntp', 'opaquelocktoken', 'pop', 'pres', 'rtsp',\n 'service', 'session', 'shttp', 'sieve', 'sip', 'sips', 'sms',\n 'snmp', 'soap.beep', 'soap.beeps', 'tag', 'tel', 'telnet',\n 'tftp', 'thismessage', 'tn3270', 'tip', 'tv', 'urn', 'vemmi',\n 'ws', 'wss', 'xcon', 'xcon-userid', 'xmlrpc.beep',\n 'xmlrpc.beeps', 'xmpp', 'z39.50r', 'z39.50s', 'adiumxtra',\n 'afp', 'afs', 'aim', 'apt', 'attachment', 'aw', 'beshare',\n 'bitcoin', 'bolo', 'callto', 'chrome', 'chrome-extension',\n 'com-eventbrite-attendee', 'content', 'cvs', 'dlna-playsingle',\n 'dlna-playcontainer', 'dtn', 'dvb', 'ed2k', 'facetime',\n 'feed', 'finger', 'fish', 'gg', 'git', 'gizmoproject', 'gtalk',\n 'hcp', 'icon', 'ipn', 'irc', 'irc6', 'ircs', 'itms', 'jar',\n 'jms', 'keyparc', 'lastfm', 'ldaps', 'magnet', 'maps',\n 'market', 'message', 'mms', 'ms-help', 'msnim', 'mumble',\n 'mvn', 'notes', 'oid', 'palm', 'paparazzi', 'platform',\n 'proxy', 'psyc', 'query', 'res', 'resource', 'rmi', 'rsync',\n 'rtmp', 'secondlife', 'sftp', 'sgn', 'skype', 'smb', 'soldat',\n 'spotify', 'ssh', 'steam', 'svn', 'teamspeak', 'things',\n 'udp', 'unreal', 'ut2004', 'ventrilo', 'view-source',\n 'webcal', 'wtai', 'wyciwyg', 'xfire', 'xri', 'ymsgr',\n ]\n m = self.match(re.compile(r'^<(?:{0}):[^<>\\x00-\\x20]*>'.format('|'.join(keys)), re.I))\n if m:\n dest = m[1:-1]\n inlines.append(Inline(t='Link', label=[Inline(t='Str', c=dest)], destination=dest, title=''))\n return len(m)\n\n else:\n return 0", "title": "" }, { "docid": "94c3b04324d52b31e044ca0c9d07d910", "score": "0.42941877", "text": "def _advance(self):\n dt_tag = self._next_tag()\n self.tag = dt_tag\n\n dd_tag = self._next_tag()\n self.next_tag = dd_tag", "title": "" }, { "docid": "4d29e069416f044db2af3d46b48f3680", "score": "0.42793748", "text": "def parse(self, line, i):\n #i is zero-based. However, once we reach sum(self.line_counts), we need to repeat\n #the line templates again. 
igroup is the index within the group (instead of global).\n igroup = i % sum(self.line_counts)\n iline = [0 if c < i else 1 for c in self._line_cum].index(1)\n return self.lines[iline].parse(line)", "title": "" }, { "docid": "10f003d7a604474716bae35a31694afd", "score": "0.42754662", "text": "def advance(self):\n self.new_line = self.asm_file.readline()\n while self.new_line in ('\\n', '\\r\\n'):\n self.new_line = self.asm_file.readline()\n line_split = self.new_line.split()\n if len(line_split) > 0:\n self.new_line = line_split[0]", "title": "" }, { "docid": "b21480bad581f6bcf6f34d34a514c71c", "score": "0.42686874", "text": "def _effective_inline_results( # pylint: disable=R0201\n self,\n results: Union[\n Sequence['InlineQueryResult'], Callable[[int], Optional[Sequence['InlineQueryResult']]]\n ],\n next_offset: str = None,\n current_offset: str = None,\n ) -> Tuple[Sequence['InlineQueryResult'], Optional[str]]:\n if current_offset is not None and next_offset is not None:\n raise ValueError('`current_offset` and `next_offset` are mutually exclusive!')\n\n if current_offset is not None:\n # Convert the string input to integer\n if current_offset == '':\n current_offset_int = 0\n else:\n current_offset_int = int(current_offset)\n\n # for now set to empty string, stating that there are no more results\n # might change later\n next_offset = ''\n\n if callable(results):\n callable_output = results(current_offset_int)\n if not callable_output:\n effective_results: Sequence['InlineQueryResult'] = []\n else:\n effective_results = callable_output\n # the callback *might* return more results on the next call, so we increment\n # the page count\n next_offset = str(current_offset_int + 1)\n else:\n if len(results) > (current_offset_int + 1) * MAX_INLINE_QUERY_RESULTS:\n # we expect more results for the next page\n next_offset_int = current_offset_int + 1\n next_offset = str(next_offset_int)\n effective_results = results[\n current_offset_int\n * MAX_INLINE_QUERY_RESULTS : next_offset_int\n * MAX_INLINE_QUERY_RESULTS\n ]\n else:\n effective_results = results[current_offset_int * MAX_INLINE_QUERY_RESULTS :]\n else:\n effective_results = results # type: ignore[assignment]\n\n return effective_results, next_offset", "title": "" }, { "docid": "69c76a7b3886f8e97c0f004f4724060b", "score": "0.42640847", "text": "def render_inlines(value):\n return inlines(value)", "title": "" }, { "docid": "dd269ac0f04374c52d6f9e1b3a859a7f", "score": "0.42546523", "text": "def body_line_iterator(msg, decode=False):\n for subpart in msg.walk():\n payload = subpart.get_payload(decode=decode)\n if isinstance(payload, str):\n yield from StringIO(payload)", "title": "" }, { "docid": "457124b1b4d972a5db3eaee8564b5e24", "score": "0.42512563", "text": "def parse(self, roman):\n\n state = 0 # start state\n result = 0 # sum\n index = 0 # start of unparsed slice\n\n # Following transitions for each character in roman numeral,\n # while applying the mapped increment to sum.\n for c in roman:\n if not c in self.adjacencies[state]:\n # There was not transition for the given character.\n break\n state, k = self.adjacencies[state][c]\n result += k\n index += 1\n \n return result, roman[index:]", "title": "" }, { "docid": "6924a1a949a5c00b31a73e6d0e083848", "score": "0.4244046", "text": "def _through_paragraph(prev_lab, next_lab):\n depth = len(prev_lab)\n start = p_levels[depth - 4].index(prev_lab[-1]) + 1\n end = p_levels[depth - 4].index(next_lab[-1])\n return [tokens.Paragraph.make(prev_lab[:depth - 1] +\n [p_levels[depth - 4][i]])\n for 
i in range(start, end)]", "title": "" }, { "docid": "a12178bcb5c9273883f3954448a51545", "score": "0.42295712", "text": "def decode_preceding_insn(self, *args):\n return _idaapi.func_item_iterator_t_decode_preceding_insn(self, *args)", "title": "" }, { "docid": "76f341b1ed90e685c2bcf0937bb18999", "score": "0.42220813", "text": "def parse(self, text):\n self.doc = Block.makeBlock('Document', 1, 1)\n self.tip = self.doc\n self.refmap = dict()\n lines = re.split(r'\\r\\n|\\n|\\r', re.sub(r'\\n$', '', text))\n for i, line in enumerate(lines):\n self.incorporate_line(line, i + 1)\n while self.tip:\n self.finalize(self.tip, len(lines) - 1)\n# print 'PREINLINE'\n# pprint(self.doc.dump())\n self.process_inlines(self.doc)\n return self.doc", "title": "" }, { "docid": "5df76e297102815fee0abc4429745735", "score": "0.42202538", "text": "def next( self ):\n\t\tname = self.ifs.readline().strip()\n\t\tsequence = self.ifs.readline().strip()\n\t\tspacer = self.ifs.readline().strip()\n\t\tquality = self.ifs.readline().strip()\n\t\tif not ( name and sequence and spacer and quality ):\n\t\t\traise StopIteration\n\t\treturn Sequence(name,sequence,quality)", "title": "" }, { "docid": "8100de5e427c86999211f09d76af6b8d", "score": "0.4217732", "text": "def parse(self, text, intents, top_n):\n pass", "title": "" }, { "docid": "9b09dde4250036f9dcb39fb3a8ed6379", "score": "0.42170507", "text": "def parse_newline(self, inlines):\n if self.peek() == '\\n':\n self.pos += 1\n last = inlines[-1] if inlines else None\n if last and last.t == 'Str' and last.c[-2:] == ' ':\n last.c = re.sub(r' *$', '', last.c)\n inlines.append(Inline(t='Hardbreak'))\n else:\n if last and last.t == 'Str' and last.c[-1] == ' ':\n last.c = last.c[:-1]\n inlines.append(Inline(t='Softbreak'))\n\n return 1\n else:\n return 0", "title": "" }, { "docid": "ba949ce88427eab16178355964b275d7", "score": "0.42017084", "text": "def GetNextLn(self, *args):\n return _snap.TSIn_GetNextLn(self, *args)", "title": "" }, { "docid": "8cf202d92c24a598ef8e37ec81bfae44", "score": "0.41926092", "text": "def next(self):\n if self.current_token == len(self.tokens):\n return None\n token = self.tokens[self.current_token]\n if token[\"type\"] == \"newline\":\n self.line += 1\n self.line_start = token[\"start\"]\n self.current_token += 1\n if token[\"type\"] == \"unknown\":\n self.error(\"Unknown token\")\n return token", "title": "" }, { "docid": "2cad8f378694a8600b1a6495b5b1a336", "score": "0.41911566", "text": "def next_page_parsed(self):\n self._pages_parsed += 1", "title": "" }, { "docid": "6d9ec0d55d72682c0ce4cfbb8745132d", "score": "0.41889328", "text": "def convert(self):\n for index, line in enumerate(self.all_lines):\n if index == 0:\n # this is the first line of the file, eg PEP: XXX header\n # just print it out as is\n self.outputs.append({\"out\": line})\n else:\n prev_line_obj = LineObj(self.all_lines[index-1].rstrip())\n current_line_obj = LineObj(line.rstrip())\n\n if not current_line_obj.is_blank:\n found_blank_line = False\n\n prev_line = self.outputs[-1]\n prev_line_obj = LineObj(prev_line['out'])\n if prev_line.get('line_obj'):\n prev_line_obj = prev_line['line_obj']\n\n if (prev_line_obj.is_list_item\n or prev_line_obj.list_item_overflow) \\\n and not current_line_obj.is_list_item:\n current_line_obj.list_item_overflow = True\n current_line_obj.list_item_prefix = \\\n prev_line_obj.list_item_prefix\n\n # look up the preceeding lines\n # if the paragraph above is indented less than current\n # and ends with colon,\n # then the current line is likely a 
code block\n for seen in reversed(self.outputs):\n seen_obj = LineObj(seen['out'])\n if seen.get('original'):\n seen_obj = LineObj(seen['original'])\n\n if not found_blank_line and seen_obj.is_blank:\n found_blank_line = True\n\n if found_blank_line and not seen_obj.is_blank \\\n and seen_obj.indentation_level \\\n < current_line_obj.indentation_level:\n if seen_obj.ends_with_colon:\n current_line_obj.is_code_block = True\n break\n\n else:\n current_line_obj.list_item_overflow = False\n\n if index < (len(self.all_lines) - 1):\n next_line_obj = LineObj(self.all_lines[index+1].rstrip())\n else:\n next_line_obj = None\n\n self.handle_content_type_header(\n current_line_obj, prev_line_obj)\n\n if is_section_heading(\n current_line_obj,\n prev_line_obj,\n next_line_obj):\n stripped = current_line_obj.line.rstrip(':')\n\n # ensure there are two blank lines before section heading\n if len(self.outputs[-2]['out'].strip()) > 0:\n self.outputs.append({\"out\": os.linesep})\n\n self.outputs.append({\"out\": stripped,\n \"original\": current_line_obj.original_line,\n \"line_obj\": current_line_obj\n })\n self.outputs.append({\"out\": os.linesep})\n self.outputs.append(\n {\"out\": current_line_obj.section_header_underline,\n \"line_obj\": current_line_obj})\n self.outputs.append({\"out\": os.linesep})\n\n if stripped.lower() == \"References\".lower():\n self.is_references_section = True\n self.has_references_section = True\n elif self.is_references_section:\n # we were in references section, and now moved on\n self.is_references_section = False\n self.last_ref_id = \"\"\n\n elif current_line_obj.is_local_vars:\n self.has_local_vars_section = True\n return\n\n else:\n if self.is_references_section:\n self.process_reference_line(current_line_obj)\n else:\n self.handle_paragraph(\n current_line_obj\n )", "title": "" }, { "docid": "da265c2fc6d0171dc897e74ad5b5cb54", "score": "0.41873655", "text": "def nextline(self, binrule, line):\n\tnewline = []\n\tboink = lambda x: newline.append(self.whatdo[(line[x-1], line[x], line[x+1])])\n\n\tif self.state:\n\t newline.append(self.whatdo[(line[-1], line[0], line[1])])\n\t for x in range(1, len(line)-1):\n\t\tboink(x)\n\t newline.append(self.whatdo[(line[-2], line[-1], line[0])])\n\telse:\n\t newline.append(line[0])\n\t for x in range(1, len(line)-1):\n\t\tboink(x)\n\t newline.append(line[-1])\n\n\treturn newline", "title": "" }, { "docid": "e7297adbcd6ca94531aa2c06a67cf469", "score": "0.4185132", "text": "def transcode_line(x,tranin,tranout):\n if re.search(r'^\\[Page.*?\\]$',x):\n return x\n parts = re.split(r'(<[^>]*>)',x)\n newparts = []\n for part in parts:\n if part.startswith('<'):\n newparts.append(part)\n else:\n newpart = transcoder.transcoder_processString(part,tranin,tranout)\n newparts.append(newpart)\n y = ''.join(newparts)\n return y", "title": "" }, { "docid": "e7297adbcd6ca94531aa2c06a67cf469", "score": "0.4185132", "text": "def transcode_line(x,tranin,tranout):\n if re.search(r'^\\[Page.*?\\]$',x):\n return x\n parts = re.split(r'(<[^>]*>)',x)\n newparts = []\n for part in parts:\n if part.startswith('<'):\n newparts.append(part)\n else:\n newpart = transcoder.transcoder_processString(part,tranin,tranout)\n newparts.append(newpart)\n y = ''.join(newparts)\n return y", "title": "" }, { "docid": "3da6b0c49f6def70bfbedf6ecad7cce1", "score": "0.41836447", "text": "def IriParsing(self) -> IriParsingElement:", "title": "" }, { "docid": "a3248ef274c2a2138dd8f1b5f23de629", "score": "0.41836414", "text": "def advance_line(self):\n self.current_line += 1", 
"title": "" }, { "docid": "ab4e80535f70ade00a09f87d6979ab79", "score": "0.41791797", "text": "def next(self):\n if self.subject and self.index < len(self.subject):\n n = self.subject.problem_set.filter(index=self.index + 1)\n if n:\n return n[0]", "title": "" }, { "docid": "93575415b9f199b4f85568a0f2dcb5a3", "score": "0.41672134", "text": "def next(self, item):\n raise NotImplementedError", "title": "" }, { "docid": "aa394f00cef802b64208f392dcf13912", "score": "0.4165029", "text": "def parse_intein_details(self, response): # pylint: disable=no-self-use\n keys = [\n 'Intein Name',\n 'Prototype Allele',\n 'Extein Name',\n 'Intein Class',\n 'Organism Name',\n 'Organism Description',\n 'Domain of Life',\n 'Endonuclease Activity',\n 'Endo Motif',\n 'Location in Extein',\n 'Insert Site Comments',\n 'Intein Size (aa)',\n 'Intein N-terminal',\n 'Intein C-terminal',\n 'Accession No.',\n 'Intein aa Sequence',\n 'Block A',\n 'Block B',\n 'Block C',\n 'Block D',\n 'Block E',\n 'Block F',\n 'Block G',\n 'Initially Contributed by',\n \"Contributor's Address\",\n \"Contributor's Phone No.\",\n \"Contributor's FAX No.\",\n \"Contributor's Email address\",\n 'Independently Found By',\n 'Comments',\n 'Date Submitted',\n 'References',\n ]\n # If we directly use extract() on the XPath .../font/text(), scrapy\n # flattens the 'Intrein aa sequence'.\n values = [''.join(selector.xpath('text()').extract()).rstrip()\n for selector in\n response.xpath('//table[@cellspacing=\"6\"]/tr/td/font')]\n yield dict(zip(keys, values))", "title": "" }, { "docid": "461d9959b658620876e2ee2ca0c551de", "score": "0.41595575", "text": "def convertItemize(self):\n level_itemize = 0\n level_item = 0\n new_lines = []\n\n for line in self.lines:\n if r\"\\begin{itemize}\" in line:\n level_itemize = level_itemize + 1\n if level_itemize == 2:\n line = r\"\"\"<ul >\"\"\"\n else:\n line = r\"\"\"<ul >\"\"\"\n elif r\"\\end{itemize}\" in line:\n level_itemize = level_itemize - 1\n line = r\"\"\"</li></ul>\"\"\"\n elif r\"\\item\" in line:\n if level_item == 0:\n line = line.replace(r\"\\item\", \"<li>\")\n level_item = level_item + 1\n else:\n line = line.replace(r\"\\item\", \"</li><li>\")\n level_item = level_item - 1\n new_lines.append(line)\n self.lines = new_lines", "title": "" }, { "docid": "03946e5edc18d8f7aef1bf9ccd3a7bdf", "score": "0.41558436", "text": "def _parse(self):\n pattern = ur'^[IV]+\\.'\n current_section = None\n # Iterate over the elements in self.tree.iter()\n # We only want paragraphs and table, since these appear to be the main top level elements\n # ie. 
we don't want to go fully down the tree\n for elem in self.tree.iter('table', 'p'):\n # When we encounter a header element, figure out what section it is a header for\n text = elem.text_content()\n if text and re.search(pattern, text):\n section_name = self._identify_section(text)\n if section_name is not None:\n logger.info(u'Identified header {} as {}'.format(text, section_name))\n current_section = section_name\n else:\n logger.warn(u\"Could not identify section from header {}\".format(text))\n current_section = \"other\"\n # If this is the first time in this section, initailize the array for storing stuff\n if getattr(self, current_section) is None:\n setattr(self, current_section, [])\n else:\n # Add all the elements we encounter to the list for the current section until\n # we encounter another header element, or the end of the document\n if current_section is not None:\n arr = getattr(self, current_section)\n arr.append(elem)\n\n # Once all of the sections are split up, parse each of them separately\n for section in CouncilAgenda.SECTION_MAP.keys():\n if getattr(self, section) is not None:\n getattr(self, \"_parse_{}\".format(section))()\n # We won't parse others, since we don't know what those are", "title": "" }, { "docid": "4242101f4d8c9812d9ef6f1eeafcf7fd", "score": "0.41511327", "text": "def __next_parse(self) -> bool:\n # Check for EOF\n if self.__end_of_file():\n return False\n # Check for invisible symbols at start\n if self.__cut_non_visible_start():\n return True\n # Try to parse next data as *valid* text\n _tag_data = re.match(r'[^<]+', self.__left_data)\n if not _tag_data: # Data not found\n self.__tag_open_or_close()\n else:\n _text: str = _tag_data.group()\n _cut_idx: int = _tag_data.end()\n self.__elem_validate_and_add(_text, _cut_idx)\n return True", "title": "" }, { "docid": "3ebc50382546fb47f227620fd33b2cae", "score": "0.41447917", "text": "def parse(\n self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token]\n ) -> list[Token]:\n state = StateInline(src, md, env, tokens)\n self.tokenize(state)\n rules2 = self.ruler2.getRules(\"\")\n for rule in rules2:\n rule(state)\n return state.tokens", "title": "" }, { "docid": "fd3806afe87f2954d84b0b11a9a1dc9a", "score": "0.4132763", "text": "def next(self):\n if len(self.containers) > 1:\n # Return a new instance of this class using the tails of\n # the separators and containers lists. 
Use self.__class__()\n # in case :class:`hl7.ParsePlan` is subclassed\n return self.__class__(\n self.separators[self.separators.find(self.separator) + 1],\n self.separators,\n self.containers[1:],\n self.esc,\n self.factory,\n )\n # When we have no separators and containers left, return None,\n # which indicates that we have nothing further.\n return None", "title": "" }, { "docid": "4096bbff02bb9549088a1c824309fd39", "score": "0.41291505", "text": "def _Next(self, lex_mode=lex_mode_e.DBracket):\n while True:\n self._NextOne(lex_mode=lex_mode)\n if self.op_id != Id.Op_Newline:\n break", "title": "" }, { "docid": "0a7a4ae0ace2a59866d1d7f4309f070a", "score": "0.41257268", "text": "def next(self):\n\n if self.dot < len(self):\n return self.rule[self.dot]", "title": "" }, { "docid": "ba4daa494923ba0e3f1d3d72fb171ef5", "score": "0.41250032", "text": "def append(self, nextItem):\n self.next = nextItem", "title": "" }, { "docid": "9aeb1b091c010655c3804b2391f1d34e", "score": "0.41158158", "text": "def func_item_iterator_decode_preceding_insn(*args):\n return _idaapi.func_item_iterator_decode_preceding_insn(*args)", "title": "" }, { "docid": "15efa9d4bcbee6589b6ba0bffdcc7a42", "score": "0.4112218", "text": "def next(self):\n if len(self.containers) > 1:\n ## Return a new instance of this class using the tails of\n ## the separators and containers lists. Use self.__class__()\n ## in case :class:`hl7.ParsePlan` is subclassed\n return self.__class__(self.separators[1:], self.containers[1:])\n ## When we have no separators and containers left, return None,\n ## which indicates that we have nothing further.\n return None", "title": "" }, { "docid": "2c08be8e1926dd2f78132016f730fe8b", "score": "0.4109597", "text": "def parse_link_label(self):\n if self.peek() != '[':\n return 0\n\n startpos = self.pos\n nest_level = 0\n if self.label_nest_level > 0:\n # If we've already checked to the end of this subject\n # for a label, even with a different starting [, we\n # know we won't find one here and we can just return.\n # This avoids lots of backtracking.\n # Note: nest level 1 would be: [foo [bar]\n # nest level 2 would be: [foo [bar [baz]\n self.label_nest_level -= 1\n return 0\n\n self.pos += 1 # Advance past [ char.\n c = self.peek()\n while c and (c != ']' or nest_level > 0):\n if c == '`':\n self.parse_backticks([])\n elif c == '<':\n self.parse_autolink([]) or self.parse_html_tag([]) or self.parse_string([])\n elif c == '[': # Nested []\n nest_level += 1\n self.pos += 1\n elif c == ']': # Nested []\n nest_level -= 1\n self.pos += 1\n elif c == '\\\\':\n self.parse_escaped([])\n else:\n self.parse_string([])\n c = self.peek()\n\n if c == ']':\n self.label_nest_level = 0\n self.pos += 1 # Advance past ]\n return self.pos - startpos\n else:\n if not c:\n self.label_nest_level = nest_level\n self.pos = startpos\n return 0", "title": "" }, { "docid": "a218b17f4cc7528da14d7289fec3a424", "score": "0.41094857", "text": "def parse_input(subject):\n parsed_subject = {'id': subject.get('id'), 'true_id': subject.get('true_id'),\n 'url': Subject.truncate_str(subject.get('url'), Subject.MAX_URL_LENGTH),\n 'subject_type': subject.get('type'),\n 'name': Subject.truncate_str(subject.get('name'), Subject.MAX_NAME_LENGTH),\n 'name_cn': Subject.truncate_str(subject.get('name_cn'), Subject.MAX_NAME_LENGTH),\n 'summary': Subject.truncate_str(subject.get('summary'), Subject.MAX_SUMMARY_LENGTH),\n 'air_date': Subject.parse_date(subject.get('air_date')),\n 'air_weekday': subject.get('air_weekday'),\n 'rating': 
Subject.parse_rating(subject.get('rating')),\n 'images': Subject.parse_images(subject.get('images')),\n 'collection': Subject.parse_collection(subject.get('collection')),\n 'rank': subject.get('rank'),\n 'eps': subject.get('eps'), 'eps_count': subject.get('eps_count'),\n 'characters': subject.get('crt'), 'staff': subject.get('staff'), }\n\n return parsed_subject", "title": "" }, { "docid": "0a289950b26039588d691fb1e60e3fe2", "score": "0.4107573", "text": "def next_heading(self):\r\n\t\treturn super(Heading, self).next_item", "title": "" }, { "docid": "fa3742d77fa2039aa6a58e14dd62ebcc", "score": "0.4105801", "text": "def next_token(self, token):\n if (\n not (\n isinstance(token, EndMarkdownToken)\n and token.type_name == MarkdownToken.token_atx_heading\n )\n and self.__is_in_normal_atx\n ):\n self.__last_atx_token = token\n\n if isinstance(token, ParagraphMarkdownToken):\n self.__last_paragraph_token = token\n elif isinstance(token, AtxHeadingMarkdownToken):\n self.__is_in_normal_atx = True\n elif isinstance(token, EndMarkdownToken):\n if token.type_name == MarkdownToken.token_paragraph:\n self.__last_paragraph_token = None\n elif token.type_name == MarkdownToken.token_atx_heading:\n if self.__is_in_normal_atx and isinstance(\n self.__last_atx_token, TextMarkdownToken\n ):\n if self.__last_atx_token.token_text.endswith(\"#\"):\n self.report_next_token_error(token)\n self.__is_in_normal_atx = False\n elif isinstance(token, TextMarkdownToken) and self.__last_paragraph_token:\n split_whitespace = self.__last_paragraph_token.extracted_whitespace.split(\n \"\\n\"\n )\n split_text = token.token_text.split(\"\\n\")\n assert len(split_whitespace) == len(split_text)\n\n for split_index, next_text in enumerate(split_text):\n combined_text = split_whitespace[split_index] + next_text\n if re.search(r\"^\\s{0,3}#{1,6}.*#+\\s*$\", combined_text):\n self.report_next_token_error(token)", "title": "" }, { "docid": "644853d7a5f93bafcade151cb41fe306", "score": "0.41053632", "text": "def _advance(self):\n # TODO this is currently just to keep the message queue going, but\n # eventually it should turn them into events and stuff them in an event\n # queue\n yield from self._read_message()\n\n asyncio.async(self._advance(), loop=self.loop)", "title": "" }, { "docid": "dc91fc6ceb9f1d699ef7959a0411417d", "score": "0.410363", "text": "def next(self):\n if len(self.containers) > 1:\n ## Return a new instance of this class using the tails of\n ## the separators and containers lists. 
Use self.__class__()\n ## in case :cls:`hl7.ParsePlan` is subclassed\n return self.__class__(self.separators[1:], self.containers[1:])\n ## When we have no separators and containers left, return None,\n ## which indicates that we have nothing further.\n return None", "title": "" }, { "docid": "d9f01bae1b7a317c2b440d1deb79e15c", "score": "0.4099128", "text": "def next_item(self):\n with self._storage.open(self.file_path) as metadata_file:\n metadata_file.seek(self._data_pointer)\n line = metadata_file.readline()\n if \"END\" in line:\n return None\n timestamp, alias, item_data = self._timestamp_alias_data_from_row(line)\n definition = self._alias_definitions[alias]\n parser = definition.parsers[0]\n self._data_pointer = metadata_file.tell()\n\n return parser.parse(item_data, timestamp)", "title": "" }, { "docid": "173186fff6ba1137f5ff6a395c99b948", "score": "0.4092094", "text": "def _linesIterator(self):\n # ignore trailing empty lines\n stop = len(self.lines)\n while stop > 0 and self.lines[stop-1].strip() == \"\":\n stop -= 1\n self.nl = 0\n # read header of PDFFit file\n for self.line in self.lines[:stop]:\n self.nl += 1\n yield self.line\n # end of _linesIterator", "title": "" }, { "docid": "5419161a6440a2f8c2d1ed2944432382", "score": "0.40877464", "text": "def loads_itr(content):\n mdata = email.message_from_string(content)\n\n if not mdata.is_multipart():\n raise ValueError(\"Multi-part MIME data was not found in \"\n \"given string: %s ...\" % content[:100])\n\n for info in parse_itr(mdata):\n yield info", "title": "" }, { "docid": "56c1514a3affc58f71d37111215966c8", "score": "0.40871352", "text": "def next(self):\n return self.offset(1)", "title": "" }, { "docid": "71d11533e2a0c1967288f2c72a6aeabd", "score": "0.4086495", "text": "def l(self, iline):\n prev_dot = self.dot\n self.dot = iline\n line = (self.lines[iline]).rstrip('\\n')\n return line, prev_dot", "title": "" }, { "docid": "2a2574533e04d87f0b36149a77921f8d", "score": "0.4086475", "text": "def feed(self, seq):\n\n lastNode = None\n for i in seq:\n lastNode = self.parseToken(i, lastNode)\n self.addStop(lastNode, self.starting_weight)", "title": "" }, { "docid": "3f972f6ed8a4ce9f3613298ef639e6b6", "score": "0.4080781", "text": "def next(self):\n pass", "title": "" }, { "docid": "3f972f6ed8a4ce9f3613298ef639e6b6", "score": "0.4080781", "text": "def next(self):\n pass", "title": "" }, { "docid": "161cb7c06e0a18c4cf13ca63ba7b2659", "score": "0.4077511", "text": "def extend(self, read_sequence): \n\t\ta = \"\";\n\t\tfor i in range(min(len(read_sequence), len(self.tail))):\n\t\t\tif(read_sequence[i] == self.tail[i]):\n\t\t\t\ta += read_sequence[i];\n\t\t\telse:\n\t\t\t\t#self.sequences[self.anchor + a] += 1;\n\t\t\t\treturn self.anchor + a, i;\n\t\t#self.sequences[self.anchor + a] += 1;\n\t\treturn\tself.anchor + a, min(len(read_sequence), len(self.tail));", "title": "" }, { "docid": "f344ce2abd485900020471d57f6dc978", "score": "0.40767217", "text": "def para_edge(self, direction, more_lines):\n iline = self.dot\n # If dot is empty line following paragraph, search back.\n # iline 0 is invisible empty line before visible line 1.\n while self.match(emptyline, iline) and iline > 0: \n iline -= 1\n if iline == 0: # all lines in buffer are empty\n return 0 # invokes '? 
Invalid address'\n # Dot is non-empty line in paragraph, search back (forward) for empty.\n while not self.match(emptyline, iline) and more_lines(iline):\n iline += direction \n # When searching forward, last line in buffer can be the edge\n if direction == 1 and not more_lines(iline):\n return iline\n else:\n return iline - direction # edge is line that follows (precedes) empty", "title": "" }, { "docid": "43d640ad2758dadf0f28573f282d437a", "score": "0.4076273", "text": "def _beginning_of_line(text, pos):\n return text, 0", "title": "" }, { "docid": "6baddada3fdaa48238e82f5b8b4ba52f", "score": "0.40749088", "text": "def CV_NEXT_SEQ_ELEM(*args):\n return _cv.CV_NEXT_SEQ_ELEM(*args)", "title": "" } ]
7d2ae14e0f07f38686ce1cc6442e1b10
Helper function to build Sprint name for JIRA.
[ { "docid": "592bbad6008c85f9b56ba95d57f94b38", "score": "0.72261506", "text": "def build_sprint(operational_year, operational_quarter, sprint_length, sprint_index):\n return 'Y%s-Q%s-L%s-S%s' % (\n operational_year, operational_quarter, sprint_length, sprint_index\n )", "title": "" } ]
[ { "docid": "9c995330fc18315b488d0d98da722891", "score": "0.6251241", "text": "def get_trial_name_capture1(user, trial,capture):\n return 'Suturing_%s%03d%s' % (user, trial,capture)", "title": "" }, { "docid": "b5560e31e7d53022dc1693d599b9e19c", "score": "0.6211164", "text": "def make_name(user):\n\treturn str(get_num_msgs() + 1) + '_' + getuser() + '_' + get_date()", "title": "" }, { "docid": "da32f5566a0b131fdd13413c315eb66f", "score": "0.59904903", "text": "def _generate_name(self, prefix, number, padding):\n format_string = prefix + \"-{0:0\" + str(padding) + \"}\"\n return format_string.format(number)", "title": "" }, { "docid": "654df6dc1a718f37798ead1936eab49c", "score": "0.59847397", "text": "def _format_rxn_name(rxn_key):\n rcts = rxn_key[0]\n prds = rxn_key[1]\n for idx, rct in enumerate(rcts):\n if idx == 0:\n rct_str = rct\n else:\n rct_str += '+' + rct\n for idx, prd in enumerate(prds):\n if idx == 0:\n prd_str = prd\n else:\n prd_str += '+' + prd\n rxn_name = rct_str + '=' + prd_str\n \n return rxn_name", "title": "" }, { "docid": "9be66e98f62d24f01158081701564212", "score": "0.5980366", "text": "def make_full_name(login, reponame):\n return '{}/{}'.format(login, reponame)", "title": "" }, { "docid": "47c38c99f6e40d53e474ade9b5f55789", "score": "0.5967793", "text": "def trial_str_creator(trial):\n return \"{}_{}\".format(trial.trainable_name, trial.trial_id)", "title": "" }, { "docid": "d1f4829864375d46e53154a22e14e75f", "score": "0.59640926", "text": "def routine_name(self, routine, iteration):\n name = routine.replace(\"_\", \" \").title()\n return f\"{name} - {iteration}\"", "title": "" }, { "docid": "030a4de6eb135c5f359ba4b7e192025b", "score": "0.58927053", "text": "def format_journal_name(entry):\n journal = entry.fields['journal']\n year = entry.fields['year']\n if 'volume' in entry.fields:\n vol = f\"{entry.fields['volume']}\"\n else:\n vol = \"\"\n if 'issue' in entry.fields:\n iss = f\"({entry.fields['issue']})\"\n else:\n iss = \"\"\n voliss = \"\".join([vol,iss])\n return \" \".join([journal, year, voliss])", "title": "" }, { "docid": "905837ce241fa7e44f1a2fb2bacd8d13", "score": "0.5885409", "text": "def make_name(release):\n\n build_trim = [\n 'linux-ubuntu-',\n 'linux-synology-',\n 'linux-drobo-',\n 'linux-seagate-',\n 'linux-',\n 'freebsd-',\n 'seagate-',\n ]\n build = release['build']\n for prefix in build_trim:\n if build.startswith(prefix):\n build = build[len(prefix):]\n\n if release['distro'] == 'redhat':\n distro = release['label'].split()[0].lower()\n else:\n distro = release['distro']\n\n name = distro + '_' + build\n\n if name.startswith('english_windows-'):\n name = 'windows_' + name[16:]\n return name", "title": "" }, { "docid": "c83a5edebe5b4e3cbc53ad93720261fb", "score": "0.5859951", "text": "def build_name(self):\n if self.comment is None:\n self.name = f\"{self.date}\"\n else:\n self.name = f\"{self.date} {self.comment[:90]}\"", "title": "" }, { "docid": "8a478908578e4b3af0a501b9d4961f86", "score": "0.58272886", "text": "def format_name(self) -> str:\n pass", "title": "" }, { "docid": "10f8dfd9c0a0d2ad3cd26bd4b13ef400", "score": "0.57741535", "text": "def build_quarter_string(operational_year, operational_quarter):\n return 'Y%s-Q%s' % (operational_year, operational_quarter)", "title": "" }, { "docid": "670803cb4b99a0fbf11623166e4fc32e", "score": "0.57653147", "text": "def get_name(project_id):\n if project_id and not project_id.startswith('projects/'):\n project_id = 'projects/{}'.format(project_id)\n return project_id", "title": "" }, { 
"docid": "e5c392a9cc2dbbd99026773478395e58", "score": "0.5742875", "text": "def _gen_issue_name(obj: ADCMEntity, cause: ConcernCause) -> str:\n return f\"{obj} has issue with {cause.value}\"", "title": "" }, { "docid": "86b17517dc5b9ba8f7cf829baf3f5bc6", "score": "0.5719695", "text": "def getPrintName(opts):\n # getFullOpts(opts)\n\n elitist = '+' if opts['elitist'] else ','\n active = 'Active-' if opts['active'] else ''\n thres = 'Threshold ' if opts['threshold'] else ''\n mirror = 'Mirrored-' if opts['mirrored'] else ''\n ortho = 'Orthogonal-' if opts['orthogonal'] else ''\n tpa = 'TPA-' if opts['tpa'] else ''\n seq = 'Sequential ' if opts['sequential'] else ''\n ipop = '{}-'.format(opts['ipop']) if opts['ipop'] is not None else ''\n weight = '${}$-weighted '.format(opts['weights_option']) if opts['weights_option'] is not None else ''\n\n sel = 'Pairwise selection' if opts['selection'] == 'pairwise' else ''\n sampler = 'a {} sampler'.format(opts['base-sampler']) if opts['base-sampler'] is not None else ''\n\n if len(sel) + len(sampler) > 0:\n append = ' with {}'\n if len(sel) > 0 and len(sampler) > 0:\n temp = '{} and {}'.format(sel, sampler)\n else:\n temp = '{}{}'.format(sel, sampler)\n append = append.format(temp)\n else:\n append = ''\n\n base_string = \"{seq}{thres}{weight}{mirror}{ortho}{active}(mu{elitist}lambda)-{tpa}{ipop}CMA-ES{append}\"\n\n name = base_string.format(elitist=elitist, active=active, thres=thres, mirror=mirror, ortho=ortho,\n tpa=tpa, seq=seq, ipop=ipop, weight=weight, append=append)\n\n return name", "title": "" }, { "docid": "58234704b0caddaeaa57c317985ed6f9", "score": "0.5712482", "text": "def get_name(cls, name: str):\n return f'1 - {name}'", "title": "" }, { "docid": "ebb47b05bb0e1aafcc6758d2a45294a7", "score": "0.57067096", "text": "def release_prefix(prefix, chart):\n return \"{}-{}\".format(prefix, chart[\"chart\"][\"release_name\"])", "title": "" }, { "docid": "83d3cbee316664bef5156158b74be111", "score": "0.56877434", "text": "def build_key_name(username):\n if username is None:\n logging.error(\"Trying to build a key_name for a null username!\")\n return \"\"\n return username.replace('.', '').lower()", "title": "" }, { "docid": "5d84d47dd97d4538afa7b905b84f76ff", "score": "0.56800026", "text": "def getFormattedName(firstname, lastname):\n fullname = f\"{firstname} {lastname}\"\n return fullname.title()", "title": "" }, { "docid": "5b361cb9e52438260d909db9aa58a40e", "score": "0.5677916", "text": "def pull_request_title(string):\n return string[:50]", "title": "" }, { "docid": "39b21b9f8d6f3b1bfc6d1b830a042247", "score": "0.5665729", "text": "def __generate_name(self):\n return self.NAME_PREFIX + \\\n ''.join([random.choice(self.JOBNAME_LEGAL_CHARS)\n for _ in xrange(5)])", "title": "" }, { "docid": "7d42a729b08cc64f8605d1dc21e718e3", "score": "0.5658137", "text": "def _create_full_name(self):\n if self.parent_category:\n return \": \".join(b.name for b in self.all_parent_categories) + \": \" + self.name\n else:\n return self.name", "title": "" }, { "docid": "ba5149f29c219f97ecea7e8df075c29f", "score": "0.564826", "text": "def format_object_name(self, object_name: str, document_id: int, resource='well'):\n return self.get_prefix(document_id, resource) + object_name", "title": "" }, { "docid": "40c957cced4b76914d61abb052d7c543", "score": "0.56451195", "text": "def build_name(self, fields):\n \n name = \"\"\n \n for field in fields:\n \n field = field.strip()\n \n if field in self.details:\n #add a space between fields\n if len(name) > 0:\n name += \" \"\n 
\n #add the sample detail to the name string\n name += self.details[field]\n \n return name", "title": "" }, { "docid": "80fc7a910f9303bbffc2c8f9f6d0e2eb", "score": "0.5640022", "text": "def generate_name(syllables):\n\n name = \"\"\n for i in range(syllables):\n name += generate_syllable()\n return name.title()", "title": "" }, { "docid": "3525df3fcd7abdae5a32d2e74521ed53", "score": "0.56335866", "text": "def get_name(item):\n\t_prefixes = [item['prefix']]\n\t\n\tif 'burning' in item:\n\t\tif item['burning']:\n\t\t\t_prefixes.append('burning')\n\t\telif item['burnt']:\n\t\t\t_prefixes.append('burnt')\n\t\n\tif 'capacity' in item:\n\t\t_score = item['capacity']/float(item['max_capacity'])\n\t\t\n\t\tif _score >= .75:\n\t\t\t_prefixes.append('nearly full')\n\t\telif _score == 0:\n\t\t\t_prefixes.append('empty')\n\t\n\tif len(_prefixes)>=2:\n\t\tif _prefixes[0] == 'a' and _prefixes[1] in ['empty']:\n\t\t\t_prefixes[0] = 'an'\n\t\n\treturn '%s %s' % (' '.join(_prefixes), item['name'])", "title": "" }, { "docid": "787340262887a545f9d338be0e42c897", "score": "0.56244606", "text": "def _get_condensed_name(self) -> str:\n return f\"{self.get_datetime()}_{self.platform.name}_{self.tile_name}_{self.product_type.value}\"", "title": "" }, { "docid": "fa02ea0cfab6241fe7c1c33f4d442582", "score": "0.5589037", "text": "def _get_condensed_name(self) -> str:\n return f\"{self.get_datetime()}_{self.platform.name}_{self.product_type.name}\"", "title": "" }, { "docid": "c39f4f853fce64f476f40ab5053dcb21", "score": "0.555855", "text": "def get_formatted_name(first,last):\n full_name=first+' '+last\n return full_name.title()", "title": "" }, { "docid": "d1c521936855bd7afc05bd1b2b2b93b0", "score": "0.5551886", "text": "def getShortName(stockType):\r\n if stockType == 1: #FAANG stocks\r\n shortName = 'FAANG';\r\n elif stockType == 2: # Other Top 100 NASDAQ stocks\r\n shortName = 'Top 100';\r\n elif stockType == 3: # All other NASDAQ stocks\r\n shortName = 'Other';\r\n else: # NASDAQ\r\n shortName = 'NASDAQ';\r\n return shortName;", "title": "" }, { "docid": "132c56bb33421a2e843bb92d83d212da", "score": "0.5548043", "text": "def portfolio_name(group_name, tag):\n port_name = '.'.join([group_name, tag, \"portfolio\"])\n return port_name", "title": "" }, { "docid": "91f281b76581436d0355bcfdbdcbb5ec", "score": "0.55315495", "text": "def format_name(log_path):\n return '/'.join(log_path.split('/')[-2:])", "title": "" }, { "docid": "87d5f00cd0c9b75e4f09d04b1b0f053f", "score": "0.55215937", "text": "def fullname_creator(comment_object):\n initial_fullname = str(comment_object.fullname)\n initial_fullname_array = initial_fullname.split('_')\n final_fullname = str(initial_fullname_array[1])\n return final_fullname", "title": "" }, { "docid": "ea154d31046a1a57358f243d94fc2ce6", "score": "0.5514119", "text": "def _get_unique_name(self):\n return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')", "title": "" }, { "docid": "ea154d31046a1a57358f243d94fc2ce6", "score": "0.5514119", "text": "def _get_unique_name(self):\n return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')", "title": "" }, { "docid": "ea154d31046a1a57358f243d94fc2ce6", "score": "0.5514119", "text": "def _get_unique_name(self):\n return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')", "title": "" }, { "docid": "5cd1245fc470be93dfa429b16f5e441c", "score": "0.5509674", "text": "def paralog_name(abbrev, keys):\n id_ = id_generator()\n pname = f'{abbrev}..p{id_}'\n if pname not in keys:\n return pname\n 
else:\n paralog_name(abbrev, keys)", "title": "" }, { "docid": "33f79add3ed9073786ff0505f44928f4", "score": "0.55079406", "text": "def get_name(self):\n name = str(self.year) + ' ' + self.make + ' ' + self.model\n return name.title()", "title": "" }, { "docid": "66d3e5c4e8ac1e79d38302519fe33104", "score": "0.5503918", "text": "def make_frame_name(frame_num):\n return str(frame_num).zfill(3) + '.jpg'", "title": "" }, { "docid": "f8f567abc174a108ae406a443f8e16f5", "score": "0.54932123", "text": "def _create_room_title(self):\n room_title = self.bug_notification['ID']+\" - Spark Conference\"\n\n return room_title", "title": "" }, { "docid": "beaf1291ef8d736d846bc7c4eb3ba9d0", "score": "0.5489789", "text": "def session_name(session_number):\n if session_number is 1:\n return 'morning'\n elif session_number is 2:\n return 'afternoon'\n else:\n return 'session' + session_number", "title": "" }, { "docid": "b07ab9ed0d8d8116e53651e1b0a7b0ad", "score": "0.5477232", "text": "def get_full_subscription_name(project, subscription):\n return fqrn('subscriptions', project, subscription)", "title": "" }, { "docid": "7d90198ffb85f17cfce6ab04c7078fa2", "score": "0.5475987", "text": "def out_name(stem: str, timestep: Optional[int] = None) -> str:\n if conf.core.shortname:\n return conf.core.outname\n if timestep is not None:\n stem = f\"{stem}{timestep:05d}\"\n return conf.core.outname + \"_\" + stem", "title": "" }, { "docid": "dfa1e249fa235170c0e5bcb03f421549", "score": "0.5463991", "text": "def _requirement_to_str_lowercase_name(requirement):\n parts = [requirement.name.lower()]\n\n if requirement.extras:\n parts.append(\"[{0}]\".format(\",\".join(sorted(requirement.extras))))\n\n if requirement.specifier:\n parts.append(str(requirement.specifier))\n\n if requirement.url:\n parts.append(\"@ {0}\".format(requirement.url))\n\n if requirement.marker:\n parts.append(\"; {0}\".format(requirement.marker))\n\n return \"\".join(parts)", "title": "" }, { "docid": "a4ec76734689b88cc203a4a916955363", "score": "0.54435146", "text": "def gen_field_name(field_id, i, a) -> str:\n\n return \"%d-%d.%d\" % (field_id, i, a)", "title": "" }, { "docid": "5b58fec0c4fc21595321d25d6eeea94c", "score": "0.5441257", "text": "def get_project_name(soup_obj):\n project_name = soup_obj.find('td', class_='header_page_title').text\n project_name = project_name.split('Task List for Project: ')[1]\n project_name = project_name.replace(' ', '_').replace('(', '_').replace(')', '_')\n \n return project_name", "title": "" }, { "docid": "766e5f0c48abaee37fb52ab1ed50751d", "score": "0.54407847", "text": "def ProjectName(self) -> str:", "title": "" }, { "docid": "766e5f0c48abaee37fb52ab1ed50751d", "score": "0.54407847", "text": "def ProjectName(self) -> str:", "title": "" }, { "docid": "bce2acb0018f8651be07e5bdd25e2bca", "score": "0.54263383", "text": "def _generateDefaultName(self) -> str:", "title": "" }, { "docid": "108cd4d6d37e0ed75ee0a4e0ec6066a4", "score": "0.5424971", "text": "def generate_playlist_name(year: str) -> str:\n return f\"{Config.PLAYLIST_NAME_PREFIX}liked from {year}\"", "title": "" }, { "docid": "887f12789522e034103a3a289339cf46", "score": "0.54169303", "text": "def get_name(org_id):\n if org_id and not org_id.startswith('organizations/'):\n org_id = 'organizations/{}'.format(org_id)\n return org_id", "title": "" }, { "docid": "4a97e6eb6eb9caff35df3c8645e95e82", "score": "0.54044205", "text": "def u_name(self):\n u_name = self.owner.username.lower() + \"-\" + self.name.replace(\" \", \"-\").lower()\n if not 
settings.PRODUCTION or settings.STAGING:\n u_name = u_name + '.staging'\n if not settings.STAGING:\n u_name = \"dev-\" + u_name\n return u_name", "title": "" }, { "docid": "ff29fcb3ec0b4b1a152427c8a484fd0d", "score": "0.5394889", "text": "def GetPrinterName(self):", "title": "" }, { "docid": "8fda160232b8d6b80a61f0aa68b2170a", "score": "0.5374181", "text": "def make_snap_name(self, timestamp=None):\n if not timestamp:\n timestamp = datetime.now()\n return \"%s-%s-%s\" % (DEFAULT_SNAP_PREFIX, self.tag, self._timestamp_to_snaptimestr(timestamp))", "title": "" }, { "docid": "f7f3028b09a5f0d096b7e295e7b07f16", "score": "0.53715", "text": "def get_new_project_name(self, project_name):\n timestamp_str = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M')\n return \"{} {}\".format(project_name, timestamp_str)", "title": "" }, { "docid": "daaab601339db395a9bca63fec038424", "score": "0.53667957", "text": "def test_name(num: int, std_grade: int, lang: Text) -> Text:\n return \"{0}_{1}_{2}_{3}\".format(\n date_time(),\n lang.title(),\n grade_number(std_grade),\n test_num(num))", "title": "" }, { "docid": "8f8828095930e34f1e53fe27dc532ec4", "score": "0.53616464", "text": "def getName(self,base_name='slalom'):\n\n name = \"%s_unannotated_%s_unannotated-sparse_%s_it_%s\" % (base_name,self.nLatent,self.nLatentSparse, self.iterationCount)\n return name", "title": "" }, { "docid": "08cad0003aba9b95854c274e10ff559f", "score": "0.535455", "text": "def buildTitle(room_name, topic_name):\n s = Template(MAIL_SUBJECT_FORMAT)\n return s.substitute(topic = topic_name, room = room_name)", "title": "" }, { "docid": "e7472c915a6f9e2bc7aa4ac60ef9526a", "score": "0.5353905", "text": "def get_name_at(index: int) -> str:\n return f\"obs_{index}\"", "title": "" }, { "docid": "409d6f61198af54d68688322eba7431e", "score": "0.53536284", "text": "def get_name(self) -> str:", "title": "" }, { "docid": "aeedb195070ab58c2b8fba02798b3692", "score": "0.53386223", "text": "def get_formatted_name(first, last):\n full_name = first + \" \" + last\n return full_name.title", "title": "" }, { "docid": "c0864a4e6436233a129211e41d58948e", "score": "0.53323096", "text": "def internal_name(_cls, user, name):\n return f'{user.username}{UserGroup.SEPARATOR}{name}'", "title": "" }, { "docid": "e2b458f018685107f5432a9d167d7fa5", "score": "0.5326744", "text": "def name(self):\n if self.school_code == 'MTHS':\n return '{}{} {}'.format(self.period, self.day,\n self.course.course_description)\n elif self.school_code == 'MTMS':\n return 'Period {} {}'.format(self.period,\n self.course.course_description)\n return '{} {} {}'.format(self.course_code, self.course_section,\n self.course.course_description)", "title": "" }, { "docid": "80265a1b068230436a9560d2d547fa33", "score": "0.5323096", "text": "def get_formatted_name(first_name, last_name):\n\tfull_name = f\"{first_name} {last_name}\"\n\treturn full_name.title()", "title": "" }, { "docid": "6c46b9910f8cedbca30681f822d44917", "score": "0.53138155", "text": "def get_new_bucket_name():\n name = '{prefix}{num}'.format(\n prefix=prefix,\n num=next(bucket_counter),\n )\n return name", "title": "" }, { "docid": "041dc601b216bac9761732a434a3470e", "score": "0.53096193", "text": "def make_frame_name(frameNum, space_size=6):\n\tframe_num_str = str(frameNum)\n\tzero_str = ''\n\tfor i in range(space_size-len(frame_num_str)):\n\t\tzero_str += '0'\n\n\treturn zero_str+frame_num_str", "title": "" }, { "docid": "ccff6ccfb4fed4015509dd358d621946", "score": "0.53002405", "text": "def name(self):\n if self._prefix != 
None:\n return self._prefix + '_' + self._name\n return self._name", "title": "" }, { "docid": "26190370103aa873aff425543ae8d294", "score": "0.52981883", "text": "def formatted_name(first_name,last_name,middle_name=''): #middle_name is optional parameter here\n if middle_name:\n full_name = f\"\\n{first_name} {middle_name} {last_name}\"\n else:\n full_name = f\"\\n{first_name} {last_name}\"\n return full_name.title()", "title": "" }, { "docid": "992238483ed298eb53a8031d7719d5fb", "score": "0.5297774", "text": "def __generateContainerName(self, path=\"tempfs\"):\n\n # Generate docker name filter based on user name\n name = self.__generateContainerUserFilter()\n\n # Append workspace name\n name += \"-\" + os.path.basename(path)\n\n # Append timestamp\n name += \"-\" + datetime.datetime.now().strftime(\"%j%H%M%S\")\n\n return name", "title": "" }, { "docid": "7e4a2aaeb0fc9eedb02bbc441d15903f", "score": "0.5295433", "text": "def format_name(last, first):\n return last + ', ' + first", "title": "" }, { "docid": "c27a7db035015a40e9beee7005cd1aeb", "score": "0.5286941", "text": "def get_env_name(env_cfg):\r\n assert env_cfg.env_type == 'd'\r\n name = f'MarlGridDoors-'\r\n\r\n if env_cfg.num_blind_agents > 0:\r\n name += f'{env_cfg.num_blind_agents}Blind'\r\n\r\n if env_cfg.active_after_done:\r\n name += 'Active'\r\n\r\n if not env_cfg.neutral_shape:\r\n name += 'Tri'\r\n\r\n if env_cfg.discrete_position and (\r\n env_cfg.observe_position or env_cfg.observe_self_position):\r\n name += 'Di'\r\n if env_cfg.observe_door:\r\n name += 'Door'\r\n if env_cfg.observe_position:\r\n name += 'Pos'\r\n if env_cfg.observe_self_position:\r\n name += 'Selfpos'\r\n if env_cfg.observe_self_env_act:\r\n name += 'Selfenv'\r\n\r\n if env_cfg.observe_done:\r\n name += 'D'\r\n\r\n if env_cfg.observe_t:\r\n name += 'T'\r\n\r\n if env_cfg.comm_len > 0:\r\n name += f'{env_cfg.comm_len}C'\r\n if not env_cfg.discrete_comm:\r\n name += 'cont'\r\n\r\n if env_cfg.team_reward_type != 'none':\r\n name += f'TR{env_cfg.team_reward_type}'\r\n\r\n if env_cfg.team_reward_freq != 'none':\r\n name += f'TRF{env_cfg.team_reward_freq}'\r\n\r\n if env_cfg.view_size != 7:\r\n name += f'{env_cfg.view_size}Vs'\r\n\r\n name += f'{env_cfg.grid_size}x{env_cfg.grid_size}-v0'\r\n return name", "title": "" }, { "docid": "356d434eafee4419b58d321879b2cce8", "score": "0.5283878", "text": "def get_name_field(base_name,l,b):\n return(base_name+'_l{0}_b{1}.fits'.format(l, b))", "title": "" }, { "docid": "7446a937d72820bf07b1d6fd0f44856e", "score": "0.5281821", "text": "def getCalName(format, ext: str, sensor: str, serial: int, chopper) -> str:\n if format == \"induction\":\n return inductionName(ext, sensor, serial, chopper)\n elif format == \"metronix\":\n return metronixName(ext, sensor, serial, chopper)\n elif format == \"rsp\":\n return rspName(ext, sensor, serial, chopper)\n elif format == \"rspx\":\n return rspxName(ext, sensor, serial, chopper)\n else:\n return metronixName(ext, sensor, serial, chopper)", "title": "" }, { "docid": "4d163746f1c5b8d832a49fb1d1d7b129", "score": "0.5281802", "text": "def name(self):\n if self.station_name:\n return f\"WAQI {self.station_name}\"\n return f\"WAQI {self.url if self.url else self.uid}\"", "title": "" }, { "docid": "4eaad50dc9fde1180880ec99c923b518", "score": "0.52784276", "text": "def format_name(f_name, l_name):\n formatted_f_name = f_name.title()\n formatted_l_name = l_name.title()\n return f\"{formatted_f_name} {formatted_l_name}\"", "title": "" }, { "docid": "8e4366888166d30f08676a71bdcf56fe", 
"score": "0.5272961", "text": "def make_trajectory_name(testcase):\n\n testid = testcase.id()\n split_names = testid.split('.')\n #name = 'T__' + '__'.join(split_names[-2:]) + '__' + randintstr\n seed = len(testid) + int(10*time.time())\n random.seed(seed)\n randintstr = str(random.randint(0, 10 ** 5))\n name = 'T__' + split_names[-1] + '__' + randintstr\n\n maxlen = pypetconstants.HDF5_STRCOL_MAX_NAME_LENGTH - 22\n\n if len(name) > maxlen:\n name = name[len(name)-maxlen:]\n\n while name.startswith('_'):\n name = name[1:]\n\n return name", "title": "" }, { "docid": "7fdb88bcaad4231a3204baa15192fa5a", "score": "0.5271804", "text": "def format_song_name(genre, number):\n zero_indexed_number = int(number) - 1\n padded_number = '{:05d}'.format(zero_indexed_number)\n \n name = genre + \".\" + padded_number + \".au\"\n \n return name", "title": "" }, { "docid": "b5acad959e05c6657d046316458a019d", "score": "0.5268555", "text": "def descriptive_name(self):\n return f\"ixpfx{self.id} {self.prefix}\"", "title": "" }, { "docid": "c6d1d4d5723b05fe538de7c50fb638c8", "score": "0.5268172", "text": "def get_spack_name_for(name):\n name = name.lower().replace('.', '-')\n\n if name == 'r':\n return name\n\n if not name.startswith('r-'):\n name = 'r-%s' % name\n\n return name", "title": "" }, { "docid": "f5f86c257625c69b8df42ee60a28eee7", "score": "0.5252144", "text": "def descriptive_name(self):\n return f\"carrierfac{self.id} {self.carrier.name} {self.facility.name}\"", "title": "" }, { "docid": "06fd5a8ccff2a6c7f117a0ddae2f063a", "score": "0.52418727", "text": "def generate_project_name() -> str:\n return f\"{random.choice(adjectives)}_{random.choice(animals)}\"", "title": "" }, { "docid": "47fbc67ba6697ae0f41d61085f793633", "score": "0.5241818", "text": "def short_project_name(full_project_name):\n return full_project_name.split('/')[-1]", "title": "" }, { "docid": "6a4e3d37589e4e117afbab9ff9029d28", "score": "0.5236338", "text": "def name_trial(trial):\n network = 'CNN' if args.use_cnn else 'MLP'\n rollouts = args.num_rollouts\n return f'MCTS{rollouts}vs{trial.trainable_name}-{network}'", "title": "" }, { "docid": "1cdcc3cccf13b1b8da300cbe14aec100", "score": "0.5232728", "text": "def format_name(f_name, l_name):\n return (f_name.capitalize() + \" \" + l_name.capitalize())", "title": "" }, { "docid": "0c3d4fa6f6052a2200945bd0bd3e9060", "score": "0.5230769", "text": "def get_formatted_name(first, last, middle=''):\n if middle:\n full_name = first + ' ' + middle + ' ' + last\n else:\n full_name = first + ' ' + last\n return full_name.title()", "title": "" }, { "docid": "e8ab10c562503cad2bc94f9732cbb7f2", "score": "0.52295035", "text": "def get_job_name(table_name: str, incremental_load: bool) -> str:\n # no underscores or periods are allowed in beam job names\n fixed_table_name = table_name.replace('_', '-').replace('.', '-')\n\n if incremental_load:\n return 'append-' + fixed_table_name\n\n return 'write-' + fixed_table_name", "title": "" }, { "docid": "44eecaef06b856e9656baedf79c66630", "score": "0.52259266", "text": "def get_name(self):\n # the name format is written to the database based on a set_name\n # the database will contain an index that stores the current index.\n # if no name hase bee defined and this is accessed first the name\n # username-0001 will be used where username is your username\n return \"notimplemented-0001\"", "title": "" }, { "docid": "908b10277d03efdddcdf58b760f7ce5f", "score": "0.52258945", "text": "def _BuildUrl(self, division, age_bracket):\n str_fmt = 
'schedule/%s/%s-%s/'\n\n div = 'Women'\n if division == scores_messages.Division.OPEN:\n div = 'Men'\n if division == scores_messages.Division.MIXED:\n div = 'Mixed'\n\n age_brak = 'College'\n if age_bracket == scores_messages.AgeBracket.NO_RESTRICTION:\n age_brak = 'Club'\n\n return str_fmt % (div, age_brak, div)", "title": "" }, { "docid": "105d203a9b2af0c9f977f0546ede2bd3", "score": "0.5222019", "text": "def _log_name(s):\n from dulcinea import site_util\n config = site_util.get_config()\n site = os.environ.get('SITE', 'qon')\n return os.path.join(config.defaults().get('log-directory'),\n site,\n s)", "title": "" }, { "docid": "7cd38f31c3ed341451d98a4e3d3532b1", "score": "0.5213068", "text": "def _build_factor_name(self, factor):\n if factor['provider'] == 'DUO':\n return factor['factorType'] + \": \" + factor['provider'].capitalize()\n elif factor['factorType'] == 'push':\n return \"Okta Verify App: \" + factor['profile']['deviceType'] + \": \" + factor['profile']['name']\n elif factor['factorType'] == 'sms':\n return factor['factorType'] + \": \" + factor['profile']['phoneNumber']\n elif factor['factorType'] == 'email':\n return factor['factorType'] + \": \" + factor['profile']['email']\n elif factor['factorType'] == 'call':\n return factor['factorType'] + \": \" + factor['profile']['phoneNumber']\n elif factor['factorType'] == 'token:software:totp':\n return factor['factorType'] + \"( \" + factor['provider'] + \" ) : \" + factor['profile']['credentialId']\n elif factor['factorType'] == 'token':\n return factor['factorType'] + \": \" + factor['profile']['credentialId']\n elif factor['factorType'] == 'u2f':\n return factor['factorType'] + \": \" + factor['factorType']\n elif factor['factorType'] == 'webauthn':\n factor_name = None\n try:\n registered_authenticators = RegisteredAuthenticators(self.ui)\n credential_id = websafe_decode(factor['profile']['credentialId'])\n factor_name = registered_authenticators.get_authenticator_user(credential_id)\n except Exception:\n pass\n\n default_factor_name = factor['profile'].get('authenticatorName') or factor['factorType']\n factor_name = factor_name or default_factor_name\n\n return factor['factorType'] + \": \" + factor_name\n elif factor['factorType'] == 'token:hardware':\n return factor['factorType'] + \": \" + factor['provider']\n\n else:\n return \"Unknown MFA type: \" + factor['factorType']", "title": "" }, { "docid": "00bf9b383c539ba160a814b0208d8040", "score": "0.5210844", "text": "def generate_new_name(surgeon, prefix: str) -> str:\n i = 0\n new_name = \"{}_{}\".format(prefix, i)\n surgeon_node = find_node_by_name(surgeon, new_name, raise_on_fail=False)\n while surgeon_node:\n i += 1\n new_name = \"{}_{}\".format(prefix, i)\n surgeon_node = find_node_by_name(surgeon, new_name, raise_on_fail=False)\n return new_name", "title": "" }, { "docid": "3207752508f1a7bc77da931c1ad524eb", "score": "0.5210305", "text": "def format_bucket_id(project_id, bucket_name): # pragma: no cover\n return '%s/%s' % (project_id, bucket_name)", "title": "" }, { "docid": "fd6f4fb4469a0cf44476ca4272fcc6be", "score": "0.5208774", "text": "def make_label(name: str, unit: str)-> str:\n return '{}_{}'.format(name, unit)", "title": "" }, { "docid": "f455a7210f2362799170335b7407c760", "score": "0.52017426", "text": "def join_full_title(site, ns, title):\n if ns == 0:\n return to_wiki_format(site, title, ignore_ns=True)\n ns_name = site.namespaces[ns].custom_name\n return ns_name + \":\" + to_wiki_format(site, title, ignore_ns=True)", "title": "" }, { "docid": 
"dc4d012a2921241bfea7a5518547ef54", "score": "0.52009916", "text": "def _GetSequenceName(self, name):\n if name == u'key':\n return u'{0:s}s'.format(name)\n\n if (name[-1] in (u's', u'x', u'z') or (\n name[-1] == u'h' and name[-2] in (u'c', u's'))):\n return u'{0:s}es'.format(name)\n\n if name[-1] == u'y':\n return u'{0:s}ies'.format(name[:-1])\n\n return u'{0:s}s'.format(name)", "title": "" }, { "docid": "d44ecec425747e4998fc65d66f761048", "score": "0.51999176", "text": "def getJobId(job_name):\n\n return '%s-%s' % (job_name, getTimestamp())", "title": "" }, { "docid": "ec3daad116cd28b07fe554cc70f46a8a", "score": "0.519302", "text": "def get_formatted_name(first_name,last_name):\n full_name = first_name+\" \"+last_name\n return full_name.title()", "title": "" }, { "docid": "11057ffb4a97c9932de1e4933499ce82", "score": "0.51924515", "text": "def asciiname(team):\n return team.name.encode('ascii', 'ignore')", "title": "" }, { "docid": "4ef46a07a973c5b39b83f174e5344dfd", "score": "0.5192126", "text": "def room_full_name(room_number):\n if room_number == 1:\n return \"Kew Gardens Suite\"\n elif room_number == 2:\n return \"Oxford Suite\"\n elif room_number == 3:\n return \"London Suite\"\n elif room_number == 4:\n return \"Verulamium Suite\"\n elif room_number == 5:\n return \"Cambridge Botanic Gardens\"\n elif room_number == 6:\n return \"Stonehenge Suite\"\n elif room_number == 7:\n return \"Lucretia's Suite\"\n elif room_number == 8:\n return \"Glasgow Suite\"\n elif room_number == 9:\n return \"Ware Suite\"", "title": "" }, { "docid": "7e7bb62b5c6ea3a94a1114852f4952a7", "score": "0.5189865", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model; #long_name is a string made up of three strings and two spaces\n return long_name.title();", "title": "" } ]
4357cc9622aa6e03a139d2d3ea2b28be
Create a string to be written to a Colvars file.
[ { "docid": "2034dd75dedcb8a203a547e72c16281c", "score": "0.0", "text": "def _create(star, dna, lower, upper, k, steps, start, stop):\n group1_end = star\n group2_start = star+1\n group2_end = star+dna\n colvar = \"colvar {{\\n name dist\\n distance {{\\n group1 {{atomNumbersRange 1-{}}}\\n group2 {{atomNumbersRange {}-{}}}\\n}}\\n lowerBoundary {}\\n upperBoundary {}\\n}}\".format(group1_end, group2_start, group2_end, lower, upper)\n harmonic = \"harmonic {{\\n colvars dist\\n forceConstant {}\\n centers {}\\n targetCenters {}\\n targetNumSteps {}\\n writeTIPMF on\\n}}\".format(k, start, stop, steps)\n return \"{}\\n\\n{}\".format(colvar, harmonic)", "title": "" } ]
[ { "docid": "84153e24371b21890b7573ad151587da", "score": "0.6302888", "text": "def make_namd_colvar_string(self):\n serial_group1 = [str(index+1) for index in self.group1]\n serial_group2 = [str(index+1) for index in self.group2]\n serial_group1_str = \" \".join(serial_group1)\n serial_group2_str = \" \".join(serial_group2)\n namd_colvar_string = \"\"\"\ncolvar {{\n name collective_variable_{0}\n outputappliedforce off\n distance {{\n group1 {{ atomNumbers {1} }}\n group2 {{ atomNumbers {2} }}\n }}\n}}\n\"\"\".format(self.index, serial_group1_str, serial_group2_str)\n return namd_colvar_string", "title": "" }, { "docid": "8a8ad40df4ae8bc91ed7fffe342185bc", "score": "0.5921255", "text": "def writeString(c):", "title": "" }, { "docid": "10a3407e0826fa9ee344cf9e3f2e3fde", "score": "0.5907473", "text": "def write_comuni(file_sql_comuni):", "title": "" }, { "docid": "bf90b064c7c2a0a9323dd3371fb0d73e", "score": "0.58252853", "text": "def writestring():", "title": "" }, { "docid": "a19dbbef6a9871db3fcfb837896f7880", "score": "0.5813475", "text": "def save(filename, varnames, data):\n\n fyl=open(filename, 'wb')\n for i, name in enumerate(varnames):\n fyl.write(name + '\\n')#.\\n is a linebreak \n\n var=data[i]\n shape=var.shape\n shape=','.join(np.array(shape, dtype=str))\n fyl.write(shape+'\\n')\n\n dtype=str(var.dtype)\n fyl.write(dtype+'\\n')\n\n var_str=var.flatten().tobytes()\n fyl.write(var_str+'\\n\\n')\n\n fyl.close()", "title": "" }, { "docid": "3952660af3cb2f5687e0d574d1151d89", "score": "0.5762895", "text": "def build_c_file(self):\r\n path = ''\r\n\r\n # the file created here is locked while open, hence we can't delete\r\n # similarly, ctags appears to require an extension hence the suffix\r\n with tempfile.NamedTemporaryFile(delete=False, suffix='.c') as temp:\r\n try:\r\n path = temp.name # store name for later use\r\n temp.writelines([\r\n b'#define foo(x,y) x+y\\n'\r\n b'#define foobar 1\\n'\r\n b'\\n'\r\n b'void bar()\\n'\r\n b'{\\n'\r\n b'\\tfoo(10,2);'\r\n b'\\n'\r\n b'#if foobar\\n'\r\n b'\\tfoo(2,3); \\n'\r\n b'}\\n'])\r\n finally:\r\n temp.close()\r\n\r\n return path", "title": "" }, { "docid": "3cbacc54693c85f62262d5f189a159b6", "score": "0.57444036", "text": "def tofile():", "title": "" }, { "docid": "a02478e64293c056af018aab64903eff", "score": "0.568493", "text": "def write_file(self, plumed_file):\n f = open(plumed_file, \"w+\")\n f.write('# vim:ft=plumed\\n')\n f.write(self.string)\n f.write('\\nPRINT STRIDE={} ARG=* FILE={}'.format(self.print_stride,\n self.colvar))\n f.close()", "title": "" }, { "docid": "931f5bebf861b20ddccdbf8706e321b4", "score": "0.56124777", "text": "def create_file(self):\r\n with open(\"consts_obj.txt\", \"w\") as file:\r\n file.write(\"{0} _ {1}\\n\".format(self.name, self.display_name))\r\n for name, value in sorted(self.consts.items()):\r\n file.write(\"{0} = {1}\\n\".format(name, value))", "title": "" }, { "docid": "35057c92258145c80227391b1168905e", "score": "0.5602817", "text": "def f_file_write(fileName, stringVar):\n\tfileTemp =open(fileName,'w')\n\ttext=fileTemp.write(stringVar); fileTemp.close()", "title": "" }, { "docid": "bf87eefe20d6de1b7fc7c48be8a6ae0e", "score": "0.55527115", "text": "def rist_create_variable_file_header(self, fileName, mode):\n\n mode = mode.lower()\n fileMode = {\n 'write': 'w',\n 'append': 'a'\n }\n admin_creds = data_variables.get_variables()\n fileModeHeader = {\n 'write': '#!/usr/bin/env python\\n\\n' + 'admin_credentials =' + str(admin_creds['ADMIN_CREDENTIALS']),\n 'append': '\\n'\n }\n if mode not in 
fileMode:\n print 'Warning: Unsupported file access mode in %s.%s' % (__file__, 'rist_create_variable_file_header')\n with open(fileName, fileMode[mode]) as f:\n f.write(fileModeHeader[mode])", "title": "" }, { "docid": "af54d4794b56ee58143264da59dbe9af", "score": "0.5502014", "text": "def generate_var(output_dir='.'):\n cpp = CppFile(os.path.join(output_dir, 'var.cpp'))\n variables = [CppVariable(name=\"var1\",\n type=\"char*\",\n is_class_member=False,\n is_static=False,\n is_const=True,\n initialization_value='0'),\n CppVariable(name=\"var2\",\n type=\"int\",\n is_class_member=False,\n is_static=True,\n is_const=False,\n initialization_value='0'),\n CppVariable(name=\"var3\",\n type=\"std::string\",\n is_class_member=False,\n is_static=False,\n is_const=False),\n CppVariable(name=\"var4\",\n type=\"int\",\n documentation='// A number',\n is_class_member=False,\n is_static=False,\n is_const=False),\n ]\n\n for var in variables:\n var.render_to_string(cpp)\n\n cpp.close()", "title": "" }, { "docid": "5c1d4b27e76af99080d7bb7082957427", "score": "0.5459893", "text": "def dump_var(outfile, var):\n ofile = open(outfile, 'w')\n ofile.write(str(var))\n ofile.close()", "title": "" }, { "docid": "7a9640644609561039bdd1ae5694ed66", "score": "0.54578185", "text": "def write_key(self, varname):\n ret = ''\n if len(self.variables[varname]) == 0:\n print(\"[ERROR] input variable: '%s' contains no elements\" % varname)\n return\n\n # Assume that the variables are integer and test if such assumption\n # is true\n integer = True\n real = False\n string = False\n compact = True\n\n # Get the general kind of values for the input variable\n for j in self.variables[varname]:\n\n try:\n if not float(j).is_integer():\n # This is the case of non integer values\n integer = False\n real = True\n string = False\n if len(str(float(j))) > 7:\n compact = False\n\n except ValueError:\n # This is the case of '*1' that could not\n # be converted because we dont know the size\n # of the array\n integer = False\n real = False\n string = True\n\n ret += (varname.ljust(15)) + \" = \"\n\n for j in range(len(self.variables[varname])):\n\n if real:\n if compact:\n ret += (\"%g\" % self.variables[varname][j]).rjust(8)\n else:\n ret += (\"%17.10e\" % self.variables[varname][j])\n elif integer:\n ret += (\"%d\" % self.variables[varname][j])\n elif string:\n ret += (\"%s\" % self.variables[varname][j])\n\n # Conditions to jump to a new line\n if ((j + 1) % 3) == 0 and real and j < len(self.variables[varname]) - 1:\n ret += \";\\n\"\n ret += 17 * \" \"\n elif j < len(self.variables[varname]) - 1:\n ret += \" \"\n ret += \";\\n\"\n return ret", "title": "" }, { "docid": "601d6f1e4135aee4d6b7188e44b106fd", "score": "0.54199773", "text": "def create_changeo_db_string(self):\n\n changeo_db_header = \"SEQUENCE_ID\\tV_CALL\\tD_CALL\\tJ_CALL\\tSEQUENCE_VDJ\\tJUNCTION_LENGTH\\tJUNCTION\"\n\n #Add sequence_ID at cell level to include cell name\n changeo_db_string = \"\"\n if self.productive:\n V_genes = self.V_genes\n V_call = \",\".join(str(x) for x in V_genes)\n D_call = \"None\"\n J_genes = self.J_genes\n J_call = \",\".join(str(x) for x in J_genes)\n sequence_vdj = self.dna_seq\n\n # Replace JUNCTION with CDR3 sequence\n junction = self.cdr3_seq\n junction_length = int(len(junction))\n changeo_db_string = \"{}_{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(\n self.contig_name, self.identifier, V_call, D_call, \n J_call, sequence_vdj, junction_length, junction)\n\n return(changeo_db_string)", "title": "" }, { "docid": 
"600554bcb5fa41d71bda2116a3e5c12e", "score": "0.54043365", "text": "def write_file(file_name):\n\n file = open(file_name, 'w')\n file.write(\"ID\\tCategory\\tDayOfWeek\\tMonth\\tHour\\n\")\n\n return file", "title": "" }, { "docid": "0cb336336365f7698c76d9c9a6f1e921", "score": "0.53822714", "text": "def _write(\n self,\n f_var_value: str,\n content: str,\n msg_pre: str = None,\n omit_newline: bool = False,\n ):\n fp = expandpath(f_var_value)\n _LOGGER.info((msg_pre or \"\") + fp)\n with open(fp, \"w\") as f:\n f.write(content)\n if not omit_newline:\n f.write(\"\\n\")", "title": "" }, { "docid": "f6d86305a127a932e7bacdbb7a4716af", "score": "0.53694856", "text": "def WriteDat(inputFile, varNames, vars, varTypes):\n file = open(inputFile, 'w')\n if len(varNames) != len(vars) or len(varNames) != len(varTypes):\n print 'WriteDat() Error, varName, vars and varTypes shall be of the same length'\n return -1\n for i in range (len(varNames)):\n WriteVariable(file, vars[i], varNames[i], varTypes[i])", "title": "" }, { "docid": "2f829d8599269cd27925a91368bc5fb5", "score": "0.53556114", "text": "def put_str(file, tupla):\n if type(tupla) != type((2,)):\n raise 'Need a tuple of variables'\n f = open(file, 'w')\n for i in range(1, len(tupla)):\n if len(tupla[i]) != len(tupla[0]):\n raise 'Variable lists have different lenght'\n for i in range(len(tupla[0])):\n cosas = []\n for j in range(len(tupla)):\n cosas.append(str(tupla[j][i]))\n f.write(''.join(cosas) + '\\n')\n f.close()", "title": "" }, { "docid": "ae8af4377f17afb3eb1291a102f7651f", "score": "0.53536624", "text": "def vars_to_csv(output, name, translation, original, prefix=\"\", keys=[]):\n rows = [[\"name\", \"value\", \"comments\", \"original\"]]\n for var in keys:\n pyvar = prefix + var\n rows.append([\n var, translation.get(pyvar, \"\"), \"\", original.get(pyvar, \"\")\n ])\n del translation[pyvar]\n\n rows_to_file(output, name, rows)", "title": "" }, { "docid": "4636c4a2661807bb8958ed61d84c36ab", "score": "0.53133065", "text": "def writeLUT(filename, cols):\n with open(filename, 'w') as f:\n for c in cols:\n f.write(('%d' % c[0]).rjust(4) + ('%d' % c[1]).rjust(5) + ('%d' % c[2]).rjust(5));\n f.write('\\n');\n f.close();", "title": "" }, { "docid": "a898541260fea7e613cc283ca8ddb93a", "score": "0.52946943", "text": "def variable_str(var):\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n else:\n lst = numpy.array2string(arr, None, None, None, ' ', prefix + '(')\n\n return '%s(%s)' % (prefix, lst)", "title": "" }, { "docid": "3a1871f8ca5308ce25f8f91f66046426", "score": "0.528106", "text": "def write_new_vcf(vcf, newheader,LVAR):\n with open(vcf+\"_addDist.vcf\", \"w\") as f:\n f.write(str(newheader))\n for record in LVAR:\n f.write(str(record))", "title": "" }, { "docid": "631a6b0b7eda9dc81dddfbc7f7c375af", "score": "0.52589667", "text": "def get_print_string(self, fname):\n var_inputs = \"\"\n if self.domain_dim == 1: \n var_inputs = ('%s')\n elif self.domain_dim == 2:\n var_inputs = ('%s', '%s')\n else:\n raise IndexError('Unsupported domain dimension (should be 1 or 2).')\n return \"\"\n ##\n return fname % var_inputs", "title": "" }, { "docid": "d73db4212265a2f0ddfbafca72d86a20", "score": "0.5249638", "text": "def generate_body_string(self):\n out_str = ''\n for var in self:\n val = self[var]\n val = to_string(val)\n\n out_str += '\\n{} = {}'.format(var, val)\n return 
out_str", "title": "" }, { "docid": "803c7e6de2a4eb3a1c6e045531b80851", "score": "0.5246198", "text": "def out_file(s):\n return s", "title": "" }, { "docid": "08a0fce5d4ca259cf769dd7457093214", "score": "0.5240603", "text": "def makestr(self):\r\n xs = \"\"\r\n ys = \"\"\r\n zs = \"\"\r\n id = \"\"\r\n emo = \"\"\r\n if self.emoname != \"normal\": emo = \"e=\"+self.emoname+\" \"\r\n nt = \"\"\r\n if self.pos[0]: xs = \"x=\"+str(self.pos[0])+\" \"\r\n if self.pos[1]: ys = \"y=\"+str(self.pos[1])+\" \"\r\n if type(self.z)==type(\"\"):\r\n if zlayers.index(self.__class__.__name__)!=zlayers.index(self.z.remove(\"_layer_\")):\r\n zs = \"z=\"+self.z\r\n else:\r\n if self.z != zlayers.index(self.__class__.__name__):\r\n zs = \"z=_layer_\"+zlayers[self.z][0]\r\n if not getattr(self,\"id_name\",\"$$\").startswith(\"$$\"): id = \"name=\"+self.id_name+\" \"\r\n if getattr(self,\"nametag\",self.charname).strip(\"\\n\")!=self.charname: nt = \"nametag=\"+self.nametag+\" \"\r\n hide = {True: \" hide\", False: \"\"}[bool(self.hide)]\r\n stack = {True: \" stack\", False: \"\"}[bool(getattr(self,\"was_stacked\",False))]\r\n return (\"char \"+self.charname+\" \"+xs+ys+zs+id+emo+nt+hide+stack+getattr(self,\"extrastr\",\"\")).strip()", "title": "" }, { "docid": "cc82edf3ed0ab3c03cf0e7e3e75e05df", "score": "0.522156", "text": "def _write_data(self, fileName, variableName, data):\n with open(fileName, 'a+') as f:\n\n f.write(variableName + ' = [\\n')\n f.writelines(data)\n f.write(']\\n')\n f.close()", "title": "" }, { "docid": "0fa3bba2fca0623b36e9768520f6f142", "score": "0.52152765", "text": "def write_structure_to_file(filename, variable_name, dictionary):\n\tf = open(filename, 'w')\n\tf.write('{} = {}'.format(variable_name, str(dictionary)))\n\tf.close()", "title": "" }, { "docid": "eb4f3cd216b5ec177de0958866f17ed4", "score": "0.52141434", "text": "def writefile():", "title": "" }, { "docid": "8810da260fc8f8928d3a6140e346a315", "score": "0.52103317", "text": "def write(str):\n ofile.write(str)", "title": "" }, { "docid": "ae6f726ca1db377a4c5822cd05b3b1bb", "score": "0.52045465", "text": "def loadable_string(self):\n return \"{edges}\\n{vars}\\n{domains}\\n{tables}\\n{title}\".format(\n edges = self._edges,\n vars = self._variables,\n domains = self._variable_domains,\n tables = self._tables,\n title = self._title,\n )", "title": "" }, { "docid": "22668a7dff8241a4824177ca00b3b2e9", "score": "0.5204382", "text": "def save_web_variables(self,FileName=None):\n if not FileName:\n FileName = 'reduce_vars.py'\n \n f=open(FileName,'w')\n f.write(\"standard_vars = {\\n\")\n str_wrapper = ' '\n for key,val in self._wvs.standard_vars.iteritems():\n if isinstance(val,str):\n row = \"{0}\\'{1}\\':\\'{2}\\'\".format(str_wrapper,key,val)\n else:\n row = \"{0}\\'{1}\\':{2}\".format(str_wrapper,key,val)\n f.write(row)\n str_wrapper=',\\n '\n f.write(\"\\n}\\nadvanced_vars={\\n\")\n\n str_wrapper=' '\n for key,val in self._wvs.advanced_vars.iteritems():\n if isinstance(val,str):\n row = \"{0}\\'{1}\\':\\'{2}\\'\".format(str_wrapper,key,val)\n else:\n row = \"{0}\\'{1}\\':{2}\".format(str_wrapper,key,val)\n f.write(row)\n str_wrapper=',\\n '\n f.write(\"\\n}\\n\")\n f.close()", "title": "" }, { "docid": "cf8ac767b95dd224b11832bdb4b4c2aa", "score": "0.52009493", "text": "def var_to_string(self, var: str, options: Optional[str] = None) -> str:\n\n if var == \"box\":\n return \"[{sq.x}, {sq.y}]\".format(sq=Point(self.box.x + 1, self.box.y + 1))\n elif var == \"location\":\n if options == \"r1c1\":\n return 
\"r{}c{}\".format(self.location.y + 1, self.location.x + 1)\n elif options == \"R1C1\":\n return \"R{}C{}\".format(self.location.y + 1, self.location.x + 1)\n else:\n return \"{}{}\".format(chr(ord(\"A\") + self.location.y), self.location.x + 1)\n # return \"{}{}\".format(chr(ord(\"A\") + self.location.x), self.location.y + 1)\n elif var == \"candidates\":\n return \"{{{}}}\".format(\", \".join(CELL_VALUE_MAP[v] for v in self.candidates))\n elif var == \"value\":\n return str(var) if self.value is not None or options is None else options", "title": "" }, { "docid": "8466f7c9fb67b2d53705c679e0c7e2b4", "score": "0.5200429", "text": "def writetxt(txt_file, X, Y, Z):\r\n X = X.astype(str)\r\n Y = Y.astype(str)\r\n Z = Z.astype(str)\r\n xyz = [X, Y, Z]\r\n with open(txt_file, 'w') as file:\r\n for j in range(3):\r\n for i in range(len(Z)):\r\n row = ' '.join(xyz[j][i]) + '\\n'\r\n file.write(row)\r\n return", "title": "" }, { "docid": "6f439cb468b0ceb2dfd3e04883935c05", "score": "0.5198459", "text": "def __str__(self):\n\t\tret_str = \"\"\n\t\tfor tup in self.__col_tups[1:]:\n\t\t\tret_str += str(tup[0]) + '=' + str(tup[1]) + ' | '\n\t\treturn ret_str", "title": "" }, { "docid": "cbd6ddc14d2541181a0ee665a7212784", "score": "0.5194801", "text": "def __str__(self):\n tg = TableGroup.fromvalue(self.default_metadata())\n for col in self.column_labels:\n if col != self.GRAPHEME_COL:\n tg.tables[0].tableSchema.columns.append(\n Column.fromvalue({\"name\": col, \"null\": self.NULL}))\n\n return tg.tables[0].write(self.iteritems(), fname=None).decode('utf8').strip()", "title": "" }, { "docid": "a1d52f3c0dc22ea977ff919877bcf451", "score": "0.5192991", "text": "def build_python_file(self):\r\n path = ''\r\n\r\n # the file created here is locked while open, hence we can't delete\r\n # similarly, ctags appears to require an extension hence the suffix\r\n with tempfile.NamedTemporaryFile(delete=False, suffix='.py') as temp:\r\n try:\r\n path = temp.name # store name for later use\r\n temp.writelines([\r\n b'def my_definition():\\n',\r\n b'\\toutput = \"Hello, world!\"\\n',\r\n b'\\tprint(output)\\n'])\r\n finally:\r\n temp.close()\r\n\r\n return path", "title": "" }, { "docid": "b6cde358b6e2a86664625bee10806417", "score": "0.5189084", "text": "def dump_to_python(self, data, var_name='data'):\n if len(data) <= HEADER:\n return None\n data = bytes(data)\n out_string = f'{var_name} = ['\n for counter, byte in enumerate(data[HEADER:]):\n mod_check = (counter % 8 == 0) and counter > 0\n if mod_check:\n out_string += '\\n '\n out_string += f'0x{byte:02x},'\n out_string = out_string[:-1]\n out_string += ']\\n'\n return out_string", "title": "" }, { "docid": "b8ab7d109ab16d2868e82dc40e95620e", "score": "0.51879865", "text": "def makesetup(controlfilepath, latlonfile, var, constitstr, \n ocegeo, outtype, correctminors, outpath):\n \n # Generate the setup\n setup = (\n \"{}\\n\".format(controlfilepath) +\n \"{}\\n\".format(latlonfile) + \n \"{}\\n\".format(var) + \n \"{}\\n\".format(constitstr) + \n \"{}\\n\".format(ocegeo) +\n \"{}\\n\".format(outtype) +\n \"{}\\n\".format(correctminors) +\n \"{}\\n\".format(outpath)\n )\n\n return setup", "title": "" }, { "docid": "16246f525d05521e34e8dbc826b90f13", "score": "0.5186358", "text": "def tostring(self, linesep='\\n', variable_header=True, sample_header=True):\n\n def dataitem(row, col):\n val = \"X\" if self.missing[row,col] else str(self.observations[row,col])\n val += \"!\" if self.interventions[row,col] else ''\n return val\n \n def variable(v):\n name = v.name\n\n 
if isinstance(v, ClassVariable):\n return \"%s,class(%s)\" % (name, ','.join(v.labels)) \n elif isinstance(v, DiscreteVariable):\n return \"%s,discrete(%d)\" % (name, v.arity)\n elif isinstance(v, ContinuousVariable):\n return \"%s,continuous\" % name\n else:\n return v.name\n\n # ---------------------------------------------------------------------\n\n # python strings are immutable, so string concatenation is expensive!\n # preferred way is to make list of lines, then use one join.\n lines = []\n\n # add variable annotations\n if sample_header:\n lines.append(\"\\t\".join([variable(v) for v in self.variables]))\n \n # format data\n nrows,ncols = self.shape\n d = [[dataitem(r,c) for c in xrange(ncols)] for r in xrange(nrows)]\n \n # add sample names if we have them\n if sample_header and hasattr(self.samples[0], 'name'):\n d = [[s.name] + row for row,s in zip(d,self.samples)]\n\n # add data to lines\n lines.extend([\"\\t\".join(row) for row in d])\n \n return linesep.join(lines)", "title": "" }, { "docid": "c0b10244bbe7b5526bdae220f030b8c4", "score": "0.5172795", "text": "def dswrite(data, recordLen, colspecs):\n with open(\"/tmp/file.txt\", \"w\") as f:\n for l in chunkstring(data, recordLen):\n print(l, file=f)", "title": "" }, { "docid": "d08f297f4973fea832d96f8d26123243", "score": "0.51709336", "text": "def create_out_string(self, ctx, out_string_encoding=None):", "title": "" }, { "docid": "0ca930b0d4ffbc4fada343ec69155893", "score": "0.51593894", "text": "def col_def_string(self):\n col_name_string_list = [x + ' TEXT' for x in self.column_names_tuple]\n joined = \", \".join(col_name_string_list)\n return joined", "title": "" }, { "docid": "1e8f72e2bf61382a0dda4e70b6b2fb00", "score": "0.5157736", "text": "def write_def(self, outfile, indent, wdict, allocatable=False,\n dummy=False, add_intent=None, extra_space=0):\n stdname = self.get_prop_value('standard_name')\n if stdname in CCPP_CONSTANT_VARS:\n # There is no declaration line for a constant\n return\n # end if\n if self.is_ddt():\n vtype = 'type'\n else:\n vtype = self.get_prop_value('type')\n # end if\n kind = self.get_prop_value('kind')\n name = self.get_prop_value('local_name')\n aref = self.array_ref(local_name=name)\n if aref is not None:\n name = aref.group(1)\n # end if\n dims = self.get_dimensions()\n if dims:\n if allocatable or dummy:\n dimstr = '(:' + ',:'*(len(dims) - 1) + ')'\n else:\n dimstr = self.call_dimstring(var_dicts=[wdict])\n else:\n dimstr = ''\n # end if\n protected = self.get_prop_value('protected')\n if dummy:\n intent = self.get_prop_value('intent')\n else:\n intent = None\n # end if\n if protected and allocatable:\n errmsg = 'Cannot create allocatable variable from protected, {}'\n raise CCPPError(errmsg.format(name))\n # end if\n if dummy and (intent is None):\n if add_intent is not None:\n intent = add_intent\n else:\n errmsg = \"<add_intent> is missing for dummy argument, {}\"\n raise CCPPError(errmsg.format(name))\n # end if\n # end if\n if protected and dummy:\n intent_str = 'intent(in) '\n elif allocatable:\n if dimstr:\n intent_str = 'allocatable '\n else:\n intent_str = ' '*13\n # end if\n elif intent is not None:\n alloval = self.get_prop_value('allocatable')\n if (intent.lower()[-3:] == 'out') and alloval:\n intent_str = 'allocatable, intent({})'.format(intent)\n else:\n intent_str = 'intent({}){}'.format(intent,\n ' '*(5 - len(intent)))\n # end if\n elif not dummy:\n intent_str = ''\n else:\n intent_str = ' '*13\n # end if\n if intent_str.strip():\n comma = ','\n else:\n comma = ' '\n # end 
if\n if self.is_ddt():\n dstr = \"type({kind}){cspc}{intent} :: {name}{dims} ! {sname}\"\n cspc = comma + ' '*(extra_space + 13 - len(kind))\n else:\n if kind:\n dstr = \"{type}({kind}){cspc}{intent} :: {name}{dims} ! {sname}\"\n cspc = comma + ' '*(extra_space + 17 - len(vtype) - len(kind))\n else:\n dstr = \"{type}{cspc}{intent} :: {name}{dims} ! {sname}\"\n cspc = comma + ' '*(extra_space + 19 - len(vtype))\n # end if\n # end if\n outfile.write(dstr.format(type=vtype, kind=kind, intent=intent_str,\n name=name, dims=dimstr, cspc=cspc,\n sname=stdname), indent)", "title": "" }, { "docid": "ef1dcb41f2399f7cf46c16ac525abe68", "score": "0.51559347", "text": "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)", "title": "" }, { "docid": "f20475a73d2acfc16bc029a757f8103a", "score": "0.5153577", "text": "def write_gmt_format(myVelfield, outfile):\n print(\"writing vector output file %s \" % outfile);\n ofile = open(outfile, 'w');\n ofile.write(\"lon(deg) lat(deg) VE(mm) VN(mm) SE(mm) SN(mm) Corr\\n\");\n for item in myVelfield:\n ofile.write(\"%f %f %f %f %f %f 0.0\\n\" % (item.elon, item.nlat, item.e, item.n, item.se, item.sn));\n ofile.close();\n return;", "title": "" }, { "docid": "c445dbad1e9594840defe86412016c27", "score": "0.51469374", "text": "def _write(self):\n\n grid_name = self.f_name.split(\"/\")[-1].split(\".\")[0]\n grid_input = \"&HEAD CHID='{0}', TITLE='{0}' /\\n\\n\\n\".format(grid_name)\n\n grid_input += self.mesh\n\n grid_input += \"\\n&TIME T_END=0.1 /\\n\"\n grid_input += \"\\n&DUMP WRITE_XYZ=.TRUE. 
/\\n\"\n grid_input += \"\\n&TAIL /\"\n\n with open(self.f_name, \"w\") as cap_grid_file:\n cap_grid_file.write(grid_input)", "title": "" }, { "docid": "0e3f2bcc5b7d9f2a1fc0af61d488d1a6", "score": "0.5140067", "text": "def writeVar(self, varName):\n d = self.data(varName)\n a, aname, tmp = self.abscissa(varName)\n print('# %s | %s' % (aname, varName))\n for i in range(d.shape[0]):\n print('%f %g' % (a[i], d[i]))", "title": "" }, { "docid": "e834002e16c8c6bb390125cc33425cb6", "score": "0.5128959", "text": "def writeVegFile(s, f, bugFix):\n f.write(' '+str(s.name)+' '+str(s['VegParm'])+'\\n')", "title": "" }, { "docid": "57b89c3c7c3342bec0795a59d4726e74", "score": "0.51198864", "text": "def write(file, variables, header='', format='', append='no'):\n if type(variables) != type((2,)):\n raise 'Need a tuple of variables'\n if format == '':\n format = '%s ' * len(variables)\n if append == 'yes':\n f = open(file, 'a')\n else:\n f = open(file, 'w')\n if header != \"\":\n if header[0] != '#':\n header = '#' + header\n if header[-1] != '\\n':\n header = header + '\\n'\n f.write(header)\n for i in range(len(variables[0])):\n cosas = []\n for j in range(len(variables)):\n cosas.append(variables[j][i])\n line = format % tuple(cosas)\n f.write(line + '\\n')\n f.close()", "title": "" }, { "docid": "5d427f7bd5dfb4fd37fc2b90b0624a59", "score": "0.511762", "text": "def test_write_filename_expansion(self):\n f = cfdm.example_field(0)\n filename = os.path.join(\"$PWD\", os.path.basename(tmpfile))\n cfdm.write(f, filename)", "title": "" }, { "docid": "5acc221be45e2cd95b017e403f6fc0f1", "score": "0.5115013", "text": "def write_string(value, fileobj, byteorder=\"big\"):\r\n data = value.encode(\"utf-8\")\r\n write_numeric(USHORT, len(data), fileobj, byteorder)\r\n fileobj.write(data)", "title": "" }, { "docid": "a687b3fca5e742c3fc9ef1874f669eae", "score": "0.51086044", "text": "def __str__(self):\n\n string=\"// -*- C++ -*-\\n// File generated by PyFoam - sorry for the ugliness\\n\\n\"\n\n generator=FoamFileGenerator(self.content,\n header=self.header if not self.noHeader else None,\n longListThreshold=self.longListOutputThreshold)\n string+=generator.makeString(firstLevel=True)\n\n if len(self.lastDecoration)>0:\n string+=\"\\n\\n\"+self.lastDecoration\n\n return string", "title": "" }, { "docid": "8623f88b7e85b9ee7459ca0f059f5756", "score": "0.51", "text": "def write(string):", "title": "" }, { "docid": "d209bc0b63e0508d63b364a8268ce40b", "score": "0.50974613", "text": "def dumps(self):\n # TODO : this (may) effectively double the amount of\n # data held in memory. 
It'd be nice to stream the\n # serialized string.\n fobj = StringIO()\n nc = netcdf.netcdf_file(fobj, mode='w')\n # copy the dimensions\n for d, l in self.dimensions.iteritems():\n nc.createDimension(d, l)\n # copy the variables\n for vn, v in self.variables.iteritems():\n nc.createVariable(vn, v.dtype, v.dimensions)\n nc.variables[vn][:] = v.data[:]\n for k, a in v.attributes.iteritems():\n setattr(nc.variables[vn], k, a)\n # copy the attributes\n for k, a in self.attributes.iteritems():\n setattr(nc, k, a)\n # flush to the StringIO object\n nc.flush()\n return fobj.getvalue()", "title": "" }, { "docid": "7acf38e7dde7e7ada54912b069fbb711", "score": "0.5093534", "text": "def __str__(self):\n attrs = vars(self)\n output = ''\n output += 'Variables in partition bucket\\n'\n output += '---\\n'\n for key in attrs.keys():\n output += str(key) + ' : '\n output += str(attrs[key])+ '\\n'\n output += '---\\n'\n return output", "title": "" }, { "docid": "c760a274d322eaefee048c3029f71c06", "score": "0.5089031", "text": "def as_c_macros (self) :\n return \"\\n\".join (self._c_macros ())", "title": "" }, { "docid": "84100c6a6d77bdb2b8fa2afb5e837945", "score": "0.5082681", "text": "def create_out_string(self, ctx, charset=None):\n\n if charset is None:\n charset = 'UTF-8'\n\n ctx.out_string = self.__generate_out_string(ctx, charset)", "title": "" }, { "docid": "a8a19299999cb56bb7ebb9f83375a532", "score": "0.50822943", "text": "def writeSTL(facets, file_name, ascii=False):\n\n f = open(file_name, 'wb')\n if ascii:\n lines = _build_ascii_stl(facets)\n lines_ = \"\\n\".join(lines).encode(\"UTF-8\")\n f.write(lines_)\n else:\n data = _build_binary_stl(facets)\n data = b\"\".join(data)\n f.write(data)\n\n f.close()", "title": "" }, { "docid": "7a0e4591c4260c617121ae7630805ec0", "score": "0.5079906", "text": "def __str__(self):\n\n if (self.initialized == 0):\n myStr = \"ccData1d object not yet initialized\"\n return myStr\n\n myStr = \"cc data: nx = \" + `self.grid.nx` + \\\n \", ng = \" + `self.grid.ng` + \"\\n\" + \\\n \" nvars = \" + `self.nvar` + \"\\n\" + \\\n \" variables: \\n\" \n \n ilo = self.grid.ilo\n ihi = self.grid.ihi\n\n n = 0\n while (n < self.nvar):\n myStr += \"%16s: min: %15.10f max: %15.10f\\n\" % \\\n (self.vars[n],\n numpy.min(self.data[n,ilo:ihi+1]), \n numpy.max(self.data[n,ilo:ihi+1]) )\n myStr += \"%16s BCs: -x: %-12s +x: %-12s \\n\" %\\\n (\" \" , self.BCs[self.vars[n]].xlb, \n self.BCs[self.vars[n]].xrb)\n n += 1\n \n return myStr", "title": "" }, { "docid": "f8d1dfb489457db571c16fc723e70e79", "score": "0.50758106", "text": "def makestr(self):\r\n if not getattr(self,\"name\",None): return \"\"\r\n xs = \"\"\r\n ys = \"\"\r\n zs = \"\"\r\n id = \"\"\r\n if self.pos[0]: xs = \"x=\"+str(self.pos[0])+\" \"\r\n if self.pos[1]: ys = \"y=\"+str(self.pos[1])+\" \"\r\n #Make this better (maybe only allow layer name for z?)\r\n if type(self.z)==type(\"\"):\r\n if zlayers.index(self.__class__.__name__)!=zlayers.index(self.z.remove(\"_layer_\")):\r\n zs = \"z=\"+self.z\r\n else:\r\n if self.z != zlayers.index(self.__class__.__name__):\r\n zs = \"z=_layer_\"+zlayers[self.z][0]\r\n if not getattr(self,\"id_name\",\"$$\").startswith(\"$$\"): id = \"name=\"+self.id_name+\" \"\r\n try:\r\n comm = {\"bg\":\"bg\",\"fg\":\"fg\",\"evidence\":\"ev\"}[self.__class__.__name__]\r\n except KeyError:\r\n return \"\"\r\n return (comm+\" \"+self.name.split(\"/\",1)[1]+\" \"+xs+ys+zs+id).strip()", "title": "" }, { "docid": "1a6f0c4a21f4ea99f9caab28a2a16d90", "score": "0.5075686", "text": "def 
_write_constants(out_fname, const_map):\n if not const_map:\n return\n const_str = '''\n\"\"\" Auto-generated list of constants \"\"\"\n'''\n with open(out_fname, 'a') as fd:\n fd.write(const_str)\n for name, val in const_map.items():\n fd.write(\"%s = %s\\n\" % (name, val))\n fd.write('\\n\\n')", "title": "" }, { "docid": "39136f348161472f9ebb37de8497e093", "score": "0.5073393", "text": "def out(filename, s):\n\tf = open(filename, 'w')\n\tf.write(s)\n\tf.close()", "title": "" }, { "docid": "759c0b0ab74c2a8003b271dc36a3fc14", "score": "0.50645113", "text": "def outToFile(self, name, value):\n\n '''\n outToFile(name, value)\n\n Argument:\n name - parameter name\n value - parameter value\n\n Retruns:\n formatted name-value pair\n '''\n\n return '%s = %s' % ( name.replace(\"_\", \" \").capitalize(),\n str(value).replace(\"_\", \" \").capitalize() )", "title": "" }, { "docid": "3935db5b01bcd34d89746b5f4640d288", "score": "0.5062953", "text": "def write(self, str):", "title": "" }, { "docid": "c0f01fa4a233e91383b2fd2fe87ba184", "score": "0.505736", "text": "def write_csdata_to_file(data_dict, filename: str, start_csdata_id):\n csdata_id = start_csdata_id\n write_f = open(filename, \"x\")\n write_f.write(\"\\t\".join(['csdata_id', 'e_energy', 'sigma']) + \"\\n\")\n for i in range(len(data_dict['e'])):\n write_f.write(str(csdata_id) + \"\\t\" + str(data_dict['e'][i]) + '\\t' + str(data_dict['sigma'][i]) + '\\n')\n csdata_id = csdata_id + 1\n write_f.close()\n return csdata_id", "title": "" }, { "docid": "801c3a461df67530a14986f501f53729", "score": "0.50535977", "text": "def var_str(name, shape):\n\n size = prod(shape)\n ind = (indices(shape) + 1).reshape(-1, size)\n names = ['[' + ','.join(map(str, i)) + ']' for i in zip(*ind)]\n # if len(name)>12:\n # name = '\\n'.join(name.split('_'))\n # name += '\\n'\n names[0] = '%s %s' % (name, names[0])\n return names", "title": "" }, { "docid": "344b2c2872c245d3eb555585268358f9", "score": "0.5047202", "text": "def build_file_generation_message(env, filename):\n return (\n c(PAINT_PURPLE, '---------------------------') +\n LINE_BREAK +\n c(PAINT_CYAN, 'Environment: {} '.format(env)) +\n LINE_BREAK +\n c(PAINT_YELLOW, '{} '.format(filename)) +\n c(PAINT_GREEN, 'was generated successfully')\n )", "title": "" }, { "docid": "3367669579ae2d54bf88d12010b8aa20", "score": "0.50457686", "text": "def writev(fname,x,Ys,form=\"%+.6g\",sep=\" \",header=None,headerv=None):\n if (type(x) != np.ndarray): x=np.array(x)\n if (type(Ys) != np.ndarray): Ys=np.array(Ys)\n if (len(Ys.shape)==1):\n Ys=Ys.reshape(Ys.shape[0],1)\n nx = len(x)\n if (Ys.shape[0] == nx):\n ny=Ys.shape[1]\n elif (Ys.shape[1] == nx):\n ny=Ys.shape[0]\n Ys=np.transpose(Ys)\n else:\n raise MCError(\"dimension of x (%d) does not match any of the dimensions of Ys (%d,%d)\" % (nx,Ys.shape[0],Ys.shape[1]))\n f=codecs.open(fname,encoding='utf-8',mode=\"w\")\n if (header is not None):\n f.write(header.strip()+\"\\n\")\n if (headerv is not None):\n f.write(\"%d\" % (ny+1))\n for i in range(ny):\n f.write(sep)\n f.write(form % headerv[i])\n f.write(\"\\n\")\n for i in range(nx):\n f.write(form % x[i])\n f.write(sep)\n for j in range(ny-1):\n f.write(form % Ys[i,j])\n f.write(sep)\n f.write(form % Ys[i,-1])\n f.write(\"\\n\")\n f.close()", "title": "" }, { "docid": "224c9ce8345d5f3c2b4af2b776358a19", "score": "0.50368786", "text": "def create_filename(self, fields):\n\n self.filename = f'{self.db_obj.course_id}_{self.db_obj.batch}_asOf_{max(fields[3:])}'\n if self.type == 0:\n self.filename += '(Defaulter)'", 
"title": "" }, { "docid": "181268d98ff39899f0246b09cc8f745d", "score": "0.50362885", "text": "def construct_filename(self):\n\n filename = \"EVal\"\n if self.F:\n filename += '_F'\n elif self.H:\n filename = filename + '_H'\n\n # This can be changed specific to the test\n filename += self.param_txt()\n\n # This should be removed when all eigenvalues files contain N\n if self.N != None:\n filename += \"_N_\" + str(self.N)\n\n # Checking if the kernel is added to data filname\n if self.K != \"1\":\n filename += \"_K_\" + self.K\n\n if self.V_MIN_MAX:\n filename += \"_on_\" + str(self.V_MIN) + \"-\" + str(self.V_MAX)\n \n if self.PERIODIC:\n filename += self.periodic()\n\n self.filename = filename", "title": "" }, { "docid": "04df5e0e330ab019f9423c5be7899a6f", "score": "0.5028863", "text": "def make_h5_col_file(dat, colname):\n filename = 'thm1eng/msid/' + colname + '.h5'\n if os.path.exists(filename):\n os.unlink(filename)\n \n filters = tables.Filters(complevel=5, complib='zlib')\n h5 = tables.openFile(filename, mode='w', filters=filters)\n \n col = dat[colname]\n h5shape = (0,) + col.shape[1:]\n h5colname = get_h5_colname(colname)\n h5type = tables.Atom.from_dtype(col.dtype)\n h5.createEArray(h5.root, h5colname, h5type, h5shape, title=colname,\n expectedrows=86400*30)\n print 'Made', colname\n h5.close()", "title": "" }, { "docid": "2b72d39722791b49189a81b3c27bdf4e", "score": "0.50258154", "text": "def write_xyz(self, to_file=True):\n\n xyz_string = str(len(self.atomic_numbers)) + '\\n'\n xyz_string += str(self.label) + '\\n'\n\n for symbol, pos in zip(self.atomic_symbols, self.structure):\n xyz_string += '{} {:10.5f} {:10.5f} {:10.5f}\\n'.format(symbol, *pos)\n\n if to_file:\n with open(self.label + '.xyz', 'w') as xyz:\n xyz.write(xyz_string)\n\n else:\n return xyz_string", "title": "" }, { "docid": "5394a76de5617ee7c2c28deba822fe0a", "score": "0.50178796", "text": "def construct_output_line(odict):\n outline = ''\n for vnum in range(1, len(odict) + 1):\n outline += OVAR_FMT[vnum].format(odict[vnum])\n outline += '\\n'\n return outline", "title": "" }, { "docid": "78cdc3ba0b91c030eacf5bd66d347435", "score": "0.50037664", "text": "def build_python_file__extended(self):\r\n path = ''\r\n\r\n # the file created here is locked while open, hence we can't delete\r\n # similarly, ctags appears to require an extension hence the suffix\r\n with tempfile.NamedTemporaryFile(delete=False, suffix='.py') as temp:\r\n try:\r\n path = temp.name # store name for later use\r\n temp.writelines([\r\n b'import os\\n',\r\n b'\\n',\r\n b'COLOR_RED = \"\\\\c800080FF;\"\\t#red\\n',\r\n b'\\n',\r\n b'def my_function(first_name):\\n',\r\n b'\\tprint(\"Hello {0}\".format(first_name))\\n',\r\n b'\\n',\r\n b'class MyClass(object):\\n',\r\n b'\\tlast_name = None\\n',\r\n b'\\taddress = None\\t# comment preceded by a tab\\n',\r\n b'\\n',\r\n b'\\tdef my_method(self, last_name):\\n',\r\n b'\\t\\tself.last_name = last_name\\n',\r\n b'\\t\\tprint(\"Hello again, {0}\".format(self.last_name))\\n'])\r\n finally:\r\n temp.close()\r\n\r\n return path", "title": "" }, { "docid": "401b11ed370251e939d6610d4efc661b", "score": "0.5002994", "text": "def to_zinc(self, path: Optional[PathLike] = None) -> Optional[str]:\n if path is not None:\n with open(path, \"w\", encoding=\"utf-8\") as f:\n f.write(self._grid_info_str())\n f.write(\"\\n\")\n f.write(self._column_info_str())\n f.write(\"\\n\")\n self._zinc_format_data().to_csv(f, mode=\"a\", header=False)\n return None\n else:\n return \"\\n\".join([\n self._grid_info_str(),\n 
self._column_info_str(),\n self._zinc_format_data().to_csv(header=False)\n ])", "title": "" }, { "docid": "806dd6d16961b6e2700d5393561450f0", "score": "0.50015694", "text": "def script_var_str(varname, v, prefix = \"controller.\"):\n v = \"'\" + v.replace(\"'\", \"\\\\'\").replace(\"\\n\", \" \") + \"'\"\n return script_var(varname, v, prefix)", "title": "" }, { "docid": "5540dc713f17f2c793d3359385dd724f", "score": "0.49986595", "text": "def write_text(self, filename='junk.cat', columns=None, select=None, comment='#'):\n if select is None:\n select = np.ones(self.N) > 0\n \n format_codes = {'int64':'%d','float64':'%.5e','>i8':'%d', '>f8':'%.5e'}\n \n data = []\n formats = []\n header = comment\n if columns is None:\n columns = self.columns\n \n for column in columns:\n header = '%s %s' %(header, column)\n data.append(self.__getitem__(column)[select])\n dtype = str(data[-1].dtype)\n if (column == 'ra') | (column == 'dec'):\n formats.append('%.6f')\n else:\n formats.append(format_codes[dtype])\n \n fp = open(filename,'w')\n fp.write(header+'\\n')\n np.savetxt(fp, np.array(data).T, fmt=tuple(formats))\n fp.close()", "title": "" }, { "docid": "8db3cdbf4c4e78051f59167792b07b90", "score": "0.4997424", "text": "def dump_to_file(self,file):\n words = self._origin_filename.split(\"/\")\n file.write(words[-1] +\"\\n\")\n file.write(\"\\n\")\n file.write(\"VEHICLE\\n\")\n file.write(\"NUMBER \\t CAPACITY\\n\")\n file.write(str(self._number_vehicles) + \"\\t\" + str(self.capacity_cst) + \"\\n\")\n file.write(\"\\n\")", "title": "" }, { "docid": "e76cad6c71b59f8d97f6909ae27f37dc", "score": "0.49957466", "text": "def to_bg_string(self):\n out_str = ''\n out_str += self.get_name_str()\n out_str += self.get_length_str()\n out_str += self.get_sequence_str()\n\n out_str += self.get_define_str()\n out_str += self.get_connect_str()\n\n return out_str", "title": "" }, { "docid": "329b6f56d03b6fc2c2bcd0c143b3af62", "score": "0.4989613", "text": "def generate_file(filename, lines, columns):\r\n\r\n with open(filename, 'w+') as outfile:\r\n for _ in range(lines):\r\n line = random_string(columns)\r\n outfile.write(line + '\\n')\r\n outfile.close()", "title": "" }, { "docid": "c96279da5224024d6b15e3774ada1dfe", "score": "0.49866048", "text": "def makeFileStr(self):\n for URL in self._pypeObjects.keys():\n URLParseResult = urlparse(URL)\n if URLParseResult.scheme != \"task\": continue\n taskObj = self._pypeObjects[URL]\n if not hasattr(taskObj, \"script\"):\n raise TaskTypeError(\"can not convert non shell script based workflow to a makefile\") \n makeStr = StringIO()\n for URL in self._pypeObjects.keys():\n URLParseResult = urlparse(URL)\n if URLParseResult.scheme != \"task\": continue\n taskObj = self._pypeObjects[URL]\n inputFiles = taskObj.inputDataObjs\n outputFiles = taskObj.outputDataObjs\n #for oStr in [o.localFileName for o in outputFiles.values()]:\n if 1:\n oStr = \" \".join( [o.localFileName for o in outputFiles.values()])\n\n iStr = \" \".join([i.localFileName for i in inputFiles.values()])\n makeStr.write( \"%s:%s\\n\" % ( oStr, iStr ) )\n makeStr.write( \"\\t%s\\n\\n\" % taskObj.script )\n makeStr.write(\"all: %s\" % \" \".join([o.localFileName for o in outputFiles.values()]) )\n return makeStr.getvalue()", "title": "" }, { "docid": "530b5d1c2330f781e582f9ec3e79e23e", "score": "0.49834597", "text": "def save_details_file(object):\n\n file_return = \"\"\n\n file_return += \"DLS Machine Interactor Base\\n\"\n file_return += \"===========================\\n\\n\"\n\n file_return += \"Parameter 
variables:\\n\"\n file_return += \"-------------------\\n\"\n for i in object.param_vars:\n file_return += \"PV name: {0}\\n\".format(i.pv)\n file_return += \"Delay: {0} s\\n\\n\".format(i.delay)\n\n file_return += \"Measurement variables:\\n\"\n file_return += \"---------------------\\n\"\n\n collated_measurement_vars = []\n if hasattr(object, \"measurement_vars_noinj\"):\n collated_measurement_vars = object.measurement_vars_noinj + object.measurement_vars_inj\n else:\n collated_measurement_vars = object.measurement_vars\n\n for i in collated_measurement_vars:\n file_return += \"PV name: {0}\\n\".format(i.pv)\n file_return += \"Minimum counts: {0}\\n\".format(i.min_counts)\n file_return += \"Delay: {0} s\\n\\n\".format(i.delay)\n\n return file_return", "title": "" }, { "docid": "6411cceac6669c76d32aebdb70e50e17", "score": "0.49752492", "text": "def c_text_var(text):\n text = text.replace('_', ' ')\n return ('C' + ' ' + text)", "title": "" }, { "docid": "8f1d1acde55b0cfcc089814c6e668e8b", "score": "0.49666587", "text": "def write_to_file(var, conf, fname):\n # make sure folder exists:\n pathlib.Path(conf[\"log_dir\"]).mkdir(parents=True, exist_ok=True)\n \n if type(var) == dict:\n # Write conf and params dictionary to text file\n list_of_strings = [ '{:25} : {}'.format(key, var[key]) for key in var ]\n with open(\"{}/{}.txt\".format(conf[\"log_dir\"], fname),\"w\") as f:\n [ f.write(f'{st}\\n') for st in list_of_strings ]\n f.close()\n else:\n f = open(\"{}/{}.txt\".format(conf[\"log_dir\"], fname),\"w\")\n f.write( str(var) )\n f.close()", "title": "" }, { "docid": "550268a94d42f26e33ed2ddfc50b27aa", "score": "0.49627316", "text": "def _writeToFile(expression_string, model_directory, parent, header):\n\n fname = compat.getsavefilename(parent=parent,\n caption='Export to .py file',\n basedir=model_directory)[0]\n\n if len(fname) > 0:\n # enforce correct suffix.\n if not fname.endswith(\".py\"):\n fname += \".py\"\n\n f = open(fname, 'w')\n\n f.write(header)\n f.write(expression_string)\n f.close()", "title": "" }, { "docid": "514965cf1f859241d2bf451ad3758ac3", "score": "0.49602672", "text": "def writeCNF(cnfFile, string):\n with file(cnfFile, 'w') as f:\n f.write(string)", "title": "" }, { "docid": "78c5169d599e44f541448f244fff1b62", "score": "0.4958326", "text": "def writeFile(self,fn,data,masks=None,colstart='# '):\n for n,(l,u,f) in self.cols.iteritems():\n for k,(dt,c) in data.dtype.fields.iteritems():\n if k==n:\n if dt != f:\n raise TypeError('Data dtype %s does not match column type %s'%(dt,f))\n break\n else:\n raise TypeError('Data field %s not a column'%n)\n\n sortedcols = sorted([(l,n) for n,(l,u,f) in self.cols.iteritems()])\n sortedcols = [e[1] for e in sortedcols]\n\n\n with open(fn,'w') as f:\n for n in sortedcols:\n l,u,f = self.cols[n]\n f.write(colstart+' '.join((n,l,u,f))+'\\n')\n f.write('\\n')\n\n for rec in data:\n oldu=0\n for n in sortedcols:\n l,u,f = self.cols[n]\n spaces = l-oldu-1\n chrs = u-l+1\n if spaces>0:\n f.write(' '*spaces)\n f.write(str(rec[n])[:(u-l+1)])\n f.write('\\n')", "title": "" }, { "docid": "c77978caf6f78eeab9e41247d926e066", "score": "0.4957485", "text": "def var_name_to_filename(self, var_name):\n chars = []\n\n for c in var_name:\n if c in CheckpointDumper.FILENAME_CHARS:\n chars.append(c)\n elif c == '/':\n chars.append('_')\n\n return ''.join(chars)", "title": "" }, { "docid": "b15490ce09ac5a6553545d0cf6dcb586", "score": "0.4955797", "text": "def write_template(\n fp,\n c,\n name,\n namespace,\n namespace_list,\n default,\n 
serialize_type,\n items,\n max_value,\n comment,\n):\n c.name = name\n c.namespace = namespace\n c.namespace_list = namespace_list\n c.default = default\n c.serialize_type = serialize_type\n c.items_list = items\n c.max_value = max_value\n c.comment = comment\n fp.writelines(c.__str__())", "title": "" }, { "docid": "a2e4fe7848f8f4bdb7080de18ff9ffd9", "score": "0.49556345", "text": "def generalStr(self):\n retStr = ',,' # use standard delimiters\n retStr += writeGeneralExpr(self.exportProdId,'str') + ','\n retStr += writeGeneralExpr(self.name,'str') + ','\n retStr += writeGeneralExpr(self.natSystem,'str') + ','\n retStr += writeGeneralExpr(self.exporter,'str') + ','\n \n retStr += writeGeneralExpr(self.nIntBits,'int') + ','\n retStr += writeGeneralExpr(self.maxFloatPow,'int') + ','\n retStr += writeGeneralExpr(self.floatDigits,'int') + ','\n retStr += writeGeneralExpr(self.maxDoubPow,'int') + ','\n retStr += writeGeneralExpr(self.doubleDigits,'int') + ','\n \n retStr += writeGeneralExpr(self.importProdId,'str') + ','\n retStr += writeGeneralExpr(self.spaceScale,'dim') + ','\n retStr += writeGeneralExpr(self.unitsFlag,'int') + ','\n retStr += writeGeneralExpr(self.unitsName,'str') + ','\n retStr += writeGeneralExpr(self.maxLineGrads,'int') + ','\n \n retStr += writeGeneralExpr(self.maxLineWeight,'dim') + ','\n retStr += writeGeneralExpr(self.date,'str') + ','\n \n retStr += writeGeneralExpr(self.minRes,'dim') + ','\n retStr += writeGeneralExpr(self.maxCoordVal,'dim') + ','\n retStr += writeGeneralExpr(self.author,'str') + ','\n retStr += writeGeneralExpr(self.organisation,'str') + ','\n retStr += writeGeneralExpr(self.igesVer,'int') + ','\n retStr += writeGeneralExpr(self.draftStd,'int') + ',,' # don't specify last modified\n retStr += writeGeneralExpr(self.userSubset,'str') + ';'\n \n return retStr", "title": "" }, { "docid": "cf69d04216db3fab175d049894868df8", "score": "0.4955436", "text": "def create_script(fname):\r\n text = os.linesep.join([\"# -*- coding: utf-8 -*-\", \"\", \"\"])\r\n encoding.write(unicode(text), fname, 'utf-8')", "title": "" }, { "docid": "0041b521f3051e3f5cb3bce5580a1bde", "score": "0.49535865", "text": "def __str__(self):\n rstr = u''\n for item in self._index:\n rstr = rstr + self._tbl[item]\n return u\"{\\\\colortbl%s}\" % rstr", "title": "" }, { "docid": "71bf6f31b1420c5ebe12f3ed069993f3", "score": "0.4952445", "text": "def writeFile(nameAnt, sugarClod, poisonClod, sizeMatrix, wineClod):\n file = open(\"Settings.txt\", \"w\")\n\n nameAntInt = \"nameAnt\" + \",\" + nameAnt\n nameAntStr = str(nameAntInt)\n file.write(nameAntStr + \"\\n\")\n\n sugarClodInt = \"sugarClod\" + \",\" + sugarClod\n sugarClodStr = str(sugarClodInt)\n file.write(sugarClodStr + \"\\n\")\n\n poisonClodInt = \"poisonClod\" + \",\" + poisonClod\n poisonClodStr = str(poisonClodInt)\n file.write(poisonClodStr + \"\\n\")\n\n wineClodInt = \"wineClod\" + \",\" + wineClod\n wineClodStr = str(wineClodInt)\n file.write(wineClodStr + \"\\n\")\n\n sizeMatrixInt = \"sizeMatrix\" + \",\" + sizeMatrix\n sizeMatrixStr = str(sizeMatrixInt)\n file.write(sizeMatrixStr + \"\\n\")\n\n file.close()", "title": "" }, { "docid": "97804f647e6176ded66f6d3fb4b7e878", "score": "0.49499053", "text": "def writeVOTable(outputFile):\n\t\tif \"tablecoding\" not in contextOpts:\n\t\t\tcontextOpts[\"tablecoding\"] = { \n\t\t\t\tTrue: \"td\", False: \"binary\"}[data.queryMeta[\"tdEnc\"]]\n\t\tif \"version\" not in contextOpts:\n\t\t\tcontextOpts[\"version\"] = 
data.queryMeta.get(\"VOTableVersion\")\n\n\t\tvotablewrite.writeAsVOTable(\n\t\t\tdata.original, outputFile,\n\t\t\tctx=votablewrite.VOTableContext(**contextOpts))\n\t\treturn \"\"", "title": "" }, { "docid": "7840d53f754a52227e8016f305fcbd4e", "score": "0.4949747", "text": "def toString(self):\n\t\treturn \"@{0}\".format('\\t'.join([self.name] + [self.dataType] + self.labels))", "title": "" }, { "docid": "a90930f7fb968fc0093931ce78998cf5", "score": "0.49412438", "text": "def toSaveF(self, f):\n f.write('Robot;' + str(self.position) + ';' + str(self.direction) + ';' + str(self.dimension) + ';' + str(self.vitesse) + ';\\n')", "title": "" }, { "docid": "a90930f7fb968fc0093931ce78998cf5", "score": "0.49412438", "text": "def toSaveF(self, f):\n f.write('Robot;' + str(self.position) + ';' + str(self.direction) + ';' + str(self.dimension) + ';' + str(self.vitesse) + ';\\n')", "title": "" }, { "docid": "e772a5c592da86413a3f7a026ef959ac", "score": "0.4940689", "text": "def _write_polytope_file(self, polytope, variables, path):\n # Create the string representation of the polytope (LattE format)\n n_ineq = str(len(polytope.bounds))\n n_vars = str(len(variables) + 1)\n latte_repr = \"{} {}\\n\".format(n_ineq, n_vars)\n for _, bound in enumerate(polytope.bounds):\n latte_repr += str(bound.constant) + \" \"\n for var in variables:\n if var in bound.coefficients:\n latte_repr += str(-bound.coefficients[var]) + \" \"\n else:\n latte_repr += \"0 \"\n latte_repr += \"\\n\"\n\n # Write the string on the file\n with open(path, \"w\") as f:\n f.write(latte_repr)", "title": "" }, { "docid": "2f746accd52ab04d8afd9b1625926ca8", "score": "0.4937102", "text": "def writePAL(filename, cols):\n with open(filename, 'w') as f:\n for c in cols:\n f.write(('%3.3f' % c[0]).rjust(10) + ('%3.3f' % c[1]).rjust(11) + ('%3.3f' % c[2]).rjust(11));\n f.write('\\n');\n f.close();", "title": "" } ]
0d28e8ef4d44c78a166c75c014e756ac
Exports a batch of telemetry data.
[ { "docid": "083b6055270d44fb835e141133c0d353", "score": "0.5884133", "text": "def export(\n self, metric_records: Sequence[MetricRecord]\n ) -> \"MetricsExportResult\":", "title": "" } ]
[ { "docid": "a94acecd27598216250fe5ba9c86dc4b", "score": "0.62449056", "text": "def export():\n for agent in Performance.agentList:\n agent.performance.__exportAgentPerformance__()", "title": "" }, { "docid": "fbac169c2b86f9fe4415042d706e091a", "score": "0.6139936", "text": "def ExportSmoke(self):", "title": "" }, { "docid": "1143407724313c5304c4462cf5d1574a", "score": "0.61394966", "text": "def exportAll(exportPath, **kwargs):\n pass", "title": "" }, { "docid": "54c84117429345e2983779cb65b89975", "score": "0.6078771", "text": "def exportAll(exportPath, **kwargs):\n\n pass", "title": "" }, { "docid": "14e6872a4473ce5586c538604e5c85e2", "score": "0.600361", "text": "def export(self) -> None:\n idx = 0\n notify_flush = False\n # currently only a single thread acts as consumer, so queue.pop() will\n # not raise an exception\n while idx < self.max_export_batch_size and self.queue:\n span = self.queue.pop()\n if span is self._FLUSH_TOKEN_SPAN:\n notify_flush = True\n else:\n self.spans_list[idx] = span\n idx += 1\n token = attach(set_value(\"suppress_instrumentation\", True))\n try:\n # Ignore type b/c the Optional[None]+slicing is too \"clever\"\n # for mypy\n self.span_exporter.export(self.spans_list[:idx]) # type: ignore\n # pylint: disable=broad-except\n except Exception:\n logger.exception(\"Exception while exporting Span batch.\")\n detach(token)\n\n if notify_flush:\n with self.flush_condition:\n self.flush_condition.notify()\n\n # clean up list\n for index in range(idx):\n self.spans_list[index] = None", "title": "" }, { "docid": "b814c432985bb53f7d4e93495496909e", "score": "0.59891176", "text": "def __exportAgentPerformance__(self):\n self.__adjustSystemTimes__()\n \n agentId = self.agent.getId()\n fileNameSuffix = Performance.filenameOverride if Performance.filenameOverride != None else datetime.fromtimestamp(time.time()).strftime('%m_%d_%Y_%H_%M_%S')\n fileName = agentId + \"_\" + fileNameSuffix + \".csv\"\n filePath = \"performance\"\n if not os.path.isdir(filePath):\n os.mkdir(filePath)\n\n filePath = os.path.join(filePath, fileName)\n self.data.to_csv(filePath, index=False)\n print(\"{} performance output has been saved to: {}\".format(agentId, filePath))", "title": "" }, { "docid": "5ac30a4e184835e46803c6d2b55147dd", "score": "0.58365333", "text": "def export(self, spans: typing.Sequence[Span]) -> \"SpanExportResult\":", "title": "" }, { "docid": "1dbbf3ce7149c3f90b376ce2d84c19b4", "score": "0.5818301", "text": "async def export_all_data(background_tasks: BackgroundTasks):\n # TODO: Improve this function\n cameras = [(section_dict[\"Id\"], section_dict[\"Name\"]) for section_dict in extract_config(\"cameras\").values()]\n areas = [(section_dict[\"Id\"], section_dict[\"Name\"]) for section_dict in extract_config(\"areas\").values()]\n\n temp_dir = tempfile.mkdtemp()\n export_filename = f\"export-{date.today()}.zip\"\n zip_path = os.path.join(temp_dir, export_filename)\n with ZipFile(zip_path, 'w', compression=ZIP_DEFLATED) as export_zip:\n for (cam_id, name) in cameras:\n object_logs_path = os.path.join(os.getenv(\"SourceLogDirectory\"), cam_id, \"objects_log\")\n social_ditancing_reports_folder = f\"reports/{SocialDistancingMetric.reports_folder}\"\n social_ditancing_reports_path = os.path.join(os.getenv(\"SourceLogDirectory\"), cam_id,\n social_ditancing_reports_folder)\n face_mask_reports_folder = f\"reports/{FaceMaskUsageMetric.reports_folder}\"\n face_mask_reports_path = os.path.join(os.getenv(\"SourceLogDirectory\"), cam_id,\n face_mask_reports_folder)\n 
export_folder_into_zip(object_logs_path, os.path.join(\n \"cameras\", f\"{cam_id}-{name}\", \"raw_data\"), export_zip)\n export_folder_into_zip(social_ditancing_reports_path, os.path.join(\n \"cameras\", f\"{cam_id}-{name}\", social_ditancing_reports_folder), export_zip)\n export_folder_into_zip(face_mask_reports_path, os.path.join(\n \"cameras\", f\"{cam_id}-{name}\", face_mask_reports_folder), export_zip)\n for (area_id, name) in areas:\n occupancy_logs_path = os.path.join(os.getenv(\"AreaLogDirectory\"), area_id, \"occupancy_log\")\n occupancy_report_folder = f\"reports/{OccupancyMetric.reports_folder}\"\n occupancy_report_path = os.path.join(os.getenv(\"AreaLogDirectory\"), area_id,\n occupancy_report_folder)\n export_folder_into_zip(occupancy_logs_path, os.path.join(\n \"areas\", f\"{area_id}-{name}\", \"raw_data\"), export_zip)\n export_folder_into_zip(occupancy_report_path, os.path.join(\n \"areas\", f\"{area_id}-{name}\", occupancy_report_folder), export_zip)\n background_tasks.add_task(clean_up_file, temp_dir)\n return FileResponse(zip_path, filename=export_filename)", "title": "" }, { "docid": "0e4634e0b8a99808d7b9bc976d373748", "score": "0.5774305", "text": "def export_outputs(self):\n return self._backend.export_outputs()", "title": "" }, { "docid": "2d9d606947c1d4f70c03f444d5be0fb5", "score": "0.57655895", "text": "def do_export(self,args):\n if args == \"csv\":\n self.db.export_csv()\n else:\n self.db.export_json()", "title": "" }, { "docid": "f67c70e8409e09ac4e85e4018ad7eec3", "score": "0.5705766", "text": "def add_metrics_data(self, export_records: Sequence[Metric]) -> None:\n self._metrics_to_export.append(export_records)", "title": "" }, { "docid": "be80eee5efd4ba13d98dcfbb50db50f6", "score": "0.5693509", "text": "def save_all_data(self, modelname: str = \"model\"):\r\n self.test_accuracy.to_csv(\"{}_test_accuracy.csv\".format(modelname))\r\n self.test_precision.to_csv(\"{}_test_precision.csv\".format(modelname))\r\n self.test_recall.to_csv(\"{}_test_recall.csv\".format(modelname))\r\n self.train_accuracies.to_csv(\"{}_train_accuracy.csv\".format(modelname))\r\n self.train_precision.to_csv(\"{}_train_precision.csv\".format(modelname))\r\n self.train_recall.to_csv(\"{}_train_recall.csv\".format(modelname))\r\n pd.DataFrame([x[0] for x in self.all_features]).to_csv(\"{}_features.csv\".format(modelname))", "title": "" }, { "docid": "cf2a901b78f88f664e6fbcd3bdb7eb1e", "score": "0.5683608", "text": "def export_geo_dataset_csv(dataset_id):\n pass", "title": "" }, { "docid": "7ffe75a9b57de50d035ea275566d5820", "score": "0.5640236", "text": "def export(self):\n pass", "title": "" }, { "docid": "18dcdad0087847388e25ee535002f524", "score": "0.56386375", "text": "def write_engagement_data(filepath: str, engagement_data: List[EngagementRecord]):\n writeable_data = [record.data for record in engagement_data]\n write_to_csv(filepath, writeable_data)", "title": "" }, { "docid": "211e81234ea65f09fa45b2c40f77b51f", "score": "0.56178606", "text": "def export(self, category, measure_type, data):", "title": "" }, { "docid": "3fbe405cffb98d6d75082f52c7658201", "score": "0.5598583", "text": "def exportData(self,**kw):\n\n datafile = self._createCSV(self._getDataInfos(**kw))\n return self._createRequest(datafile.getvalue(), \"blogs_sheet_export.csv\")", "title": "" }, { "docid": "d1e9db84623e2055792aad1e358759c4", "score": "0.55933815", "text": "def write_data(x_total, x_body, y_data, labels_data):\n with open('../../output/test/HAR_X_total.csv', 'w') as HAR_t:\n writer1 = csv.writer(HAR_t)\n 
for total_data in x_total:\n writer1.writerow(total_data)\n with open('../../output/test/HAR_X_body.csv', 'w') as HAR_b:\n writer2 = csv.writer(HAR_b)\n for body_data in x_body:\n writer2.writerow(body_data)\n with open('../../output/test/HAR_y.csv', 'w') as HAR_y:\n writer3 = csv.writer(HAR_y)\n for subject_onehot in y_data:\n # print(set(subject_onehot))\n writer3.writerow(subject_onehot)\n with open('../../output/test/HAR_labels.csv', 'w') as HAR_labels:\n writer4 = csv.writer(HAR_labels)\n for subject_label in labels_data:\n # print(set(subject_label))\n writer4.writerow(subject_label)\n return", "title": "" }, { "docid": "8a3e3a3a32926b5e144a9191a6a6da8c", "score": "0.55879825", "text": "def phases_export(self):\n for idx, phase in enumerate(self.phases[:-1]):\n trans_op = False\n # only export if the phase has a transform operator on the dataset\n # otherwise all stats will be saved in the tabular object\n for task in phase:\n if isinstance(task[0], TransformOperator):\n trans_op = True\n break\n if trans_op:\n tar_path = os.path.join(self.ds_exports, str(idx))\n phase.append([Export(path=f\"{tar_path}\"), None, [], []])", "title": "" }, { "docid": "79da2c3069588b557b670a878594c4be", "score": "0.55589545", "text": "def export_times(self):\n header = ','.join(self.algorithm.timers.keys()) if self.iteration_count == 0 else ''\n with open(self._data_files_dir + 'timers.csv', 'ab') as out_file:\n np.savetxt(out_file, np.asarray([np.asarray([f for f in self.algorithm.timers.values()])]), header=header)", "title": "" }, { "docid": "c1c37d34c7f69bec8f50009eada28d4f", "score": "0.5553183", "text": "def exportSeriesOfTrainData(startI, endI, writeToTxt = False):\n \n ret = []\n for i in range(startI, endI):\n ret += exportTrainData(i)\n return ret", "title": "" }, { "docid": "e2232dd9b479016ea3f3a22ea19f9c7f", "score": "0.5541546", "text": "def write_and_export_results(self):\n\n self._write_results(writer=FileWriter(), column_separator=' ')\n if self._config.export:\n\n # Configure server only results path and export results\n server_metrics_path = os.path.join(\n self._results_export_directory,\n self._config.filename_server_only)\n logging.info(\n f\"Exporting server only metrics to {server_metrics_path}...\")\n self._export_server_only_csv(\n writer=FileWriter(filename=server_metrics_path),\n column_separator=',')\n\n # Configure model metrics results path and export results\n metrics_inference_path = os.path.join(\n self._results_export_directory,\n self._config.filename_model_inference)\n metrics_gpu_path = os.path.join(self._results_export_directory,\n self._config.filename_model_gpu)\n logging.info(\n f\"Exporting inference metrics to {metrics_inference_path}...\")\n logging.info(f\"Exporting GPU metrics to {metrics_gpu_path}...\")\n self._export_model_csv(\n inference_writer=FileWriter(filename=metrics_inference_path),\n gpu_metrics_writer=FileWriter(filename=metrics_gpu_path),\n column_separator=',')", "title": "" }, { "docid": "8069635a935d28a9945f33f5558bb07d", "score": "0.55322987", "text": "def cmd_export(args):\n kwargs = args.copy()\n root = kwargs.pop('root')\n debug = kwargs.pop('debug')\n\n if kwargs['output'] is None:\n kwargs['output'] = sys.stdout.buffer\n\n from .scrapbook import exporter\n for info in exporter.run(root, **kwargs):\n if info.type != 'debug' or debug:\n log(f'{info.type.upper()}: {info.msg}')", "title": "" }, { "docid": "a2aa9eb2566cbf39e94ad3025bc98436", "score": "0.55252844", "text": "def export(self, **kwargs):\n\n return 
self.api_request(self._get_method_fullname(\"export\"), kwargs)", "title": "" }, { "docid": "e75de5512892c4a98785575326eda014", "score": "0.5521793", "text": "def export(self, output_path: str) -> None:\n df = pd.DataFrame.from_dict(\n {\n k: (np.array([iter for iter, _ in arr]), np.array([value for _, value in arr]))\n for k, arr in self._loss_dynamics.items()\n },\n orient=\"index\",\n columns=[\"iters\", \"loss_dynamics\"],\n )\n\n for (entity_id, label_id), row in df.iterrows():\n item = self._export_dataset.get(entity_id, \"train\")\n for ann in item.annotations:\n if isinstance(ann, dm.Label) and ann.label == self.otx_label_map[label_id]:\n ann.attributes = row.to_dict()\n\n self._export_dataset.export(output_path, format=\"datumaro\")", "title": "" }, { "docid": "a9090569683efb37377fcc849f775831", "score": "0.5503464", "text": "async def export(ctx: commands.Context):\n file = io.StringIO()\n writer = csv.writer(file)\n writer.writerow(['ID', 'Username', 'Team', 'Events', 'FFA Roles', 'Bullet Brackets'])\n user_count = no_team_count = 0\n async for member in ctx.guild.fetch_members(limit=None):\n team = None\n events = []\n ffa_roles = []\n bullet_roles = []\n for role in member.roles:\n if role.name.startswith('Team: '):\n team = role.name.removeprefix('Team: ')\n elif role.name.startswith('Event: '):\n events.append(role.name.removeprefix('Event: '))\n elif role.name[:5] in ('FFA 1', 'FFA 2', 'FFA 3', 'FFA 4'):\n ffa_roles.append(role.name)\n elif role.name in ('Bullet EU', 'Bullet American'):\n bullet_roles.append(role.name)\n if team:\n writer.writerow([member.id, str(member), team, ';'.join(events), ';'.join(ffa_roles), ';'.join(bullet_roles)])\n user_count += 1\n else:\n no_team_count += 1\n file.seek(0)\n await ctx.send(\n f'Exported {user_count} users, skipped {no_team_count} without a '\n 'team role.',\n file=discord.File(file, filename='polympics_users.csv')\n )", "title": "" }, { "docid": "06134c22683a7dd380302dfcbd9354dc", "score": "0.5499687", "text": "def online_write_batch(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n ...", "title": "" }, { "docid": "9b3effbd1623fb8bdc49790816aabeb7", "score": "0.54893017", "text": "def export(self):\n return [t.export() for t in self.timers]", "title": "" }, { "docid": "079e31c95e7791ccd640d32b0127d5ab", "score": "0.54681414", "text": "def export_csv(self, request, *args, **kwargs):\n resource = FloorResource()\n queryset = Floor.objects.filter(is_active=settings.IS_ACTIVE)\n dataset = resource.export(queryset)\n response = HttpResponse(dataset.csv, content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"floors.csv\"'\n return response", "title": "" }, { "docid": "b2f1c48d12b0c5957a7d3a5468b8b22b", "score": "0.5460049", "text": "def export_data(self):\n return export_data(self.domain, alive=self.alive, fmt=self.fmt, path=self.path)", "title": "" }, { "docid": "a0a0c6d5adfc8e7692bc5cb508d3d0b3", "score": "0.54594177", "text": "def export_metrics(self, metrics):\n nr_metrics = []\n for metric in metrics:\n descriptor = metric.descriptor\n name = descriptor.name\n view = self.views[name]\n measure_name = view.measure.name\n measure_unit = view.measure.unit\n aggregation_type = view.aggregation\n\n tags = {\"measure.name\": measure_name, \"measure.unit\": measure_unit}\n\n for timeseries in metric.time_series:\n value = 
timeseries.points[0].value\n if hasattr(value, \"value\"):\n value = value.value\n elif hasattr(value, \"count\") and hasattr(value, \"sum\"):\n value = {\"count\": value.count, \"sum\": value.sum}\n else:\n _logger.warning(\n \"Unable to send metric %s with value: %s\", name, value\n )\n break\n\n timestamp = timeseries.points[0].timestamp\n time_tuple = timestamp.utctimetuple()\n epoch_time_secs = calendar.timegm(time_tuple)\n epoch_time_mus = epoch_time_secs * 1e6 + timestamp.microsecond\n end_time_ms = epoch_time_mus // 1000\n\n labels = (\n (k, l.value) for k, l in zip(view.columns, timeseries.label_values)\n )\n\n _tags = tags.copy()\n _tags.update(labels)\n\n if isinstance(value, dict):\n identity = MetricBatch.create_identity(name, _tags, \"summary\")\n\n # compute a delta count based on the previous value. if one\n # does not exist, report the raw count value.\n if identity in self.merged_values:\n last = self.merged_values[identity]\n delta_count = value[\"count\"] - last[\"count\"]\n delta_sum = value[\"sum\"] - last[\"sum\"]\n else:\n delta_count = value[\"count\"]\n delta_sum = value[\"sum\"]\n\n self.merged_values[identity] = value\n\n nr_metric = SummaryMetric(\n name=name,\n count=delta_count,\n sum=delta_sum,\n min=None,\n max=None,\n tags=_tags,\n end_time_ms=end_time_ms,\n interval_ms=None,\n )\n\n elif type(aggregation_type) in COUNT_AGGREGATION_TYPES:\n identity = MetricBatch.create_identity(name, _tags, \"count\")\n\n # Compute a delta count based on the previous value. If one\n # does not exist, report the raw count value.\n delta = value - self.merged_values.get(identity, 0)\n self.merged_values[identity] = value\n value = delta\n\n nr_metric = CountMetric(\n name=name,\n value=value,\n tags=_tags,\n end_time_ms=end_time_ms,\n interval_ms=None,\n )\n\n else:\n nr_metric = GaugeMetric(\n name=name, value=value, tags=_tags, end_time_ms=end_time_ms\n )\n\n nr_metrics.append(nr_metric)\n\n # Do not send an empty metrics payload\n if not nr_metrics:\n return\n\n try:\n response = self.client.send_batch(nr_metrics, common=self._common)\n except Exception:\n _logger.exception(\"New Relic send_metrics failed with an exception.\")\n return\n\n if not response.ok:\n _logger.error(\n \"New Relic send_metrics failed with status code: %r\", response.status\n )\n return response", "title": "" }, { "docid": "2e3d36c0465160bf5749d420233a8745", "score": "0.54589355", "text": "def export(self, raw_data_dir, **kwargs):", "title": "" }, { "docid": "82112e1fa83f58d4dfc196b940f41767", "score": "0.54517", "text": "def export_samples(self, traj_sample_lists, sample_type=''):\n M, N, T, dX, dU = len(traj_sample_lists), len(traj_sample_lists[0]), self.agent.T, self.agent.dX, self.agent.dU\n X = np.empty((M, N, T, dX))\n U = np.empty((M, N, T, dU))\n\n for m in range(M):\n sample_list = traj_sample_lists[m]\n for n in range(N):\n sample = sample_list[n]\n X[m, n] = sample.get_X()\n U[m, n] = sample.get_U()\n\n np.savez_compressed(\n self._data_files_dir + 'samples%s_%02d' % (sample_type, self.iteration_count),\n X=X,\n U=U,\n )", "title": "" }, { "docid": "a3a75dcf615c91134f55e0ce44819516", "score": "0.5447019", "text": "def export_django_model(outputs, model, batch_size=1000, fields=None,\n logger=None, limit=None, **filters):\n if logger is None:\n logger = _logger\n if not hasattr(outputs, '__iter__'):\n outputs = (outputs, )\n qs = model.objects.order_by('pk')\n if filters:\n qs = qs.filter(**filters)\n if fields is None:\n qs = qs.values()\n else:\n qs = qs.values(*fields)\n cnt_all = 
0\n\n logger.info('Started exporting of %s' % model.__name__)\n _start = time()\n with nested(*outputs):\n batch_num = 0\n last_id = None\n while True:\n start = time()\n if limit and cnt_all >= limit:\n break\n batch_num += 1\n if limit and batch_num * batch_size > limit:\n batch_size = limit - (batch_num - 1) * batch_size\n cnt_batch = 0\n if last_id is not None:\n chunk = tuple(qs.filter(pk__gt=last_id)[:batch_size])\n else:\n chunk = tuple(qs[:batch_size])\n if not len(chunk):\n break\n for entity in chunk:\n for output in outputs:\n output.export_entity(entity)\n cnt_all += 1\n last_id = chunk[-1]['id']\n cnt_batch += 1\n logger.info(\n 'Batch %d has been processed (%d entities) in %0.3f sec' % (\n batch_num, len(chunk), time() - start\n )\n )\n free_up_memory()\n logger.info('Done in %0.3f sec' % (time() - _start))", "title": "" }, { "docid": "4d94861c196c9864692b3fa3129bc96c", "score": "0.54386985", "text": "def saveActionData(self):\n\n self.ux_data.loc[:, 'Time':].to_csv(os.path.join(os.getcwd(), \"DATA\", 'ux_data.csv'))", "title": "" }, { "docid": "b3a73d1c4ffef02193716e20db350d82", "score": "0.5436421", "text": "def export_dataset(\n dataset_slug: str, include_url_token: bool, annotation_class_ids: Optional[List] = None, name: Optional[str] = None\n):\n client = _load_client(offline=False)\n identifier = DatasetIdentifier.parse(dataset_slug)\n ds = client.get_remote_dataset(identifier)\n ds.export(annotation_class_ids=annotation_class_ids, name=name, include_url_token=include_url_token)\n identifier.version = name\n print(f\"Dataset {dataset_slug} successfully exported to {identifier}\")\n print_new_version_info(client)", "title": "" }, { "docid": "901e8d278bd6fb578399ba6059290988", "score": "0.54323334", "text": "def export():\n tables = [\"montreal_slots\", \"quebec_slots\", \"newyork_slots\", \"seattle_slots\", \"boston_slots\",\n \"cities\", \"city_assets\", \"parking_lots\", \"rules\", \"permits\"]\n\n Logger.info('Exporting processed tables...')\n export_dir = os.path.join(os.path.dirname(os.environ[\"PRKNG_SETTINGS\"]), 'export')\n file_name = 'prkng-data-{}.sql.gz'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M'))\n if not os.path.exists(export_dir):\n os.mkdir(export_dir)\n subprocess.check_call('pg_dump -c {tbls} -U {PG_USERNAME} {PG_DATABASE} | gzip > {path}'.format(\n path=os.path.join(export_dir, file_name), PG_USERNAME=CONFIG[\"PG_USERNAME\"], PG_DATABASE=CONFIG[\"PG_DATABASE\"],\n tbls=\" \".join([\"-t '{}'\".format(x) for x in tables])),\n shell=True)\n Logger.info('Table export created and stored as {}'.format(os.path.join(export_dir, file_name)))", "title": "" }, { "docid": "2d92ff1c153fa71d6eed7e3005aa0018", "score": "0.542638", "text": "def export_fixtures():\n fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')\n models_to_export = [\n models.ActivityType,\n models.EmployeeType,\n models.Level,\n models.OMCategory,\n shared_models.FiscalYear,\n ]\n for model in models_to_export:\n data = serializers.serialize(\"json\", model.objects.all())\n my_label = model._meta.db_table\n f = open(os.path.join(fixtures_dir, f'{my_label}.json'), 'w')\n myfile = File(f)\n myfile.write(data)\n myfile.close()", "title": "" }, { "docid": "cc04d1d76aeb760214a9c76e9d0954cc", "score": "0.5425761", "text": "def export_results(self, target_dir=None):\n pass", "title": "" }, { "docid": "24eaa51d6ba74ce6660b219609c31bcf", "score": "0.5412853", "text": "def save_data(path, with_label):\n one_batch = concat(with_label)\n for index, df_dt in 
enumerate(one_batch):\n if index == 0:\n print(df_dt.dtypes, \"\\n\")\n print(f\"header of csv:\\n{df_dt.columns.values.tolist()}\")\n df_dt.to_csv(path, index=False)\n else:\n df_dt.to_csv(path, index=False, mode=\"a\", header=None)", "title": "" }, { "docid": "45d02ddb29f6a81a1f62e08582f3a79e", "score": "0.53873307", "text": "def write_daily_outputs_file(self, day_outputs):\n self.wr.writerows(day_outputs)", "title": "" }, { "docid": "6a24133dd29ae093d665a1616bcd9139", "score": "0.538645", "text": "async def export_students(self, ctx: commands.Context):\n await ctx.author.send(\n f'Student records for {ctx.guild.name}:', file=self.students.to_csv_file()\n )", "title": "" }, { "docid": "21d4ee7fccbefb1dc0d65fda9c010757", "score": "0.53722024", "text": "def export_results(self, out_filename=\"trepnfile.csv\"):\n run_duration = self.get_last_run_duration()\n res = self.device.execute_command(\"am broadcast -a {pkg}.export_to_csv -e {pkg}.export_db_input_file {filename} -e {pkg}.export_csv_output_file {outfile}\".format(pkg=self.pkg_name,filename=DEFAULT_FILENAME, outfile=out_filename), args=[], shell=True )\n res.validate(Exception(\"error while exporting results\"))\n time_to_sleep = run_duration * EXPORT_THRESHOLD\n time.sleep(int(time_to_sleep))\n\n return out_filename", "title": "" }, { "docid": "744652af550d8ba011ddd5e67f7030ee", "score": "0.53721136", "text": "def export_data(cls, results, file_format, system_elements, rel_path=\"./results\"):\n out_path, results_filtered = None, None\n try:\n out_path, results_filtered = \\\n cls._prepare_export(results, file_format, system_elements, rel_path)\n except ValueError as v:\n raise v\n \n if file_format == \"csv\":\n with open(out_path, \"w\", encoding=\"utf-8\", newline='') as outfile:\n writer = csv.writer(outfile, delimiter=\";\")\n writer.writerow(results_filtered.keys())\n writer.writerows(zip(*results_filtered.values()))\n\n if file_format == \"json\":\n with open(out_path, \"w\", encoding=\"utf-8\", newline='') as outfile:\n json.dump(results_filtered, outfile, ensure_ascii=False)", "title": "" }, { "docid": "ff033ca546e70fd5aed6b55866e1d390", "score": "0.5368417", "text": "def test_multiple_objects(self):\n data = [\n {\"object_name\": \"Workflow\", \"fields\": \"all\"},\n {\"object_name\": \"TaskGroup\", \"fields\": \"all\"},\n {\"object_name\": \"TaskGroupTask\", \"fields\": \"all\"},\n {\"object_name\": \"Cycle\", \"fields\": \"all\"},\n {\"object_name\": \"CycleTaskGroup\", \"fields\": \"all\"},\n {\"object_name\": \"CycleTaskGroupObjectTask\", \"fields\": \"all\"},\n ]\n request_body = {\n \"export_to\": \"csv\",\n \"objects\": data\n }\n response = self.client.post(\"/_service/export_csv\",\n data=dumps(request_body), headers=self.headers)\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Workflow,\", response.data)\n self.assertIn(\"Task Group,\", response.data)\n self.assertIn(\"Task,\", response.data)\n self.assertIn(\"Cycle,\", response.data)\n self.assertIn(\"Cycle Task Group,\", response.data)\n self.assertIn(\"Cycle Task,\", response.data)", "title": "" }, { "docid": "01e1a6c4a8a19c1cff85081bbb58a9ba", "score": "0.53664714", "text": "def load_export_data():\n exp = np.load('tracing_sim/results_exponential_withQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n exp_noQ = np.load('tracing_sim/results_exponential_withoutQ_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n exp_low_eff = np.load('tracing_sim/results_exponential_withQ_halfreact_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n 
lockdown = np.load('tracing_sim/results_smallworld_lockdown_withQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n no_lockdown = np.load('tracing_sim/results_erdosrenyi_withQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw = np.load('tracing_sim/results_smallworld_withQ_v3_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_noQ = np.load('tracing_sim/results_smallworld_withoutQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_low_eff = np.load('tracing_sim/results_smallworld_withQ_halfreact_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_exp = np.load('tracing_sim/results_smallworld_exponential_asc_withQ_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_exp_ = np.load('tracing_sim/results_smallworld_exponential_random_withQ_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n\n data = {}\n data[\"exp\"] = exp['mean']\n data[\"exp_noQ\"] = exp_noQ['mean']\n data[\"exp_low_eff\"] = exp_low_eff['mean']\n data[\"lockdown\"] = lockdown['mean']\n data[\"no_lockdown\"] = no_lockdown['mean']\n data[\"sw\"] = sw['mean']\n data[\"sw_noQ\"] = sw_noQ['mean']\n data[\"sw_low_eff\"] = sw_low_eff['mean']\n data[\"sw_exp\"] = sw_exp['mean']\n data[\"sw_exp_\"] = sw_exp_['mean']\n\n\n datalist = [exp,exp_noQ,exp_low_eff,lockdown,no_lockdown,sw,sw_noQ,sw_low_eff,sw_exp,sw_exp_]\n stringlist = [\"exp\",\"exp_noQ\",\"exp_low_eff\",\"lockdown\",\"no_lockdown\",\"sw\",\"sw_noQ\",\"sw_low_eff\",\"sw_exp\",\"sw_exp_\"]\n\n data_dict = {}\n\n for k,v in data.items():\n\n data_dict[k] = {}\n data_dict[k][\"O\"] = np.array([sum([data[k][:,x,0,i] for i in range(5)]) for x in range(4)])/200_000\n data_dict[k][\"DF\"] = (data_dict[k][\"O\"])/(np.array([sum([data[k][:,x,0,i] for i in [2,3]]) for x in range(4)])/200_000)\n data_dict[k][\"red\"] = [(((data_dict[k][\"O\"][x]/data_dict[k][\"O\"][x][0])-1)*100) for x in range(4)]\n try:\n data_dict[k][\"O_y0.5\"] = np.array([sum([data[k][:,x,1,i] for i in range(5)]) for x in range(4)])/200_000\n data_dict[k][\"DF_y0.5\"] = (data_dict[k][\"O_y0.5\"])/(np.array([sum([data[k][:,x,1,i] for i in [2,3]]) for x in range(4)])/200_000)\n data_dict[k][\"red_y0.5\"] = [(((data_dict[k][\"O_y0.5\"][x]/data_dict[k][\"O_y0.5\"][x][0])-1)*100) for x in range(4)]\n except:\n pass\n\n data_new = {}\n\n for k,v in data_dict.items():\n data_new[k] = {}\n data_new[k+\"0.5\"] = {}\n data_new[k][\"absolute\"] = {}\n data_new[k][\"reduction\"] = {}\n data_new[k+\"0.5\"][\"absolute\"] = {}\n data_new[k+\"0.5\"][\"reduction\"] = {}\n for i in range(4):\n data_new[k][\"absolute\"][str(np.round(data_dict[k][\"DF\"][i][0]))] = list(data_dict[k][\"O\"][i])\n data_new[k][\"reduction\"][str(np.round(data_dict[k][\"DF\"][i][0]))] = list(data_dict[k][\"red\"][i])\n try:\n data_new[k+\"0.5\"][\"absolute\"][str(np.round(data_dict[k][\"DF_y0.5\"][i][0]))] = list(data_dict[k][\"O_y0.5\"][i])\n data_new[k+\"0.5\"][\"reduction\"][str(np.round(data_dict[k][\"DF_y0.5\"][i][0]))] = list(data_dict[k][\"red_y0.5\"][i])\n except:\n pass\n\n with open('data_new.json', 'w') as outfile:\n json.dump(data_new, outfile)", "title": "" }, { "docid": "ba49700ab27c00c578d3bc16ac2279f2", "score": "0.53596544", "text": "def write(data: List[DataPoint], path: str) -> None:\n with open(path, \"w\", newline=\"\", encoding=\"utf-8\") as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(data)", "title": "" }, { "docid": "95652f6b37265cc39e75a9b2207bc06c", "score": "0.535295", "text": "def export(exp_data: ExportData) -> None:\n raise NotImplementedError", "title": "" }, { 
"docid": "a0a0fd6816b2b4342a56eaf3990d1440", "score": "0.5345472", "text": "def export_users():\n exportable_attributes = ('id', 'name', 'fullname', 'email_addr',\n 'created', 'locale', 'admin')\n\n def respond_json():\n tmp = 'attachment; filename=all_users.json'\n res = Response(gen_json(), mimetype='application/json')\n res.headers['Content-Disposition'] = tmp\n return res\n\n def gen_json():\n users = user_repo.get_all()\n json_users = []\n for user in users:\n json_datum = dictize_with_exportable_attributes(user)\n if 'geotagx_survey_status' in user.info.keys():\n json_datum['geotagx_survey_status'] = user.info['geotagx_survey_status']\n else:\n json_datum['geotagx_survey_status'] = \"RESPONSE_NOT_TAKEN\"\n\n # Append total task_runs to json export data\n json_datum['task_runs'] = len(TaskRun.query.filter(TaskRun.user_id == user.id).all())\n json_users.append(json_datum)\n return json.dumps(json_users)\n\n def dictize_with_exportable_attributes(user):\n dict_user = {}\n for attr in exportable_attributes:\n dict_user[attr] = getattr(user, attr)\n return dict_user\n\n def respond_csv():\n out = StringIO()\n writer = UnicodeWriter(out)\n tmp = 'attachment; filename=all_users.csv'\n res = Response(gen_csv(out, writer, write_user), mimetype='text/csv')\n res.headers['Content-Disposition'] = tmp\n return res\n\n def gen_csv(out, writer, write_user):\n add_headers(writer)\n for user in user_repo.get_all():\n write_user(writer, user)\n yield out.getvalue()\n\n def write_user(writer, user):\n values = [getattr(user, attr) for attr in sorted(exportable_attributes)]\n if 'geotagx_survey_status' in user.info.keys():\n values.append(user.info['geotagx_survey_status'])\n else:\n values.append('RESPONSE_NOT_TAKEN')\n\n # Add total task_runs by the user\n values.append(len(TaskRun.query.filter(TaskRun.user_id == user.id).all()))\n writer.writerow(values)\n\n def add_headers(writer):\n writer.writerow(sorted(exportable_attributes) + ['geotagx_survey_status', 'task_runs'])\n\n export_formats = [\"json\", \"csv\"]\n\n fmt = request.args.get('format')\n if not fmt:\n return redirect(url_for('.index'))\n if fmt not in export_formats:\n abort(415)\n return {\"json\": respond_json, \"csv\": respond_csv}[fmt]()", "title": "" }, { "docid": "e1a8b29a8ba240783be7a86bb22f3a61", "score": "0.53356194", "text": "def export_data(target_path):\n tasks.export_data_dir(target_path)\n tasks.export_database(target_path)\n tasks.export_context(target_path)\n return target_path", "title": "" }, { "docid": "810bff2898472b853fb8f1d45480f794", "score": "0.53267664", "text": "def exportDataToCSV(self):\n print(\"Exporting ticker and sector data as CSV...\")\n try:\n with open('output_profits.csv', 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|')\n\n ### Sectors Data\n csvwriter.writerow([\"Sector\", \"Total Profit\"])\n for sectorObj in self.sectors.values():\n csvwriter.writerow([sectorObj.name, sectorObj.profits])\n\n csvwriter.writerow([])\n\n ### Tickers Data\n csvwriter.writerow([\"Ticker\", \"Sector\", \"Total Profit\"]+list(self.dataSetProfits.keys()))\n for tickerObj in self.tickers.values():\n csvwriter.writerow([tickerObj.name, tickerObj.sector.name, tickerObj.totalProfits]+list(tickerObj.profits.values()))\n except Exception as err:\n print(\"ERROR: Failed to export ticker and sector data as 'outputs_profits.csv' - \",err)\n else:\n print(\"Successfully exported ticker and sector data as 'outputs_profits.csv'\")", "title": "" }, { "docid": 
"15d1a822eba176b0473288df86a20b3e", "score": "0.5288715", "text": "def export_data(self):\n data = {\n \"sources\": self._export_triangle(self.sources),\n \"goals\": self._export_triangle(self.goals),\n \"rectangle_obstacles\": self._export_rectangle(self.rect_obstacles),\n \"circle_obstacles\": self._export_circle(self.circle_obstacles)\n }\n return data", "title": "" }, { "docid": "79dba009e69ef957b8b0fe3dabaa04eb", "score": "0.5282997", "text": "def batch_writer(self, *args, **kwargs):\n return self.table.batch_writer(*args, **kwargs)", "title": "" }, { "docid": "2e865c3cab64d9fdff281c4c81f9991c", "score": "0.527038", "text": "def write_batch_csv(self):\n csv = io.StringIO()\n if self.header:\n csv.write(\",\".join(self.header))\n csv.write(self.linesep)\n for row in list(self.chunk()):\n csv.write(\",\".join(map(str, row)))\n csv.write(self.linesep)\n\n csv = csv.getvalue().encode(self.codec)\n return pa.py_buffer(csv)", "title": "" }, { "docid": "651641c4500d9a5b11ad65bd2d12988b", "score": "0.5268414", "text": "def export(self):\n self.bme280.export()\n self.ccs811.export()", "title": "" }, { "docid": "9740fb0167829f108277ad7789f192fb", "score": "0.5263749", "text": "def export_fixtures():\n fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')\n models_to_export = [\n models.ComponentType,\n models.HelpText,\n shared_models.Institute,\n shared_models.Vessel,\n ]\n for model in models_to_export:\n data = serializers.serialize(\"json\", model.objects.all())\n my_label = model._meta.db_table\n f = open(os.path.join(fixtures_dir, f'{my_label}.json'), 'w')\n myfile = File(f)\n myfile.write(data)\n myfile.close()", "title": "" }, { "docid": "4e911c1becebfbe44b0cdc4222bacd74", "score": "0.5252314", "text": "def engagement_per_tag():\n path = export_metric(metrics=[\"engagement_per_tag\"])\n console.log(f\"Exported to {path}\")", "title": "" }, { "docid": "8e720bc0c385bd7b7f52be794bf572cc", "score": "0.52435", "text": "def write_output(self):\n for data in self.output_data.values():\n self.create_output(data.get('key'), data.get('value'), data.get('type'))", "title": "" }, { "docid": "cc74a4cce41f67c67f450779e6bea5ba", "score": "0.5237889", "text": "def write_to_file(batch_df, batch_id):\n batch_df = batch_df\\\n .coalesce(1)\\\n .sort(col('count').desc())\\\n .limit(max_rows)\n\n # Write current aggregations to json file\n batch_df.write \\\n .format(\"json\") \\\n .mode('overwrite') \\\n .option('path', os.path.realpath('results/{}_{}'.format(\n task_name,\n batch_id))) \\\n .option('checkpointLocation', 'checkpoint')\\\n .save()\n\n # Display 100 rows on screen after processing\n LOG.info('Top 10 source airports - With Streaming')\n batch_df.show(100, False)", "title": "" }, { "docid": "9f880712a4227bd6b84ca76d638356b2", "score": "0.52376246", "text": "def export_results(results: List[dict], path: str, fmt: str):\n with open(path, \"w+\") as out:\n Exporter.write(Format(Utils.format_to_int(fmt)), results, out)\n print(Fore.WHITE + Style.BRIGHT, f\"✨ Successfully exported results to {path} ✨\")", "title": "" }, { "docid": "390c9fb4f91059036a25d3baa692f8a9", "score": "0.5233583", "text": "def export(self, name=None):\n full_keys = []\n full_values = []\n for idx in range(len(self.devices)):\n keys_ = None\n vals_ = None\n with ops.device(self.devices[idx]):\n keys_, vals_ = self._tables[idx].export(name=name)\n full_keys.append(keys_)\n full_values.append(vals_)\n return array_ops.concat(full_keys, 0), array_ops.concat(full_values, 0)", "title": "" }, { 
"docid": "42058d4bde5fb148c7d980bf7802562b", "score": "0.52134186", "text": "def data_artifacts(X_train):\n os.makedirs('artifacts_temp', exist_ok=True)\n \n features = list(X_train.columns)\n indices = list(X_train.index)\n \n with open('artifacts_temp/features.txt', 'w') as features_txt:\n features_txt.write(str(features))\n \n with open('artifacts_temp/indices.txt', 'w') as indices_txt:\n indices_txt.write(str(indices))\n \n X_train.head(10).to_csv('artifacts_temp/X_train_sample.csv', index=False)\n \n mlflow.log_artifacts('artifacts_temp')", "title": "" }, { "docid": "bf7f0c0f89549440ab9ed550adad708d", "score": "0.52047503", "text": "def save_to_file_csv(cls, list_objs):\n pass", "title": "" }, { "docid": "3407a341ce11313b50010fc28ae0ece3", "score": "0.52030444", "text": "def exportdataraw(self, file='temp.data') -> None:\n with open(file,'w') as f:\n f.write(str(self.list))", "title": "" }, { "docid": "93323e13940a03213a6a855df50cf0bd", "score": "0.5193261", "text": "def multi_sum_info(df, num, api_key):\n summoner_df = pd.DataFrame()\n for sum_name in df['summonerName']:\n sum_info = get_all_sum_info(sum_name, api_key)\n summoner_df = pd.concat([summoner_df, sum_info], ignore_index=True)\n time.sleep(1)\n summoner_df.to_csv(f'summoner{num}.csv')", "title": "" }, { "docid": "ee0c70d31e91dde8e2b9942eacfcbe08", "score": "0.51653385", "text": "def build_test_data():\n footprints = ee.FeatureCollection(definitions.EE_CLIFF_FOOTPRINTS)\n footprints = footprints.toList(1000)\n footprints = footprints.map(extract_naip)\n footprints = footprints.flatten()\n footprints = ee.FeatureCollection(footprints)\n\n task = ee.batch.Export.table.toDrive(\n collection=footprints,\n folder='earth-engine',\n description='naip_test', # filename\n fileFormat='TFRecord'\n )\n task.start()", "title": "" }, { "docid": "4c8cb7f04441cd7b9a304277d9b4190c", "score": "0.51539177", "text": "def bulk_reports(self, actions):\n for ok, result in streaming_bulk(\n self.es,\n actions=actions,\n index=self.index,\n doc_type='report',\n chunk_size=50 # keep the batch sizes small for appearances only\n ):\n action, result = result.popitem()\n doc_id = '/%s/report/%s' % (self.index, result['_id'])\n # process the information from ES whether the document has been successfully indexed\n if not ok:\n print('Failed to %s document %s: %r' % (action, doc_id, result))\n # else:\n # print(doc_id)", "title": "" }, { "docid": "3c05c2da790a5ec8fba68d517669ec69", "score": "0.5152921", "text": "def _save_data(self):\n self.widgets.exporter.export()", "title": "" }, { "docid": "cf650774715c50e5ca4e8a252710c4ff", "score": "0.5142004", "text": "def liststore_export(store, columns, cb_write, cb_write_args, row_offset=0, write_columns=True):\n\tcolumn_names, store_columns = _split_columns(columns)\n\tif write_columns:\n\t\tcb_write(0, column_names, *cb_write_args)\n\n\tstore_iter = store.get_iter_first()\n\trows_written = 0\n\twhile store_iter:\n\t\trow = collections.deque()\n\t\tfor column in store_columns:\n\t\t\tvalue = store.get_value(store_iter, column)\n\t\t\tif isinstance(value, datetime.datetime):\n\t\t\t\tvalue = utilities.format_datetime(value)\n\t\t\trow.append(value)\n\t\tcb_write(rows_written + 1 + row_offset, row, *cb_write_args)\n\t\trows_written += 1\n\t\tstore_iter = store.iter_next(store_iter)\n\treturn rows_written", "title": "" }, { "docid": "ed2c5fdbd91dccb9309c252b2e887d3b", "score": "0.51405287", "text": "def save_data():\n logzero.logfile(\"data01.csv\")\n formatter = logging.Formatter('%(name)s - %(asctime)-15s - 
%(levelname)s: %(message)s');\n logzero.formatter(formatter)\n get_sense_data()\n output_string = \",\".join(str(value) for value in sense_data)\n logger.info(\"%s,%s,%s,%s,%s,%s,%s,%s,%s\", str(latlong),hight, dn, time_stamp, lok1 , picname, picname_previous ,deltat, output_string )", "title": "" }, { "docid": "cfd389f2192d6ec676c39dba53ccdba1", "score": "0.5134208", "text": "def export(self):\n print 'Starting Export'\n # The node network output stream. Every item is a line\n self.out_stream = []\n\n # Find the node that is specified as the source (a shader)\n # and recursivly build out node network\n for key, value in self.nodes.iteritems():\n if key == self.source:\n self.stream_me(key)\n\n self.out_stream = reversed(self.out_stream)\n\n self.save()", "title": "" }, { "docid": "743f3bd80041a0924975220446f8dbda", "score": "0.5134192", "text": "def main():\n footprints = ee.FeatureCollection(definitions.EE_CLIFF_FOOTPRINTS)\n n_cliffs = footprints.size()\n\n footprints = footprints.toList(n_cliffs)\n footprints = footprints.map(extract_naip)\n footprints = footprints.flatten()\n\n n_shards = definitions.N_SHARDS\n step_size = footprints.size().divide(n_shards)\n for shard in trange(n_shards):\n start = step_size.multiply(shard).int()\n stop = step_size.multiply(shard + 1).int()\n footprints_shard = footprints.slice(start, stop)\n footprints_shard = ee.FeatureCollection(footprints_shard)\n\n task = ee.batch.Export.table.toDrive(\n collection=footprints_shard,\n folder='naip_shards',\n description=f'naip_shard_{shard}', # filename\n fileFormat='TFRecord'\n )\n task.start()", "title": "" }, { "docid": "3d44332f6d878da1c7f42ca3a300b8ea", "score": "0.51317644", "text": "def perform_export(self, included_columns):\n\n\t\tself.included_columns = included_columns\n\n\t\tself.create_summary_worksheet()\n\t\tself.create_all_responses_worksheet()\n\n\t\tfor participant in self.participants:\n\t\t\tself.create_participant_worksheet(participant)\n\n\t\tself.output_workbook.save(filename = 'output/responses.xlsx')", "title": "" }, { "docid": "b3a83c1bf70c2698e67c5b265f1c9d0e", "score": "0.5125375", "text": "def export_data(self, out, metadata=True, fmt=\"tab\"):\n if isinstance(metadata, (list, tuple)):\n # list of keys\n metadata_dict = {}\n for key in metadata:\n metadata_dict[key] = self.metadata[key]\n elif isinstance(metadata, bool) and metadata:\n # all metadata\n metadata_dict = self.metadata\n else:\n raise ValueError(\"Metadata must be list, tuple, or bool, got \"\n f\"'{metadata}' of type '{type(metadata)}'!\")\n\n if fmt == \"tab\":\n if isinstance(out, (pathlib.Path, str)):\n fd = pathlib.Path(out).open(\"w\")\n close = True\n elif isinstance(out, io.IOBase):\n fd = out\n close = False\n else:\n raise ValueError(\"Unexpected object class for 'out': \"\n + \"'{}' for format 'tab'!\".format(\n out.__class__))\n self._export_tab(fd, metadata_dict=metadata_dict)\n if close:\n fd.close()\n elif fmt in [\"hdf5\", \"h5\"]:\n if isinstance(out, (pathlib.Path, str)):\n # overrides always\n h5 = h5py.File(out, \"w\")\n close = True\n elif isinstance(out, h5py.Group):\n h5 = out\n close = False\n else:\n raise ValueError(\"Unexpected object class for 'out': \"\n + \"'{}' for format 'hdf5'!\".format(\n out.__class__))\n self._export_hdf5(h5group=h5, metadata_dict=metadata_dict)\n if close:\n h5.close()\n else:\n raise ValueError(\"Unexpected string for 'fmt': {}\".format(fmt))", "title": "" }, { "docid": "75b3549b2cba85437c9c9fe1da6d3aed", "score": "0.5121706", "text": "def 
output_all_lines_to_csv(self):\n with open(cfg.OUTPUT_LOC+\"\\\\all_lines.csv\", \"wb\") as f:\n writer = csv.writer(f)\n writer.writerows(self.all_lines)", "title": "" }, { "docid": "2efdd409ceaf2c33c4e08b084af49046", "score": "0.51173824", "text": "def export_csv(self, request, *args, **kwargs):\n resource = HouseHoldIncomeResource()\n queryset = HouseHoldIncome.objects.filter(is_active=settings.IS_ACTIVE)\n dataset = resource.export(queryset)\n response = HttpResponse(dataset.csv, content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"household_incomes.csv\"'\n return response", "title": "" }, { "docid": "76f4a0713e554c955fec22475db89aae", "score": "0.51146877", "text": "def export(self) -> bytes:\n self._update()\n data = self.ivt.export()\n data += self.bdt.export()\n if self.dcd:\n data += self.dcd.export()\n data += self.app.export()\n data += self.csf.export()\n return data", "title": "" }, { "docid": "76f4a0713e554c955fec22475db89aae", "score": "0.51146877", "text": "def export(self) -> bytes:\n self._update()\n data = self.ivt.export()\n data += self.bdt.export()\n if self.dcd:\n data += self.dcd.export()\n data += self.app.export()\n data += self.csf.export()\n return data", "title": "" }, { "docid": "1d4161f333ca190cd3340a5992db59dd", "score": "0.5113959", "text": "def export(sent_list, file_name):\n\n with open(file_name, \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n for sent in sent_list:\n writer.writerow([sent])", "title": "" }, { "docid": "20923bbbb6c028536a8f42d99b9ecff3", "score": "0.51101094", "text": "def dump(self, filename):\n logger.info('Writing measurements to {}'.format(filename))\n with open(filename, 'w') as fh:\n writer = csv.writer(fh)\n for timestamp, value in self.values:\n writer.writerow([timestamp, value])", "title": "" }, { "docid": "d259cc331602bedf5cde2b62b6f36f68", "score": "0.5107198", "text": "def export_samples(\n self,\n sample_batches: List[Any],\n sample_labels: Optional[List[Any]] = None,\n sample_originals: Optional[List[Any]] = None,\n exp_counter: int = 0,\n ):\n sample_batches = [tensors_to_device(batch, \"cpu\") for batch in sample_batches]\n inputs_dir = os.path.join(self._output_dir, \"sample-inputs\")\n outputs_dir = os.path.join(self._output_dir, \"sample-outputs\")\n labels_dir = os.path.join(self._output_dir, \"sample-labels\")\n originals_dir = os.path.join(self._output_dir, \"sample-originals\")\n\n with torch.no_grad():\n for batch, lab, orig in zip(\n sample_batches,\n sample_labels if sample_labels else [None for _ in sample_batches],\n sample_originals\n if sample_originals\n else [None for _ in sample_batches],\n ):\n out = tensors_module_forward(batch, self._module)\n\n exported_input = tensors_export(\n batch,\n inputs_dir,\n name_prefix=\"inp\",\n counter=exp_counter,\n break_batch=True,\n )\n if isinstance(out, dict):\n new_out = []\n for key in out:\n new_out.append(out[key])\n out = new_out\n exported_output = tensors_export(\n out,\n outputs_dir,\n name_prefix=\"out\",\n counter=exp_counter,\n break_batch=True,\n )\n\n if lab is not None:\n tensors_export(\n lab, labels_dir, \"lab\", counter=exp_counter, break_batch=True\n )\n\n if orig is not None:\n tensors_export(\n orig,\n originals_dir,\n \"orig\",\n counter=exp_counter,\n break_batch=True,\n )\n\n assert len(exported_input) == len(exported_output)\n exp_counter += len(exported_input)", "title": "" }, { "docid": "97b313053d05039fd8ae9ef7365afb05", "score": "0.5106559", "text": "def bulk_write(self, 
metrics):\n try:\n for metric in metrics:\n self.producer.send(self.topic, metric)\n self.producer.flush()\n except (KafkaTimeoutError, NoBrokersAvailable) as exc:\n logger.warning('bulk_write metrics %r failure %r', metrics, exc)", "title": "" }, { "docid": "1d878cdeb380e42aad438f07da2a06f5", "score": "0.5103479", "text": "def export_outputs(self):\n return {}", "title": "" }, { "docid": "adc6a0fa762bfc94ed8efd020e5d9db4", "score": "0.5091859", "text": "def export_data(self, stage_indxs: list or str = \"all\",\n export_folder_name: str = \"aramis_to_txt\"):\n self._check_all_stages_active()\n\n if stage_indxs == \"all\":\n stages = self.project.stages\n\n elif stage_indxs == \"last\":\n stages = [self.project.stages[-1]]\n else:\n stages = []\n for index in stage_indxs:\n for stage in self.project.stages:\n if stage.get(\"index\") == index:\n stages.append(stage)\n export_directory = self._check_export_folder_exists(export_folder_name)\n print(f'Number of stages {len(stages)}')\n for current_stage in stages:\n current_stage_index = int(current_stage.get('index'))\n self.script.sys.show_stage(stage=current_stage)\n current_surface_comp = self._get_current_component(index=0).get('name')\n\n if self.project.inspection[current_surface_comp + '.dX'].computation_status == \"computed\":\n self._export_stage_to_txt(export_directory=export_directory,\n current_stage_index=current_stage_index)", "title": "" }, { "docid": "a2673a5054c5c02cfccd5bd6a11b2db7", "score": "0.5089875", "text": "def export_set(cls, dataset):\n\n stream = BytesIO()\n\n page = markup.page()\n page.table.open()\n\n if dataset.headers is not None:\n new_header = [item if item is not None else '' for item in dataset.headers]\n\n page.thead.open()\n headers = markup.oneliner.th(new_header)\n page.tr(headers)\n page.thead.close()\n\n for row in dataset:\n new_row = [item if item is not None else '' for item in row]\n\n html_row = markup.oneliner.td(new_row)\n page.tr(html_row)\n\n page.table.close()\n\n # Allow unicode characters in output\n wrapper = codecs.getwriter(\"utf8\")(stream)\n wrapper.writelines(str(page))\n\n return stream.getvalue().decode('utf-8')", "title": "" }, { "docid": "0810ae894b68dc85ba3d16c1bce5e17b", "score": "0.5082253", "text": "def wmExportTasks(self):\n\n sIncludeRefs = uiCommon.getAjaxArg(\"sIncludeRefs\")\n sTaskArray = uiCommon.getAjaxArg(\"sTaskArray\")\n\n otids = sTaskArray.split(\",\")\n\n # helpername is just something to stick on the file so it's a little more recognizable.\n # it should really be an argument, but the UI doesn't ask for it at the moment.\n helpername = \"\"\n\n \"\"\"\n The UI sends us a list of OriginalTaskIDs and assumes we want the 'default' version.\n \n So, for now we just turn that list into a list of Task IDs.\n \"\"\"\n task_ids = []\n for otid in otids:\n t = task.Task()\n t.FromOriginalIDVersion(otid)\n if t:\n if not helpername:\n helpername = \"%s_%s\" % (t.Name, uiCommon.GetSessionUserFullName())\n task_ids.append(t.ID)\n\n docs = task.Tasks.Export(task_ids, sIncludeRefs)\n xml = \"\"\n for doc in docs:\n xml += doc\n\n xml = \"<tasks>%s</tasks>\" % xml\n\n # what are we gonna call this file?\n seconds = str(int(time.time()))\n filename = \"%s_%s.xml\" % (helpername.replace(\" \", \"\").replace(\"/\", \"\"), seconds)\n with open(os.path.join(catoconfig.CONFIG[\"tmpdir\"], filename), 'w') as f_out:\n if not f_out:\n uiCommon.log(\"ERROR: unable to write task export file.\")\n f_out.write(xml)\n\n return json.dumps({\"export_file\": filename})", "title": "" }, { 
"docid": "2547af7bc61856c63e02ec6670921396", "score": "0.50806886", "text": "def write_samples_tsv(cls, samp_ids: list) -> None:\n samp_tab = \"view_samples_ena\"\n samples = Samples.fetch_entries(samp_tab, sample_ids=samp_ids)\n EnaTsvSamples.write(samples)\n with ZipFile(cls.get_tempfile(), \"w\") as zipObj:\n zipObj.write(EnaTsvSamples.get_tempfile(), EnaTsvSamples.filename)", "title": "" }, { "docid": "ba824ed06d89054b1374786c534e9624", "score": "0.5073121", "text": "def export(user: str, start: date, end: date, output_dir: Path) -> None:\n filename = f\"{start}-{end}_usage.csv\"\n\n df = fetch(user, start, end)\n df.to_csv(output_dir/filename, sep=DELIMITER, index=False)", "title": "" }, { "docid": "826a35bc2f4690dc75b563cd5b619a80", "score": "0.5064271", "text": "def contact_export_batch(session, model_name, backend_id, domain=None, fields=None, delay=False, **kwargs):\n connector_env = get_environment(session, model_name, backend_id)\n\n # Get the exporter connector unit\n batch_exporter = connector_env.get_connector_unit(ContactBatchExporter)\n\n # Start the batch export\n batch_exporter.batch_run(domain=domain, fields=fields, delay=delay, **kwargs)", "title": "" }, { "docid": "f37f742fc31f4ab7a6b8fbb92993bebb", "score": "0.5061552", "text": "def export(gen, filepath):\n meta = {} # to be exported as JSON at the end\n meta['descriptors'] = defaultdict(list) # map stream_name to descriptors\n files = {} # map descriptor uid to file handle of CSV file\n desc_counters = defaultdict(itertools.count)\n try:\n for name, doc in gen:\n if name == 'start':\n if 'start' in meta:\n raise RuntimeError(\"This exporter expects documents from \"\n \"one run only.\")\n meta['start'] = doc\n elif name == 'stop':\n meta['stop'] = doc\n elif name == 'descriptor':\n stream_name = doc.get('name')\n meta['descriptors'][stream_name].append(doc)\n filepath_ = f\"{filepath}_{stream_name}_{next(desc_counters[doc['uid']])}.csv\"\n files[doc['uid']] = open(filepath_, 'w+')\n elif name == 'event':\n row = ', '.join(map(str, (doc['time'], *doc['data'].values())))\n f = files[doc['descriptor']]\n f.write(f'{row}\\n')\n finally:\n for f in files.values():\n f.close()\n with open(f\"{filepath}_meta.json\", 'w') as f:\n json.dump(meta, f)\n return (f.name,) + tuple(f.name for f in files.values())", "title": "" }, { "docid": "e9714862822c02586cb532e44a4c27ea", "score": "0.5058238", "text": "def _export_model_csv(self, inference_writer, gpu_metrics_writer,\n column_separator):\n\n gpu_table = self._result_tables[self.model_gpu_table_key]\n non_gpu_table = self._result_tables[self.model_inference_table_key]\n\n self._write_result(table=gpu_table,\n writer=gpu_metrics_writer,\n column_separator=column_separator,\n ignore_widths=True,\n include_title=False)\n\n self._write_result(table=non_gpu_table,\n writer=inference_writer,\n column_separator=column_separator,\n ignore_widths=True,\n include_title=False)", "title": "" }, { "docid": "f5f2a57d88d321fd5d903a363fd0f454", "score": "0.5056593", "text": "def test_generate_telemetry(run_generate, summary_store: SummaryStore):\n run_generate(\"ls8_satellite_telemetry_data\")\n\n _expect_values(\n summary_store.get(\"ls8_satellite_telemetry_data\"),\n dataset_count=1199,\n footprint_count=1199,\n time_range=Range(\n begin=datetime(2016, 1, 1, 0, 0, tzinfo=DEFAULT_TZ),\n end=datetime(2018, 1, 1, 0, 0, tzinfo=DEFAULT_TZ),\n ),\n region_dataset_counts={\n \"91\": 56,\n \"92\": 56,\n \"93\": 56,\n \"90\": 51,\n \"95\": 47,\n \"94\": 45,\n \"96\": 44,\n \"101\": 43,\n \"98\": 43,\n 
\"100\": 42,\n \"105\": 42,\n \"111\": 42,\n \"99\": 42,\n \"104\": 41,\n \"110\": 41,\n \"112\": 41,\n \"103\": 40,\n \"107\": 40,\n \"108\": 40,\n \"109\": 40,\n \"89\": 40,\n \"97\": 40,\n \"113\": 39,\n \"102\": 37,\n \"106\": 36,\n \"114\": 32,\n \"116\": 29,\n \"115\": 27,\n \"88\": 27,\n },\n newest_creation_time=datetime(2017, 12, 31, 3, 38, 43, tzinfo=tzutc()),\n timeline_period=\"month\",\n timeline_count=24,\n crses={\"EPSG:4326\"},\n size_bytes=10333203380934,\n )", "title": "" }, { "docid": "ca57c3ab98fd6ad5c4005f8cbb549a59", "score": "0.50532097", "text": "def export_dataset(self, filename=None):\n\n file_ac = FileAccess(storage=\"export\", resource_id=self.id, cloud_parms=self._cloud_parms)\n if not filename:\n filename = self.id + \".dset\"\n\n self.load_all_images()\n\n export_data = {\n \"export_version\": \"2\",\n \"export_date\": datetime.datetime.now(),\n \"dataset\": self.dataset_name,\n \"dataset_id\": self.id,\n \"num_examples\": len(self.examples),\n \"images_list\": list(self.images.keys())\n }\n\n # Dataset.export_to_file(filename)\n Dataset.export_to_hdf5(export_data, self.parms, self.examples, self.images, filename, self.id)\n if self._cloud_parms is not None:\n file_ac.sync_files(\n file_list=[filename],\n origin=\"local\"\n )\n log.info(f\"Dataset exported to file {len(self.examples)} examples\")", "title": "" }, { "docid": "b4c21123e7ba99f5cd5e538e0170b34a", "score": "0.50523907", "text": "def write_experiments_tsv(cls, samp_ids: list) -> None:\n samp_tab = \"view_samples_ena_experiment\"\n run_samples = Samples.fetch_entries(samp_tab, sample_ids=samp_ids)\n EnaTsvExperiment.write(run_samples)\n with ZipFile(cls.get_tempfile(), \"a\") as zipObj:\n zipObj.write(EnaTsvExperiment.get_tempfile(),\n EnaTsvExperiment.filename)", "title": "" }, { "docid": "83655634622707ae18a48be2116daeac", "score": "0.5049891", "text": "def dump_data(filename, values, cols=None):\n if filename:\n outfile = open(filename, 'w')\n else:\n outfile = sys.stdout\n\n res_writer = csv.writer(outfile)\n if cols:\n for r in values:\n row = [r[i] for i in cols]\n res_writer.writerow(row)\n else:\n res_writer.writerows(values)\n outfile.write('\\n')\n\n if filename:\n outfile.close()", "title": "" }, { "docid": "713a421f4a20066fcba44a17b0c8e236", "score": "0.50357884", "text": "def output_csv_data(self, top = False, summary = False):\n output_dfs, output_names = self.get_csv_data(top, summary)\n for index, df in enumerate(output_dfs):\n name = output_names[index]\n df.to_csv(name+\".csv\")\n\n print \"Wrote: \"+name+\".csv\"", "title": "" }, { "docid": "01ffe913ce229269a67a7874a5c35a85", "score": "0.50271004", "text": "def write_batch_arrow(self):\n data = list(self.chunk())\n data = zip(*data)\n arrays = []\n for i, column in enumerate(data):\n arrays.append(pa.array(column, self.pa_schema[i].type))\n\n batch = pa.RecordBatch.from_arrays(arrays, names=self.pa_schema.names)\n return batch", "title": "" }, { "docid": "e9fe9033dcda5406736acd335960a780", "score": "0.5026336", "text": "def default_export_aggregated_timers(aggregated_report):\n pass", "title": "" }, { "docid": "79b9358dc9f6e7f59d7085b035c51971", "score": "0.5025023", "text": "def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:\n if self._stopped:\n return SpanExportResult.FAILURE\n with self._lock:\n self._finished_spans.extend(spans)\n return SpanExportResult.SUCCESS", "title": "" }, { "docid": "447e4692e1e6b5fe1fd2eec524dd2e49", "score": "0.5025004", "text": "def export_tables():\n with DatabaseAPI() as 
db_obj:\n for table in Base.metadata.tables.keys():\n export_table(db_obj, to_class(table))", "title": "" } ]
dcd5cfeec1176c6ac0717df07bad0e9a
Sends the help message
[ { "docid": "6a6dad1f3b2ffea5792f1b3258f003ef", "score": "0.0", "text": "async def help(self, ctx):\n\n await ctx.send('Type `>run` and python script to run python')", "title": "" } ]
[ { "docid": "dc0ada3b234879429e494800a76a63e9", "score": "0.8490778", "text": "def do_help(self, *args):\n self.stdout.write(HELP_MESSAGE)", "title": "" }, { "docid": "d47fb2e6b3684f66a3ede28374e5bee6", "score": "0.81821424", "text": "def do_help(self):\n self.send_bot_msg('*Commands:* https://github.com/TinyChat/Tinychat-Bot/wiki/', self.user.nick)", "title": "" }, { "docid": "c802229b407fbf9827caf8e8703c7bd1", "score": "0.806487", "text": "def _help(bot, update):\n help_txt = \"Hey! This is a help text.\"\n bot.send_message(chat_id=update.message.chat_id, text=help_txt)", "title": "" }, { "docid": "f283d24055d60b06f784efd9f9c903cb", "score": "0.8044507", "text": "async def help(self, args, mobj):\n output = \"Hey there! Thank you for using the help functionality.\\n\\n\"\n output += \"##############################################################################\\n\"\n output += \"# Don't forget to run the more server related commands in the actual server! #\\n\"\n output += \"##############################################################################\\n\\n\"\n output += \"Here are the available commands:\\n\\n\"\n\n for c in [f\"{k}\" for k in self.ACTIONS.keys()]:\n output += f\" {c}\\n\"\n\n output += \"\\n For more info on each command, use '!command help'\"\n return await self.message(mobj.author, self.pre_text(output))", "title": "" }, { "docid": "35b94caca6a502f03b7f70978f2a6d77", "score": "0.80123097", "text": "def help_command(update, context):\n update.message.reply_text('Help!')", "title": "" }, { "docid": "cddc4f86384bdf22dc68059aead373cd", "score": "0.79230416", "text": "def help(update, context):\n update.message.reply_text(_(\"Help\"))", "title": "" }, { "docid": "78c687a3a60b7933d78533fd979fd53c", "score": "0.7887985", "text": "def cmd_help(self, params):\n self.help()", "title": "" }, { "docid": "726c5202c0620f1176ef4f949ff8cf7a", "score": "0.7876978", "text": "def do_help(self, inputs):\n cmd.Cmd.do_help(self, inputs)", "title": "" }, { "docid": "7788117b224f2fae21fb4f9fa3c97782", "score": "0.7855003", "text": "async def help_command(message: types.Message):\n await message.reply(msg.help_message)", "title": "" }, { "docid": "cf93a3c658b3ee40f14e35925ff53214", "score": "0.7831273", "text": "def help_help(self):\n\n self.stdout.write(self.help_help.__doc__ + \"\\n\")", "title": "" }, { "docid": "9b269d5944fcb5cc7ce44c53bebf0ede", "score": "0.78282434", "text": "def help_command(self):\n text = _(\"User commands:\\n\") + \\\n _(\"r N - run the simulation for N cycles\\n\") + \\\n _(\"c N - continue the simulation for N cycles\\n\") + \\\n _(\"s X N - set switch X to N (0 or 1)\\n\") + \\\n _(\"m X - set a monitor on signal X\\n\") + \\\n _(\"z X - zap the monitor on signal X\\n\") + \\\n _(\"h - help (this command)\\n\") + \\\n _(\"q - quit the program\")\n wx.MessageBox(text, _(\"Help on Commands\"), wx.ICON_INFORMATION | wx.OK)", "title": "" }, { "docid": "6d7c7054688225af9261c5a56cb0adb6", "score": "0.77996266", "text": "def help(update, context):\n update.message.reply_text('Help!')\n #TODO", "title": "" }, { "docid": "378551e33972ddd9c26d985dc80898fc", "score": "0.77729344", "text": "def do_help(self, args):\n cmd.Cmd.do_help(self, args)", "title": "" }, { "docid": "031d47ae0b747df41a784ede6fd540c4", "score": "0.77707136", "text": "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "title": "" }, { "docid": "4f89969c8b3e5a12c9fb2c214dc46893", "score": "0.776831", "text": "def help_command(update: Update, 
context: CallbackContext) -> None:\n update.message.reply_text('Helping you helping you.')", "title": "" }, { "docid": "4df6f586861ada3181afe2abe004b452", "score": "0.772737", "text": "def help(bot, update):\r\n update.message.reply_text('Help!')", "title": "" }, { "docid": "1286ae6941bf742ad078db7b7c4f271a", "score": "0.7699966", "text": "def helpMsg(actor):\n\tprint(\"Avaiable options: get|help|exit|list_neighbors|list_files\")", "title": "" }, { "docid": "cc13d91d57e8fd854c402fa2c3c699ac", "score": "0.76746005", "text": "def help(update: Update, context: CallbackContext) -> None:\n s = \"\"\n for i in myCommands:\n s += '/'+myCommands[i]+\" - \"+i+\"\\n\"\n update.message.reply_text('Help!\\n' + s)", "title": "" }, { "docid": "87a22a579e53fd6f6fa10f12c2a4c121", "score": "0.7650296", "text": "def help(server, data):\r\n\r\n # Construct the help message's header\r\n message = \"Here are the available commands:\\r\\n\"\r\n\r\n # Construct the content\r\n for command in server.commands.keys():\r\n message = message + \" - \" + command + \"\\r\\n\"\r\n\r\n # Add further help info\r\n message = message + \"To learn more about each command, type <command> help. \" + \"\\r\\n\"\r\n message = message + \"To add arguments to a command, type <command> <argument1> <argument2> (...)\" + \"\\r\\n\"\r\n\r\n # Send the prepared message to the client\r\n server.send(message)", "title": "" }, { "docid": "c31100003ecf78d47fc1db90674a7b8d", "score": "0.7593971", "text": "def do_help():\r\n\r\n global STATE\r\n STATE = STATE_START\r\n attributes = {\"state\":globals()['STATE']}\r\n return response(attributes, response_plain_text(HELP_MESSAGE, False))", "title": "" }, { "docid": "d541374ce4a00a7473d2ca5f1d010a6f", "score": "0.7584389", "text": "def help(self, message, args):\n raise NotImplementedError('This method must be implemented')", "title": "" }, { "docid": "b8b99ac44eacf797ffb7ea375b8a1b38", "score": "0.75822437", "text": "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "title": "" }, { "docid": "c654bc6719ec6389c5bc3b0f6dc88156", "score": "0.757944", "text": "def do_help(self, line):\n enterHelp(self.client)", "title": "" }, { "docid": "3a507cc7db8915be98355fe38230588b", "score": "0.75738716", "text": "def help():", "title": "" }, { "docid": "b2a73f5351231ae885d74ce59bdb957e", "score": "0.7564036", "text": "def show_help(bot, update):\n bot.send_message(chat_id=update.message.chat_id,\n text=MESSAGES['HELP'],\n parse_mode=ParseMode.HTML)", "title": "" }, { "docid": "723ff42dfdc99e815b145a9edc2ae69e", "score": "0.7558429", "text": "def help(bot, update):\n bot.sendMessage(update.message.chat_id, text='Help!')", "title": "" }, { "docid": "40b22538455f87cefa1a8d4f66c4fd04", "score": "0.7539667", "text": "def do_help(self, args):\n # The only reason to define this method is for the help text in the doc\n # string\n cmd.Cmd.do_help(self, args)", "title": "" }, { "docid": "e9a787cf9112c25e6fe3f453db0199fb", "score": "0.7531621", "text": "async def help(ctx):\n await server_bot.help(ctx)", "title": "" }, { "docid": "99c2783978243e651e4251be3fd10edc", "score": "0.75235754", "text": "def do_main_help(self, data):\n msg = \"Hi, I'm a bot! I have {0} commands loaded: {1}. 
You can get help for any command with '!help <command>'.\"\n cmnds = sorted([cmnd.name for cmnd in self.bot.commands])\n msg = msg.format(len(cmnds), ', '.join(cmnds))\n self.reply(data, msg)", "title": "" }, { "docid": "3ef15081b0229e960c2546bddebf77a3", "score": "0.75202906", "text": "def do_help(self, args):\n # The only reason to define this method\n # is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "title": "" }, { "docid": "6088e28731da3f68f6c5691834839f69", "score": "0.7514919", "text": "def help():\n return help_text", "title": "" }, { "docid": "7c12107736c0a8bef614f88f99e04e2d", "score": "0.7506989", "text": "def help(update, context):\n update.message.reply_text('Help!')\n time.sleep(3)\n update.message.reply_text('Just kidding! HAHAHAHAHAHA')\n update.message.reply_text('''Add triggers by doing: /add <trigger>:<respons>\nRemove triggers by doing: /delete <trigger>\nGet all triggers by doing: /triggers\\\n ''')", "title": "" }, { "docid": "ad16aa42b7001533c46c00b6e26f0045", "score": "0.7504356", "text": "def msg_help(self, text):\n if type(self).help_more:\n usemore = True\n\n if self.session.protocol_key in (\"websocket\", \"ajax/comet\"):\n try:\n options = self.player.db._saved_webclient_options\n if options and options[\"helppopup\"]:\n usemore = False\n except KeyError:\n pass\n\n if usemore:\n evmore.msg(self.caller, text)\n return\n\n self.msg((text, {\"type\": \"help\"}))", "title": "" }, { "docid": "e84239e0b10fdbc1c4506ffa1d6314a0", "score": "0.7500062", "text": "def do_help():\n\n global STATE\n STATE = STATE_START\n # attributes = {\"state\":globals()['STATE']}\n return response_plain_text_ga(HELP_MESSAGE, True)", "title": "" }, { "docid": "e69aa82c0cfab47788efab973ac20334", "score": "0.74996126", "text": "async def help_handler(msg: types.Message) -> None:\n help_text = \"This is a help text for testing\"\n await msg.answer(help_text)\n await msg.reply(\"Son of a bitch, hay que decirlo mas\")", "title": "" }, { "docid": "073e149b82a1d815b030a9d347a1b0d3", "score": "0.7447619", "text": "async def send_command_help(self, command: commands.Command):\n if command.name == 'help':\n desc = Help.help\n else:\n desc = command.help\n desc = desc.replace('{{pre}}', self.context.prefix)\n title = self.get_command_signature(command)\n e = discord.Embed(title=title, color=colours.HELP, description=desc)\n await self.get_destination().send(embed=e)", "title": "" }, { "docid": "d9549a001d995a662f5092ac35c6fc3c", "score": "0.74306446", "text": "def send_help_message(self, channel):\n\n self.logger.debug('Sending help message.')\n\n message = \"\"\"There are a few things I can do:\n\nIf you say `slack`, or `master`, I will speak using everything I have\nlearned from slack. 
This is my default behaviour.\n\nIf you say `#channel`, I will speak using everything I have learned\nfrom that channel.\n\nIf you say `@user`, I will speak using everything I have learned from\nthat user.\n\nIf you say `$external source`, I will speak using everything I have\nlearned from that source.\n\nHint: You can combine multiple commands.\n \"\"\"\n\n self.send_message(channel, message)", "title": "" }, { "docid": "59da4d65da34e130bbdff150345ef626", "score": "0.7430576", "text": "def help(bot, update):\r\n update.message.reply_text('Hi, I am here to help you!')\r\n update.message.reply_text('To start, you should call'\r\n ' start command with two args:')\r\n update.message.reply_text('Url to wiki and depth of search')\r\n update.message.reply_text('Bot only works with English wiki for now!')", "title": "" }, { "docid": "5bbee8c7e69151b99a54617ff528b80f", "score": "0.7422377", "text": "def help(self):\n self.help_message.place(x=self.absx + 10, y=self.absy + 10)", "title": "" }, { "docid": "9d4a9d1510d808b88b8f46fd2347fc36", "score": "0.7418295", "text": "def handle_help(self, event: Event) -> None:\n content = scripts.get_help()\n\n return self.reply_line(event, content)", "title": "" }, { "docid": "6d049507aebd874542210f03c46e4531", "score": "0.74178636", "text": "async def help(self, context, user : discord.Member=None):\n f = open(helptxt, 'r')\n general=f.read()\n await context.channel.send(general)\n f.close()", "title": "" }, { "docid": "4f7fcbf9fc063295766fd318348c7605", "score": "0.7397008", "text": "async def help(ctx):\n\n print(f\"{ctx.message.author}: {ctx.message.content}\")\n await ctx.message.delete()\n await cdef.print_help_message(ctx)", "title": "" }, { "docid": "a17f2a88519fd1914f6a09d564bd8dbe", "score": "0.73854035", "text": "async def help(ctx):\n embed = discord.Embed(title=\"Battle Ships Bot\",\n description=\"a bot the lets you play the Battle \"\n \"Ships game in a DM channel in Discord\",\n color=0xeee657)\n\n embed.add_field(name=\"^start_game\", value=\"bot creates a dm channel and \"\n \"starts a battle ship game\",\n inline=False)\n embed.add_field(name=\"^shoot X Y\", value=\"Bot shoots the given \"\n \"coordinates\", inline=False)\n embed.add_field(name=\"^stats\", value=\"Gives stats about current game, \"\n \"number of turns, hits and misses\",\n inline=False)\n embed.add_field(name=\"^surrender\", value=\"surrenders and stops a current \"\n \"game session\", inline=False)\n embed.add_field(name=\"^info\", value=\"Gives a little info about the bot\",\n inline=False)\n embed.add_field(name=\"^help\", value=\"Gives this message\", inline=False)\n\n await ctx.send(embed=embed)", "title": "" }, { "docid": "8062de968f18b2702982e6c61f67913c", "score": "0.7379159", "text": "def help(update, context):\n user = update.message.from_user\n \n context.bot.send_message(text=constants.HELP_MESSAGE,\n chat_id=user.id,\n parse_mode=ParseMode.HTML)", "title": "" }, { "docid": "d1ce256bda0e0fab11dcc029e12232fb", "score": "0.7372104", "text": "def chat_help(update: Update, context: CallbackContext):\n update.message.reply_text(HELP_MESSAGE)", "title": "" }, { "docid": "6ad19320a90921d80d9cb2b012106b64", "score": "0.73642117", "text": "def cb_help(self, obj):\n display_help(webpage = WIKI_HELP_PAGE, section = WIKI_HELP_SEC)", "title": "" }, { "docid": "902ba76d51efa1fcc49c9a33cb8112c2", "score": "0.73545146", "text": "def help(self):\n pass", "title": "" }, { "docid": "3a3ae010c4c1282c6e2f29ec82de1d75", "score": "0.73493034", "text": "def callhelp(self):\n 
self.parser.print_help()", "title": "" }, { "docid": "6b9486437f15f6e10599769045369610", "score": "0.7306459", "text": "def help(update, context):\n #update.message.reply_text(\"Доступные команды: \\n\\n/id\")\n update.message.reply_text(\"Прости, лично я пока ничем не готов помочь...\")", "title": "" }, { "docid": "486000c1422f79eacf699430b9c0db7b", "score": "0.73041505", "text": "def help(update, context):\n update.message.reply_text('cazzo vuoi io non ti aiuto')", "title": "" }, { "docid": "4448dad758c1e8cb9a2f4416dee83436", "score": "0.7293747", "text": "def OnHelp( self, evt ):\n wx.MessageBox( \"Documentation available at\\nhttp://ctrax.sourceforge.net\", \"Help\" )", "title": "" }, { "docid": "304570b1bcf0b535b63d47da99066933", "score": "0.7291669", "text": "async def write(self, ctx):\n await self.bot.send_cmd_help(ctx)", "title": "" }, { "docid": "55decaecaf2b4b4b70a23f29ab08ecc6", "score": "0.72851753", "text": "def help_(self):\n pass", "title": "" }, { "docid": "abf8e0ce9c88791fb70fe0cca953d363", "score": "0.7280757", "text": "def help(self, **kwds):", "title": "" }, { "docid": "260db9140088250b6bd7a365288b2221", "score": "0.7271429", "text": "def help_command(chat_id, text):\n logger.info('COMMAND {}: chat_id={}'.format(text, chat_id))\n get_bot().send_message(chat_id=chat_id, text='\\n'.join(tr('help', chat_id)))", "title": "" }, { "docid": "53956d664d4c78758c5be0fbfad23ae9", "score": "0.72584754", "text": "async def help(client, message, **kwargs):\n commandName = kwargs.get('command')\n if commandName:\n command = cmds.find(commandName)\n if command:\n context_cache = {'db_server': db.get_server(message.server) if message.server else None}\n context_cache = cmds.get_context_cache_update(context_cache, message)\n validated, exc = await command.validate_context(client, message, [], context_cache)\n if validated or isinstance(exc, MissingParameters):\n await client.send_message(message.channel, command.pretty_print())\n else:\n commandsStr = []\n async for c in AuthorizedCommandsWrapper(client, message):\n commandsStr.append(c)\n await client.send_message(message.channel, T_HelpGlobal.format('\\n '.join(commandsStr)))", "title": "" }, { "docid": "100455fbcb2a0247ffce4b72acd63cbb", "score": "0.7258179", "text": "def cmd_help( self, command, params, event, received=\"channel\" ):\n\n if len( params ) < 1:\n self.send_message( event.source, \"List of commands:\" )\n for cmd in self.commands:\n self.send_message( event.source, format.color( \"## \", format.LIME_GREEN ) + '{:<20} {}'.format( *self.commands[ cmd ][1].__doc__.format( format.bold( format_command( cmd, received ) ) ).splitlines()[0].strip().split( \"!X!\", 1 ) ) ) # split and justify\n return\n if params[0].lower() in self.commands:\n self.send_message( event.source, \"Usage info for command {0}:\".format( format.bold( params[0] ) ) )\n for line in self.commands[ params[0].lower() ][1].__doc__.format( *[ format.bold( format_command( c, received ) ) for c in params ] ).splitlines():\n self.send_message( event.source, format.color( \"## \", format.LIME_GREEN ) + '{:<35} {}'.format( *line.strip().split( \"!X!\", 1 ) ) ) # split and justify\n else:\n self.send_message( event.source, \"Unkown Command {0}.\".format( format.bold( params[0] ) ) )", "title": "" }, { "docid": "92d848a72bae335023b22138a3d35af6", "score": "0.7244817", "text": "def show_help(update: Update, context: CallbackContext):\n update.message.reply_text(\"Help!\")\n log_context(context)", "title": "" }, { "docid": "78f55633db05c3e85f0e175abef98f1d", 
"score": "0.72430915", "text": "def help(self, *args: Any, **kwargs: Any) -> None:\n print(self._help(*args))", "title": "" }, { "docid": "9e3e1c48a28115d5364744716318e318", "score": "0.7238084", "text": "async def help(self, /, timeout: Optional[Union[float, Default]] = _default) -> str:\n await self._ehlo_or_helo_if_needed()\n\n response = await self.execute_command(b\"HELP\", timeout=timeout)\n if response.code not in (\n SMTPStatus.system_status_ok,\n SMTPStatus.help_message,\n SMTPStatus.completed,\n ):\n raise SMTPResponseException(response.code, response.message)\n\n return response.message", "title": "" }, { "docid": "fbfdcd77467d42901dc8a10c54e53bdf", "score": "0.7234989", "text": "def help(update, context):\n update.message.reply_text('Bischd du dumm?')", "title": "" }, { "docid": "16189a160af4e224d10999630669b8a5", "score": "0.7222698", "text": "async def handle_command_help(self, target, nick):\n help_msg = (\"Pump19 is run by Twisted Pear. \"\n \"Check {url} for a list of supported commands.\").format(\n url=COMMAND_URL)\n await self.client.privmsg(target, help_msg)", "title": "" }, { "docid": "22eeab3319c36a80d8f876b16ce3c83e", "score": "0.72175574", "text": "def handle_help(self):\n print(\"Help not implemented yet for PyXtal!\")", "title": "" }, { "docid": "0cf9372768ac0d709c5b908d49961589", "score": "0.7214833", "text": "def help_help(self):\n print \"List commands or print details about a command\"", "title": "" }, { "docid": "243842f7f070518fbd1d39b11d879731", "score": "0.7213134", "text": "def help(update, context):\n help_text = get_help_text(is_telegram=True)\n update.message.reply_text(help_text)\n return", "title": "" }, { "docid": "2904501c85c9a6f7a17c12c5515404b4", "score": "0.7211631", "text": "def __help_msg(self):\n return \"\"\"\\\nHelp:\n - /IGNORE \"\"\" + self.connection.get_nickname() + \"\"\" - Lets you stop reading the bot.\n\n You can send these commands in private message (PM) to \"\"\" + self.connection.get_nickname() + \"\"\":\n - !help - Show this help message.\n - !stats - Show some statistics.\n - !list - Show all feeds.\n - !last - Show last news published in all feeds.\n - !lastfeed <feedid> - Show last news published in a specific feed.\n\"\"\"", "title": "" }, { "docid": "5beca3b23d6b0196b384730dd3f10ad7", "score": "0.720753", "text": "def help(self, mess, args):\n if not args:\n if self.__doc__:\n description = self.__doc__.strip()\n else:\n description = _('Available commands:')\n\n usage = sorted(['%s:\\t%s' % (name, (command.__doc__ or \\\n '(undocumented)').strip().split('\\n', 1)[0])\n for (name, command) in self.commands.iteritems() \\\n if name != 'help' \\\n and not command._jabberbot_command_hidden\n ])\n #usage = '\\n\\n' + '\\n\\n'.join(filter(None, [usage, self.MSG_HELP_TAIL]))\n usage = usage + [self.MSG_HELP_TAIL]\n else:\n description = ''\n if args in self.commands:\n usage = (self.commands[args].__doc__ or \\\n 'undocumented').strip()\n else:\n usage = [self.MSG_HELP_UNDEFINED_COMMAND]\n\n if isinstance(usage, str):\n usage = [usage]\n if isinstance(description, str):\n description = [description]\n\n # return [description] + usage\n self.send_simple_reply(mess, description + usage, True)", "title": "" }, { "docid": "ffdc9fc8c453bbc9a6ba0f4a3171c076", "score": "0.71776325", "text": "def send_help(message):\n bot.send_message(message.chat.id, 'Hei, ' + message.from_user.first_name +\n '! Рад тебя видеть! 
Чем могу помочь?',\n reply_markup=config.keyboard(content.main_menu))", "title": "" }, { "docid": "4453c93839d26b0d99a84a6569d6c56b", "score": "0.7177051", "text": "async def faq(self, ctx):\n await ctx.send_help(ctx.command)", "title": "" }, { "docid": "260b0efcf7bfe425dabb95754ccf95c7", "score": "0.717615", "text": "def help(self, args):\n print('Various commands for couchpotato')\n print('run with --help for more information')", "title": "" }, { "docid": "ab56972c5a82c239bd5d55933485240b", "score": "0.71744657", "text": "def do_command_help(self, data):\n target = data.args[0]\n\n for command in self.bot.commands:\n if command.name == target or target in command.commands:\n if command.__doc__:\n doc = command.__doc__.replace(\"\\n\", \"\")\n doc = re.sub(r\"\\s\\s+\", \" \", doc)\n msg = 'Help for command \\x0303{0}\\x0F: \"{1}\"'\n self.reply(data, msg.format(target, doc))\n return\n\n msg = \"Sorry, no help for \\x0303{0}\\x0F.\".format(target)\n self.reply(data, msg)", "title": "" }, { "docid": "10ef278124e799cc68a952e9a6630415", "score": "0.716986", "text": "def help(cls, task):\n executor = task.executor\n\n if cls.__doc__:\n out = fmt.FormatList(executor)\n out.add(fmt.Header(f\"Help: {cls.name}\"))\n out.add(fmt.Line(cls.__doc__))\n out.add(fmt.Footer())\n executor.send(out)\n else:\n executor.msg(text=\"Help is not implemented for this command.\")", "title": "" }, { "docid": "d64c3ba2e1dde5452d1b373791760272", "score": "0.7154695", "text": "async def show_bot_help(self):\n\n self.embed.title = 'Using the **C02* bot'\n self.embed.description = 'Hello! Welcome to the help page.'\n self.embed.clear_fields()\n\n entries = (\n ('<argument>', 'This means the argument is __**required**__.'),\n ('[argument]', 'This means the argument is __**optional**__.'),\n ('[A|B]', 'This means the it can be __**either A or B**__.'),\n ('[argument...]', 'This means you can have multiple arguments.\\n__**You do not type in the brackets!**__')\n )\n\n for name, value in entries:\n self.embed.add_field(name=name, value=value, inline=False)\n\n self.embed.set_footer(text=f'We were on page {self.current_page} before this message.')\n await self.message.edit(embed=self.embed)\n\n async def go_back_to_current_page():\n await asyncio.sleep(30.0)\n await self.show_current_page()\n\n self.bot.loop.create_task(go_back_to_current_page())", "title": "" }, { "docid": "c33808163f19aa079d9ffe299507dc38", "score": "0.7152405", "text": "def do_help(self, arg):\n if arg:\n helper = [n[5:] for n in self.get_names()\n if n.startswith('help_' + arg)]\n if len(helper) == 0:\n self.stdout.write('*** Unknown command: %s\\n' % arg)\n return\n elif len(helper) > 1:\n self.stdout.write((\n \"*** {} is a shorcut to several commands\\n\"\n \" Please give more characters for disambiguation\"\n ).format(cmd))\n return\n else:\n arg = helper[0]\n cmd.Cmd.do_help(self, arg)", "title": "" }, { "docid": "3876464481a92f00979051f50995d2b6", "score": "0.7133162", "text": "def help(bot, update):\n update.message.reply_text('These are our commands:\\n/start: to roll with us'\n '\\n/help: to get to this selection'\n '\\n/add_event: to add an event you want to visit'\n '\\n/del_event: to remove an event you dont want to visit any more, or youve visited it already'\n '\\n/list: to get the list of your events you want to visit'\n '\\n/get_all_events: to see the list of all events')", "title": "" }, { "docid": "f04a783e501c825a7080cc5f0020e103", "score": "0.71281564", "text": "def Help(self, verb):\n return ''", "title": "" }, { "docid": 
"8ee726458ec392f40b97bd97cca8ad7b", "score": "0.7125904", "text": "def help(self, event):\n self.master.help()", "title": "" }, { "docid": "eda42e163a13c9179754de96fc6bf45a", "score": "0.71244067", "text": "def help(self, event):\n\n try:\n path = staticPath(__file__, \"res/help_msg.txt\")\n with open(path, \"r\", encoding=\"utf-8\") as f_in:\n help_msg = f_in.read().strip()\n self.log(help_msg, \"info\")\n except:\n self.log(\"Help Message couldn't be loaded due to some error!\", \"error\")", "title": "" }, { "docid": "0cdf46121ff8744c1b86bcb1e6a5263f", "score": "0.71209794", "text": "def help_command(bot, update):\n\n help_message = \"La lista di comandi:\\n\\n\" \\\n \"/help - Stampa questo messaggio\\n\" \\\n \"/news - Leggi le ultime 10 news\\n\" \\\n \"/news num - Leggi le ultime <num> news\\n\" \\\n \"/newson - Abilita le notifiche per ogni nuova news (default)\\n\" \\\n \"/newsoff - Disabilita le notifiche per ogni nuova news\\n\" \\\n \"/prof - Stampa la lista dei professori\\n\" \\\n \"/prof cognome - Info su un docente\\n\" \\\n \"/segreteria - Info sulla segreteria studenti\\n\" \\\n \"/mensa - Info sugli orari della mensa\\n\" \\\n \"/adsu - Info sull'adsu\" \\\n \"\\n\\nQuesto bot è orgogliosamente open source, sviluppato da Giacomo Cerquone\" \\\n \" e Diego Mariani.\"\n\n bot.sendMessage(update.message.chat_id, text=help_message)", "title": "" }, { "docid": "6bd05572c036a6669b47c975b3081c0b", "score": "0.71194637", "text": "def print_help(self):\n if self.help:\n print(_HELP)\n sys.exit(0)", "title": "" }, { "docid": "cea8894a8508d96491d47442026b452e", "score": "0.71085113", "text": "def do_help(self, argv):\n\n if len(argv) == 0:\n commands = [x for x in dir(self) if x.startswith(\"do_\")]\n else:\n commands = [x for x in dir(self) if x.startswith(\"do_\") and x[3:] in argv]\n\n for c in commands:\n help_msg = getattr(self, c).__doc__\n print(f\"{c[3:]:15s}{help_msg}\")", "title": "" }, { "docid": "3fe455e64a7b2dfc2f6aaf7d11159978", "score": "0.7099045", "text": "def cmd_help(message):\n lines = []\n\n targets = message.parsed_args['<object>'] or (list(command_dispatcher.commands.keys()) + list(plugin_pages.keys()))\n\n for command_name in targets:\n if command_name in command_dispatcher.commands:\n command_handler = command_dispatcher.commands[command_name]\n if hasattr(command_handler, '__m5_permissions__'):\n if not permissions.check(message.source.identity, command_handler.__m5_permissions__):\n continue\n lines.append('Help for command {} (aliases: {}):'.format(\n command_name,\n ', '.join(command_handler.__m5_aliases__))\n )\n if command_handler.__doc__:\n lines.append(_remove_indent(command_handler.__doc__))\n else:\n lines.append(' No help message :(\\n')\n elif command_name in plugin_pages:\n lines.append('Help for plugin {}:'.format(command_name))\n lines.append(_remove_indent(plugin_pages[command_name]))\n else:\n lines.append('Unknown command or plugin: {}!'.format(command_name))\n\n reply('\\n'.join(lines), message)", "title": "" }, { "docid": "8dfd354ac9d0e6a843182fcae2635640", "score": "0.70909584", "text": "def menu_help():\n\thelp_msg= '''\nSTART <level file> - Starts the game with a provided file.\nQUIT - Quits the game\nHELP - Shows this message\n'''\n\tprint(help_msg)", "title": "" }, { "docid": "63e40fdca9cb5d9892351c1c102c842e", "score": "0.70897526", "text": "def help():\r\n print(\"\\nType the name of your next task (e.g. \\\"grading Algebra 1 HW\\\")\",\r\n \"\\nto get started. 
Or type one of the following commands:\\n\",\r\n \"\\n\\texit\\t\\texit TaskTimer\",\r\n \"\\n\\thistory\\t\\tdisplay previously recorded tasks\",\r\n \"\\n\\ttesting\\t\\tuse a test file for task history\",\r\n \"\\n\\tnormal\\t\\tuse the normal file for task history\",\r\n \"\\n\\thelp\\t\\tdisplay this help menu\",\r\n \"\\n\\topen\\t\\topen task history file\",\r\n \"\\n\\tclear\\t\\tclear the screen\")", "title": "" }, { "docid": "211d236d7ff232e48716395812b26e72", "score": "0.70661205", "text": "def do_help(self, line):\n print('Below commands are supported')\n help_content = \"\"\"\n add: Add data to status logger\n view: View entered data\n update: Update entered data\n commit: Write data to status log file\n sync: Sync status file to google drive \n exit: Exit from log file\n \"\"\"\n print(help_content)", "title": "" }, { "docid": "fa36f572f35173ebf1ad118aa98172b1", "score": "0.706077", "text": "def HelpTopic(self) -> str:", "title": "" }, { "docid": "95f4cd1165d60516ac4181fd60ea5f0d", "score": "0.7056012", "text": "def help(bot, update) -> None:\n update.message.reply_text('/hilfe /temperatur /luftfeuchtigkeit /wassermelder')", "title": "" }, { "docid": "b61a59661f958ee96e7ebe1cd0977f29", "score": "0.7034612", "text": "def on_help_clicked(self, obj):\n display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)", "title": "" }, { "docid": "6c294b55c665a470d5d5edc4d7d7d639", "score": "0.7034602", "text": "def invoke(self, ctx: click.Context) -> None:\n\n click.echo(self.help, color=ctx.color)\n ctx.exit(1)", "title": "" }, { "docid": "1687a44b9f94095576b6a6db49843f75", "score": "0.70306456", "text": "def help(bot, update):\n formatted_string = ('I can manage and perform stock analysis and broker related tasks.\\n\\n'\n 'Here are some basic commands to get started:\\n\\n'\n '/price (ticker) - find current stock price\\n'\n '/news (ticker) - latest news on company (top 5)\\n\\n'\n '<b>Portfolio Settings</b>\\n'\n '/portfolios - view all portfolios you\\'ve created'\n '/create (name) - creates a portfolio')\n update.message.reply_text(formatted_string, quote=False, parse_mode=ParseMode.HTML)", "title": "" }, { "docid": "ba53e27d4eb77fe79010e3b7a91c1585", "score": "0.6998821", "text": "async def lichess(self, ctx: Context) -> None:\n await ctx.send_help(ctx.command)", "title": "" }, { "docid": "fc4e44ba011c4638b29bfb05e6239eb9", "score": "0.69964015", "text": "def DoHelp(options, args):\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print >>options.stdout, COMMAND_USAGE_TEXT[args[0]]\n else:\n raise Error(\"unknown subcommand '%s'; see 'gclient help'\" % args[0])", "title": "" }, { "docid": "d2c875171cdc29f2cdd6ca1fcb2e7395", "score": "0.6978998", "text": "def help_handler(self, update, context):\n update.message.reply_text(\"Benutze /open, /sub oder /unsub um Aktionen durchzuführen\")", "title": "" }, { "docid": "84cb4bccf2bf42124d4933d4fa8ec066", "score": "0.6978929", "text": "def do_help(self, argv):\r\n if argv[1:]:\r\n for arg in argv[1:]:\r\n if self._do_one_help(arg):\r\n break\r\n else:\r\n # If bare 'help' is called, print this class's doc\r\n # string (if it has one).\r\n doc = self._doc_to_help(self.__class__)\r\n if doc:\r\n sys.stdout.write(doc + '\\n')\r\n sys.stdout.flush()", "title": "" }, { "docid": "0fe54cf26564c37b8e858fafa207fdc1", "score": "0.6975974", "text": "def help(self, print_output=True):\n help_text = self._rpc('help')\n if print_output:\n print(help_text)\n else:\n return help_text", "title": "" }, { "docid": "0574573d2ee6b59c8a8e7d1b3a236d5d", 
"score": "0.6974609", "text": "def _help(self, *args: Any) -> str:\n return self.dispatch(*self.encode_args(args)).__doc__", "title": "" }, { "docid": "c029ba5a5a73d2c31bb482ff8faf010c", "score": "0.69740677", "text": "async def send_bot_help(self, cogs: typing.Dict[\n commands.Cog, typing.Iterable[\n commands.Command\n ]]):\n e = discord.Embed(title='Help', color=colours.HELP)\n for cog in cogs:\n if not cog:\n continue\n lines = []\n for command in cogs[cog]:\n if command.hidden:\n continue\n line = '**{cmd}** *{brief}*'.format(\n cmd=self.get_command_signature(command),\n brief=command.brief or Help.brief\n )\n if line not in lines: # known bug where commands with\n lines.append(line) # aliases are duplicated\n text = '\\n'.join(lines)\n e.add_field(name=cog.qualified_name, value=text, inline=False)\n await self.get_destination().send(embed=e)", "title": "" }, { "docid": "fe99a6458991e5f15968dedfee02e281", "score": "0.6968065", "text": "def help(self, dummy):\r\n help = self.doc\r\n if help.find(\"%s\") > 0:\r\n help = help.replace(\"%s\", self.progname)\r\n print help,\r\n self.exit(0)", "title": "" }, { "docid": "667a8551ae9ca4590c37b4749bd8984b", "score": "0.69650424", "text": "def help(update, context):\r\n\thelp_mes = '''Comands list:\r\n\tfinish\r\n\tstart\r\n\taddlist\r\n\tadd\r\n\tload\r\n\tlogin\r\n\tresetanswer\r\n\tresetgame\r\n\tstats'''\r\n\tupdate.message.reply_text(help_mes)", "title": "" }, { "docid": "f56444348466292df90a45e14e00e322", "score": "0.69612944", "text": "async def send_cog_help(self, cog: commands.Cog):\n await self.send_bot_help({cog: cog.walk_commands()})", "title": "" }, { "docid": "e2efdfd2125c1ce7a32d54c971a8e6be", "score": "0.69586253", "text": "def help(self):\r\n top = Tk()\r\n top.title('Help on epicsLogger')\r\n text = Pmw.ScrolledText(top, text_font=self.fonts['help'], \r\n text_width=80, text_height=40)\r\n text.pack(fill=BOTH, expand=YES)\r\n text.insert(END, self.__doc__)\r\n text.insert(END, self.__init__.__doc__)", "title": "" } ]
835c11f46f8cfb87636628b33f3e0e4b
Create leaderboard view for Round2
[ { "docid": "2fe3cc33225358d9525373963b157990", "score": "0.56610113", "text": "def rd2leaderboard(request):\n\n #Add views\n playing_players = Rd2SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd2Leaderboard.html', context=context)", "title": "" } ]
[ { "docid": "437d7c5565d4948aa3c19c5e239cc8fe", "score": "0.7503058", "text": "def leaderboard(self):\n click.echo(\"\\n----Leaderboard----\")\n players = OrderedDict(\n sorted(self.players.items(), key=lambda x: getitem(x[1], \"round\"))\n )\n columns = [\"Rank\", \"Name\", \"Round\", \"Score\"]\n row_format = \"{:>15}\" * len(columns)\n click.echo(row_format.format(*columns))\n rank = 1\n for k, v in players.items():\n click.echo(row_format.format(rank, v[\"name\"], v[\"round\"], v[\"score\"]))\n rank = rank + 1", "title": "" }, { "docid": "6375733d6284707ba6aaf4b11d8578d3", "score": "0.72021484", "text": "def show_leaderboard():\n leaderboard_data = get_sorted_scores()\n return render_template(\"leaderboard.html\", leaderboard_data=leaderboard_data)", "title": "" }, { "docid": "dda1b561034c1f3cee9e7e65a12b0ebe", "score": "0.6722733", "text": "def buildLeaderboard(self):\n\t\toverall_standings = []\n\t\toverall_fpl_points = {}\n\t\toverall_league_points = {}\n\t\toverall_fines = {}\n\t\tgameweeks = {}\n\t\tfor i in self.league.league_gameweeks:\n\t\t\tgameweeks[i.fpl_event] = []\n\t\tfor i in self.members:\n\t\t\toverall_fpl_points[i] = 0\n\t\t\toverall_league_points[i] = 0\n\t\t\toverall_fines[i] = 0\n\t\t\trunning_league_points = 0\n\t\t\tfor j in i.league_gameweeks:\n\t\t\t\t\tif j.league_id == self.league.league_id:\n\t\t\t\t\t\toverall_fpl_points[i] += j.fpl_points\n\t\t\t\t\t\toverall_league_points[i] += j.championship_points\n\t\t\t\t\t\toverall_fines[i] += j.fines\n\t\t\t\t\t\trunning_league_points += j.championship_points\n\t\t\t\t\t\tgameweeks[j.fpl_event].append({\n\t\t\t\t\t\t\t'player': j.user,\n\t\t\t\t\t\t\t'fpl_points': j.fpl_points,\n\t\t\t\t\t\t\t'league_points': j.championship_points,\n\t\t\t\t\t\t\t'running_league_points': running_league_points,\n\t\t\t\t\t\t\t'fines': j.fines,\n\t\t\t\t\t\t\t'notes': j.notes\n\t\t\t\t\t\t\t})\n\t\tfor i in gameweeks:\n\t\t\tnewlist = sorted(gameweeks[i], key=lambda k: k['fpl_points'], reverse=True)\n\t\t\tgameweeks[i] = newlist\n\t\tfor i in overall_fpl_points:\n\t\t\toverall_standings.append({'player': i.username, 'fpl_points': overall_fpl_points[i], 'P & L': '+£69'})\n\t\tfor i in overall_league_points:\n\t\t\tfor j in overall_standings:\n\t\t\t\tif j['player'] == i.username:\n\t\t\t\t\tj['league_points'] = overall_league_points[i]\n\t\tfor i in overall_fines:\n\t\t\tfor j in overall_standings:\n\t\t\t\tif j['player'] == i.username:\n\t\t\t\t\tj['fines'] = overall_fines[i]\n\t\tsorted_overall = sorted(overall_standings, key=lambda k: k['league_points'], reverse=True)\n\t\tself.leaderboard = sorted_overall\n\t\tself.gameweeks = gameweeks", "title": "" }, { "docid": "f494a072a544585cb57a2fab8d31f093", "score": "0.6574417", "text": "def make_leaderboard(self):\n leaderboard = [(player_id, rating.mu - 3*rating.sigma) for player_id, rating in self._ratings.items()]\n leaderboard = sorted(leaderboard, key=operator.itemgetter(1), reverse=True)\n return leaderboard", "title": "" }, { "docid": "cafa310f61b85a6ec569e99cfafaa7c9", "score": "0.65529096", "text": "async def leaderboard(self, ctx: commands.Context):\n\n\t\treturn await ctx.send(await TrophyLeaderboard(ctx).create())", "title": "" }, { "docid": "b74ed25514064c5e10d0a2346470cd3a", "score": "0.6290081", "text": "def displayLeaderboard():\n\n global leaderboard\n\n Y_OFFSET = 50\n TOP_OFFSET = 150\n\n leaderboard_title = largeFont.render(\"Leader Board\", True, BLACK)\n leaderboard_titleRect = leaderboard_title.get_rect()\n leaderboard_titleRect.center = ((WIN_WIDTH / 2), 50)\n 
screen.blit(leaderboard_title, leaderboard_titleRect)\n\n for idx, person in enumerate(leaderboard):\n person_entry_txt = person[0] + '.'\n person_entry_txt += (' ') + person[1]\n\n leaderboard_entry = mediumFont.render(person_entry_txt, True, BLACK)\n leaderboard_entryRect = leaderboard_entry.get_rect()\n leaderboard_entryRect.bottomleft = ((WIN_WIDTH / 4), TOP_OFFSET + Y_OFFSET * idx)\n screen.blit(leaderboard_entry, leaderboard_entryRect)\n\n score_entry_txt = person[2]\n rightAlign = mediumFont.size(person[2])[0]\n\n leaderboard_entryScore = mediumFont.render(score_entry_txt, True, BLACK)\n leaderboard_entryScoreRect = leaderboard_entryScore.get_rect()\n leaderboard_entryScoreRect.bottomleft = ((3 * WIN_WIDTH / 4) - rightAlign, TOP_OFFSET + Y_OFFSET * idx)\n screen.blit(leaderboard_entryScore, leaderboard_entryScoreRect)", "title": "" }, { "docid": "29930feaccab07b2952f3ab60de65a3e", "score": "0.6228906", "text": "async def leaderboard(self, ctx):\n leaderboard = await self.bot.database.get_top_reps()\n embed = discord.Embed(colour=EMBED_ACCENT_COLOUR)\n embed.set_author(\n name=\"Reputation Leaderboard\",\n icon_url=\"https://images.emojiterra.com/mozilla/512px/1f3c6.png\",\n )\n for member_id, points in leaderboard:\n member = ctx.guild.get_member(member_id)\n if member:\n embed.add_field(\n name=str(member.nick if member.nick is not None else member.name),\n value=points,\n inline=False,\n )\n await ctx.send(embed=embed)", "title": "" }, { "docid": "6d25344c35e84a3b8b9d248159286e44", "score": "0.6203027", "text": "def leaderboard(self, id):\n from .game import GameLeaderboard\n return GameLeaderboard.leaderboard(self.short_name, id)", "title": "" }, { "docid": "b68db73095cfe00a7258b97ef6b10d3f", "score": "0.61703", "text": "async def leaderboard(self, ctx):\r\n guild = ctx.guild.id\r\n users = await self.bot.db.fetch(\"SELECT * FROM levels WHERE guild_id=$1 ORDER BY xp DESC\", guild)\r\n\r\n ranks = {(y := x + 1): f\"#{y}\" if y > 3 else f\"{self.leaderboard_emojis[y]}\" for x in range(10)}\r\n fields = {\"member\": [], \"level\": [], \"rank\": []}\r\n\r\n for index, value in zip(range(10), users):\r\n user = self.bot.get_user(value[\"user_id\"])\r\n if user:\r\n if (rank := index + 1) == 1:\r\n top_user = f\"Top Member: 🏆 **{str(user)}**\"\r\n fields[\"rank\"].append(ranks[rank])\r\n fields[\"member\"].append(f\"**{user.name}**\")\r\n xp = round((4 * (value['lvl'] ** 3) / 5))\r\n fields[\"level\"].append(f\"Level {value['lvl']} ({value['xp']}/{xp})\")\r\n else:\r\n for value in fields.values():\r\n value.append(\"...\")\r\n\r\n leaderboard = discord.Embed(color=ctx.me.colour, title=f\"Top 10 in {ctx.guild.name}\", description=top_user,\r\n timestamp=ctx.message.created_at)\r\n leaderboard.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\r\n\r\n author = await self.bot.db.fetchrow(\"SELECT * FROM levels WHERE user_id=$1 AND guild_id=$2\",\r\n ctx.author.id, guild)\r\n\r\n if author:\r\n fields[\"rank\"].append(ranks[users.index(author) + 1])\r\n fields[\"member\"].append(f\"**{ctx.author.name}**\")\r\n xp = round((4 * (author['lvl'] ** 3) / 5))\r\n fields[\"level\"].append(f\"Level {author['lvl']} ({author['xp']}/{xp})\")\r\n\r\n leaderboard.add_field(name=\"Rank\", value=\"\\n\".join(fields[\"rank\"]), inline=True)\r\n leaderboard.add_field(name=\"Member\", value=\"\\n\".join(fields[\"member\"]), inline=True)\r\n leaderboard.add_field(name=\"Level\", value=\"\\n\".join(fields[\"level\"]), inline=True)\r\n\r\n await ctx.send(embed=leaderboard)", "title": "" }, { 
"docid": "8b902558ddeb9651656b204c2bbf7032", "score": "0.6161494", "text": "def display_points(self) -> List[dict]:\n self.log.debug('Generating scores...')\n score_df = self.get_score(in_game=True) # type: pd.DataFrame\n self.log.debug(f'Retrieved {score_df.shape[0]} players\\' scores')\n if score_df.shape[0] == 0:\n return [\n BKitB.make_block_section('No one has scored yet. Check back later!')\n ]\n score_df.loc[:, 'rank_emoji'] = [':blank:' for _ in range(score_df.shape[0])]\n if score_df['current'].sum() != 0:\n # Determine the emojis for 1st, 2nd and 3rd place\n is_zero = (score_df.current == 0)\n for r in range(1, 6):\n score_df.loc[(score_df.current_rank == r) & (~is_zero), 'rank_emoji'] = f':cah-rank-{r}:'\n # Determine if the recent winner is on a streak\n score_df['streak'] = ''\n if self.current_game is not None:\n player_id, n_streak = self.determine_streak()\n if n_streak > 0:\n # Streak!\n score_df.loc[score_df.player_id == player_id, 'streak'] = ':steak:' * n_streak\n # Set order of the columns\n score_df = score_df[['rank_chg_emoji', 'rank_emoji', 'current_rank', 'display_name',\n 'current', 'overall', 'streak']]\n score_df = score_df.sort_values('current_rank', ascending=True)\n\n scores_list = []\n for i, r in score_df.iterrows():\n dname = f\"{r['display_name'][:14].title():_<15}\"\n emos = f\"{r['rank_chg_emoji'] + r['rank_emoji']}\"\n c_rank = f\"{r['current_rank']:>2.0f}\"\n scores = f\"*`{r['current']:>4.0f}`*`({r['overall']:>4.0f})`\"\n streak = f\"{r['streak']}\"\n line = f\"{emos}*`{c_rank}`*` {dname}`:diddlecoin:{scores}{streak}\"\n scores_list.append(line)\n\n return [\n BKitB.make_context_section([BKitB.markdown_section('*Current Scores*')]),\n BKitB.make_block_divider(),\n BKitB.make_block_section(scores_list)\n ]", "title": "" }, { "docid": "3b85cda051d1b14514e22f0914e73c40", "score": "0.6047141", "text": "def show_leaderboard_screen(self):\n self.leaderboard.retrieve()\n self.clear()\n self.add_button(\"Back\", self.show_main_screen)\n\n if self.leaderboard.entries is None:\n self.add_button(\"Error Loading Leaderboard\", None)\n else:\n for i in range(0, 4 if len(self.leaderboard.entries) > 6 else len(self.leaderboard.entries)):\n entry = self.leaderboard.entries[i]\n self.add_button(entry.name + \" Lvl=\" + entry.level + \" Scr=\" + str(entry.score) + \" Wve=\" + str(entry.wave), None)", "title": "" }, { "docid": "74ac9d9e21fe3d8009430143163d4ee6", "score": "0.60361916", "text": "def load_leaderboard(self, name: str, show: bool = False) -> list:\n leaderboard = []\n members = self.__find_models(name, leaderboard_prefix)\n if len(members) > 0:\n for member in members:\n self.cursor.execute(\n f\"SELECT * FROM {self.schema}.{PREPROCESSORS} WHERE MODEL = '{member[0]}' \"\n f\"AND VERSION = {member[1]}\"\n )\n columns = self.cursor.fetchall()[\n 0\n ] # MODEL, VERSION, JSON, TRAIN_ACC, VALID_ACC, ALGORITHM, METRIC\n prep = self.__setup_preprocessor(columns[2])\n if \"Regressor\" in columns[5]:\n algo = self.reg_dict[columns[5]]\n if \"Classifier\" in columns[5]:\n algo = self.cls_dict[columns[5]]\n algo.model = super().load_model(member[0], member[1])\n algo.title = columns[5]\n model_board_member = ModelBoard(algo, 0, prep)\n model_board_member.valid_score = columns[4]\n model_board_member.train_score = columns[3]\n leaderboard.append(model_board_member)\n if show:\n print(\"\\033[33m{}\".format(f\"Loaded leaderboard '{name}':\\n\"))\n place = 1\n for member in leaderboard:\n print(\n \"\\033[33m {}\".format(\n str(place)\n + \". 
\"\n + str(member.algorithm.model)\n + f\"\\n Train {columns[6]} score: \"\n + str(member.train_score)\n + f\"\\n Holdout {columns[6]} score: \"\n + str(member.valid_score)\n )\n )\n print(\"\\033[0m {}\".format(\"\"))\n place += 1\n else:\n raise StorageError(\"Leaderboard not found!\")\n return leaderboard", "title": "" }, { "docid": "6c44c80cea96895f9c3990277d80b4dc", "score": "0.5880609", "text": "def rd1leaderboard(request):\n\n #Add views\n playing_players = Rd1SlotModel.objects.filter(player_name__isnull=False)\n\n endurance_leader = Rd1SlotModel.objects.aggregate(Max('endurance_score'))\n\n #Add context\n context = {\n 'playing_players': playing_players,\n 'endurance_leader': endurance_leader,\n }\n\n return render(request, 'rd1Leaderboard.html', context=context)", "title": "" }, { "docid": "b70190aa8df97b0bc7195b975aee1d0b", "score": "0.5836765", "text": "def leaderboard():\n #allows the leaderboard to be viewed without updating/adding to it\n if request.method == 'POST':\n new_leaderboard_name = request.form[\"leaderboard_name\"] #store the users input name\n #add the new highscore to the database\n new_highscore= User(username=new_leaderboard_name, score=flask_session['user_score'])\n db.session.add(new_highscore)\n db.session.commit()\n\n leaderboard = User.query.order_by(desc(User.score)).limit(10).all()\n\n return render_template('leaderboard.html', leaderboard=leaderboard)", "title": "" }, { "docid": "f4837939975e559645382e5d71401c45", "score": "0.58296454", "text": "def fullleaderboard (request):\n # Define views here\n score_submit = EventEntryModel.objects.exclude(winner__isnull=True).count()\n active_players = PlayerModel.objects.all()\n\n loaded_points = list(EventEntryModel.objects.aggregate(Sum('points')).values())[0]\n awarded_points = list(EventEntryModel.objects.exclude(winner__isnull=True).aggregate(Sum('points')).values())[0]\n\n context = {\n 'score_submit': score_submit,\n 'active_players': active_players,\n 'loaded_points': loaded_points,\n 'awarded_points': awarded_points,\n 'tour_name_top_line': tour_name_top_line,\n 'tour_name_highlight': tour_name_highlight,\n }\n return render(request, 'fullLeaderboard.html', context=context)", "title": "" }, { "docid": "7dd6028e15ece7f8f95d3f3b2a0caa2c", "score": "0.5812916", "text": "async def _server_leaderboard(self, ctx, top : int=10):\n server = ctx.message.server\n if top < 1:\n top = 10\n bank_sorted = sorted(self.bank.get_server_accounts(server),\n key=lambda x: x.balance, reverse=True)\n if len(bank_sorted) < top:\n top = len(bank_sorted)\n topten = bank_sorted[:top]\n highscore = \"\"\n place = 1\n for acc in topten:\n highscore += str(place).ljust(len(str(top))+1)\n highscore += (acc.name+\" \").ljust(23-len(str(acc.balance)))\n highscore += str(acc.balance) + \"\\n\"\n place += 1\n if highscore:\n if len(highscore) < 1985:\n await self.bot.say(\"```py\\n\"+highscore+\"```\")\n else:\n await self.bot.say(\"```css\\nThe leaderboard is too big to be displayed. 
Try with a lower <top> parameter.\\n```\")\n else:\n await self.bot.say(\"```css\\nThere are no accounts in the bank.\\n```\")", "title": "" }, { "docid": "bbabea782c7d022c2385f13b6aa721c6", "score": "0.5779456", "text": "def test_get_leaderboard(self):\n pass", "title": "" }, { "docid": "dae9a8857eb42415296cdc731bc2346e", "score": "0.57685685", "text": "async def leaderboard(self, ctx, *args):\r\n if ctx.channel.id == BANK_CHANNEL or ctx.channel.id in GENERAL_CHANNELS:\r\n if len(args) == 0:\r\n name = None\r\n key = 'total'\r\n elif len(args) == 1:\r\n if 'key=' in args[0]:\r\n name = None\r\n for userkey in users.DEFAULT_ACCOUNT.keys():\r\n if args[0][4:] in userkey:\r\n key = userkey\r\n break\r\n else:\r\n if args[0][4:] == 'total':\r\n key = 'total'\r\n else:\r\n await ctx.send(f'Key {args[0]} not found.')\r\n return\r\n\r\n else:\r\n name = args[0]\r\n key = 'total'\r\n else:\r\n key = args[0]\r\n if key not in users.DEFAULT_ACCOUNT.keys():\r\n if key != 'total':\r\n await ctx.send(f'Key {key} not found.')\r\n return\r\n name = ' '.join(args[1:])\r\n\r\n key_name = {\r\n users.ITEMS_KEY: 'gold',\r\n users.SLAYER_XP_KEY: 'slayer',\r\n users.COMBAT_XP_KEY: 'combat',\r\n users.GATHER_XP_KEY: 'gather',\r\n users.ARTISAN_XP_KEY: 'artisan',\r\n users.COOK_XP_KEY: 'cooking',\r\n users.QUESTS_KEY: 'quest points',\r\n 'total': 'total level'\r\n }\r\n if key not in key_name.keys():\r\n await ctx.send(f\"Can't make leaderboard with key {key}.\")\r\n return\r\n\r\n leaderboard = users.get_values_by_account(key=key)\r\n\r\n out = f':hammer_pick: __**{key.upper()} LEADERBOARD**__ :crossed_swords:\\n'\r\n if name is None:\r\n try:\r\n for i in range(10):\r\n user_id, amount = leaderboard[i]\r\n amount_formatted = '{:,}'.format(amount)\r\n member = ctx.message.guild.get_member(user_id)\r\n if member is not None:\r\n name = get_display_name(member)\r\n else:\r\n name = f'User {user_id}'\r\n out += f'**({1 + i}) {name}**: '\r\n if key == users.ITEMS_KEY:\r\n out += f'{amount_formatted} coins\\n'\r\n elif key == users.QUESTS_KEY:\r\n out += f'{amount_formatted} quests\\n'\r\n elif key == 'total':\r\n out += f'{amount_formatted} levels\\n'\r\n else:\r\n out += f'{users.xp_to_level(amount)} *({amount_formatted}xp)*\\n'\r\n except IndexError:\r\n pass\r\n await ctx.send(out)\r\n else:\r\n if name == 'bottom':\r\n try:\r\n for i in range(len(leaderboard) - 10, len(leaderboard)):\r\n user_id, amount = leaderboard[i]\r\n amount_formatted = '{:,}'.format(amount)\r\n member = ctx.message.guild.get_member(user_id)\r\n if member is not None:\r\n name = get_display_name(member)\r\n else:\r\n name = f'User {user_id}'\r\n out += f'**({1 + i}) {name}**: '\r\n if key == users.ITEMS_KEY:\r\n out += f'{amount_formatted} coins\\n'\r\n elif key == users.QUESTS_KEY:\r\n out += f'{amount_formatted} quests\\n'\r\n elif key == 'total':\r\n out += f'{amount_formatted} levels\\n'\r\n else:\r\n out += f'{users.xp_to_level(amount)} *({amount_formatted}xp)*\\n'\r\n except IndexError:\r\n pass\r\n await ctx.send(out)\r\n else:\r\n try:\r\n name_list = [x[0] for x in leaderboard]\r\n name_member = parse_name(ctx.message.guild, name)\r\n name_index = name_list.index(name_member.id)\r\n if name_index < 5:\r\n lower = 0\r\n upper = 10\r\n else:\r\n lower = name_index - 5\r\n upper = name_index + 5\r\n if name_index + 5 > len(leaderboard):\r\n upper = len(leaderboard)\r\n lower = len(leaderboard) - 10\r\n for i in range(lower, upper):\r\n user_id, amount = leaderboard[i]\r\n amount_formatted = '{:,}'.format(amount)\r\n member = 
ctx.message.guild.get_member(user_id)\r\n if member is not None:\r\n name = get_display_name(member)\r\n else:\r\n name = f'User {user_id}'\r\n out += f'**({1 + i}) {name}**: '\r\n if key == users.ITEMS_KEY:\r\n out += f'{amount_formatted} coins\\n'\r\n elif key == users.QUESTS_KEY:\r\n out += f'{amount_formatted} quests\\n'\r\n elif key == 'total':\r\n out += f'{amount_formatted} levels\\n'\r\n else:\r\n out += f'{users.xp_to_level(amount)} *({amount_formatted}xp)*\\n'\r\n except IndexError:\r\n pass\r\n except ValueError:\r\n await ctx.send(f'Name {name} not found in leaderboard.')\r\n await ctx.send(out)", "title": "" }, { "docid": "6502ffba2dbced13f2f4a4f9f966cc3f", "score": "0.5761324", "text": "def compose(canvas):\n #Write the score for each player\n canvas.draw_text('Blue '+str(blue_score_G),[60,585],30,'Blue')\n canvas.draw_text('Brown '+str(brown_score_G),[430,585],30,'Brown')\n #Draw the grid unless there is no winner\n if get_winner()==0:\n boardDraw=Draw()\n boardDraw.draw_board(canvas)\n set_pieces(canvas,0,0)\n #Draw winner box\n #Brown is the winner\n elif get_winner()==1:\n brownWinsDraw=Draw()\n brownWinsDraw.draw_winner(canvas,'Brown') \n #Blue is the winner \n elif get_winner()==-1:\n blueWinsDraw=Draw()\n blueWinsDraw.draw_winner(canvas,'Blue')", "title": "" }, { "docid": "798b9d5c1c3c546cf1dc37ee60c9219d", "score": "0.5759699", "text": "def getleaderboard(request):\n note_session_activity(request)\n\n if not using_unique_session(request.user):\n return HttpResponseForbidden()\n\n owner_filter = request.GET['owner_filter']\n body_pk = int(request.GET['legislative_body'])\n leg_body = LegislativeBody.objects.get(pk=body_pk)\n\n display = getleaderboarddisplay(leg_body, owner_filter)\n if display is None:\n return HttpResponse(\n _('No display configured'), content_type='text/plain')\n\n plans = getvalidplans(leg_body, request.user\n if owner_filter == 'mine' else None)\n\n try:\n html = display.render(plans, request)\n return HttpResponse(html, content_type='text/html; charset=utf-8')\n except Exception, ex:\n logger.warn('Leaderboard could not be fetched.')\n logger.debug('Reason: %s', ex)\n return HttpResponse(str(ex), content_type='text/plain')", "title": "" }, { "docid": "4c5183029122dd3245874568b3b70244", "score": "0.57568544", "text": "def make_leaderboard(object, leaderboard_frame=None,\n sort_metric=\"AUTO\",\n extra_columns=[],\n scoring_data=\"AUTO\"):\n def _get_models(obj):\n if isinstance(obj, list):\n result = []\n for o in obj:\n res = _get_models(o)\n if isinstance(res, list):\n result.extend(res)\n else:\n result.append(res)\n return result\n elif hasattr(obj, \"leaderboard\"):\n return [row[0] for row in obj.leaderboard.as_data_frame(use_pandas=False, header=False)]\n elif hasattr(obj, \"model_ids\"):\n return obj.model_ids\n elif hasattr(obj, \"model_id\"):\n return obj.model_id\n elif is_type(obj, str):\n return obj\n else:\n raise H2OValueError(\"Unsupported model_id!\")\n\n model_ids = _get_models(object)\n assert is_type(model_ids, [str])\n\n if scoring_data.lower() not in (\"auto\", \"train\", \"valid\", \"xval\"):\n raise H2OValueError(\"Scoring data has to be set to one of \\\"AUTO\\\", \\\"train\\\", \\\"valid\\\", \\\"xval\\\".\")\n\n m_frame = H2OFrame._expr(ExprNode(\n \"makeLeaderboard\",\n model_ids,\n leaderboard_frame.key if leaderboard_frame is not None else \"\",\n sort_metric,\n extra_columns,\n scoring_data))\n return m_frame", "title": "" }, { "docid": "752d02403c47af838429a85f865b45d0", "score": "0.57278216", "text": "def 
leaderboards(self):\n from .game import GameLeaderboard\n return GameLeaderboard.leaderboards(self.short_name)", "title": "" }, { "docid": "7bddd05f16c5849c0de06c1619c399ae", "score": "0.57092756", "text": "def generate_board():\n\t# score_label.config(text=str(b.score))\n\t# score_label.after(100, generate_board)\n\tfor i in range(len(b.board)):\n\t\tfor j in range(len(b.board[0])):\n\t\t\tnum = b.board[i][j]\n\t\t\tbackground_color = color_map[num]\n\t\t\tcanvas.create_rectangle(j*100, i*100, (j+1)*100, (i+1)*100, fill=background_color)\n\t\t\tif num != 0:\n\t\t\t\tif num == 2 or num == 4:\n\t\t\t\t\tnum_color = \"#776e65\"\n\t\t\t\telse:\n\t\t\t\t\tnum_color = \"#f9f6f2\"\n\t\t\t\tcanvas.create_text(j*100 + offset_map[num], i*100 + 50, anchor=tk.W, font=(\"Arial\", size_map[num]), text=str(num), fill=num_color)", "title": "" }, { "docid": "9674bbc8fbb9aa414e0df80fccf220de", "score": "0.5705014", "text": "async def _global_leaderboard(self, top : int=10):\n if top < 1:\n top = 10\n bank_sorted = sorted(self.bank.get_all_accounts(),\n key=lambda x: x.balance, reverse=True)\n unique_accounts = []\n for acc in bank_sorted:\n if not self.already_in_list(unique_accounts, acc):\n unique_accounts.append(acc)\n if len(unique_accounts) < top:\n top = len(unique_accounts)\n topten = unique_accounts[:top]\n highscore = \"\"\n place = 1\n for acc in topten:\n highscore += str(place).ljust(len(str(top))+1)\n highscore += (\"{} |{}| \".format(acc.name, acc.server.name)).ljust(23-len(str(acc.balance)))\n highscore += str(acc.balance) + \"\\n\"\n place += 1\n if highscore:\n if len(highscore) < 1985:\n await self.bot.say(\"```py\\n\"+highscore+\"```\")\n else:\n await self.bot.say(\"```css\\nThe leaderboard is too big to be displayed. Try with a lower <top> parameter.\\n```\")\n else:\n await self.bot.say(\"```css\\nThere are no accounts in the bank.\\n```\")", "title": "" }, { "docid": "5e04c5112c4339c0075facf6aa06ceb6", "score": "0.5663041", "text": "def scoreboard():\r\n scores = {}\r\n for user in models.User.query.all():\r\n if user.is_admin:\r\n continue\r\n user.solved_questions = list(filter(lambda x: not x.question.hide, user.solved_questions))\r\n scores[user.get_id()] = { 'username' : user.username, 'score': user.total_score, 'last_question_date': user.solved_questions[len(user.solved_questions)-1].date if len(user.solved_questions) > 0 else datetime.datetime.min }\r\n\r\n scores = helpers.sortScoreDict(scores)\r\n \"\"\"\r\n enumerate generates indices for the orderedDict\r\n \"\"\"\r\n return render_template(\"scoreboard.html\",scores=enumerate(scores.items()))", "title": "" }, { "docid": "beb981b58ae7a0cf2ba16781b2f09256", "score": "0.5655952", "text": "def playerdetail (request,name):\n\n #Basic player details\n player_image = PlayerModel.objects.get(name=name).image\n player_HC = PlayerModel.objects.get(name=name).HC\n player_highfinish = PlayerModel.objects.get(name=name).highfinish\n player_tournum = PlayerModel.objects.get(name=name).tournum\n player_totalpoints = LeaderBoardModel.objects.get(player=name).overall_total\n player_totalrank = LeaderBoardModel.objects.filter(overall_total__gt=player_totalpoints).count() + 1\n\n\n target_holes = 10 #Change to 10 in production\n\n ##START ROUND 1 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd1holes_played = Rd1SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd1holes_played is None:\n rd1holes_played = 0\n else:\n rd1holes_played = 
Rd1SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd1holes_played = 0\n\n\n\n #Rd1 Player golf score & rank\n if rd1holes_played >= target_holes:\n rd1golf_score = Rd1SlotModel.objects.get(player_name__name=name).player_score\n rd1golf_scoreRank = Rd1SlotModel.objects.filter(player_score__lt=rd1golf_score).count() + 1\n rd1golf_stbl = Rd1SlotModel.objects.get(player_name__name=name).player_stbl\n rd1golf_stblRank = Rd1SlotModel.objects.filter(player_stbl__gt=rd1golf_stbl).count() + 1\n else:\n rd1golf_score = \"-\"\n rd1golf_scoreRank= \"n/a\"\n rd1golf_stbl = \"-\"\n rd1golf_stblRank= \"n/a\"\n\n #Rd1PlayerPoints\n try:\n rd1golf_points = LeaderBoardModel.objects.get(player=name).rd1_golf\n except:\n rd1golf_points = \"-\"\n try:\n rd1golf_rank = LeaderBoardModel.objects.filter(rd1_golf__gt=rd1golf_points).count() + 1\n except:\n rd1golf_rank = \"-\"\n try:\n rd1ctpld_points = LeaderBoardModel.objects.get(player=name).rd1_ctpld\n except:\n rd1ctpld_points = \"-\"\n try:\n rd1ctpld_rank = LeaderBoardModel.objects.filter(rd1_ctpld__gt=rd1ctpld_points).count() + 1\n except:\n rd1ctpld_rank = \"-\"\n try:\n rd1bonus_points = LeaderBoardModel.objects.get(player=name).rd1_bonus\n except:\n rd1bonus_points = \"-\"\n try:\n rd1bonus_rank = LeaderBoardModel.objects.filter(rd1_bonus__gt=rd1bonus_points).count() + 1\n except:\n rd1bonus_rank = \"-\"\n try:\n rd1total_points = rd1golf_points + rd1ctpld_points + rd1bonus_points\n except:\n rd1total_points = \"-\"\n try:\n rd1total_rank = LeaderBoardModel.objects.filter(rd1_total__gt=rd1total_points).count() + 1\n except:\n rd1total_rank = \"-\"\n\n try:\n round1overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd1_total')).values())[0]\n except:\n round1overall_points = 0\n\n\n ##START ROUND 2 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd2holes_played = Rd2SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd2holes_played is None:\n rd2holes_played = 0\n else:\n rd2holes_played = Rd2SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd2holes_played = 0\n\n #Rd2 Player golf score & rank\n if rd2holes_played >= target_holes:\n rd2golf_score = Rd2SlotModel.objects.get(player_name__name=name).player_score\n rd2golf_scoreRank = Rd2SlotModel.objects.filter(player_score__lt=rd2golf_score).count() + 1\n rd2golf_stbl = Rd2SlotModel.objects.get(player_name__name=name).player_stbl\n rd2golf_stblRank = Rd2SlotModel.objects.filter(player_stbl__gt=rd2golf_stbl).count() + 1\n else:\n rd2golf_score = \"-\"\n rd2golf_scoreRank= \"n/a\"\n rd2golf_stbl = \"-\"\n rd2golf_stblRank= \"n/a\"\n\n #Rd2PlayerPoints\n try:\n rd2golf_points = LeaderBoardModel.objects.get(player=name).rd2_golf\n except:\n rd2golf_points = \"-\"\n try:\n rd2golf_rank = LeaderBoardModel.objects.filter(rd2_golf__gt=rd2golf_points).count() + 1\n except:\n rd2golf_rank = \"-\"\n try:\n rd2ctpld_points = LeaderBoardModel.objects.get(player=name).rd2_ctpld\n except:\n rd2ctpld_points = \"-\"\n try:\n rd2ctpld_rank = LeaderBoardModel.objects.filter(rd2_ctpld__gt=rd2ctpld_points).count() + 1\n except:\n rd2ctpld_rank = \"-\"\n try:\n rd2bonus_points = LeaderBoardModel.objects.get(player=name).rd2_bonus\n except:\n rd2bonus_points = \"-\"\n try:\n rd2bonus_rank = LeaderBoardModel.objects.filter(rd2_bonus__gt=rd2bonus_points).count() + 1\n except:\n rd2bonus_rank = \"-\"\n try:\n rd2total_points = rd2golf_points + rd2ctpld_points + rd2bonus_points\n except:\n rd2total_points = \"-\"\n 
try:\n rd2total_rank = LeaderBoardModel.objects.filter(rd2_total__gt=rd2total_points).count() + 1\n except:\n rd2total_rank = \"-\"\n\n try:\n round2overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd2_total')).values())[0]\n except:\n round2overall_points = 0\n\n ##START ROUND 3 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd3holes_played = Rd3SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd3holes_played is None:\n rd3holes_played = 0\n else:\n rd3holes_played = Rd3SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd3holes_played = 0\n\n #Rd3 Player golf score & rank\n if rd3holes_played >= target_holes:\n rd3golf_score = Rd3SlotModel.objects.get(player_name__name=name).player_score\n rd3golf_scoreRank = Rd3SlotModel.objects.filter(player_score__lt=rd2golf_score).count() + 1\n rd3golf_stbl = Rd3SlotModel.objects.get(player_name__name=name).player_stbl\n rd3golf_stblRank = Rd3SlotModel.objects.filter(player_stbl__gt=rd2golf_stbl).count() + 1\n else:\n rd3golf_score = \"-\"\n rd3golf_scoreRank= \"n/a\"\n rd3golf_stbl = \"-\"\n rd3golf_stblRank= \"n/a\"\n\n #Rd2PlayerPoints\n try:\n rd3golf_points = LeaderBoardModel.objects.get(player=name).rd3_golf\n except:\n rd3golf_points = \"-\"\n try:\n rd3golf_rank = LeaderBoardModel.objects.filter(rd3_golf__gt=rd3golf_points).count() + 1\n except:\n rd3golf_rank = \"-\"\n try:\n rd3ctpld_points = LeaderBoardModel.objects.get(player=name).rd3_ctpld\n except:\n rd3ctpld_points = \"-\"\n try:\n rd3ctpld_rank = LeaderBoardModel.objects.filter(rd3_ctpld__gt=rd3ctpld_points).count() + 1\n except:\n rd3ctpld_rank = \"-\"\n try:\n rd3bonus_points = LeaderBoardModel.objects.get(player=name).rd3_bonus\n except:\n rd3bonus_points = \"-\"\n try:\n rd3bonus_rank = LeaderBoardModel.objects.filter(rd3_bonus__gt=rd3bonus_points).count() + 1\n except:\n rd3bonus_rank = \"-\"\n try:\n rd3total_points = rd3golf_points + rd3ctpld_points + rd3bonus_points\n except:\n rd3total_points = \"-\"\n try:\n rd3total_rank = LeaderBoardModel.objects.filter(rd3_total__gt=rd3total_points).count() + 1\n except:\n rd3total_rank = \"-\"\n\n try:\n round3overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd3_total')).values())[0]\n except:\n round3overall_points = 0\n\n ##START OTHER_SCORES CALCULATIONS -->\n\n #Other Player Points\n try:\n social_points = LeaderBoardModel.objects.get(player=name).social\n except:\n social_points = \"-\"\n try:\n social_rank = LeaderBoardModel.objects.filter(social__gt=social_points).count() + 1\n except:\n social_rank = \"-\"\n try:\n bestdressed_points = LeaderBoardModel.objects.get(player=name).best_dressed\n except:\n bestdressed_points = \"-\"\n try:\n bestdressed_rank = LeaderBoardModel.objects.filter(best_dressed__gt=bestdressed_points).count() + 1\n except:\n bestdressed_rank = \"-\"\n try:\n tipping_points = LeaderBoardModel.objects.get(player=name).tipping\n except:\n tipping_points = \"-\"\n try:\n tipping_rank = LeaderBoardModel.objects.filter(tipping__gt=tipping_points).count() + 1\n except:\n tipping_rank = \"-\"\n try:\n othertotal_points = social_points + bestdressed_points + tipping_points\n except:\n othertotal_points = \"-\"\n try:\n othertotal_rank = LeaderBoardModel.objects.filter(other_total__gt=othertotal_points).count() + 1\n except:\n othertotal_rank = \"-\"\n\n try:\n otheroverall_points = list(LeaderBoardModel.objects.aggregate(Sum('other_total')).values())[0]\n except:\n otheroverall_points = 0\n\n## == END SCORING CALCS 
==\n\n context ={\n 'name': name,\n 'player_image': player_image,\n 'player_HC': player_HC,\n 'player_highfinish': player_highfinish,\n 'player_tournum': player_tournum,\n 'player_totalpoints': player_totalpoints,\n 'player_totalrank': player_totalrank,\n 'rd1golf_score': rd1golf_score,\n 'rd1golf_stbl': rd1golf_stbl,\n 'rd1golf_scoreRank': rd1golf_scoreRank,\n 'rd1golf_stblRank': rd1golf_stblRank,\n 'rd1golf_points': rd1golf_points,\n 'rd1golf_rank': rd1golf_rank,\n 'rd1ctpld_points': rd1ctpld_points,\n 'rd1ctpld_rank': rd1ctpld_rank,\n 'rd1bonus_points': rd1bonus_points,\n 'rd1bonus_rank': rd1bonus_rank,\n 'rd1total_points': rd1total_points,\n 'rd1total_rank': rd1total_rank,\n 'round1overall_points': round1overall_points,\n 'rd2golf_score': rd2golf_score,\n 'rd2golf_stbl': rd2golf_stbl,\n 'rd2golf_scoreRank': rd2golf_scoreRank,\n 'rd2golf_stblRank': rd2golf_stblRank,\n 'rd2golf_points': rd2golf_points,\n 'rd2golf_rank': rd2golf_rank,\n 'rd2ctpld_points': rd2ctpld_points,\n 'rd2ctpld_rank': rd2ctpld_rank,\n 'rd2bonus_points': rd2bonus_points,\n 'rd2bonus_rank': rd2bonus_rank,\n 'rd2total_points': rd2total_points,\n 'rd2total_rank': rd2total_rank,\n 'round2overall_points': round2overall_points,\n 'rd3golf_score': rd3golf_score,\n 'rd3golf_stbl': rd3golf_stbl,\n 'rd3golf_scoreRank': rd3golf_scoreRank,\n 'rd3golf_stblRank': rd3golf_stblRank,\n 'rd3golf_points': rd3golf_points,\n 'rd3golf_rank': rd3golf_rank,\n 'rd3ctpld_points': rd3ctpld_points,\n 'rd3ctpld_rank': rd3ctpld_rank,\n 'rd3bonus_points': rd3bonus_points,\n 'rd3bonus_rank': rd3bonus_rank,\n 'rd3total_points': rd3total_points,\n 'rd3total_rank': rd3total_rank,\n 'round3overall_points': round3overall_points,\n 'social_points': social_points,\n 'social_rank': social_rank,\n 'bestdressed_points': bestdressed_points,\n 'bestdressed_rank': bestdressed_rank,\n 'tipping_points': tipping_points,\n 'tipping_rank': tipping_rank,\n 'othertotal_points': othertotal_points,\n 'othertotal_rank': othertotal_rank,\n 'otheroverall_points': otheroverall_points,\n\n }\n\n return render(request, 'playerDetail.html', context=context)", "title": "" }, { "docid": "e75b49b24fb2501d75bc989162072b39", "score": "0.56511414", "text": "def high_scores(self):\n self.top = tk.Toplevel(self._master,width=80,height = 80)\n self.top.title(\"Top 3\")\n top_label = tk.Label(self.top,text=\"Hight Scores\", bg = 'medium spring green')\n top_label.config(height =1, font = ((None,20)))\n top_label.pack(side = tk.TOP,fill = tk.X)\n first_player = tk.Label(self.top, text = '{0}:{1}s'.format(self.name[0],self._seconds))\n first_player.pack(side = tk.TOP)\n sec_player = tk.Label(self.top, text = '{0}:{1}s'.format(self.name[1],self._seconds))\n sec_player.pack(side = tk.TOP)\n th_player = tk.Label(self.top, text = '{0}:{1}s'.format(self.name[2],self._seconds))\n th_player.pack(side = tk.TOP)\n\n top_button = tk.Button(self.top, text = 'Done',command = self.done)\n top_button.pack(side = tk.TOP)", "title": "" }, { "docid": "5b003ef09a068df920591e2a1618c889", "score": "0.56331533", "text": "def display_score_window(self):\r\n \r\n #does the score window exist?\r\n try:\r\n if self.score_window.winfo_exists() == 1:\r\n return\r\n except AttributeError:\r\n pass\r\n \r\n self.score_window = tkinter.Toplevel(self.board_window)\r\n\r\n #Player labels and scores\r\n ex_name = tkinter.Label(self.score_window, text='Excluded',\r\n font = self.prefs['disp_font'], fg = 'red')\r\n ex_name.grid(row = 0, column = 0, columnspan = 2)\r\n ex_score = tkinter.Label(self.score_window, 
textvariable=self.excluded_score,\r\n font = self.prefs['disp_font'], fg = 'red')\r\n ex_score.grid(row = 1, column = 0, columnspan = 2)\r\n \r\n for i,j in enumerate(self.players):\r\n col = (i+1) * 2\r\n \r\n p_name = tkinter.Label(self.score_window, text=j['name'], font = self.prefs['disp_font'])\r\n p_name.grid(row=0, column=col)\r\n\r\n p_score = tkinter.Label(self.score_window, textvariable=self.scores[i],\r\n font = self.prefs['disp_font'])\r\n p_score.grid(row = 1, column = col, columnspan = 2)", "title": "" }, { "docid": "82295137e8c69672c468eeb11df2a14e", "score": "0.5604512", "text": "def _get_highscore(self):\n self._player_list = player_list = self._highscore.get_entries(game='basic')\n \n self.high_scores=tk.Toplevel(root)\n self.high_scores.title('High Score List')\n\n self.high_score_title = tk.Label(self.high_scores, text= 'Top 10 Players\\n')\n self.high_score_title.pack()\n\n for i, player_list in enumerate(player_list): # List top scoring players from 1 to 10 format\n self.high_score_list = tk.Label(self.high_scores, text = \"[{}] ,{}\\n\".format(i+1,player_list)).pack()", "title": "" }, { "docid": "4e9eacde1f1ec23048979852d73d99a7", "score": "0.5563783", "text": "async def leaderboard(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.invoke(self._server_leaderboard)", "title": "" }, { "docid": "c65811e0ee90fe44e7802c6af4baf24a", "score": "0.5561256", "text": "def test_scoreboard(self):\n run.add_to_scoreboard(\"testSuite\", 12)\n leaderboard = run.get_scoreboard()\n \n self.assertIn({\"username\":\"testSuite\", \"score\": 12}, leaderboard)", "title": "" }, { "docid": "bb66f89263be375665c7f7303c124495", "score": "0.555781", "text": "def __repr__(self):\n if self._repr is None:\n # Player 0 scoring area\n if self.turn == 1:\n rows = [\" --\", \">| \", \" | \", \" | \", \" --\"]\n else:\n rows = [\" --\", \" | \", \" | \", \">| \", \" --\"]\n rows[2] += str(self.scores[0]) + \" |\"\n for r in [0,4]:\n rows[r] += \"-\"*(len(str(self.scores[0])) + 2)\n for r in [1,3]:\n rows[r] += \" \"*len(str(self.scores[0])) + \" |\"\n # houses\n for h in range(self.houses.shape[1]):\n width = len(str(self.houses[:,h].max()))\n for r in [1,3]:\n rows[r] += \" \" + str(self.houses[r//2,h])\n rows[r] += \" \"*(width - len(str(self.houses[r//2,h]))) +\" |\"\n for r in [0,2,4]:\n rows[r] += \"-\"*(width + 3)\n rows[2] = rows[2][:-1] + \"|\"\n # player 1 scoring area\n rows[2] += \" \" + str(self.scores[1]) + \" |\\n\"\n for r in [0,4]:\n rows[r] += \"-\"*(len(str(self.scores[1])) + 2) + \"\\n\"\n for r in [1,3]:\n rows[r] += \" \"*len(str(self.scores[1])) + \" |\"\n if self.turn == 1:\n rows[1] += \"<\\n\"\n rows[3] += \"\\n\"\n else:\n rows[1] += \"\\n\"\n rows[3] += \"<\\n\"\n self._repr = \"\".join(rows)\n return self._repr", "title": "" }, { "docid": "648e2b9738edcdc22360410c3f34ba3b", "score": "0.55369973", "text": "def showCurrentRankings(self):\n \n font = pygame.font.Font(self.fontSrc, self.fontSize - self.fontSize//6)\n sorted_players = sorted([p for p in self.players], key=lambda x: x.rank)\n headerRankTxt = font.render(\"Here are the rankings:\", True, (0,0,0))\n widthTxt, heightTxt = font.size(\"Here are the rankings:\")\n self.screen.blit(headerRankTxt, (self.width//2 - widthTxt//2, (self.height//2 - heightTxt*2) + self.height//6))\n\n for i in range(len(self.players)):\n sufixes = {1: \"st\", 2: \"nd\", 3: \"rd\", 4: \"th\"}\n p = sorted_players[i]\n playerTxt = font.render(str(p.rank) + sufixes[p.rank] + \": \" + p.name + \" (\" + str(p.points) + \"p)\", True, 
(0,0,0))\n widthTxt, heightTxt = font.size(str(p.rank) + \": \" + p.name + \" (\" + str(p.points) + \"p)\")\n\n self.screen.blit(playerTxt, (self.width//2 - widthTxt//2, (self.height//2 + heightTxt*i) + self.height//6))", "title": "" }, { "docid": "08aecdfcdb1c829a40d1edf9814ca236", "score": "0.551315", "text": "def submit_leaderboard(self):\n if self.leaderboard_name.text != \"\":\n self.leaderboard.add(self.game.level.name, self.leaderboard_name.text, self.game.level.get_score(), self.game.wave.number)\n self.game.load_level(self.game.level.name)\n self.show_leaderboard_screen()", "title": "" }, { "docid": "8216a688672600362f0f51952fc4ae05", "score": "0.5508417", "text": "def layout_board(self):\n p = models.PieceType\n back_row = [\n p.ROOK, p.KNIGHT, p.BISHOP, p.QUEEN, p.KING, p.BISHOP, p.KNIGHT,\n p.ROOK\n ]\n for file, piece_type in enumerate(back_row):\n models.Piece.create(\n piece_type=piece_type, rank=0, file=file,\n side=models.Side.HOME, game=self.game\n )\n models.Piece.create(\n piece_type=piece_type, rank=7, file=file,\n side=models.Side.AWAY, game=self.game\n )\n for file in range(8):\n models.Piece.create(\n piece_type=p.PAWN, rank=1, file=file, side=models.Side.HOME,\n game=self.game\n )\n models.Piece.create(\n piece_type=p.PAWN, rank=6, file=file, side=models.Side.AWAY,\n game=self.game\n )", "title": "" }, { "docid": "4ecb46d16a7f56cb2aaacb8d0ed103d3", "score": "0.5501579", "text": "def print_matches(self):\n\n os.system('cls' if os.name == 'nt' else 'clear')\n console = Console()\n table = Table(title=colored('MATCHES', 'magenta'), show_header=True, header_style=\"bold blue\")\n table.add_column('Round')\n table.add_column('Player 1')\n table.add_column('Score Player 1')\n table.add_column('Score Player 2')\n table.add_column('Player 2')\n for round in self.rounds:\n for match in round.matches:\n if match.player1[1] == 0.0 and match.player2[1] == 0.0:\n table.add_row(\"[orange1]\"+str(round.number),\n \"[orange1]\"+match.player1[0].surname,\n \"[orange1]\"+str(match.player1[1]),\n \"[orange1]\"+str(match.player2[1]),\n \"[orange1]\"+match.player2[0].surname,\n )\n elif round.number == 2 or round.number == 4:\n table.add_row(\"[yellow]\"+str(round.number),\n \"[yellow]\"+match.player1[0].surname,\n \"[yellow]\"+str(match.player1[1]),\n \"[yellow]\"+str(match.player2[1]),\n \"[yellow]\"+match.player2[0].surname,\n )\n else:\n table.add_row(\"[green]\"+str(round.number),\n \"[green]\"+match.player1[0].surname,\n \"[green]\"+str(match.player1[1]),\n \"[green]\"+str(match.player2[1]),\n \"[green]\"+match.player2[0].surname,\n )\n console.print(table)\n self.print_color_meanings()", "title": "" }, { "docid": "d9f7a217ceeb2c3068b96078deb0bb06", "score": "0.54946154", "text": "def show_top_five_on_leaderboard():\n all_scores = []\n with open(leaderboard_file, 'r') as file:\n for line in file:\n words = line.split('=')\n leaderboard_score = int(words[1])\n player = words[0]\n all_scores.append((leaderboard_score, player))\n print('Here are the current top 5 players in the leaderboard:')\n sorted_scores = sorted(all_scores, reverse=True)[0:5]\n for score, name in sorted_scores:\n print('{} - {}'.format(name, score))", "title": "" }, { "docid": "4164be066d3c3842f6ad5c32b3ac6ac9", "score": "0.5493811", "text": "def get_opponent_view(self, as_list = False):\n if as_list:\n view = [[location.opponent_view() for location in row] for row in self.grid]\n else:\n view = [BOARD_HEADING]\n row_num = 1\n for row in self.grid:\n view.append(str(row_num).rjust(2) + \" \" + \" \".join(\n 
[location.opponent_view() for location in row]))\n row_num += 1\n view.append(\"\")\n return view", "title": "" }, { "docid": "f46e7b4f7e2f311aa08ccbd1270b2c6c", "score": "0.548125", "text": "def createGraph(self):\n\t\tdata = []\n\t\tfor i in self.members:\n\t\t\tx = \"rgba(220,220,220,1)\"\n\t\t\tv1 = str(randint(1,220))\n\t\t\tv2 = str(randint(1,220))\n\t\t\tv3 = str(randint(1,220))\n\t\t\tcolour = f\"rgba({v1},{v2},{v3},0.4)\"\n\t\t\tdata.append({'player': i.username, 'running_scores': [], 'gameweeks': [], 'colour': colour})\n\t\tfor i in self.gameweeks:\n\t\t\tdata[0]['gameweeks'].append(i)\n\t\t\tfor j in data:\n\t\t\t\tfor k in self.gameweeks[i]:\n\t\t\t\t\tif j['player'] == k['player'].username:\n\t\t\t\t\t\tj['running_scores'].append(k['running_league_points'])\n\t\tself.graph = data", "title": "" }, { "docid": "589b52da5f44bb425ebec4201627a5b8", "score": "0.54764396", "text": "def enter_scores_round(self, one_tournament):\n list_rounds = one_tournament.list_rounds\n tournaments_form = TournamentForm()\n tournaments_form.enter_scores(list_rounds[-1])", "title": "" }, { "docid": "39c82665d991c1f2e789098ec3719e7b", "score": "0.5447808", "text": "def draw_scores(self):\n # player1 score\n Label(\n text=str(self.player1.score),\n x=4,\n y=self.window.height - 40,\n font_name=\"helvetica\",\n font_size=36\n ).draw()\n # var to store the position the label needs to be drawn at\n # stays accurate enough till like 100000 ish\n LABEL_X_POS = self.window.width - len(list(str(self.player2.score))) * 30\n # player2 score\n Label(\n text=str(self.player2.score),\n x=LABEL_X_POS,\n y=self.window.height - 40,\n font_name=\"helvetica\",\n font_size=36\n ).draw()", "title": "" }, { "docid": "542d019bb60548805a2a2244c2043544", "score": "0.54329664", "text": "def make_player_tree(self, frame):\n\n player_tree = ttk.Treeview(frame, columns=(\"name\", \"joins\", \"avgtime\", \"first\", \"second\", \"third\", \"winrate\", \"top3\", \"quits\", \"quitrate\"), height=25)\n\n #adjust column headers\n player_tree.heading(\"name\", text=\"Player Name\", command=lambda: self.sort_tree_column(player_tree, \"name\", False))\n player_tree.heading(\"joins\", text= \"Joins\", command=lambda: self.sort_tree_column_int(player_tree, \"joins\", False))\n player_tree.heading(\"avgtime\", text= \"Average Time\", command=lambda: self.sort_tree_column(player_tree, \"avgtime\", False))\n player_tree.heading(\"first\", text= \"First\", command=lambda: self.sort_tree_column_int(player_tree, \"first\", False))\n player_tree.heading(\"second\", text= \"Second\", command=lambda: self.sort_tree_column_int(player_tree, \"second\", False))\n player_tree.heading(\"third\", text= \"Third\", command=lambda: self.sort_tree_column_int(player_tree, \"third\", False))\n player_tree.heading(\"winrate\", text= \"Win Rate\", command=lambda: self.sort_tree_column_flt(player_tree, \"winrate\", False))\n player_tree.heading(\"top3\", text= \"Top3 Rate\", command=lambda: self.sort_tree_column_flt(player_tree, \"top3\", False))\n player_tree.heading(\"quits\", text= \"Quits\", command=lambda: self.sort_tree_column_int(player_tree, \"quits\", False))\n player_tree.heading(\"quitrate\", text= \"Quit Rate\", command=lambda: self.sort_tree_column_flt(player_tree, \"quitrate\", False))\n\n #adjust column widths\n player_tree.column(\"#0\", width=10)\n player_tree.column(\"name\", width=100, anchor=E)\n player_tree.column(\"joins\", width=50, anchor=E)\n player_tree.column(\"avgtime\", width=85, anchor=E)\n player_tree.column(\"first\", width=50, 
anchor=E)\n player_tree.column(\"second\", width=50, anchor=E)\n player_tree.column(\"third\", width=50, anchor=E)\n player_tree.column(\"winrate\", width=100, anchor=E)\n player_tree.column(\"top3\", width=100, anchor=E)\n player_tree.column(\"quits\", width=50, anchor=E)\n player_tree.column(\"quitrate\", width=100, anchor=E)\n\n scroll_bar = ttk.Scrollbar(frame, orient=VERTICAL, command=player_tree.yview)\n player_tree.configure(yscrollcommand=scroll_bar.set)\n\n return player_tree, scroll_bar", "title": "" }, { "docid": "730c6743e6ee7ba3c380192aad56d262", "score": "0.54272944", "text": "def playerStandings():\n\n list = []\n conn, c = connect()\n \"\"\"get the wins number of each player\"\"\"\n winCount = \"create view winCount as(select winner as winner , count(*) as num from Matches group by winner );\"\n \"\"\"get the lose number of each player\"\"\" \n loseCount = \"create view loseCount as(select player2 as loser, count(*) as num from Matches group by player2);\"\n c.execute(winCount)\n c.execute(loseCount)\n conn.commit()\n \"\"\"join the player name and id with win number\"\"\"\n joinWin = \"create view joinWin as(select Players.id as id, Players.name as name, winCount.num as wins from Players left join winCount on Players.id = winCount.winner order by wins desc);\"\n \"\"\"join the player id and lose number\"\"\"\n joinLose = \"create view joinLose as(select Players.id as id, loseCount.num as num from Players left join loseCount on Players.id = loseCount.loser);\"\n c.execute(joinWin)\n c.execute(joinLose)\n conn.commit()\n \"\"\"put id, name, win, lose of each player together \"\"\"\n alljoin = \"select joinWin.id as id, joinWin.name as name, joinWin.wins as wins, joinLose.num as lose from joinWin left join joinLose on joinWin.id = joinLose.id order by wins;\"\n c.execute(alljoin)\n conn.commit()\n rows = c.fetchall()\n \"\"\"use the win, lose to get the matches of each player, and change None to zero\"\"\"\n count = 0\n inlist = []\n for row in rows:\n for item in row:\n if count != 3:\n if item is None:\n inlist.append(0)\n else:\n inlist.append(item)\n count = count + 1\n else:\n if item is None:\n inlist.append(inlist[2])\n else:\n inlist.append(item + inlist[2])\n list.append(inlist)\n count = 0\n inlist = []\n c.execute(\"drop view joinLose;\")\n c.execute(\"drop view joinWin;\")\n c.execute(\"drop view loseCount;\")\n c.execute(\"drop view winCount;\")\n conn.commit()\n conn.close()\n return list", "title": "" }, { "docid": "dd808073990afeb033df476dcfe62583", "score": "0.5383781", "text": "def player_standings():\n refresh_views()\n query = \"\"\"\n SELECT players.id, players.name, v_player_wins.wins, v_matches.matches\n FROM players\n LEFT JOIN v_player_wins ON players.id = v_player_wins.player\n LEFT JOIN v_matches ON players.id = v_matches.player\n GROUP BY players.id, players.name, v_player_wins.wins, v_matches.matches\n ORDER BY v_player_wins.wins DESC;\n\t\"\"\"\n db, cursor = connect()\n cursor.execute(query)\n results = cursor.fetchall()\n db.close()\n return results", "title": "" }, { "docid": "e22e90ff922cac6c9dff78bef29f864f", "score": "0.53792036", "text": "def __init__(self, current_player_index: int, nr_players_in_round: int):\n self.print_mode = False\n self.new_round(current_player_index, nr_players_in_round)", "title": "" }, { "docid": "27baa46d22af61f48573c4ab1e51400a", "score": "0.53698516", "text": "def test_generate_rankings_after_round(self):\n matches = Match.objects.filter(date_played=datetime.now())\n players = 
Player.objects.filter(status=Player.ACTIVE).order_by('-ranking')\n for player in players:\n print(f'{player.first_name}, {player.last_name}, {player.ranking}')\n for match in matches:\n print(f'{match.player1}: {match.games_for_player1} vs {match.games_for_player2}: {match.player2}, {match.date_played}')\n\n print(f'Number of players: {len(players)}')\n generate_rankings_after_round(matches, datetime.now(), f'Test cases round')\n players = Player.objects.filter(status=Player.ACTIVE).order_by('ranking')\n for player in players:\n print(f'{player.first_name}, {player.last_name}, {player.ranking}')", "title": "" }, { "docid": "4d006dcb3dd17fe8fa6dc50a0a24cf16", "score": "0.53599024", "text": "def leaderboard(request):\n try:\n earned_awards = Award.objects.filter(user=request.user)\n except ObjectDoesNotExist:\n earned_awards = {}\n leaders_br = Profile.objects.filter(\n category=\"beginnerrunner\").order_by('-distance')\n leaders_r = Profile.objects.filter(category=\"runner\").order_by('-distance')\n leaders_b = Profile.objects.filter(category=\"biker\").order_by('-distance')\n leaders_d = Profile.objects.filter(\n category=\"duathloner\").order_by('-distance')\n leaders_f = Profile.objects.filter(\n category=\"freestyler\").order_by('-distance')\n\n total_workouts = Workout.objects.all()\n total_kms = 0\n for workout in total_workouts:\n total_kms += workout.distance\n table_leaders_br = ProfileTable(leaders_br, prefix=\"leaders-br-\")\n table_leaders_r = ProfileTable(leaders_r, prefix=\"leaders-r-\")\n table_leaders_b = ProfileTable(leaders_b, prefix=\"leaders-b-\")\n table_leaders_d = ProfileTable(leaders_d, prefix=\"leaders-d-\")\n table_leaders_f = ProfileTable(leaders_f, prefix=\"leaders-f-\")\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_br)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_r)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_b)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_d)\n RequestConfig(request, paginate={\n \"per_page\": 10\n }).configure(table_leaders_f)\n\n return render(\n request, 'ic_marathon_app/leaderboard.html', {\n 'leaders_br': table_leaders_br,\n 'leaders_br_c': len(leaders_br),\n 'leaders_r': table_leaders_r,\n 'leaders_r_c': len(leaders_r),\n 'leaders_b': table_leaders_b,\n 'leaders_b_c': len(leaders_b),\n 'leaders_d': table_leaders_d,\n 'leaders_d_c': len(leaders_d),\n 'leaders_f': table_leaders_f,\n 'leaders_f_c': len(leaders_f),\n 'earned_awards': earned_awards,\n 'total_kms': total_kms\n })", "title": "" }, { "docid": "d267816d22ec1b67bc947c460e240144", "score": "0.53473336", "text": "def _compute_competition_leaderboard(session, submissions, leaderboard_type,\n event_name):\n event = session.query(Event).filter_by(name=event_name).one()\n score_type = event.get_official_score_type(session)\n score_name = event.official_score_name\n\n private_leaderboard = _compute_leaderboard(session, submissions, 'private',\n event_name, with_links=False)\n\n col_selected_private = (['team', 'submission'] +\n ['bag private ' + score_name,\n 'bag public ' + score_name] +\n ['train time [s]', 'test time [s]',\n 'submitted at (UTC)'])\n leaderboard_df = private_leaderboard[col_selected_private]\n leaderboard_df = leaderboard_df.rename(\n columns={'bag private ' + score_name: 'private ' + score_name,\n 'bag public ' + score_name: 'public ' + score_name}\n )\n\n # select best submission for each team\n best_df = 
(leaderboard_df.groupby('team').min()\n if score_type.is_lower_the_better\n else leaderboard_df.groupby('team').max())\n best_df = best_df[['public ' + score_name]].reset_index()\n best_df['best'] = True\n\n # merge to get a best indicator column then select best\n leaderboard_df = pd.merge(\n leaderboard_df, best_df, how='left',\n left_on=['team', 'public ' + score_name],\n right_on=['team', 'public ' + score_name]\n )\n leaderboard_df = leaderboard_df.fillna(False)\n leaderboard_df = leaderboard_df[leaderboard_df['best']]\n leaderboard_df = leaderboard_df.drop(columns='best')\n\n # dealing with ties: we need the lowest timestamp\n best_df = leaderboard_df.groupby('team').min()\n best_df = best_df[['submitted at (UTC)']].reset_index()\n best_df['best'] = True\n leaderboard_df = pd.merge(\n leaderboard_df, best_df, how='left',\n left_on=['team', 'submitted at (UTC)'],\n right_on=['team', 'submitted at (UTC)'])\n leaderboard_df = leaderboard_df.fillna(False)\n leaderboard_df = leaderboard_df[leaderboard_df['best']]\n leaderboard_df = leaderboard_df.drop(columns='best')\n\n # sort by public score then by submission timestamp, compute rank\n leaderboard_df = leaderboard_df.sort_values(\n by=['public ' + score_name, 'submitted at (UTC)'],\n ascending=[score_type.is_lower_the_better, True])\n leaderboard_df['public rank'] = np.arange(len(leaderboard_df)) + 1\n\n # sort by private score then by submission timestamp, compute rank\n leaderboard_df = leaderboard_df.sort_values(\n by=['private ' + score_name, 'submitted at (UTC)'],\n ascending=[score_type.is_lower_the_better, True])\n leaderboard_df['private rank'] = np.arange(len(leaderboard_df)) + 1\n\n leaderboard_df['move'] = \\\n leaderboard_df['public rank'] - leaderboard_df['private rank']\n leaderboard_df['move'] = [\n '{:+d}'.format(m) if m != 0 else '-' for m in leaderboard_df['move']]\n\n col_selected = [\n leaderboard_type + ' rank', 'team', 'submission',\n leaderboard_type + ' ' + score_name, 'train time [s]', 'test time [s]',\n 'submitted at (UTC)'\n ]\n if leaderboard_type == 'private':\n col_selected.insert(1, 'move')\n\n df = leaderboard_df[col_selected]\n df = df.rename(columns={\n leaderboard_type + ' ' + score_name: score_name,\n leaderboard_type + ' rank': 'rank'\n })\n df = df.sort_values(by='rank')\n return df", "title": "" }, { "docid": "387d09e4c2da76e58f093e14096ea4c5", "score": "0.5346853", "text": "def _high_scores(self):\n\n #Creates a frame for highscore\n high_score_window = tk.Toplevel()\n high_score_window.title(\"High Scores\")\n\n \n #Gets values stored in the file\n data_input=self._high_score.get_entries()\n Output = \"Rank | Name | Score | Comment\\n-----------------------------------\\n\"\n i = 1\n\n \n #Load values and shows on screen\n for dictionary in data_input:\n dictionary_values = str(dictionary.values())\n name,score,comment = dictionary_values.split(sep = \",\")\n Output = Output + str(i) +\" |\"+ name[14:-1] +\" |\"+ score +\" |\"+ comment[:-2]+\"\\n\"\n i += 1\n\n #Button to returen to game \n msg = tk.Message(high_score_window, text=Output)\n msg.pack()\n \n button = tk.Button(high_score_window, text=\"Return to Game\", command=high_score_window.destroy)\n button.pack()", "title": "" }, { "docid": "318a3b4013d7296e10710c0e42af5123", "score": "0.534458", "text": "def new_ranking(cls, user, wins, percent_won, avg_wrong):\n ranking = Ranking(user=user,\n wins=wins,\n percent_won=percent_won,\n avg_wrong=avg_wrong)\n ranking.put()\n return ranking", "title": "" }, { "docid": 
"1caa32cc323771573d67362870e6a6b3", "score": "0.5330613", "text": "def get_winner_board(self):\n winner_info = self._best_individuals_select(1)\n winner_board = {\"indiv\": winner_info[0][0], \"score\": winner_info[1][0]}\n\n return winner_board", "title": "" }, { "docid": "5c83b0469eaf668bd6f4a781959af545", "score": "0.53277326", "text": "def calculate_standings() -> str:\n leaderboard = defaultdict(list)\n for team, score in LEAGUE.items():\n leaderboard[score].append(team)\n for score, team in leaderboard.items():\n leaderboard[score] = sorted(leaderboard[score], key=str.lower)\n scores = sorted(leaderboard.keys(), reverse=True)\n output_lst = []\n rank = 1\n while rank <= len(LEAGUE.keys()):\n score = scores.pop(0)\n suffix = 's' if score != 1 else ''\n output = [f'{rank}. {team}, {score} pt{suffix}'\n for team in leaderboard[score]]\n\n output_lst.extend(output)\n rank += len(output)\n return '\\n'.join(output_lst)", "title": "" }, { "docid": "6954894d95f9e458b36e65fcbf8211df", "score": "0.5323443", "text": "def get_leaderboard(db):\r\n user_scores = []\r\n prediction_users_ref = db.collection(\"prediction_users\")\r\n user_docs = prediction_users_ref.stream()\r\n for user_doc in user_docs:\r\n user_info = user_doc.to_dict()\r\n if user_info[\"can_display\"]:\r\n email = user_doc.id\r\n username = email.split(\"@\")[0]\r\n user_scores.append((round(user_info[\"current_score\"], 2), username))\r\n user_scores.sort(key=lambda x: (-x[0], x[1]))\r\n return user_scores[:10]", "title": "" }, { "docid": "35cefada041b48405069925789eae52d", "score": "0.53195363", "text": "def get_player_view(self, as_list = False):\n if as_list:\n view = [[location.player_view() for location in row] for row in self.grid]\n else:\n view = [BOARD_HEADING]\n row_num = 1\n for row in self.grid:\n view.append(str(row_num).rjust(2) + \" \" + \" \".join(\n [location.player_view() for location in row]))\n row_num += 1\n view.append(\"\")\n return view", "title": "" }, { "docid": "f119a8d1da96eafb5ebb3aff6fa8cc40", "score": "0.5314359", "text": "async def bet_board(self, ctx: commands.Context, top: int = 10):\n reverse = True\n if top == 0:\n top = 10\n elif top < 0:\n reverse = False\n top = -top\n members_sorted = sorted(\n await self._get_all_members(ctx.bot), key=lambda x: x.karma, reverse=reverse\n )\n if len(members_sorted) < top:\n top = len(members_sorted)\n topten = members_sorted[:top]\n highscore = \"\"\n place = 1\n for member in topten:\n highscore += str(place).ljust(len(str(top)) + 1)\n highscore += \"{0} | \".format(member.name).ljust(18 - len(str(member.karma)))\n highscore += str(member.karma) + \"\\n\"\n place += 1\n if highscore != \"\":\n embed = discord.Embed(color=0xf3f1f6)\n embed.title = \"Karma Returns\"\n embed.description = \"\"\"```xl\n{0}```\"\"\".format(highscore)\n await ctx.send(embed=embed)\n else:\n await ctx.send(\"No one has gained or lost any karma\")", "title": "" }, { "docid": "5535675a0a19e5565fab7643ff6ea84b", "score": "0.5310064", "text": "def print_board(self):\n print(\"row: \" + str(self.col))\n print(\"col: \" + str(self.row))\n top_line = \" \"\n for c in range(self.col):\n top_line += str(c) + \" \"\n for row in range(self.row-1,-1,-1):\n # for row in range(self.row):\n line = str(row) + \" \"\n for col in range(self.col):\n if [row,col] == self.location:\n line += \"[ @]\"\n screen.blit(agentpic, ((col*rectangleWidth)+30, (row*rectangleHeight+rectangleHeight)+30)) \n pygame.display.update()\n '''pygame.draw.rect(screen, khaki, (col*rectangleWidth, 
row*rectangleHeight+rectangleHeight, rectangleWidth, rectangleHeight))\n \n pygame.draw.rect(screen, darkOliveGreen, (col*rectangleWidth-1, row*rectangleHeight+rectangleHeight-1, rectangleWidth-1, rectangleHeight-1))\n pygame.display.update()'''\n else:\n score = str(self.board[row][col].score())\n #if self.board[row][col].score()!=5:\n\n \n #print(\"My score\",score )\n if len(score) == 2:\n line = line + \"[\" + str(self.board[row][col].score()) + \"]\"\n else:\n line = line + \"[ \" + str(self.board[row][col].score()) + \"]\"\n print(line)\n print(top_line + '\\n');", "title": "" }, { "docid": "b6310100da4bdc7408c643f81e28ba4d", "score": "0.5301851", "text": "def draw_ranks():\n for y in range(SQUARE_SPACE + Y1, Y2-1, SQUARE_SPACE):\n pygame.draw.line(screen, BLACKCOLOR, [X1, y], [X2, y], 1)", "title": "" }, { "docid": "35d1e816f3bb215a64141a82f151423b", "score": "0.5300731", "text": "def rd3leaderboard(request):\n\n #Add views\n playing_players = Rd3SlotModel.objects.filter(player_name__isnull=False)\n\n #Add context\n context = {\n 'playing_players': playing_players,\n }\n\n return render(request, 'rd3Leaderboard.html', context=context)", "title": "" }, { "docid": "77091a277a69ed4f4ef87f5eca07bf1c", "score": "0.52900624", "text": "def print_rounds(rounds):\n win = Color.OKGREEN\n end = Color.ENDC\n for r in rounds:\n print(f\"{Color.WARNING}{r.name}, start: {r.start}, end: {r.end}{Color.ENDC}\")\n for i, game in enumerate(r.games):\n for p in App.players:\n if game[0][0] == p.id:\n if game[0][1] == 1:\n p1 = f\"{win}id: {p.id}, {p.surname} {p.name}, rank: {p.rank}, score: {game[0][1]}{end}\"\n else:\n p1 = f\"id: {p.id}, {p.surname} {p.name}, rank: {p.rank}, score: {game[0][1]}\"\n if game[1][0] == p.id:\n if game[1][1] == 1:\n p2 = f\"{win}id: {p.id}, {p.surname} {p.name}, rank: {p.rank}, score: {game[1][1]}{end}\"\n else:\n p2 = f\"id: {p.id}, {p.surname} {p.name}, rank: {p.rank}, score: {game[1][1]}\"\n print(f\"Game {i + 1} | {p1} vs. 
{p2}\")\n return", "title": "" }, { "docid": "2fc809d126acf83546c14880505bc463", "score": "0.52662534", "text": "def to_rankings_form(self):\n return UserRanking(\n user_name=self.name,\n games_played=self.games_played,\n total_attempts=self.total_attempts,\n total_points=self.total_points,\n points_per_attempt=self.points_per_attempt)", "title": "" }, { "docid": "86f61654b22f949ed8424bc6bc678300", "score": "0.52253443", "text": "def new_round(self):\n self.reset()\n self.update_score_label() # updates score labels\n self.change_label_p_name() # players name\n self.clear_warning_label() # clear warning label", "title": "" }, { "docid": "6dfd252042867f7bbb8a7934741a04c9", "score": "0.5222292", "text": "def finish():\n global change_leaderboard, leaderboard\n screen.fill(BLACK)\n screen.blit(Font_big.render('Вы уничтожили ' + str(points) + ' вирусов,', True, WHITE), (50, 50))\n screen.blit(Font_big.render('физтех выздоровел, но все равно', True, WHITE), (50, 100))\n screen.blit(Font_big.render('попал в дурку после сессии.', True, WHITE), (50, 150))\n screen.blit(Font.render('leaderboard (score, name, amount of viruses, time)', True, WHITE), (50, 250))\n input_leaders = open(\"leaderboard.txt\", \"r\")\n leaderboard = input_leaders.readlines()\n input_leaders.close()\n output_leaders = open(\"leaderboard.txt\", \"w\")\n for i in range(len(leaderboard)):\n leaderboard[i] = leaderboard[i].split()\n leaderboard[i][0] = int(leaderboard[i][0])\n leaderboard.append([points, name, number_of_targets, str(play_time)])\n leaderboard.sort(reverse=True)\n for i in range(len(leaderboard)):\n text = ''\n for j in range(len(leaderboard[i])):\n text += str(leaderboard[i][j]) + ' '\n output_leaders.write(text + '\\n')\n output_leaders.close()\n for i in range(min(len(leaderboard), 18)):\n text = ''\n for j in range(len(leaderboard[i])):\n text += str(leaderboard[i][j]) + ' '\n screen.blit(Font.render(text, True, WHITE), (50 + 400 * (i // 9), 300 + 50 * (i % 9)))\n change_leaderboard = False\n pygame.display.update()", "title": "" }, { "docid": "6a74d524d6d2049bef83f8d11cb552d5", "score": "0.52201647", "text": "def draftBoard(draftWindow, xwidth, yheight, numTeams, numRounds):\r\n \r\n #clear = Rectangle(Point(0,0),Point(1300,650))\r\n #clear.draw(draftWindow)\r\n #clear.setFill('white')\r\n\r\n #place round text\r\n for rd in range(numRounds): #gives me range from 0 to round-1\r\n roundText = Text(Point( (0.0385 * xwidth),\r\n ( (0.9 * yheight) - (rd *\r\n ( (0.7954 * yheight) /\r\n (numRounds - 1))))),\r\n 'Round '+str(rd+1))\r\n roundText.setSize(13)\r\n roundText.draw(draftWindow)\r\n\r\n \"\"\"\r\n rd1 = Text(Point(50,585),'Round 1')\r\n rd1.setSize(13)\r\n rd1.draw(draftWindow)\r\n rd2 = Text(Point(50,548.072),'Round 2')\r\n rd2.setSize(13)\r\n rd2.draw(draftWindow)\r\n rd3 = Text(Point(50,511.144),'Round 3')\r\n rd3.setSize(13)\r\n rd3.draw(draftWindow)\r\n rd4 = Text(Point(50,474.216),'Round 4')\r\n rd4.setSize(13)\r\n rd4.draw(draftWindow)\r\n rd5 = Text(Point(50,437.288),'Round 5')\r\n rd5.setSize(13)\r\n rd5.draw(draftWindow)\r\n rd6 = Text(Point(50,400.36),'Round 6')\r\n rd6.setSize(13)\r\n rd6.draw(draftWindow)\r\n rd7 = Text(Point(50,363.432),'Round 7')\r\n rd7.setSize(13)\r\n rd7.draw(draftWindow)\r\n rd8 = Text(Point(50,326.504),'Round 8')\r\n rd8.setSize(13)\r\n rd8.draw(draftWindow)\r\n rd9 = Text(Point(50,289.576),'Round 9')\r\n rd9.setSize(13)\r\n rd9.draw(draftWindow)\r\n rd10 = Text(Point(50,252.648),'Round 10')\r\n rd10.setSize(13)\r\n rd10.draw(draftWindow)\r\n rd11 = 
Text(Point(50,215.72),'Round 11')\r\n rd11.setSize(13)\r\n rd11.draw(draftWindow)\r\n rd12 = Text(Point(50,178.792),'Round 12')\r\n rd12.setSize(13)\r\n rd12.draw(draftWindow)\r\n rd13 = Text(Point(50,141.864),'Round 13')\r\n rd13.setSize(13)\r\n rd13.draw(draftWindow)\r\n rd14 = Text(Point(50,104.936),'Round 14')\r\n rd14.setSize(13)\r\n rd14.draw(draftWindow)\r\n rd15 = Text(Point(50,68.008),'Round 15')\r\n rd15.setSize(13)\r\n rd15.draw(draftWindow)\r\n \"\"\"\r\n\r\n \"\"\"\r\n #agghh, so ineficient, but works for team window\r\n rd1 = Text(Point(50,585),'QB')\r\n rd1.setSize(13)\r\n rd1.draw(teamWindow)\r\n rd2 = Text(Point(50,548.072),'RB')\r\n rd2.setSize(13)\r\n rd2.draw(teamWindow)\r\n rd3 = Text(Point(50,511.144),'RB')\r\n rd3.setSize(13)\r\n rd3.draw(teamWindow)\r\n rd4 = Text(Point(50,474.216),'WR')\r\n rd4.setSize(13)\r\n rd4.draw(teamWindow)\r\n rd5 = Text(Point(50,437.288),'WR')\r\n rd5.setSize(13)\r\n rd5.draw(teamWindow)\r\n rd6 = Text(Point(50,400.36),'TE')\r\n rd6.setSize(13)\r\n rd6.draw(teamWindow)\r\n rd7 = Text(Point(50,363.432),'FLEX')\r\n rd7.setSize(13)\r\n rd7.draw(teamWindow)\r\n rd8 = Text(Point(50,326.504),'K')\r\n rd8.setSize(13)\r\n rd8.draw(teamWindow)\r\n rd9 = Text(Point(50,289.576),'DST')\r\n rd9.setSize(13)\r\n rd9.draw(teamWindow)\r\n rd10 = Text(Point(50,252.648),'Bench')\r\n rd10.setSize(13)\r\n rd10.draw(teamWindow)\r\n rd11 = Text(Point(50,215.72),'Bench')\r\n rd11.setSize(13)\r\n rd11.draw(teamWindow)\r\n rd12 = Text(Point(50,178.792),'Bench')\r\n rd12.setSize(13)\r\n rd12.draw(teamWindow)\r\n rd13 = Text(Point(50,141.864),'Bench')\r\n rd13.setSize(13)\r\n rd13.draw(teamWindow)\r\n rd14 = Text(Point(50,104.936),'Bench')\r\n rd14.setSize(13)\r\n rd14.draw(teamWindow)\r\n rd15 = Text(Point(50,68.008),'Bench')\r\n rd15.setSize(13)\r\n rd15.draw(teamWindow)\r\n \"\"\"\r\n\r\n #vertical lines\r\n for v in range(numTeams+1):\r\n vertLine = Line( Point( ((0.104 * xwidth) + (v * ( (0.896 * xwidth) /\r\n numTeams))),\r\n (0.071 * yheight)),\r\n Point( ((0.104 * xwidth) + (v * ( (0.896 * xwidth) /\r\n numTeams))),\r\n yheight))\r\n vertLine.draw(draftWindow)\r\n\r\n\r\n #horizontal lines\r\n for h in range(numRounds+1):\r\n horizLine = Line( Point( 0,\r\n ( (0.923 * yheight) -\r\n (h * ( (0.7954 * yheight) /\r\n (numRounds - 1))))),\r\n Point( xwidth,\r\n ( (0.923 * yheight) -\r\n (h * ( (0.7954 * yheight) /\r\n (numRounds - 1))))))\r\n horizLine.draw(draftWindow)\r\n\r\n \"\"\"\r\n for x in range(15):\r\n line = Line(Point(135+(x*80.357),46.08), Point(135+(x*80.357),650))\r\n line.draw(draftWindow)\r\n #line = Line(Point(135+(x*80.357),46.08), Point(135+(x*80.357),650))\r\n #line.draw(teamWindow)\r\n for y in range(16):\r\n h = Line(Point(0,600-(y*36.928)), Point(1259.998,600-(y*36.928)))\r\n h.draw(draftWindow)\r\n #h = Line(Point(0,600-(y*36.928)), Point(1259.998,600-(y*36.928)))\r\n #h.draw(teamWindow)\r\n #so much shitty programming\r\n \"\"\" \r\n\r\n #button to submit team name\r\n submit = Button(draftWindow, Point( (0.6154 * xwidth),\r\n (0.0385 * yheight)),\r\n (0.058 * xwidth),\r\n (0.034 * yheight),\r\n 'Submit', 'red')\r\n submit.activate()\r\n\r\n \"\"\"\r\n #button to submit team name\r\n submit = Button(draftWindow,Point(800,25),75,22,'Submit','red')\r\n submit.activate()\r\n \"\"\"\r\n\r\n teamNum = 1 #The current team being labled\r\n\r\n while teamNum < numTeams + 1:\r\n\r\n #text to prompt user to enter the team names\r\n prompt = Text( Point( (0.40385 * xwidth), (0.03846 * yheight)),\r\n 'Enter Team '+str(teamNum))\r\n 
prompt.setSize(14)\r\n prompt.draw(draftWindow)\r\n\r\n #input window for entering team names\r\n teamNameInput = Entry( Point( (0.51923 * xwidth),\r\n (0.03846 * yheight)), 15)\r\n teamNameInput.draw(draftWindow)\r\n\r\n #wait for submit button to be pressed\r\n c = draftWindow.getMouse()\r\n while not submit.check(c) or teamNameInput.getText() == \"\":\r\n c = draftWindow.getMouse()\r\n\r\n team = Text( Point( ((0.1346 * xwidth) +\r\n ((0.064 * xwidth) * (teamNum - 1))),\r\n (0.9615 * y height)),\r\n teamNameInput.getText())\r\n team.setSize(12)\r\n team.draw(draftWindow)\r\n\r\n prompt.undraw()\r\n teamNum += 1\r\n\r\n#good to this point -----------------------------------\r\n\r\n\r\n \"\"\"\r\n\r\n #the number of the team\r\n tNum = 1\r\n\r\n while tNum < 15:\r\n \r\n #text to prompt user to enter the team names\r\n prompt = Text(Point(525,25),'Enter Team '+str(tNum))\r\n prompt.setSize(14)\r\n prompt.draw(draftWindow)\r\n \r\n #input window for entering the names\r\n tNameInput = Entry(Point(675,25),15)\r\n tNameInput.draw(draftWindow)\r\n \r\n c = draftWindow.getMouse()\r\n while not submit.check(c):\r\n c = draftWindow.getMouse()\r\n #lol, here comes more shitty programming\r\n if tNum == 1:\r\n t1 = Text(Point(175,625),tNameInput.getText())\r\n t1.setSize(12)\r\n t1.draw(draftWindow)\r\n #t1 = Text(Point(175,625),tNameInput.getText())\r\n #t1.setSize(12)\r\n #t1.draw(teamWindow)\r\n elif tNum == 2:\r\n t2 = Text(Point(250.357,625),tNameInput.getText())\r\n t2.setSize(12)\r\n t2.draw(draftWindow)\r\n #t2 = Text(Point(250.357,625),tNameInput.getText())\r\n #t2.setSize(12)\r\n #t2.draw(teamWindow)\r\n elif tNum == 3:\r\n t3 = Text(Point(330.714,625),tNameInput.getText())\r\n t3.setSize(12)\r\n t3.draw(draftWindow)\r\n #t3 = Text(Point(330.714,625),tNameInput.getText())\r\n #t3.setSize(12)\r\n #t3.draw(teamWindow)\r\n elif tNum == 4:\r\n t4 = Text(Point(411.071,625),tNameInput.getText())\r\n t4.setSize(12)\r\n t4.draw(draftWindow)\r\n #t4 = Text(Point(411.071,625),tNameInput.getText())\r\n #t4.setSize(12)\r\n #t4.draw(teamWindow)\r\n elif tNum == 5:\r\n t5 = Text(Point(491.428,625),tNameInput.getText())\r\n t5.setSize(12)\r\n t5.draw(draftWindow)\r\n #t5 = Text(Point(491.428,625),tNameInput.getText())\r\n #t5.setSize(12)\r\n #t5.draw(teamWindow)\r\n elif tNum == 6:\r\n t6 = Text(Point(571.785,625),tNameInput.getText())\r\n t6.setSize(12)\r\n t6.draw(draftWindow)\r\n #t6 = Text(Point(571.785,625),tNameInput.getText())\r\n #t6.setSize(12)\r\n #t6.draw(teamWindow)\r\n elif tNum == 7:\r\n t7 = Text(Point(652.142,625),tNameInput.getText())\r\n t7.setSize(12)\r\n t7.draw(draftWindow)\r\n #t7 = Text(Point(652.142,625),tNameInput.getText())\r\n #t7.setSize(12)\r\n #t7.draw(teamWindow)\r\n elif tNum == 8:\r\n t8 = Text(Point(732.499,625),tNameInput.getText())\r\n t8.setSize(12)\r\n t8.draw(draftWindow)\r\n #t8 = Text(Point(732.499,625),tNameInput.getText())\r\n #t8.setSize(12)\r\n #t8.draw(teamWindow)\r\n elif tNum == 9:\r\n t8 = Text(Point(812.856,625),tNameInput.getText())\r\n t8.setSize(12)\r\n t8.draw(draftWindow)\r\n #t8 = Text(Point(812.856,625),tNameInput.getText())\r\n #t8.setSize(12)\r\n #t8.draw(teamWindow)\r\n elif tNum == 10:\r\n t8 = Text(Point(893.213,625),tNameInput.getText())\r\n t8.setSize(12)\r\n t8.draw(draftWindow)\r\n #t8 = Text(Point(893.213,625),tNameInput.getText())\r\n #t8.setSize(12)\r\n #t8.draw(teamWindow)\r\n elif tNum == 11:\r\n t8 = Text(Point(973.57,625),tNameInput.getText())\r\n t8.setSize(12)\r\n t8.draw(draftWindow)\r\n #t8 = 
Text(Point(973.57,625),tNameInput.getText())\r\n #t8.setSize(12)\r\n #t8.draw(teamWindow)\r\n elif tNum == 12:\r\n t8 = Text(Point(1053.927,625),tNameInput.getText())\r\n t8.setSize(12)\r\n t8.draw(draftWindow)\r\n #t8 = Text(Point(1053.927,625),tNameInput.getText())\r\n #t8.setSize(12)\r\n #t8.draw(teamWindow)\r\n elif tNum == 13:\r\n t8 = Text(Point(1134.284,625),tNameInput.getText())\r\n t8.setSize(12)\r\n t8.draw(draftWindow)\r\n #t8 = Text(Point(1134.284,625),tNameInput.getText())\r\n #t8.setSize(12)\r\n #t8.draw(teamWindow)\r\n elif tNum == 14:\r\n t8 = Text(Point(1214.641,625),tNameInput.getText())\r\n t8.setSize(12)\r\n t8.draw(draftWindow)\r\n #t8 = Text(Point(1214.641,625),tNameInput.getText())\r\n #t8.setSize(12)\r\n #t8.draw(teamWindow)\r\n \r\n \r\n prompt.undraw()\r\n tNum += 1\r\n \"\"\"\r\n \r\n\r\n prompt = Text(Point(425,25),\"Enter Player Position and Name\")\r\n prompt.setSize(13)\r\n prompt.draw(draftWindow)\r\n\r\n pick = 1\r\n roundNum = 1\r\n\r\n while roundNum < 16: #number of pick in the draft will be 210\r\n tNameInput = Entry(Point(675,25),15)\r\n tNameInput.draw(draftWindow)\r\n #for player name\r\n\r\n #for player position\r\n pInput = Entry(Point(575,25),5)\r\n pInput.draw(draftWindow)\r\n c = draftWindow.getMouse()\r\n while not submit.check(c):\r\n c = draftWindow.getMouse()\r\n pPosition = pInput.getText()\r\n pName = tNameInput.getText()\r\n if pPosition == 'QB':\r\n pcolor = 'orange'\r\n elif pPosition == 'RB':\r\n pcolor = 'lightblue'\r\n elif pPosition == 'WR':\r\n pcolor = 'lightgreen'\r\n elif pPosition == 'TE':\r\n pcolor = 'pink'\r\n elif pPosition == 'K':\r\n pcolor = 'yellow'\r\n elif pPosition == 'DEF':\r\n pcolor = 'brown'\r\n elif pPosition == 'NA':\r\n pcolor = 'black'\r\n\r\n #here is where we need some demensions and to make the picks\r\n #snake back and fourth between the rounds\r\n if roundNum%2 == 1:\r\n placePick = Button(draftWindow,Point(175.1785+((pick-1)*80.357),581.536-((roundNum-1)*36.928)),80.357,36.928,pName,pcolor)\r\n placePick.activate()\r\n else:\r\n placePick = Button(draftWindow,Point((175.1785+(13*80.357))-((pick-1)*80.357),581.536-((roundNum-1)*36.928)),80.357,36.928,pName,pcolor)\r\n placePick.activate()\r\n\r\n pick = pick+1\r\n if pick == 15:\r\n pick = 1\r\n roundNum = roundNum + 1", "title": "" }, { "docid": "288af7fd2976d7772ff78d16ce0b9589", "score": "0.5213266", "text": "def get_leader_board_split(self, date):\n q = SQL.SQL(\n '''\n SELECT (((SUM(tbl.minutes) * 60) + SUM(tbl.seconds))::FLOAT / SUM(tbl.distance)) * 500 AS split, u.username\n FROM (\n SELECT user_id, minutes, seconds, distance\n FROM workout AS w\n JOIN erg AS e \n ON w.workout_id = e.workout_id\n WHERE w.time>{}\n ) AS tbl\n JOIN users AS u\n ON u.user_id = tbl.user_id\n GROUP BY u.username\n ORDER BY split\n '''\n ).format(SQL.Placeholder())\n\n result = self.safe_execute(q, (date,), fetchone=False)\n return result", "title": "" }, { "docid": "fc90d7b48c2e96c47028b7b16b02475b", "score": "0.52115124", "text": "def show_add_to_leaderboard_screen(self):\n self.clear()\n self.add_button(\"Type Your Name\", None)\n self.leaderboard_name = self.add_button(\"\", None)\n self.add_button(\"Submit\", self.submit_leaderboard)", "title": "" }, { "docid": "054d2733a75f9694c50ec659998f2540", "score": "0.521088", "text": "def __init__(self):\n\n self.play = True\n self.status = 'start_screen'\n\n self.winner = None\n self.winning_tiles = []\n self.win_combinations = []\n\n self.move_nr = 0\n self.player_list = []\n self.current_player_nr = 0\n self.settings = 
{'vs_computer': False, 'board_size': 3}\n\n self.view = GameView()", "title": "" }, { "docid": "ff2f754fd2e9d5b35564bcad28a725c2", "score": "0.5208663", "text": "def tournament(self):\n\n x_axis = []\n y_axis = []\n\n for i in range(self.number_of_matches):\n score = self.single_match()\n\n self.scores[0] += score[0]\n self.scores[1] += score[1]\n\n self.percentage = self.scores[0] / (i + 1)\n\n x_axis.append(i + 1)\n y_axis.append(self.percentage)\n\n print(\"The tournament completed:\\n\" +\n str(self.player1) +\n \": \" +\n str(self.scores[0]) +\n \" points.\\n\" +\n str(self.player2) +\n \": \" +\n str(self.scores[1]) +\n \" points.\")\n\n matplotlib.pyplot.plot(x_axis, y_axis)\n matplotlib.pyplot.axis([0, self.number_of_matches, 0, 1])\n matplotlib.pyplot.grid(True)\n matplotlib.pyplot.axhline(y=0.5, linewidth=0.5, color=\"m\")\n matplotlib.pyplot.xlabel(\"Number of games\")\n matplotlib.pyplot.ylabel(\n \"Winning percentage for \" + str(self.player1) + \" Vs.\\n\" + str(self.player2))\n\n print(\"player moves for \" + str(self.player1) + \": \\t\\t\" +\n str(self.player1.get_results()[self.player1]))\n print(\"player moves for \" + str(self.player2) + \": \\t\\t\" +\n str(self.player2.get_results()[self.player2]))\n\n matplotlib.pyplot.show()", "title": "" }, { "docid": "e61f02e56c7bbe55cfeb1df62165638b", "score": "0.52037007", "text": "def test_fetch_overall_leaderboard_position(self):\n first_user = User.objects.create(username='SomeGuy', email='me@abv.bg', password='123', score=123, role=self.base_role)\n second_user = User.objects.create(username='dGuy', email='d@abv.dg', password='123', score=122, role=self.base_role)\n second_user2 = User.objects.create(username='dGumasky', email='molly@abv.bg', password='123', score=122, role=self.base_role)\n second_user3 = User.objects.create(username='xdGumasky', email='xmolly@abv.bg', password='123', score=122, role=self.base_role)\n fifth_user = User.objects.create(username='dbrr', email='dd@abv.bg', password='123', score=121, role=self.base_role)\n fifth_user.save()\n\n self.assertEqual(fifth_user.fetch_overall_leaderboard_position(), 5)\n self.assertEqual(second_user.fetch_overall_leaderboard_position(), 2)\n self.assertEqual(second_user2.fetch_overall_leaderboard_position(), 2)\n self.assertEqual(second_user3.fetch_overall_leaderboard_position(), 2)\n self.assertEqual(first_user.fetch_overall_leaderboard_position(), 1)", "title": "" }, { "docid": "ae1dfbff0883f6c0bae2341360719c0b", "score": "0.5200481", "text": "def showGameResultScreen(self, pla, sco, rounds_tot=None):\n # pylint: disable=unused-argument,too-many-locals,too-many-statements\n self.fadeUpDown(\"down\")\n self.emptyGroup(self.disp_group)\n\n # Score list group + background + question mark for sorting\n gs_group = Group()\n\n # Pale grey large GAME SCORES background\n bg_scale = 6\n sbg_dob1 = Label(self.font,\n text=\"GAME\",\n scale=bg_scale,\n color=GS_COL)\n sbg_dob1.x = (self.width - 4 * bg_scale * self.font_width) // 2\n sbg_dob1.y = self.height // 4\n sbg_dob2 = Label(self.font,\n text=\"SCORES\",\n scale=bg_scale,\n color=GS_COL)\n sbg_dob2.x = (self.width - 6 * bg_scale * self.font_width) // 2\n sbg_dob2.y = self.height // 4 * 3\n gs_group.append(sbg_dob1)\n gs_group.append(sbg_dob2)\n self.showGroup(gs_group)\n self.fadeUpDown(\"up\")\n\n # Calculate maximum length player name\n # and see if scores happen to already be in order\n max_len = 0\n prev_score = sco[0]\n descending = True\n for idx, (name, _) in enumerate(pla):\n max_len = max(max_len, len(name))\n if 
sco[idx] > prev_score:\n descending = False\n prev_score = sco[idx]\n\n fmt = \"{:\" + str(max_len) + \"s} {:2d}\"\n x_pos = (self.width - (max_len + 3) * 2 * self.font_width) // 2\n scale = 2\n spacing = 4 if len(pla) <= 6 else 0\n top_y_pos = round((self.height\n - len(pla) * scale * self.font_height\n - (len(pla) - 1) * spacing) / 2\n + scale * self.font_height / 2)\n scores_group = Group()\n gs_group.append(scores_group)\n for idx, (name, _) in enumerate(pla):\n op_dob = Label(self.font,\n text=fmt.format(name, sco[idx]),\n scale=2,\n color=(PLAYER_NAME_COL_FG if idx == 0 else OPP_NAME_COL_FG))\n op_dob.x = x_pos\n op_dob.y = top_y_pos + idx * (scale * self.font_height + spacing)\n scores_group.append(op_dob)\n time.sleep(0.2)\n\n # Sort the entries if needed\n sort_scores = list(sco) # Make an independent local copy\n if not descending:\n empty_group = Group() # minor hack to aid swaps in scores_group\n step = 3\n qm_dob = Label(self.font,\n text=\"?\",\n scale=2,\n color=QM_SORT_FG)\n qm_dob.x = round(x_pos - 1.5 * scale * self.font_width)\n gs_group.append(qm_dob)\n while True:\n swaps = 0\n for idx in range(0, len(sort_scores) -1):\n above_score = sort_scores[idx]\n above_y = scores_group[idx].y\n below_y = scores_group[idx + 1].y\n qm_dob.y = (above_y + below_y) // 2\n if above_score < sort_scores[idx + 1]:\n qm_dob.text = \"<\"\n qm_dob.color = QM_SORTING_FG\n swaps += 1\n\n # make list of steps\n range_y = below_y - above_y\n offsets = list(range(step, range_y + 1, step))\n # Ensure this goes to the exact final position\n if offsets[-1] != range_y:\n offsets.append(range_y)\n\n for offset in offsets:\n scores_group[idx].y = above_y + offset\n scores_group[idx + 1].y = below_y - offset\n time.sleep(0.050)\n\n # swap the scores around\n sort_scores[idx] = sort_scores[idx + 1]\n sort_scores[idx + 1] = above_score\n\n # swap the graphical objects around using empty_group\n # to avoid ValueError: Layer already in a group\n old_above_dob = scores_group[idx]\n old_below_dob = scores_group[idx + 1]\n scores_group[idx + 1] = empty_group\n scores_group[idx] = old_below_dob\n scores_group[idx + 1] = old_above_dob\n\n qm_dob.text = \"?\"\n qm_dob.color = QM_SORT_FG\n time.sleep(0.2)\n else:\n time.sleep(0.6)\n\n if swaps == 0:\n break # Sort complete if no values were swapped\n gs_group.remove(qm_dob)", "title": "" }, { "docid": "72cee9051bbd5417c507c0515250e459", "score": "0.5199042", "text": "def new_game(self):\n\t\tself.__self_setup(len(self.grid[0]), len(self.grid), self.min_score)", "title": "" }, { "docid": "d196f67ffbd74386e379b65d3ec335f8", "score": "0.5162356", "text": "def refresh_score(self):\n self._score_label = tk.Label(self._master, text=\"Score: {} - {}\".format(self._model.get_score()[0],self._model.get_score()[1]),font='Arial 15 bold')\n self._score_label.grid(row=3, column =0)", "title": "" }, { "docid": "ee2ae19a7315996702b7de783a40bef9", "score": "0.5156803", "text": "def create_game_window(self):\r\n self.data_list = [int(self.entry.row.get()),\r\n int(self.entry.col.get()),\r\n str(self.entry.first.get()),\r\n str(self.entry.top_left.get()),\r\n self.entry.condition.get()]\r\n\r\n Othello_Gamewindow.GameWindow(self.master.master, self.data_list)\r\n # the game window launched, mainloop will be withdrawn\r\n\r\n print('Game started'\r\n '\\nrow number:', self.data_list[0],\r\n '\\ncol number:', self.data_list[1],\r\n '\\nfirst turn:', self.data_list[2],\r\n '\\ntop left piece:', self.data_list[3]\r\n )", "title": "" }, { "docid": "a8c2a38b031166b121f5556d2c3f8602", 
"score": "0.515397", "text": "def get_leaderboards():\n top_scores = leaderboards.get_latest()\n logger.debug('Current leaderboards top score: %s', top_scores)\n return top_scores", "title": "" }, { "docid": "c72b004ac9ce3bd2fe058fdb0138d747", "score": "0.51447207", "text": "def board():\n return render_template('board.html', data={\"active_player\": game.turn, \"passive_player\": game.next_player()})", "title": "" }, { "docid": "82cd4f473c4622de4fa8e29d9047aa1e", "score": "0.5139477", "text": "def __init__(self):\n # starts with player 0's turn\n self.whose_turn = 0\n # positions on the grid are labeled as follows:\n # 0 1 2\n # 3 4 5\n # 6 7 8\n # will hold 1 or 0, indicating whether this player has occupied this position\n self.player_0_positions = [0] * 9\n self.player_1_positions = [0] * 9\n # number of games won in this session\n self.player_0_score = 0\n self.player_1_score = 0\n self.num_ties = 0", "title": "" }, { "docid": "c5f675c37c4c8d913607dafdbfe1ce7c", "score": "0.5138871", "text": "def construct_board():\r\n pass", "title": "" }, { "docid": "9c5e000af1bbf8499c0d6ed4a36c38d5", "score": "0.5136407", "text": "def generate_matchs_firstRound(self, tournament):\n list_players = tournament.list_players\n all_list_players = []\n for elt in list_players:\n all_list_players.append(PlayerController().get_one(elt))\n list_triee = sorted(\n all_list_players,\n key=attrgetter(\"rank\"),\n reverse=True\n )\n tournament.matches_dones += [\n f\"{list_triee[0].identifier}:{list_triee[4].identifier}\",\n f\"{list_triee[1].identifier}:{list_triee[5].identifier}\",\n f\"{list_triee[2].identifier}:{list_triee[6].identifier}\",\n f\"{list_triee[3].identifier}:{list_triee[7].identifier}\"\n ]\n tournament.list_rounds.append(Round(\"Round 1\", [\n Match(\"Match1\", list_triee[0], 0, list_triee[4], 0),\n Match(\"Match2\", list_triee[1], 0, list_triee[5], 0),\n Match(\"Match3\", list_triee[2], 0, list_triee[6], 0),\n Match(\"Match4\", list_triee[3], 0, list_triee[7], 0)\n ], \"\", \"\"))\n return tournament", "title": "" }, { "docid": "63e011e2bc5e082ac655bb0c83e02dd2", "score": "0.5134712", "text": "async def show(ctx, *args):\n if len(server.user_list) < 1:\n await ctx.send(\"The server dosen't have any data….\")\n elif (len(args) < 1 or args[0] == \"--name\"):\n user_name = ctx.author.name\n if len(args) > 1 and \"--name\" == args[0]:\n for m in ctx.guild.members:\n if m.name == args[1] or m.nick == args[1]:\n user_name = args[1]\n break\n else:\n await ctx.send(\"Invalid user name\")\n return\n user = server.get(user_name)\n ret = \"Win rate(when Impostor or Crewmate): {:.2%}\\n\".format(\n user.win_num / user.data['Games Finished'])\n ret += \"Win rate(when Impostor): {:.2%}\\n\".format(\n user.impostor_win_num / user.data['Times Impostor'])\n ret += \"Win rate(when Crewmate): {:.2%}\\n\".format(\n user.crewmate_win_num / user.data['Times Crewmate'])\n ret += \"Kills per Impostor: {:.2f}\\n\".format(\n user.data['Impostor Kills'] / user.data['Times Impostor'])\n ret += \"Tasks Completed rate: {:.2%}\\n\".format(\n user.data['All Tasks Completed'] / user.data['Times Crewmate'])\n ret += \"Sabotages Fixed / Games: {:.2f}\".format(\n user.data['Sabotages Fixed'] / user.data['Games Finished'])\n await ctx.send(ret)\n elif args[0] == \"--rank\":\n show_num = 3\n if len(args) > 1 and is_num(args[1]):\n show_num = int(args[1])\n ret = \"leaderboard! 
(Win Rate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} win\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_win_rate()[:show_num], start=1))\n ret += \"\\n\\nleaderboard! (Impostor Win Rate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} win\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_win_rate_when_impostor()[:show_num],\n start=1))\n ret += \"\\n\\nleaderboard! (Crewmate Win Rate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} win\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_win_rate_when_crewmate()[:show_num],\n start=1))\n ret += \"\\n\\nleaderboard! (Kill / Times Impostor) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2f} killed\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_kill()[:show_num], start=1))\n ret += \"\\n\\nleaderboard! (All Tasks Completed / Times Crewmate) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2%} completed\".format(\n tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_alltask()[:show_num], start=1))\n ret += \"\\n\\nleaderboard! (Sabotages Fixed / Number of Games) \\n\"\n ret += \"\\n\".join(\"rank {2} {1} : {0:.2f} fixed\".format(tup[0], tup[1],\n rank)\n for rank, tup in enumerate(\n server.rank_sabotagefix()[:show_num], start=1))\n await ctx.send(ret)\n elif args[0] == \"--diff\":\n await ctx.send(\"I'll implement it one day!\")\n elif args[0] == \"--userlist\":\n await ctx.send(\", \".join([k for k in server.user_list.keys()]))\n else:\n await ctx.send(\"invalid command ><\")", "title": "" }, { "docid": "effeee1ac4e2fa12decb59618caa1b25", "score": "0.51342463", "text": "def create_board(self):\n self.add_piece(Floor(self, position=(1.5, -1, 5)))\n for i, tile in enumerate(self.board):\n if tile in [1, 2]:\n self.add_piece(BoardTile(self, position=(i % 3, 0, i // 3), rosetta=tile == 2))", "title": "" }, { "docid": "51730b0ace2c1844e8597d3bb3a74469", "score": "0.5120348", "text": "def createGame(self):\n\t\tself.columnconfigure(0, pad = 5)\n\t\tself.columnconfigure(1, pad = 5)\n\t\tself.rowconfigure(0, pad = 10)\n\t\tself.rowconfigure(1, pad = 10)\n\t\tself.numPlayersField = Entry(self)\n\t\tself.numPlayersField.insert(0,\"1\")\n\t\tself.setPlayersButton = Button(self, text = \"Set number of players\", command = self.setPlayers)\n\t\tself.numPlayersField.grid(row=0, column = 0)\n\t\tself.setPlayersButton.grid(row=0, column = 1)\n\t\tself.gameCanvas = Canvas(self, bg = \"black\", height = self.canvasHeight, width = self.canvasWidth)\n\t\tself.gameCanvas.grid(row = 1, column = 0, columnspan = 2)\n\t\tself.newGameButton = Button(self, text=\"New game\", command = self.newGame)\n\t\tself.quitGameButton = Button(self, text=\"Quit\", command = self.quit)\n\t\tself.newGameButton.grid(row = 2, column = 0)\n\t\tself.quitGameButton.grid(row = 2, column = 1)\n\t\tself.pack()\n\t\tself.createRectangles()", "title": "" }, { "docid": "e8d42cf9252943714075b50c24037924", "score": "0.5118666", "text": "def test_get_leaderboard(self):\n expected_positions = {\n self.fifth_user.username: 5,\n self.second_user.username: 2,\n self.second_user2.username: 2,\n self.second_user3.username: 2,\n self.first_user.username: 1\n }\n auth_token = 'Token {}'.format(self.second_user3.auth_token.key)\n\n received_leaderboard = self.client.get('/challenges/getLeaderboard', HTTP_AUTHORIZATION=auth_token).data\n\n for lead in received_leaderboard:\n user_name = lead['name']\n self.assertEqual(lead['position'], 
expected_positions[user_name])", "title": "" }, { "docid": "cc9df34f6da374dc65252ceb8c3fda96", "score": "0.5102245", "text": "def ranking_data(request):\n # get the type of the last played round, even if there is no currently active round\n latest_round = Round.objects.order_by('-id')[0] # this is the latest created round\n print \"the latest round type was\"\n print latest_round.type\n if latest_round.type == 0:\n roundtype = 'selection'\n else:\n roundtype = 'final'\n\n ranking_entries = get_ranking_entries()\n\n return render(request, 'ranking_data.xml', {\n 'roundtype': roundtype,\n 'ranking_entries': ranking_entries,\n }, content_type=\"text/xml\")", "title": "" }, { "docid": "864cd37be2a317b7b09ac7cf4913d726", "score": "0.51003873", "text": "def create_game_board(self):\n self.fill_screen()\n self.render_text(\n 'Dealer Chip Number: ' + str(self.dealer_num_chips), \n PLAYER_INFO_FONT_SIZE, \n DEALER_INFO_LOCATION, \n BLACK\n )\n self.render_text(\n 'Your Chip Number: ' + str(self.player_num_chips),\n PLAYER_INFO_FONT_SIZE,\n PLAYER_INFO_LOCATION, \n BLACK\n )\n self.render_image(DECK_IMG_PATH, DECK_IMG_LOCATION)", "title": "" }, { "docid": "92c9084d9287f80537d7ecdf906c08b6", "score": "0.5094188", "text": "def create_label(self):\r\n row_num_label = tkinter.Label(self.master, text='rows :',\r\n width=10, height=1, pady=8)\r\n col_num_label = tkinter.Label(self.master, text='columns :',\r\n width=10, height=1, pady=5)\r\n first_turn_label = tkinter.Label(self.master, text='first turn :',\r\n width=10, height=1, pady=6)\r\n topleft_label = tkinter.Label(self.master, text='top left :',\r\n width=10, height=1, pady=6)\r\n\r\n row_num_label.grid(row=0, column=0)\r\n col_num_label.grid(row=1, column=0)\r\n first_turn_label.grid(row=2, column=0)\r\n topleft_label.grid(row=3, column=0)", "title": "" }, { "docid": "f26bc1dd240605826c9cbcae40026569", "score": "0.5094108", "text": "def retrieve_team_leader(self, tl_pk: int):\n pass", "title": "" }, { "docid": "f803e5acd91853816d9a434b3c435f07", "score": "0.50906175", "text": "def getleaderboarddisplay(leg_body, owner_filter):\n try:\n return ScoreDisplay.objects.get(name=\"%s_leader_%s\" % (leg_body.name,\n owner_filter))\n except:\n return None", "title": "" }, { "docid": "26a338181e6564a8cbc07b220894291f", "score": "0.5077305", "text": "def print_scoreboard(self, gender):\n print('Scoreboard for track %s in season %s' % (gender, self.name))\n rank = 1\n scoreboard = self.get_scoreboard(gender)\n for points, stats in scoreboard:\n print('#%d. 
%s at %.2f points' % (rank, stats.player.name, stats.points))\n rank += 1", "title": "" }, { "docid": "aee7922d05c5b0a7c973cde5865dc91e", "score": "0.5076507", "text": "def display_result(name, list_of_score):\r\n all_round = 1\r\n print(\"\\nThanks for playing {0}\".format(name))\r\n\r\n for row in list_of_score:\r\n print(\"Round {0} : {1}.\".format(all_round, row))\r\n all_round += 1", "title": "" }, { "docid": "ca43b763c7d4490152090d439344a5f3", "score": "0.504961", "text": "def print_board(board, player1, player2):\n row_len = len(board[0])\n col_len = len(board)\n rows_template = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n cols = []\n for col_index in range(col_len):\n cols.append(col_index + 1)\n cols_str = list(map(str, cols))\n rows = []\n for row_index in range(row_len):\n rows.append(rows_template[row_index])\n line = []\n for i in range(row_len):\n line.append(\"---\")\n \n board_total_length = (row_len * 3) + (row_len - 1) + 2\n score_names = f\"{player1['name']} : {player2['name']}\"\n score = f\"{str(player1['points']).rjust(len(player1['name']))} : {player2['points']}\"\n score_offset = int((board_total_length - len(score_names)) / 2)\n print(\" \"*score_offset + score_names)\n print(\" \"*score_offset + score)\n print(\"\")\n print(\" \", \" \".join(cols_str))\n for index in range(col_len):\n print(f\"{rows[index]} {' | '.join(board[index])}\")\n if index < row_len - 1:\n print(\" \", \"+\".join(line))\n print(\"\")", "title": "" }, { "docid": "12814ecde4093c1958c0ea26271b9777", "score": "0.5046536", "text": "def get_leaderboard(self, id=None, state=None, owner=None, tag=None, min_running_time=None):\n\n leaderboard_entries = self._fetch_leaderboard(id, state, owner, tag, min_running_time)\n\n def make_row(entry):\n channels = dict((\"channel_{}\".format(ch.name), ch.trimmed_y) for ch in entry.channels)\n\n parameters = map_keys(\"parameter_{}\".format, entry.parameters)\n properties = map_keys(\"property_{}\".format, entry.properties)\n\n r = {}\n r.update(entry.system_properties)\n r.update(channels)\n r.update(parameters)\n r.update(properties)\n return r\n\n rows = ((n, make_row(e)) for (n, e) in enumerate(leaderboard_entries))\n\n df = pd.DataFrame.from_dict(data=dict(rows), orient=\"index\")\n df = df.reindex(self._sort_leaderboard_columns(df.columns), axis=\"columns\")\n return df", "title": "" }, { "docid": "8d8f46e2559b39b119d7febd6d19afad", "score": "0.5028831", "text": "def view_scores(self):\n scores = { \n **self.__score['upper'],\n 'total upper': self.__score['total upper'],\n **self.__score['lower'],\n 'total lower': self.__score['total lower'],\n 'grand total': self.__score['grand total'] \n }\n for score in scores:\n s = scores[score]\n if s == None:\n s = 'blank'\n print(f'\\t{score.title()}: {s}')", "title": "" }, { "docid": "e230d6041e2b95be5542e23c2cdc6dcb", "score": "0.5024332", "text": "def refresh_rankings(self):\n \n self.team_ranks_panel.pack_forget() #Get rid of the old rankings\n self.team_ranks_panel = tk.Frame(self.ranking_frame) #Make a new ranks panel\n self.team_ranks_panel.grid(row=3, column=0) #Add the panel\n \n self.team_ranks_textbox = tk.Text(self.team_ranks_panel, width=24, wrap=tk.NONE) #The textbox with the ranking and score information\n self.team_ranks_textbox.pack(side=tk.TOP) #Add the team ranks textbox\n \n r_teams = self.get_teams()[:] #The teams to show ranks for\n r_teams.sort(key=lambda t:-self.score(t)) #Sort the teams by score in descending order\n \n for i in range(0, len(r_teams)): #Add the teams to the team ranks 
textbox one at a time\n team = r_teams[i] #Get the team to add\n num_string = str(i+1)\n num_string = num_string + ':' + (' ' * (4-len(num_string)))\n string = num_string + str(team) #The rank and the team\n string += ' ' * (11-len(string)) + 'with ' + '%.2f' % self.score(team) #The score of the team\n if i != len(r_teams) - 1: #Add newlines after every string but the last\n string += '\\n'\n self.team_ranks_textbox.insert(tk.INSERT, chars=string) #Add the string to the team ranks textbox\n \n self.team_ranks_textbox.config(state=tk.DISABLED, height=30) #Make it so the team ranks textbox can't be edited \n \n self.parent.do_easter_eggs() #Trigger any appropriate easter eggs", "title": "" }, { "docid": "41f7fc8950403b823955a2b0ed50510a", "score": "0.50169355", "text": "def __init__(self, width=5, height=5, playerA=PLAYER_TYPE_HUMAN, playerB=PLAYER_TYPE_HUMAN, depth = 3, useDecisionTree= True, dynamicDepth= False):\n self.width, self.height = width, height\n assert 2 <= self.width and 2 <= self.height,\\\n \"Game can't be played on this board's dimension.\"\n self.board = {}\n self.squares = {}\n self.player = 0\n self.players = {\n 0: Player(self, playerA, 0, depth, useDecisionTree, dynamicDepth),\n 1: Player(self, playerB, 1, depth, useDecisionTree, dynamicDepth),\n }\n\n self.board_state_size = (self.width-1) * self.height * 2\n self.board_state = np.zeros(shape = (1, (self.width-1) * self.height * 2))\n self.empty_states = set()\n\n self.move_to_state = None\n\n for i in range(self.board_state_size):\n self.empty_states.add(i)\n \n self.scores = [0,0]", "title": "" }, { "docid": "875bfe209602e303398b296f829a5739", "score": "0.5006537", "text": "def create_board(self):\n\t\t\n\t\tprint(\"This will print the board ready for playing.\")", "title": "" }, { "docid": "c74159b2631d46e409a8b4568f1d3d2e", "score": "0.49985194", "text": "def highlight_player(self):\n if self._model.get_turn():\n player_1=tk.Label(self._master,text=\"Player 1: X\",font='Arial 15 bold',bg=\"green\")\n player_1.grid(row=1,column=0)\n player_2=tk.Label(self._master,text=\"Player 2: O\",font='Arial 15 bold')\n player_2.grid(row=2,column=0)\n else:\n player_1=tk.Label(self._master,text=\"Player 1: X\",font='Arial 15 bold')\n player_1.grid(row=1,column=0)\n player_2=tk.Label(self._master,text=\"Player 2: O\",font='Arial 15 bold',bg=\"green\")\n player_2.grid(row=2,column=0)", "title": "" }, { "docid": "85680912e9144fbb7a7965a4bbb6161a", "score": "0.49831086", "text": "def create_board(self):\r\n for i in range(ROW):\r\n temporary_list = []\r\n for j in range(COL):\r\n lable = tkinter.Label(self.frame2, image=self.photo,\r\n width=\"50\", height=\"50\", bg='#66B3FF')\r\n lable.photo = self.photo\r\n lable.grid(row=i, column =j)\r\n\r\n temporary_list.append(lable)\r\n self.cell_list.append(temporary_list)", "title": "" }, { "docid": "3bb94d9db1b4e7ad432e09a4abbba736", "score": "0.49772006", "text": "def new_leader(self, number):\n old_leader = self.get_leader_name()\n self.leader_index = number\n leader = self.get_leader_name()\n print(leader, \"deposes\", old_leader, \"as the new leader of the pack\")", "title": "" }, { "docid": "68110e28de7c91dfd5b54812844ec2ae", "score": "0.49769977", "text": "def __init__(self, master, model):\n self._master = master\n self._model = model\n self._title=tk.Label(self._master,text=\"Tic-tac-toe Game\",font='Arial 20 bold',fg = 'red')\n self._title.grid(row=0,column=0)\n self._score_label = tk.Label(self._master, text=\"Score: {} - 
{}\".format(self._model.get_score()[0],self._model.get_score()[1]),font='Arial 15 bold')\n self._score_label.grid(row=3, column =0)", "title": "" }, { "docid": "75d99efdbe5c60d83a5b7acc199eb355", "score": "0.49751136", "text": "def show_winner(self, winners: [PlayerBase]):\n pass", "title": "" }, { "docid": "b3e6bbfdb4d032ad04fb990948d68efc", "score": "0.4974871", "text": "def view_round(round_):\n keys = [\n 'id', 'title', 'status',\n 'registration_opens_at', 'is_registration_open',\n 'training_opens_at', 'is_training_open',\n 'min_team_size', 'max_team_size', 'min_team_ratio',\n 'allow_team_changes'\n ]\n return {key: round_[key] for key in keys}", "title": "" } ]
ce428872890f3297fc66b259f0a143e2
Gets the bullets sprite group.
[ { "docid": "02462abbbe3a9e237407eea959f7c752", "score": "0.6066278", "text": "def get_bullets (self):\n\n return self.bullets", "title": "" } ]
[ { "docid": "15d234b9db0d0fc7dd1acbff40c75c5b", "score": "0.6432783", "text": "def sprites(self):\n return self._group.sprites()", "title": "" }, { "docid": "de6a149d4ec31f0f540233fe9e875647", "score": "0.6042333", "text": "def loadBullet(self):\n bullet = Projectile(self.x + 70, self.y + 47, self.facing_left)\n return bullet", "title": "" }, { "docid": "5d08deda44d612b95e9cc7514dae379e", "score": "0.5839755", "text": "def getSprite(self):\n return self.image", "title": "" }, { "docid": "ed2a8668934e3147d90c41e9897823ae", "score": "0.5698468", "text": "def make_sprite_group(self, sprites):\n tempgroup = pygame.sprite.LayeredDirty(sprites)\n return tempgroup", "title": "" }, { "docid": "74c35ed97166d369ffd7bd1447cfec0c", "score": "0.56705916", "text": "def getSprites(self):\n\n ground = load_image('OW_Ground.png')\n grass = load_image('Grass.png')\n water = load_image('Water.png')\n tree = load_image('Tree.png')\n wall = load_image('BrickWall.png')\n breakableWall = [load_image('BreakableWall.png'), load_image('BrokenWall.png')]\n shooter = self.shooterImages()\n player = self.kingOWImages()\n passage = load_image('OW_Ground.png')\n cave = load_image('CaveEntrance.png')\n ball = self.ballImages()\n blank = load_image(\"Blank.png\")\n bomb = load_image(\"BlankBomb.png\")\n nums = self.numberImages()\n potion = load_image(\"Blank_Potion_Health.png\")\n heart = [load_image('Heart_0.png'), load_image(\"Heart_1.png\"), load_image(\"Heart_2.png\"), load_image(\"Heart_3.png\"), load_image(\"Heart_4.png\")]\n kingbomb = load_image(\"Bomb.png\")\n kingarrow = self.arrowImages()\n\n return [ground, grass, water, tree, wall, breakableWall, shooter, shooter, player, passage, cave, ball, blank, bomb, nums, potion, nums, heart, heart, heart, kingbomb, kingarrow]", "title": "" }, { "docid": "7e37240fcc5dfe487b52499105727495", "score": "0.56694347", "text": "def getSprites(self):\n\n ground = load_image('CaveFloor.png')\n wall = load_image('CaveWall.png')\n BreakableWall = [load_image('CaveWallBreakable.png'), load_image('CaveWallBroken.png')]\n pickbomb = load_image('CaveBomb.png')\n player = self.kingCaveImages()\n passage = load_image('CaveEntrance.png')\n blank = load_image(\"Blank.png\")\n bomb = load_image(\"BlankBomb.png\")\n nums = self.numberImages()\n potion = load_image(\"Blank_Potion_Health.png\")\n heart = [load_image('Heart_0.png'), load_image(\"Heart_1.png\"), load_image(\"Heart_2.png\"), load_image(\"Heart_3.png\"), load_image(\"Heart_4.png\")]\n kingbomb = load_image(\"CaveBomb.png\")\n kingarrow = self.arrowImages()\n\n return [ground, wall, BreakableWall, blank, player, passage, pickbomb, blank, bomb, nums, potion, nums, heart, heart, heart, kingbomb, kingarrow]", "title": "" }, { "docid": "273f5bec30716b37e9ce87cd63123f48", "score": "0.56469", "text": "def get_sprite(self, sprite_name: str) -> Sprite:\r\n return self.sprites[sprite_name]", "title": "" }, { "docid": "af87d4c68e8aa78f98dd8062947868b3", "score": "0.56391895", "text": "def getSprites(self):\n ground = load_image('OW_Ground.png')\n grass = load_image('Grass.png')\n water = load_image('Water.png')\n tree = load_image('Tree.png')\n troll = self.trollImages()\n bat = self.batImages()\n player = self.kingOWImages()\n passage = load_image('OW_Ground.png')\n javelin = self.javelinImages()\n blank = load_image(\"Blank.png\")\n bomb = load_image(\"BlankBomb.png\")\n nums = self.numberImages()\n potion = load_image(\"Blank_Potion_Health.png\")\n heart = [load_image('Heart_0.png'), load_image(\"Heart_1.png\"), 
load_image(\"Heart_2.png\"), load_image(\"Heart_3.png\"), load_image(\"Heart_4.png\")]\n kingbomb = load_image(\"Bomb.png\")\n kingarrow = self.arrowImages()\n\n return [ground, grass, water, tree, troll, bat, player, passage, passage, javelin, blank, bomb, nums, potion, nums, heart, heart, heart, kingbomb, kingarrow]", "title": "" }, { "docid": "d3f46e104f86b8435759121be7d1dcc3", "score": "0.5470347", "text": "def __spawn_bullet (self):\n\n mouse_pos = pygame.mouse.get_pos()\n new_bullet = Bullet(settings_game[\"bullet_type_string\"], mouse_pos,\n self.player.rect.center)\n self.bullets.add(new_bullet)\n self.player.bullets -= 1", "title": "" }, { "docid": "c5ebffe304deb559e74685146db5381d", "score": "0.52890384", "text": "def LoadSprites(self):\n self.mainCloud = MainCloud()\n self.cloud_sprites = pygame.sprite.GroupSingle(self.mainCloud)\n self.grass_sprites = pygame.sprite.Group()\n for x in range(self.numOfCloudsHorizontal):\n for y in range(self.numOfCloudsVertical):\n self.grass_sprites.add(Grass(pygame.Rect(x*10,y*10,10,10))) # rect(left,top,w,h)\n self.black_clouds_sprites = pygame.sprite.Group()", "title": "" }, { "docid": "8013222ce56a01f2c92d78980d7a934d", "score": "0.5272742", "text": "def get_drawables(self):\n return [self.my_group, self.background.grass, self.platform_handler.platforms, self.projectiles.shurikens]", "title": "" }, { "docid": "cb1ebe1084a531c727f7478b50f0e5cd", "score": "0.52546614", "text": "def createExplosionTextureList():\r\n columns = 16\r\n count = 60\r\n sprite_width = 256\r\n sprite_height = 256\r\n file_name = \":resources:images/spritesheets/explosion.png\"\r\n list_textures = arcade.load_spritesheet(file_name, sprite_width, sprite_height, columns, count)\r\n return list_textures", "title": "" }, { "docid": "5e0dbae9e78215f26efddb4f159a127e", "score": "0.5230385", "text": "def sprite(self, name):\n return sprite.Sprite(image=self.image,\n image_pos = self.sprites[name])", "title": "" }, { "docid": "95c51affb86dda7bfc1736359af48926", "score": "0.52288735", "text": "def shoot_bullet(self):\n current_time = pygame.time.get_ticks()\n if current_time - self.last_bullet_shot > DELAY:\n self.last_bullet_shot = current_time\n bullet = Bullet(self.rect.centerx, self.rect.top)\n all_bullets.add(bullet)\n all_sprites.add(bullet)", "title": "" }, { "docid": "1bcda63ad4bd80eda630120ed29e16b1", "score": "0.51727575", "text": "def draw_bullet(self):\r\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "0518ef58044ce52b20f27062eab966b6", "score": "0.5167384", "text": "def _fire_bullet(self):\n\t\tif len(self.bullets) < self.settings.bullet_allowed:\n\t\t\tnew_bullet = Bullet(self)\n\t\t\tself.bullets.add(new_bullet) # Add the new bullet to the group", "title": "" }, { "docid": "999da1c5050c976ef42556ea849a18bc", "score": "0.514254", "text": "def bulletList(self):\n bullet_list = []\n for p in self.users:\n for b in p.bullet_list:\n b.update()\n bullet_list.append(b.toObj())\n return bullet_list", "title": "" }, { "docid": "503cd7e90e43dd919eb7787b24aabc09", "score": "0.51109165", "text": "def get_overlapping_sprites(self):\n self._check_overlap()\n return self._overlapping_sprites", "title": "" }, { "docid": "23c1f473ed2b4f8d01a1bcce8f2c4405", "score": "0.5107428", "text": "def draw_bullet(self):\n pygame.draw.rect(self.screen, self.color,self.rect)", "title": "" }, { "docid": "bd067004ebf19648e4909101abc34a15", "score": "0.5106111", "text": "def draw_bullet(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" 
}, { "docid": "bd067004ebf19648e4909101abc34a15", "score": "0.5106111", "text": "def draw_bullet(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "bd067004ebf19648e4909101abc34a15", "score": "0.5106111", "text": "def draw_bullet(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "bd067004ebf19648e4909101abc34a15", "score": "0.5106111", "text": "def draw_bullet(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "bd067004ebf19648e4909101abc34a15", "score": "0.5106111", "text": "def draw_bullet(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "title": "" }, { "docid": "35a504cc1d3d0966339865ca97be9c40", "score": "0.5081203", "text": "def draw_bullet(self):\n pygame.draw.rect(self._screen, self._color, self.rect)", "title": "" }, { "docid": "ebc5554d77a51f7254a6b5c5d74e94ed", "score": "0.50686556", "text": "def player_shoot(self):\n self.playerbullet = PlayerBullet(self.player.rect.centerx, self.player.rect.top, -10, YELLOW)\n self.all_sprites.add(self.playerbullet)\n self.player_bullets.add(self.playerbullet)", "title": "" }, { "docid": "2a01f034c1b151eb3455d2b9ec56705f", "score": "0.5066055", "text": "def loadSprite(filename):\n with open(filename) as f:\n sprite = []\n for line in f:\n sprite.append(line.rstrip())\n return sprite", "title": "" }, { "docid": "5ce7635e763cdf23731ac05a8baf867c", "score": "0.50316167", "text": "def spriteType(self):\n return 'background'", "title": "" }, { "docid": "cfabe96a7a3b0a7d720eeab0995f678e", "score": "0.502615", "text": "def handle_bullet_collisions(self):\r\n # reduce the health value of the enemy\r\n self.enemy_health.reduce_health(10)\r\n\r\n if self.enemy_health.current_health <= 0: # remove enemy sprite from enemies Group\r\n self.kill()\r\n self.inheritance_packs.update_enemy_pos(self.rect.x, self.rect.y)\r\n return self.inheritance_packs\r\n return None", "title": "" }, { "docid": "705462825acf29f772e84fb92688bc4b", "score": "0.50234526", "text": "def get_bullet_points(self, product):\n bullet_points = ''\n for bullet in product.bullet_point_ids:\n bullet_point = \"\"\"<BulletPoint>%s</BulletPoint>\"\"\" % (html.escape(bullet.name))\n bullet_points = '%s %s' % (bullet_points, bullet_point)\n if product.bullet_point_ids:\n return bullet_points", "title": "" }, { "docid": "59d71d576b1eb65d3abeb1e08cb7a5d2", "score": "0.5013284", "text": "def get_sprite_pack(self, sprite_pack_name: str) -> SpritePack:\r\n return self.sprite_packs[sprite_pack_name]", "title": "" }, { "docid": "7238078d0a619f50cf7bc2e3cd00752c", "score": "0.4974574", "text": "def draw_bullet(bullet_obj, game_screen):\n game_screen.blit(bullet_obj.get_img(), (bullet_obj.get_x(), bullet_obj.get_y()))", "title": "" }, { "docid": "37f4aa9392dcefe4fc82fa0217755438", "score": "0.49725246", "text": "def spriteType(self):\n return 'player'", "title": "" }, { "docid": "74a6be708d11175815edbed73ceb2315", "score": "0.49511772", "text": "def show_pile(self):\n img_list = []\n for card in range(len(self.pile)):\n if card <= 2 :\n img = pygame.image.load(\"assets/\"+str(self.pile[card])+\".png\")\n img_list.append(img)\n return img_list", "title": "" }, { "docid": "e7957c421206c66e6a8afce3e0238952", "score": "0.49264708", "text": "def sprite_layers(self) -> Iterator:\n return sorted(self, key=lambda s: getattr(s, \"layer\", 0))", "title": "" }, { "docid": "f41aa2cfc170fc045a29f59b7ccd28fd", "score": "0.49092758", "text": "def _generate_level(self):\n for t in 
range(self.LEVEL_WIDTH * self.LEVEL_HEIGHT):\n tile = Tile(\"sprites/grass.png\")\n self.LEVEL.append(tile)\n self.sprite_group.add(tile)\n self.LEVEL[0].image.fill((0xff, 0xff, 0xff))\n self.LEVEL[self.LEVEL_WIDTH * self.LEVEL_HEIGHT - 1].image.fill((0, 0, 0))\n print(*self.sprite_group)", "title": "" }, { "docid": "f52edde00d04810b1428c6129f752ce9", "score": "0.490017", "text": "def shoot(self):\n # Instantiates a bullet\n bullet = Bullet()\n bullet.center.x = self.center.x + \\\n math.cos(math.radians(self.angle)) * 10\n bullet.center.y = self.center.y + \\\n math.sin(math.radians(self.angle)) * 10\n bullet.angle = self.angle\n\n bullet.velocity.dx = math.cos(math.radians(self.angle)) * bullet.speed\n bullet.velocity.dy = math.sin(math.radians(self.angle)) * bullet.speed\n\n return bullet", "title": "" }, { "docid": "b5a9306cc7e9f16f7c0f4badd90ff00c", "score": "0.48826018", "text": "def double_fire_smallBullet(self):\r\n \r\n leftB = SmallWeapon()\r\n rightB = SmallWeapon()\r\n leftB.rect.x, leftB.rect.y = self.rect.topleft\r\n rightB.rect.x, rightB.rect.y = self.rect.topright\r\n\r\n # reset the power value and bar\r\n self.power.clear_power(100)\r\n self.bullets_group.add(leftB, rightB)\r\n self.allSprites_group.add(leftB, rightB)", "title": "" }, { "docid": "fd4669578bb9cdd05dacc0868372da21", "score": "0.4875645", "text": "def spritesheet() -> None:", "title": "" }, { "docid": "ba1f50c7fa6efa77a4b47d7b5d81a7a3", "score": "0.48697823", "text": "def SpriteCollision(sprite, spriteGroup, kill=False):\n return pygame.sprite.spritecollide(sprite, spriteGroup, kill)", "title": "" }, { "docid": "6f3055a8bc6772b3220b921c77f69546", "score": "0.48484784", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\n # sounds for bullets\n shoot = pygame.mixer.Sound('sounds/laser.wav')\n\n # Create a new bullet and add it to the bullets group\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings=ai_settings, screen=screen, ship=ship)\n bullets.add(new_bullet)\n shoot.play()", "title": "" }, { "docid": "ef84ec9ed1bb5064cc838df8d15e8094", "score": "0.4846823", "text": "def _fire_bullet(self):\r\n\t\tif len(self.bullets) < 3:\r\n\t\t\tnew_bullet = Bullet(self)\r\n\t\t\tself.bullets.add(new_bullet)", "title": "" }, { "docid": "f3a6842426109a35bdb55404c30189c3", "score": "0.48170656", "text": "def implode(self, player_x, screen_height):\n bullets = []\n for i in range(10):\n bullets.append(Knife(self.rect.x, self.rect.y, player_x, screen_height * i/10))\n return bullets", "title": "" }, { "docid": "e158dd1ec5956c950b7acd816553cd5a", "score": "0.48120666", "text": "def get_drawables(self) -> List[Drawable]:\n return self.drawables", "title": "" }, { "docid": "66257202c264e74a43435a3b5bb173d1", "score": "0.4810104", "text": "def remove_dead(group):\r\n for sprite in group:\r\n if not sprite.alive:\r\n group.remove(sprite)", "title": "" }, { "docid": "1bf6549aa5524d8ce7df3bd8e692d6b7", "score": "0.4807381", "text": "def list_group_icons(self):\n return [(e.struct.Name, e.struct.OffsetToData)\n for e in self.groupiconres.directory.entries]", "title": "" }, { "docid": "bf5a5eb76183666b69515bf710369418", "score": "0.48047295", "text": "def get_groups(self):\n return self._groups", "title": "" }, { "docid": "61697237fc3a7480e7392ce1d652db50", "score": "0.47825295", "text": "def game_loop(self):\n clock = pygame.time.Clock()\n bullet_sound = pygame.mixer.Sound('Sound/bullet.wav')\n score_font = pygame.font.SysFont('arial', 30, True)\n shoot_loop = 0\n bullets = []\n dino = 
Player(self.level.get_player_start_position())\n\n while True:\n clock.tick(40)\n\n dino.gun = self.level.dino_gun\n\n if dino.pos.x + 350 > self.screen_width:\n self.level.shift_world(-5, 0)\n dino.slide(-5, 0)\n if dino.pos.x - 350 <= 0 and self.level.world_shift_x < 0:\n self.level.shift_world(5, 0)\n dino.slide(5, 0)\n if self.level.world_shift_x <= self.level.level_limit:\n print(self.levels.index(self.level) + 1)\n try:\n self.level = self.levels[self.levels.index(self.level) + 1]\n except IndexError:\n self.end_screen(dino)\n return False\n dino.reset()\n\n if shoot_loop > 0:\n shoot_loop += 1\n if shoot_loop > 4:\n shoot_loop = 0\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n dino.jump()\n if event.key == pygame.K_BACKQUOTE:\n for enemy in self.level.enemys:\n enemy.debug_draw(\n draw_bounds=not enemy.draw_bounds,\n draw_hitbox=not enemy.draw_hitbox)\n dino.debug_draw(draw_hitbox=not dino.draw_hitbox)\n\n if not self.jump_button.get_state():\n jump_flag = True\n if self.jump_button.get_state():\n if jump_flag:\n dino.jump()\n jump_flag = False\n\n for bullet in bullets:\n remove = False\n hits = pygame.sprite.spritecollide(bullet,\n self.level.platforms, False)\n if hits:\n remove = True\n if (bullet.pos.x < self.screen_width) and (bullet.pos.x > 0):\n bullet.update_location()\n else:\n remove = True\n\n hits = pygame.sprite.spritecollide(bullet, self.level.enemys,\n False)\n for hit in hits:\n self.score += 5\n hit.hit()\n remove = True\n\n if remove:\n bullets.pop(bullets.index(bullet))\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_LEFT] or self.left_button.get_state():\n dino.move_left()\n elif keys[pygame.K_RIGHT] or self.right_button.get_state():\n dino.move_right()\n else:\n dino.stop()\n\n if keys[pygame.K_ESCAPE]:\n return\n if keys[pygame.K_a]:\n print(dino.pos.x - self.level.world_shift_x)\n\n if (keys[pygame.K_SPACE] or self.shoot_button.get_state()) and (\n shoot_loop == 0) and dino.gun:\n if dino.facing_left:\n facing = -1\n else:\n facing = 1\n\n if len(bullets) < 5:\n bullets.append(\n Projectile(\n int(dino.rect.centerx), int(dino.rect.centery), 6,\n (0, 0, 0), facing))\n bullet_sound.play()\n shoot_loop = 1\n\n dino.update_location(self.screen_height)\n\n self.win.blit(self.level.background, (0, 0))\n text = score_font.render('Score:' + str(self.score), 1, (0, 0, 0))\n self.win.blit(text, (0, 10))\n text = score_font.render(\n 'Speed:' + str(self.ellip.get_speed()) + \" RPM\", 1, (0, 0, 0))\n self.win.blit(text, (0, 40))\n heart_rate = 132\n text = score_font.render('Heart Rate:' + str(heart_rate), 1,\n (0, 0, 0))\n self.win.blit(text, (0, 70))\n\n hits = pygame.sprite.spritecollide(dino, self.level.platforms,\n False)\n for hit in hits:\n dino.touch_down(hit.rect, hit.friction)\n\n hits = pygame.sprite.spritecollide(dino, self.level.enemys, False)\n for hit in hits:\n dino.hit(hit)\n if dino.rect.bottom <= (hit.rect.centery -\n (hit.rect.height // 4)\n ) and dino.rect.bottom >= hit.rect.top:\n self.score += 20\n if dino.dead:\n self.level.shift_world(0 - self.level.world_shift_x,\n 0 - self.level.world_shift_y)\n dino.reset()\n self.level.draw(self.win)\n dino.draw(self.win)\n for bullet in bullets:\n bullet.draw(self.win)\n pygame.display.update()", "title": "" }, { "docid": "6017b8cd03ee3844893912aa72f7c490", "score": "0.47737974", "text": "def spriteType(self):\n #returns the type of sprite we've created\n return 'enemy'", "title": "" }, { "docid": 
"443e85be50488c30952cc7b9962788fb", "score": "0.47479936", "text": "def initialize_tiles():\n sprite_sheet = spritesheet.SpriteSheet(TILES_TEXTURES)\n # Extracting sprites out of the linear sprite sheet\n tiles = sprite_sheet.all_sprites(\n mahjong_tiles_dictionary,\n SCREEN_WIDTH / 2,\n SCREEN_HEIGHT / 2)\n tile_sprite_group = pygame.sprite.Group()\n\n # Constructing the tile group for rendering and collision detection\n for i in range(len(tiles)):\n # print(tiles[i][0])\n tile_sprite_group.add(tiles[i][0])\n\n return tile_sprite_group", "title": "" }, { "docid": "52e5d8d4c7d99e5597ff0fd4cc72156c", "score": "0.47370762", "text": "def update_bullets(self):\r\n #Update bullet positions\r\n self.bullets.update()\r\n\r\n #Get rid of bullets that have disappeared.\r\n\r\n for bullet in self.bullets.copy():\r\n if bullet.rect.bottom <= 0:\r\n self.bullets.remove(bullet)\r\n print(len(self.bullets))\r\n\r\n #Check for any bullets that have hit enemy planes\r\n\r\n #IF so, get rid of the bullet and the plane\r\n\r\n collisions = pygame.sprite.groupcollide(\r\n self.bullets, self.enemy_planes, True, True)", "title": "" }, { "docid": "cdb2e121935fae972edc49c70f440b53", "score": "0.47323436", "text": "def double_fire_bigBullet(self):\r\n leftB = BigWeapon()\r\n rightB = BigWeapon()\r\n leftB.rect.x, leftB.rect.y = self.rect.topleft\r\n rightB.rect.x, rightB.rect.y = self.rect.topright\r\n\r\n # reset the power bar and value\r\n self.power.clear_power(200)\r\n self.bullets_group.add(leftB, rightB)\r\n self.allSprites_group.add(leftB, rightB)", "title": "" }, { "docid": "000b755999bdcb33324f179208c65811", "score": "0.47318715", "text": "def __get_timer(self) -> ImageSpriteTimer:\n return self.__timer.sprite", "title": "" }, { "docid": "ba04a1781c82d2fff62aadf884922d44", "score": "0.47135156", "text": "def BulletMove(self):", "title": "" }, { "docid": "69c60a183bb23e2bb66fff7316927ce4", "score": "0.47133464", "text": "def spriteType(self):\n return 'health bar'", "title": "" }, { "docid": "1e0943314bcde818d5833f50a5778dbd", "score": "0.4692101", "text": "def shoot(self, player_x, player_y):\n self.bullet_counter += 1\n self.bullet_counter %= self.bullet_counter_reset # modulo\n\n if self.bullet_counter in self.bullet_frames:\n self.pew_sound.play()\n return Knife2(self.rect.x, self.rect.y, player_x)\n else:\n return None", "title": "" }, { "docid": "793e60563325970dd3ee6bf05780887a", "score": "0.46920672", "text": "def playercollideany(sprite, group):\n spritecollide = sprite.rect.colliderect\n for s in group:\n if spritecollide(s.rect) and s != sprite:\n return s\n return None", "title": "" }, { "docid": "becdf8f3639deda811300b83c2646bfe", "score": "0.46877676", "text": "def fire_bullet(ai_settings, screen, ship, ship_bullets):\n # Create a new bullet and add it to the bullets group\n if len(ship_bullets) < ai_settings.ship_bullets_allowed:\n new_bullet = Ship_Bullet(ai_settings, screen, ship)\n ship_bullets.add(new_bullet)", "title": "" }, { "docid": "0ada7620b420bef1ce111fa14fdfc236", "score": "0.4684961", "text": "def bullet(radius, outline, fill):\n \n # init drawing\n bitmap = wx.Bitmap(2*radius+2, 2*radius+2)\n mdc = wx.MemoryDC()\n mdc.SelectObject(bitmap)\n dc = wx.GCDC(mdc) if wx.Platform != \"__WXMSW__\" else mdc\n \n # draw bullet\n dc.SetPen(outline)\n dc.SetBrush(fill)\n dc.DrawCircle(radius+1, radius+1, radius)\n \n # release bitmap\n mdc.SelectObject(wx.NullBitmap)\n \n # set mask\n if wx.Platform == \"__WXMSW__\":\n bitmap.SetMaskColour(wx.BLACK)\n \n return bitmap", "title": "" }, { 
"docid": "9492b3ca35f0f4164f1af65b7864e233", "score": "0.46735826", "text": "def fire_smallWeapon(self):\r\n now = pygame.time.get_ticks()\r\n if now - self.smBullet_timer >= self.sm_bullet_attack_interval:\r\n self.smBullet_timer = now\r\n\r\n sm_bullet = SmallWeapon()\r\n # place it the position of the player\r\n mid_x, mid_y = self.rect.midtop\r\n sm_bullet.rect.x = mid_x\r\n sm_bullet.rect.y = mid_y\r\n sm_bullet.add(self.allSprites_group, self.bullets_group)", "title": "" }, { "docid": "7cfd270247041cd9d0dfa2755bd549e0", "score": "0.46691424", "text": "def _fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self) # limit 3 at a time\n self.bullets.add(new_bullet) # add new bullets to bullets", "title": "" }, { "docid": "7274b657148ea4334cec0234ca592566", "score": "0.46582806", "text": "def _fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)\n self.ship_fire_sound.play()", "title": "" }, { "docid": "dee57bac06361bbf43bbe1fd1e92543d", "score": "0.46534377", "text": "def sprite(index: int, x: int, y: int) -> None:", "title": "" }, { "docid": "a9d7302d7ac83511a0f779d5daeceaaf", "score": "0.46506235", "text": "def charger_sprite(self):\r\n for mouvement in self.animation: # Parcours des mouvements\r\n numero = 0 # Compteur utilisé dans le parcours des sprites\r\n for sprite in self.animation[mouvement]: # Parcours des sprites\r\n if isinstance(sprite, str): # Si le sprite est encore un texte\r\n img = pg.image.load(sprite).convert_alpha() # Charger Img\r\n self.animation[mouvement][numero] = img # Sauvegarder\r\n numero += 1 # Numéro du sprite actuel + 1\r", "title": "" }, { "docid": "69fcc736a6c327a022d084a74ada7be3", "score": "0.46479815", "text": "def update_bullets(g_settings, screen, stats, ship, scores, \\\n\t\t\t\t\t\t\tprojectiles, powerups, enter, exit, \\\n\t\t\t\t\t\t\t\t\t\t\taliens=0, twits=0):\n\n\tglobal twits_list\n\tglobal total_twits\n\t# OPT maybe just pass them by index to functions? These reassign every time\n\tbombs \t= projectiles[0]\n\tbullets = projectiles[1]\n\tlazers \t= projectiles[2]\n\n\t\"\"\" \n\t\tThere are several ways to determine an explosion. \n\t\ti.e. use the global vars - determine where to mark \n\t\tan explosion based upon which twit disappeared. \n\n\t\tbullets is a general term, unfortunately. It could either\n\t\tbe a bomb, lazer, bullet or gun projectile\n\n\t\"\"\"\n\n\t# REFAC\n\tbullets.update()\n\tpowerups.update()\n\tlazers.update()\n\tbombs.update()\n\t# Flag\n\n\t# REFAC\n\tfor item in projectiles:\n\t\t# Copy, because you cannot modify the sprite group WHILE iterating over it. 
Pygame constraint\n\t\tfor proj in item.copy():\n\t\t\tif proj.rect.bottom <= 0 or proj.rect.top >= 800\\\n\t\t\t\tor proj.rect.x <= 0 or proj.rect.x >= 1200:\n\t\t\t\titem.remove(proj)\n\t\n\tfor power in powerups.copy():\n\t\tif power.rect.top > 800:\n\t\t\tpowerups.remove(power)\n\n\tshot_down = pygame.sprite.groupcollide(bullets, twits, True, False)\n\tbomb_down = pygame.sprite.groupcollide(bombs, twits, True, False)\n\tlaze_down = pygame.sprite.groupcollide(lazers, twits, False, False)\n\t\n\t# Shoosting\n\tif shot_down:\n\t\t\"\"\" specific to bullet hits \"\"\"\n\t\tfor members in shot_down.values():\n\t\t\t# Calculate score per kill and drop powerups\n\t\t\tcheck_kills(g_settings, screen, stats, twits, \\\n\t\t\t\t\t\tmembers, powerups, weapon=\"bullets\")\n\t\t\tbreak\n\t\t# Update Score\n\t\tscores.prep_score()\n\tif bomb_down:\n\n\t\t\"\"\" specific to bullet hits \"\"\"\n\t\tfor members in bomb_down.values():\n\t\t\t# Calculate score per kill and drop powerups\n\t\t\tcheck_kills(g_settings, screen, stats, twits, \\\n\t\t\t\t\t\t\tmembers, powerups, weapon=\"bomb\")\n\t\t\tbreak\n\t\t# Update Score\n\t\tscores.prep_score()\n\tif laze_down:\n\t\t\"\"\" specific to bullet hits \"\"\"\n\t\tfor members in laze_down.values():\n\t\t\t# Calculate score per kill and drop powerups\n\t\t\tcheck_kills(g_settings, screen, stats, twits, \\\n\t\t\t\t\t\t\tmembers, powerups, weapon=\"lazer\")\n\t\t\tbreak\n\t\t# Update Score\n\t\tscores.prep_score()\n\n\t\"\"\" insert other projectile instances here \"\"\"\n\n\t# Powah\n\tget_power = pygame.sprite.spritecollideany(ship, powerups)\n\tif get_power: # The power\n\n\t\t# Changes apply to g_settings values\n\t\t# Initializes ship power from g_settings\n\t\tship.power_up(pwr=apply_power(g_settings, stats, str(get_power.pwr), enter))\n\n\t\tpowerups.remove(get_power)", "title": "" }, { "docid": "b8cdcce4f561a5a3ae76cf1be4e97431", "score": "0.4646886", "text": "def extrae_sprite(fichero_imagen):\n sprites_final = []\n return sprites_final", "title": "" }, { "docid": "4b597e4a87c54b7eca6ca00a52cca347", "score": "0.46461502", "text": "def GFX(self):\n return self.GFXlist[0]", "title": "" }, { "docid": "2aa1e6644d37c20aaca72c0d3d9d1f58", "score": "0.46369523", "text": "def draw(self, screen):\n for sprite in self._group:\n sprite.draw(screen)", "title": "" }, { "docid": "1d54171f70aa40692be43a9efa6154ba", "score": "0.46335545", "text": "def bullet(r,h):\n sphere = Sphere(r=1)\n cut_cube = Cube(size=[3*r, 3*r, 3*r])\n cut_cube = Translate(cut_cube,v=[0,0,-1.5*r])\n bullet = Difference([sphere, cut_cube])\n vscale = [r,r,h]\n bullet = Scale(bullet,v=vscale)\n return [bullet]", "title": "" }, { "docid": "37c986c1d4f0bcb32e802dc6d3758de7", "score": "0.46294105", "text": "def __init__(self, player):\r\n self.platform_list = pygame.sprite.Group()\r\n self.enemy_list = pygame.sprite.Group()\r\n \r\n self.player = player", "title": "" }, { "docid": "4bf0ef06008ac263371cadc5515f3fb3", "score": "0.46194234", "text": "def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n \r\n self.image = boss1\r\n \r\n self.rect = self.image.get_rect()", "title": "" }, { "docid": "60ad9af90fe5cc7d9ae4068624166c65", "score": "0.46187717", "text": "def getGroups():", "title": "" }, { "docid": "449d7293fdd1dc66f73f5ac335c166e5", "score": "0.45991948", "text": "def _fire_bullets(self):\r\n if len(self.bullets) <= self.settings.bullets_allowed:\r\n new_bullet = Bullet(self)\r\n self.bullets.add(new_bullet)\r\n se.bullet_sound.play()", "title": "" }, { "docid": 
"29104e9720cfca955f441ba718bf39ea", "score": "0.45642242", "text": "def _fire_bullet(self):\r\n if len(self.bullets) < self.settings.bullets_allowed:\r\n new_bullet = Bullet(self)\r\n self.bullets.add(new_bullet)", "title": "" }, { "docid": "29104e9720cfca955f441ba718bf39ea", "score": "0.45642242", "text": "def _fire_bullet(self):\r\n if len(self.bullets) < self.settings.bullets_allowed:\r\n new_bullet = Bullet(self)\r\n self.bullets.add(new_bullet)", "title": "" }, { "docid": "222bc76c146747a66d2ea99a45de382d", "score": "0.4561723", "text": "def DrawBullet(self):\r\n self._display_surf.blit(self.bullet, self.bulletrect)\r\n self._display_surf.blit(self.plane, self.planerect)\r\n self.bulletrect = self.bulletrect.move(self.speed_down)", "title": "" }, { "docid": "abd7def2c58198686c2289efc35b13b7", "score": "0.45615393", "text": "def fire_bigWeapon(self):\r\n now = pygame.time.get_ticks()\r\n if now - self.bgBullet_timer >= self.bg_bullet_attack_interval:\r\n self.bgBullet_timer = now\r\n\r\n bg_bullet = BigWeapon()\r\n # place it the position of the player\r\n mid_x, mid_y = self.rect.midtop\r\n bg_bullet.rect.x = mid_x\r\n bg_bullet.rect.y = mid_y\r\n bg_bullet.add(self.allSprites_group, self.bullets_group)", "title": "" }, { "docid": "e7cd37a7d408d457a168468469c97785", "score": "0.45562005", "text": "def draw_bullet(self):\r\n pygame.Surface.blit( self.screen,self.rotatedSurface, self.rect)", "title": "" }, { "docid": "a1149c2beb8e12473dd7e7f538e65f97", "score": "0.45488045", "text": "def _fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)", "title": "" }, { "docid": "a1149c2beb8e12473dd7e7f538e65f97", "score": "0.45488045", "text": "def _fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)", "title": "" }, { "docid": "a1149c2beb8e12473dd7e7f538e65f97", "score": "0.45488045", "text": "def _fire_bullet(self):\n if len(self.bullets) < self.settings.bullets_allowed:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)", "title": "" }, { "docid": "9ec008fdcd541120d3348fdc05adec70", "score": "0.4543154", "text": "def shooting(self):\n return self.skills[Skill.Shooting]", "title": "" }, { "docid": "aa98bc992cbe2f9a8b15ad5f61048160", "score": "0.45428386", "text": "def update_bullets(bullets):\n bullets.update()\n\n #Get rid of bullets that have disappeared .\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)", "title": "" }, { "docid": "b69e2ad9bc7d98756a7580463ee06a46", "score": "0.45413607", "text": "def create_and_add_digit(self, digit_group):\n digit_object = DigitSprite(self.settings, self.screen, self.settings.image_res.lcd_digit_images, 0)\n digit_group.add(digit_object)\n return digit_object", "title": "" }, { "docid": "b0546d740d537048444ac1a0912e7437", "score": "0.45328695", "text": "def getSpritesbyClass(cls, sclass):\n return App._spritesdict.get(sclass, [])", "title": "" }, { "docid": "0eb76d97571dd8fe8429455b15afd8ef", "score": "0.45319355", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "623572ac2bbf5301fd61024e05683afd", "score": "0.45170495", "text": "def update_bullets(ai_settings, screen, stats, score_board, ship, aliens, bullets):\n bullets.update()\n\n for bullet in bullets.copy():\n if bullet.rect.bottom 
<= 0:\n bullets.remove(bullet)\n\n check_bullet_alien_collision(ai_settings, screen, stats, score_board, ship, aliens, bullets)", "title": "" }, { "docid": "00820041d56408b1fd99e76b9e8ad2ee", "score": "0.4512272", "text": "def __fire_bullet(self):\n if len(self.__bullets) < self.__ai_settings.bullets_allowed:\n new_bullet = Bullet(self.__ai_settings, self.__screen,\n self.__ship)\n self.__bullets.add(new_bullet)", "title": "" }, { "docid": "bd3cac4a36a4104df67f554e49fb8d0b", "score": "0.45053065", "text": "def sprite(self, x, y, width, height):\n return Image(self.sheet, x, y, width, height)", "title": "" }, { "docid": "3c49b51caad36257a76387911fa09063", "score": "0.44994795", "text": "def create_icons_children(children, batch_labels):\n i = 1\n icons = []\n for child in children:\n small_icon = pyglet.sprite.Sprite(child.img_happy, x=35, y=490 - i*19, batch=batch_labels)\n small_icon.scale = 0.11\n icons.append(small_icon)\n i += 1\n return icons", "title": "" }, { "docid": "9bd26d07175332a724587ef3f2c6d2d5", "score": "0.44973153", "text": "def on_mouse_press(self, x, y, button, modifiers):\n\n # Only allow the user so many bullets on screen at a time to prevent\n # them from spamming bullets.\n if len(self.player_bullet_list) < MAX_PLAYER_BULLETS:\n\n # Gunshot sound\n gun_sounds = [\"throw1\", \"throw3\", \"throw4\", \"throw4\"]\n random_sound = gun_sounds[random.randrange(0, 4)]\n self.gun_sound = arcade.load_sound(str(Path(__file__).parent.resolve()) + f\"\\\\assets\\\\{random_sound}.wav\")\n arcade.play_sound(self.gun_sound)\n\n # Create a player bullet randomly from the list of options\n player_bullets = [\"frying_pan\", \"rolling_pin\", \"spoon\", \"knife\"]\n random_bullet = player_bullets[random.randrange(0, 4)]\n bullet_sprite_path = str(Path(__file__).parent.resolve()) + f\"\\\\assets\\\\{random_bullet}.png\"\n bullet = arcade.Sprite(bullet_sprite_path, SPRITE_SCALING_LASER)\n\n # The image points to the right, and we want it to point up. 
So\n # rotate it.\n # Set up the initial angle, and the \"spin\"\n bullet.angle = random.randrange(360)\n bullet.change_angle = random.randrange(15, 16)\n\n\n # Give the bullet a speed\n bullet.change_y = BULLET_SPEED\n\n # Position the bullet\n bullet.center_x = self.player_sprite.center_x\n bullet.bottom = self.player_sprite.top\n\n # Add the bullet to the appropriate lists\n self.player_bullet_list.append(bullet)", "title": "" }, { "docid": "c1a3d411737694c89bfda90119f3d221", "score": "0.4478823", "text": "def _get_group_icon_entries(self, num=0):\n groupicon = self.groupiconres.directory.entries[num]\n if groupicon.struct.DataIsDirectory:\n # Select the first language from subfolders as needed.\n groupicon = groupicon.directory.entries[0]\n\n # Read the data pointed to by the group icon directory (GRPICONDIR) struct.\n rva = groupicon.data.struct.OffsetToData\n size = groupicon.data.struct.Size\n data = self._pe.get_data(rva, size)\n file_offset = self._pe.get_offset_from_rva(rva)\n\n grp_icon_dir = self._pe.__unpack_data__(GRPICONDIR_FORMAT, data, file_offset)\n # logger.debug(grp_icon_dir)\n\n if grp_icon_dir.Reserved:\n raise InvalidIconDefinitionError(\"Invalid group icon definition (got Reserved=%s instead of 0)\" % hex(grp_icon_dir.Reserved))\n\n # For each group icon entry (GRPICONDIRENTRY) that immediately follows, read its data and save it.\n grp_icons = []\n icon_offset = grp_icon_dir.sizeof()\n for idx in range(grp_icon_dir.Count):\n grp_icon = self._pe.__unpack_data__(GRPICONDIRENTRY_FORMAT, data[icon_offset:], file_offset+icon_offset)\n icon_offset += grp_icon.sizeof()\n grp_icons.append(grp_icon)\n # logger.debug(\"Got logical group icon %s\", grp_icon)\n\n return grp_icons", "title": "" }, { "docid": "2b9973149da558072cb0d3533d6c0d92", "score": "0.4469877", "text": "def fire_bullet(ai_settings, screen, ship, bullets):\n #create a new bullet if limit not reached yet.\n if len(bullets) < ai_settings.bullet_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)", "title": "" }, { "docid": "6b28e48909366e80064165f63540e562", "score": "0.4454238", "text": "def getSpriteSheet(cls, filename):\n try:\n return cls.spritesheets[filename]\n except KeyError:\n pass\n\n # La feuille de sprite n'était pas en mémoire, on va donc l'ajouter\n spritesheet = SpriteSheet(filename)\n cls.spritesheets[filename] = spritesheet\n\n # Et on la retourne\n return spritesheet", "title": "" }, { "docid": "ff2828373c176f42f5a35971bf1c0aa8", "score": "0.44487923", "text": "def _update_bullets(self):\n # Update bullet position.\n # Below we will control to get rid of the bullets that have\n # Dissapear.\n self.bullets.update()\n for bullet in self.bullets.copy():\n if bullet.rect.bottom <= 0:\n self.bullets.remove(bullet)\n # print(len(self.bullets))", "title": "" }, { "docid": "468e8cc68c276656f2d2ab6a925d802d", "score": "0.44471857", "text": "def get_all_sprites(self):\n all_sprites = self.maze.walls + self.maze.gates + \\\n self.maze.items + self.maze.guardians\n all_sprites.append(self.character)\n\n return all_sprites", "title": "" }, { "docid": "38dc2c8f0f81a1ee121b28b7e7b9e2b8", "score": "0.44355947", "text": "def bulletslide(self,\n title='Here goes the title of the slide',\n bullets=[], # list of bullet points\n dim=False, # dimming of bullet points\n intro='',\n outro='',\n figure=None, # filename(s) with figure(s)\n figure_pos='s', # north, east, south, west\n figure_fraction_width=1.0,\n figure_angle=0, # indicates rotation (90, 270)\n bullet_block=True,\n 
bullet_block_heading='',\n intro_block = True,\n intro_block_heading = '',\n outro_block = True,\n outro_block_heading = '',\n left_column_width=0.5,\n header_footer=None,\n hide=False,\n ):\n content = []\n if intro:\n if intro_block:\n if intro_block_heading:\n intro = TextBlock(intro, heading=intro_block_heading)\n else:\n intro = TextBlock(intro)\n else:\n intro = Text(intro)\n content.append(intro)\n if bullets:\n bulletblock = BulletBlock(bullets, heading=bullet_block_heading)\n content.append(bulletblock)\n if outro:\n if outro_block:\n if outro_block_heading:\n outro = TextBlock(outro, heading=outro)\n else:\n outro = TextBlock(outro)\n else:\n outro = TextBlock(outro)\n content.append(outro)\n return latexslides.Slide(title=title, content=content, dim=dim,\n figure=figure, figure_pos=figure_pos,\n figure_size=figure_fraction_width,\n figure_angle=figure_angle,\n left_column_width=left_column_width,\n hidden=hide)", "title": "" }, { "docid": "18d870d2aff29216aa0f976435ee4f35", "score": "0.4433096", "text": "def create_background_sprites(self, k):\n\n for y in range(0, (k * ROAD_IMAGE_RECT.height), ROAD_IMAGE_RECT.height):\n self.background_tiles.add(Background_Sprite(0, y))", "title": "" }, { "docid": "7689275140f4a67922e849a082ceec35", "score": "0.44326815", "text": "def r_group(self, name):\r\n return self.r_maps_groups().get(name)", "title": "" }, { "docid": "8dda0edab3a2790bfebaaf3ba8198133", "score": "0.443171", "text": "def tile_list():\n\treturn [GrassTile, WaterTile, RockTile]", "title": "" } ]
9b1add6e10b15c71fe389d9b7764923e
Whether node to node encryption is enabled.
[ { "docid": "8b7f9b5071a1577ec845863ad1fdb178", "score": "0.57095563", "text": "def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")", "title": "" } ]
[ { "docid": "3fbe407ec13d3ca2f66c52dec688511e", "score": "0.72343796", "text": "def encryption_at_host_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"encryption_at_host_enabled\")", "title": "" }, { "docid": "87e151fc000a0308a51f52aafeee4a4e", "score": "0.72056943", "text": "def encryption_at_host_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"encryption_at_host_enabled\")", "title": "" }, { "docid": "87e151fc000a0308a51f52aafeee4a4e", "score": "0.72056943", "text": "def encryption_at_host_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"encryption_at_host_enabled\")", "title": "" }, { "docid": "e7bf5a0bba541079fa0d4e384f0097a6", "score": "0.69992346", "text": "def is_encrypted(self):\n return True", "title": "" }, { "docid": "558056a25e4399eed971031660c498bd", "score": "0.6929175", "text": "def is_encrypted(self):\n return False", "title": "" }, { "docid": "558056a25e4399eed971031660c498bd", "score": "0.6929175", "text": "def is_encrypted(self):\n return False", "title": "" }, { "docid": "f027121bf255676a98dc4cb0fdbff744", "score": "0.6759366", "text": "def encrypted(self) -> bool:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "f027121bf255676a98dc4cb0fdbff744", "score": "0.6759366", "text": "def encrypted(self) -> bool:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "f027121bf255676a98dc4cb0fdbff744", "score": "0.6759366", "text": "def encrypted(self) -> bool:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "f027121bf255676a98dc4cb0fdbff744", "score": "0.6759366", "text": "def encrypted(self) -> bool:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "f027121bf255676a98dc4cb0fdbff744", "score": "0.6759366", "text": "def encrypted(self) -> bool:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "f027121bf255676a98dc4cb0fdbff744", "score": "0.6759366", "text": "def encrypted(self) -> bool:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "f027121bf255676a98dc4cb0fdbff744", "score": "0.6759366", "text": "def encrypted(self) -> bool:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "bd77dd6301189b6715f1137edffce600", "score": "0.67175406", "text": "def data_node_disk_encrypted(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"data_node_disk_encrypted\")", "title": "" }, { "docid": "bd77dd6301189b6715f1137edffce600", "score": "0.67175406", "text": "def data_node_disk_encrypted(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"data_node_disk_encrypted\")", "title": "" }, { "docid": "6f0ee646334793784a15ce7a7c07d4c2", "score": "0.65968424", "text": "def get_enable_encryption_at_host(self) -> bool:\n # read the original value passed by the command\n enable_encryption_at_host = self.raw_param.get(\"enable_encryption_at_host\", False)\n # try to read the property value corresponding to the parameter from the `agentpool` object\n if (\n self.agentpool and\n hasattr(self.agentpool, \"enable_encryption_at_host\") and # backward compatibility\n self.agentpool.enable_encryption_at_host is not None\n ):\n enable_encryption_at_host = self.agentpool.enable_encryption_at_host\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_encryption_at_host", "title": "" }, { "docid": "3e1b6381e0f05ed9cc306baefd716988", "score": "0.6583299", "text": "def data_node_disk_encrypted(self) -> 
pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"data_node_disk_encrypted\")", "title": "" }, { "docid": "5a53ea69b12b77cd3f4513c86f5260b1", "score": "0.65536994", "text": "def get_security_key_manager_nve_support(self, node):\n api_args = {'node': node}\n try:\n result = self.send_request(\n 'security-key-manager-volume-encryption-supported', api_args)\n vol_encryption_supported = result.get_child_content(\n 'vol-encryption-supported') or 'false'\n except netapp_api.NaApiError as e:\n LOG.debug(\"NVE disabled due to error code: %s - %s\",\n e.code, e.message)\n return False\n\n return strutils.bool_from_string(vol_encryption_supported)", "title": "" }, { "docid": "e960fa63a98b8fc9cdacbc0d29307168", "score": "0.6504919", "text": "def is_encrypted(self) -> Optional[bool]:\n return pulumi.get(self, \"is_encrypted\")", "title": "" }, { "docid": "41cbce4ead5a5bb43be0126023e45cda", "score": "0.6412266", "text": "def is_encrypted(self):\n if \"isEncrypted\" in self._prop_dict:\n return self._prop_dict[\"isEncrypted\"]\n else:\n return None", "title": "" }, { "docid": "136a05d4bc973f9b0b823c4aa2075f13", "score": "0.63555044", "text": "def enable_inter_container_traffic_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_inter_container_traffic_encryption\")", "title": "" }, { "docid": "136a05d4bc973f9b0b823c4aa2075f13", "score": "0.63555044", "text": "def enable_inter_container_traffic_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_inter_container_traffic_encryption\")", "title": "" }, { "docid": "136a05d4bc973f9b0b823c4aa2075f13", "score": "0.63555044", "text": "def enable_inter_container_traffic_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_inter_container_traffic_encryption\")", "title": "" }, { "docid": "136a05d4bc973f9b0b823c4aa2075f13", "score": "0.63555044", "text": "def enable_inter_container_traffic_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_inter_container_traffic_encryption\")", "title": "" }, { "docid": "136a05d4bc973f9b0b823c4aa2075f13", "score": "0.63555044", "text": "def enable_inter_container_traffic_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_inter_container_traffic_encryption\")", "title": "" }, { "docid": "5cd0b8f345d49b71526d222f799230fc", "score": "0.6334567", "text": "def require_infrastructure_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_infrastructure_encryption\")", "title": "" }, { "docid": "38cb1e60596f682ddb556d27d496f584", "score": "0.6291327", "text": "def return_connection_password_encrypted(self) -> bool:\n return pulumi.get(self, \"return_connection_password_encrypted\")", "title": "" }, { "docid": "38cb1e60596f682ddb556d27d496f584", "score": "0.6291327", "text": "def return_connection_password_encrypted(self) -> bool:\n return pulumi.get(self, \"return_connection_password_encrypted\")", "title": "" }, { "docid": "52c5020d3ce9120ce176ba26f8602a12", "score": "0.62867194", "text": "def is_default_encrypted(self):\n return self.container['is_default_encrypted']", "title": "" }, { "docid": "a32881ce716b670b741988a69882ca59", "score": "0.62780434", "text": "def storage_encrypted(self) -> bool:\n return pulumi.get(self, \"storage_encrypted\")", "title": "" }, { "docid": "2d1c8aebffb1f9d53a4f7ebbb9a9bac5", "score": "0.6211994", "text": "def encryption_needed(self):\n\t\treturn ISC_REQ.CONFIDENTIALITY in self.flags", "title": "" }, 
{ "docid": "40bde7a6f545011189ed7b45e02db391", "score": "0.6198789", "text": "def encrypted(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"encrypted\")", "title": "" }, { "docid": "b813116d31eba0a6caa507c9646cadb5", "score": "0.6118295", "text": "def _tx_enable(self):\n return bool(self._get_iio_dev_attr(\"tx_en\", self._ctrl))", "title": "" }, { "docid": "5d5031b7c95007b16c4e9f3ba3b4c136", "score": "0.6021948", "text": "def tx_vm_enable(self):\n return bool(self._get_iio_dev_attr(\"tx_vm_enable\", self._ctrl))", "title": "" }, { "docid": "81e3ecb440b23fb7579b56720fc9a531", "score": "0.5994821", "text": "def on_enabled(self, node_address):\n # pylint: disable=no-self-use\n return False", "title": "" }, { "docid": "bc8220d316cd1d864edafffd5adf1543", "score": "0.5970832", "text": "def encrypt_root_block_device(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"encrypt_root_block_device\")", "title": "" }, { "docid": "549f7648a673e8865da6c51c3bbfcf11", "score": "0.58906674", "text": "def enabled(self) -> ConfigNodePropertyBoolean:\n return self._enabled", "title": "" }, { "docid": "fbdace2b8c3eb7f8ec205dcfbae1979a", "score": "0.58827895", "text": "def enable_vtpm(self) -> bool:\n return pulumi.get(self, \"enable_vtpm\")", "title": "" }, { "docid": "21d8bfa7c2efaccd8b4c3e62094ef1ec", "score": "0.58779424", "text": "def enable_passivemode(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enable_passivemode\")", "title": "" }, { "docid": "9cc01d29c8197ef41724509ed82e49db", "score": "0.5809031", "text": "def enabled(cls):\n return cls.connect_api_key and cls.secret_key", "title": "" }, { "docid": "2597f0addf893227c4fe4aa056f5d6b7", "score": "0.5794412", "text": "def disable_app_encryption_if_device_encryption_is_enabled(self):\n if \"disableAppEncryptionIfDeviceEncryptionIsEnabled\" in self._prop_dict:\n return self._prop_dict[\"disableAppEncryptionIfDeviceEncryptionIsEnabled\"]\n else:\n return None", "title": "" }, { "docid": "c9fbcf8dc891b346e64320f0e8ae6503", "score": "0.5774297", "text": "def protect_external_id(self) -> ConfigNodePropertyBoolean:\n return self._protect_external_id", "title": "" }, { "docid": "d5c58d560e23cdc415bc92e281cda222", "score": "0.5749571", "text": "def eas_enable(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"eas_enable\")", "title": "" }, { "docid": "5fcd166a1e04624b135be41d7d08adae", "score": "0.57307905", "text": "def is_enabled(self):\r\n return True", "title": "" }, { "docid": "4a1627254ac379b86364edeb3534de21", "score": "0.57281464", "text": "def get_is_enabled(self):\n return False", "title": "" }, { "docid": "d013bcfa4104b376a2ec9f6ea8b0f366", "score": "0.5714363", "text": "def is_enabled(self) -> bool:\n return self.properties.get('enabled', False)", "title": "" }, { "docid": "7e73275e972c2988c5effab4b5c256dd", "score": "0.57100356", "text": "def tx_pa_enable(self):\n return bool(self._get_iio_dev_attr(\"tx_drv_enable\", self._ctrl))", "title": "" }, { "docid": "d749c635fca3403a3b4bcd934481d344", "score": "0.5697388", "text": "def IsEncrypted(self):\n if self.version == 1:\n return self._details['IsEncrypted']\n else:\n return self._details['vault']['encrypted']", "title": "" }, { "docid": "8af91981e85ef91764f8297a36417b3f", "score": "0.569568", "text": "def enable_network_isolation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_network_isolation\")", "title": "" }, { "docid": "8af91981e85ef91764f8297a36417b3f", "score": "0.569568", "text": "def 
enable_network_isolation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_network_isolation\")", "title": "" }, { "docid": "8af91981e85ef91764f8297a36417b3f", "score": "0.569568", "text": "def enable_network_isolation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_network_isolation\")", "title": "" }, { "docid": "8af91981e85ef91764f8297a36417b3f", "score": "0.569568", "text": "def enable_network_isolation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_network_isolation\")", "title": "" }, { "docid": "8af91981e85ef91764f8297a36417b3f", "score": "0.569568", "text": "def enable_network_isolation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_network_isolation\")", "title": "" }, { "docid": "8838b04a0700093974bb33a247290efc", "score": "0.56901956", "text": "def enable_passivemode(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_passivemode\")", "title": "" }, { "docid": "8838b04a0700093974bb33a247290efc", "score": "0.56901956", "text": "def enable_passivemode(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_passivemode\")", "title": "" }, { "docid": "0757fddf3d7996484cb6012cd564683b", "score": "0.5673478", "text": "def get_secure_bool(cls, key):\n return False", "title": "" }, { "docid": "46facffb9ac04d83f8bf343500ce78e2", "score": "0.5667529", "text": "def tx_vga_enable(self):\n return bool(self._get_iio_dev_attr(\"tx_vga_enable\", self._ctrl))", "title": "" }, { "docid": "9800b65b9c5d7ce2263bd9584aac575b", "score": "0.56520045", "text": "def node_override_enabled(node):\n\n raise NotImplementedError()", "title": "" }, { "docid": "dc7ab58f24c7a0607e90db39a94cac72", "score": "0.56506175", "text": "def encryption_mode(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_mode\")", "title": "" }, { "docid": "4901a5bb46491d8bd1dc6f64703d98cb", "score": "0.56451625", "text": "def is_enabled(self):\n return False", "title": "" }, { "docid": "4901a5bb46491d8bd1dc6f64703d98cb", "score": "0.56451625", "text": "def is_enabled(self):\n return False", "title": "" }, { "docid": "9f9569b359400fd7d9e303f3ccf63317", "score": "0.56451625", "text": "def enabled(cls):\n if cls.ipmicap_ip != None:\n return True\n else:\n return False", "title": "" }, { "docid": "65ca8668509609b140b76e78e75aae8a", "score": "0.5640848", "text": "def on_enabled(self, node_address):\n self._enable_node(node_address)\n return True", "title": "" }, { "docid": "200a55007d787f95c5f91cfbad69de4f", "score": "0.5640331", "text": "def encryption_mode(self):\n return self._encryption_mode", "title": "" }, { "docid": "aa5416637c18eb087a9a22b7bb1587b2", "score": "0.56334734", "text": "def require_infrastructure_encryption(self) -> Optional['outputs.WorkspaceCustomBooleanParameterResponse']:\n return pulumi.get(self, \"require_infrastructure_encryption\")", "title": "" }, { "docid": "11e611c6a36ce10b8dbf7a792a13bd53", "score": "0.5628905", "text": "def enable(self) -> Optional[bool]:\n return pulumi.get(self, \"enable\")", "title": "" }, { "docid": "c44a7a128204ac3360ae4af46cf8150a", "score": "0.56287897", "text": "def enabled(cls):\n return True", "title": "" }, { "docid": "66d8af3bdbe51fa6ecbd031b9b540820", "score": "0.5622277", "text": "def vtpm_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"vtpm_enabled\")", "title": "" }, { "docid": "0c0ed1ec99ecab1ca75ec0548d4e7f07", "score": "0.56167656", "text": "def prepare_encryption(self) -> 
Optional['outputs.WorkspaceCustomBooleanParameterResponse']:\n return pulumi.get(self, \"prepare_encryption\")", "title": "" }, { "docid": "c0de7d3fa6f654c81f75489794042090", "score": "0.55842865", "text": "def is_enabled(cls):\n return False", "title": "" }, { "docid": "d7e7d23e288adacacfb651fa7e78ca58", "score": "0.5580329", "text": "def isNodeModel(self) -> bool:\n if self is AttackMode.EDGE:\n return False\n return True", "title": "" }, { "docid": "e20d414784d0207334cf988bf2090250", "score": "0.55722606", "text": "def enable_xml(self) -> ConfigNodePropertyBoolean:\n return self._enable_xml", "title": "" }, { "docid": "95804346c908bcb111b2b9494a82dd74", "score": "0.55621606", "text": "def is_enabled(self) -> bool:\n return pulumi.get(self, \"is_enabled\")", "title": "" }, { "docid": "9158e67f75f16e47c7dcffca8935a3c6", "score": "0.556087", "text": "def is_enabled(self):\r\n return False", "title": "" }, { "docid": "c4521e51ca2b7b6a56010764e412e7e5", "score": "0.5555338", "text": "def is_enabled(self, key):\n return key in self and self[key].enabled", "title": "" }, { "docid": "7f6d65e403e8e69c204b04b9906b4a8d", "score": "0.5554758", "text": "def enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"enabled\")", "title": "" }, { "docid": "7f6d65e403e8e69c204b04b9906b4a8d", "score": "0.5554758", "text": "def enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"enabled\")", "title": "" }, { "docid": "948f8da864ad4a096e6f875232fcfab5", "score": "0.5548992", "text": "def is_enabled(self) -> bool:\n return self._redis_connection is not None", "title": "" }, { "docid": "df7d5456caaabcac24d0fc23919880b1", "score": "0.55483466", "text": "def eas_enable(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"eas_enable\")", "title": "" }, { "docid": "df7d5456caaabcac24d0fc23919880b1", "score": "0.55483466", "text": "def eas_enable(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"eas_enable\")", "title": "" }, { "docid": "5e05928edec7900ef6284bef71caf1be", "score": "0.55150837", "text": "def vtpm_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"vtpm_enabled\")", "title": "" }, { "docid": "5e05928edec7900ef6284bef71caf1be", "score": "0.55150837", "text": "def vtpm_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"vtpm_enabled\")", "title": "" }, { "docid": "9124042b38276986cd4f47da93a5ce35", "score": "0.54987264", "text": "def _get_secure_session_enable(self):\n return self.__secure_session_enable", "title": "" }, { "docid": "ee2c717cff280c4af3b483f38a129c46", "score": "0.54901534", "text": "def enabled(cls):\n return bool(\n setting(cls.SETTINGS_KEY_NAME) and setting(cls.SETTINGS_SECRET_NAME)\n )", "title": "" }, { "docid": "dd644fa31fc45fd27de5cb35cebd35e1", "score": "0.5473464", "text": "def enable_txt(self) -> ConfigNodePropertyBoolean:\n return self._enable_txt", "title": "" }, { "docid": "7232994f6b2a92922c96d48fb9867592", "score": "0.5442348", "text": "def enable_vtpm(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_vtpm\")", "title": "" }, { "docid": "7f8e6b1f71d920fc0350565ca7293c97", "score": "0.54363567", "text": "def enabled(self) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "313657514565519f13f28298785bc3c2", "score": "0.5435427", "text": "def __synchronize_encryption(self, mode='train'):\n # Send if this host use encryption or not\n use_encryption_id = self.transfer_variable.generate_transferid(\n self.transfer_variable.use_encrypt, 
mode\n )\n LOGGER.debug(\"Start to remote use_encrypt: {}, transfer_id: {}\".format(self.use_encrypt, use_encryption_id))\n\n federation.remote(self.use_encrypt,\n name=self.transfer_variable.use_encrypt.name,\n tag=use_encryption_id,\n role=consts.ARBITER,\n idx=0)\n\n # Set public key\n if self.use_encrypt:\n pubkey_id = self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey, mode)\n pubkey = federation.get(name=self.transfer_variable.paillier_pubkey.name,\n tag=pubkey_id,\n idx=0)\n LOGGER.debug(\"Received pubkey\")\n self.encrypt_operator.set_public_key(pubkey)\n LOGGER.info(\"Finish synchronized ecryption\")\n self.has_sychronized_encryption = True", "title": "" } ]
b2e408f4c573f516b292aaff446c06b2
r""" Return a Steiner Quadruple System on `n` points.
[ { "docid": "d1bc147bc816209e50c649bbb8517d26", "score": "0.7664719", "text": "def steiner_quadruple_system(n, check = False):\n n = int(n)\n if not ((n%6) in [2, 4]):\n raise ValueError(\"n mod 6 must be equal to 2 or 4\")\n elif n == 4:\n sqs = IncidenceStructure(4, [[0,1,2,3]], copy = False, check = False)\n elif n == 14:\n sqs = IncidenceStructure(14, _SQS14(), copy = False, check = False)\n elif n == 38:\n sqs = IncidenceStructure(38, _SQS38(), copy = False, check = False)\n elif n%12 in [4,8]:\n nn = n // 2\n sqs = two_n(steiner_quadruple_system(nn, check = False))\n elif n%18 in [4,10]:\n nn = (n+2) // 3\n sqs = three_n_minus_two(steiner_quadruple_system(nn, check = False))\n elif (n%36) == 34:\n nn = (n+8) // 3\n sqs = three_n_minus_eight(steiner_quadruple_system(nn, check = False))\n elif (n%36) == 26 :\n nn = (n+4) // 3\n sqs = three_n_minus_four(steiner_quadruple_system(nn, check = False))\n elif n%24 in [2,10]:\n nn = (n+6) // 4\n sqs = four_n_minus_six(steiner_quadruple_system(nn, check = False))\n elif n%72 in [14,38]:\n nn = (n+10) // 12\n sqs = twelve_n_minus_ten(steiner_quadruple_system(nn, check = False))\n else:\n raise ValueError(\"This shouldn't happen !\")\n\n if check and not sqs.is_t_design(3,n,4,1):\n raise RuntimeError(\"Something is very very wrong.\")\n\n return sqs", "title": "" } ]
[ { "docid": "cf3f839797ef63cebb2f18a384cec215", "score": "0.60831946", "text": "def chebpts(n):\r\n\r\n # Special case (no points)\r\n if n == 0:\r\n x = []\r\n w = [] \r\n v = []\r\n t = []\r\n # Special case (single point)\r\n elif n == 1:\r\n x = 0 \r\n w = 2 \r\n v = 1 \r\n t = np.pi/2\r\n \r\n else:\r\n # Chebyshev points:\r\n m = n - 1\r\n # (Use of sine enforces symmetry.)\r\n x = np.sin(np.pi*(np.r_[-m:m+1:2]/(2*m)))\r\n \r\n # [TODO] how to do a nargout in python?\r\n # Quadrature weights: \r\n w = quadwts.quadwts(n)\r\n \r\n # Barycentric weights:\r\n v = barywts.barywts(n)\r\n \r\n # Angles:\r\n t = angles.angles(n);\r\n\r\n return x, w, v, t", "title": "" }, { "docid": "a1e81c4047e3b439e5df8ea217f0d874", "score": "0.6047414", "text": "def climbStairs2(n):\n p = 1 \n q = 1\n for i in xrange(2, n + 1):\n tmp = q\n q = p + tmp\n p = tmp\n return q", "title": "" }, { "docid": "7e1fe757d7344fa36a4106e03939d381", "score": "0.6005188", "text": "def qft(n: int) -> qiskit.circuit.Gate:\n def rotations(my_circuit: qiskit.circuit.Gate, m: int):\n if m == 0:\n return my_circuit\n else:\n my_circuit.h(m-1) #Add a Haddamard gate to the most significant qubit\n \n for i in range(m-1):\n my_circuit.crz(pi/(2**(m-1-i)), i, m-1)\n\n rotations(my_circuit, m-1) \n \n my_circuit = qiskit.QuantumCircuit(n, name='QFT')\n \n rotations(my_circuit, n)\n\n for m in range(n//2):\n my_circuit.swap(m, n-m-1)\n\n return my_circuit.to_gate()", "title": "" }, { "docid": "bbaac5247bdaabdd669fc051f1302807", "score": "0.5952135", "text": "def speye(n):\n r = range(n)\n return spmatrix(1.0, r, r)", "title": "" }, { "docid": "581d6fb3db6977e2460bc5b9a347b985", "score": "0.5816251", "text": "def trianglequadrature(n):\n\n x00,w00 = scipy.special.orthogonal.p_roots(n)\n x01,w01 = scipy.special.orthogonal.j_roots(n,1,0)\n x00s = (x00+1)/2\n x01s = (x01+1)/2\n w = numpy.outer(w01, w00).reshape(-1,1) / 8 # a factor of 2 for the legendres and 4 for the jacobi10\n x = numpy.outer(x01s, numpy.ones(x00s.shape)).reshape(-1,1)\n y = numpy.outer(1-x01s, x00s).reshape(-1,1)\n return numpy.hstack((x, y)), w", "title": "" }, { "docid": "5b57c334762796d651f900b09c50d022", "score": "0.5796928", "text": "def bezier_curve_range(n, points):\n for i in range(n):\n t = i / float(n - 1)\n yield bezier(t, points)", "title": "" }, { "docid": "4f70f34deecb57382c3eed0fad8ccb2a", "score": "0.57019246", "text": "def Stair(n):\n r = []\n u = [0]\n\n for i in xrange(1,n-1,2):\n r.append(i)\n r.append(i-1)\n u.append(i+1)\n u.append(i)\n\n if n%2:\n r.append(n-1)\n else:\n r.append(n-1)\n r.append(n-2)\n u.append(n-1)\n\n positions = [((i+1)//2,i//2) for i in xrange(n)]\n o = Origami(r,u,as_tuple=True,positions=positions,name=\"Stair origami with %d squares\" %n)\n return o", "title": "" }, { "docid": "98fd71083614979ceec006e94a3a8aa7", "score": "0.5669432", "text": "def climbStairs(self, n):\n \n steps = [1,2]\n for i in range(n-2):\n steps.append(steps[-2]+steps[-1])\n \n return steps[n-1]", "title": "" }, { "docid": "bb84d8f90a64502535d53641faa5e8f1", "score": "0.56559414", "text": "def generate_fibonacci_spiral_hemisphere(n):\n phi = (1 + 5 ** 0.5) / 2\n coordinates = []\n # We generate one more coordinate than we actually need, because the first point of the fibonacci spiral lies in\n # the (y, z)-plane and is therefore useless to center fourier support of a wavelet around.\n N = n + 1\n\n for i in range(N):\n lat = math.asin(2*i/(2*N + 1))\n lon = 2*math.pi*i*(1/phi)\n\n # swap x and z since we want a hemisphere with (1, 0, 0) as north 
pole.\n z, y, x = latlon_to_xyz(lat, lon)\n coordinates.append( (x, y, z) )\n\n return coordinates[1:]", "title": "" }, { "docid": "a668db7abcab2b4dd24fdb2377c4baa3", "score": "0.5645339", "text": "def qft_gate_sequence(n=1, swapping=True):\n\n if n < 1:\n raise ValueError(\"Minimum value of n can be 1\")\n\n qc = QubitCircuit(n)\n if n == 1:\n qc.add_gate(\"SNOT\", targets=[0])\n else:\n for i in range(n):\n qc.add_gate(\"SNOT\", targets=[i])\n for j in range(2, n - i + 1):\n qc.add_gate(r\"CPHASE\", targets=[i], controls=[i + j - 1],\n arg_label=r\"{\\pi/2^{%d}}\" % (j - 1),\n arg_value=numpy.pi / (2 ** (j - 1)))\n if swapping is True:\n for i in range(n // 2):\n qc.add_gate(r\"SWAP\", targets=[i, n - 1 - i])\n\n return qc", "title": "" }, { "docid": "e1ff6c3c8a4083b6b090d45c48ecf929", "score": "0.5613465", "text": "def spirals(n_points, noise=.5):\n\n n = np.sqrt(np.random.rand(n_points, 1)) * 780 * (2 * np.pi) / 360\n d1x = -np.cos(n) * n + np.random.rand(n_points, 1) * noise\n d1y = np.sin(n) * n + np.random.rand(n_points, 1) * noise\n return (np.vstack((np.hstack((d1x, d1y)), np.hstack((-d1x, -d1y)))),\n np.hstack((np.zeros(n_points), np.ones(n_points))))", "title": "" }, { "docid": "1baf6e88d4e92ebccd44ea8ca1463039", "score": "0.556967", "text": "def spread_quaternions(points,num=100,quats_per_step=100):\n #cria uma matrix de zeros num por 4\n quats = np.zeros((num,4))\n #print quats\n # cria 'num' matrizes, cada uma com points.shape[0] linhas e 3 colunas\n rot_points = np.zeros((num,points.shape[0],3))\n #print rot_points\n #a primeira posição de quats fica =\n quats[0,:] = [0,0,0,1]\n #print quats\n # primeiro conjunto de matrizes fica igual aos points\n rot_points[0,:,:] = points\n #print rot_points\n for ix in range(1,num):\n # criar quats_per_step quaterniões aleatorios \n rand_quats = random_quaternions(quats_per_step) \n # fazer a rotação dos pontos usando os quaterniões aleatorios\n positioned = rotate_points(points,rand_quats)\n # ver as distancias (retorna já os mais proximos!)\n dists = point_dists(rot_points[:ix],positioned)\n # ir buscar o maximo das distancias mais proximas\n new_rot = np.argmax(dists)\n \n quats[ix,:]=rand_quats[new_rot,:]\n rot_points[ix,:,:] = positioned[new_rot,:,:]\n print \"number of quaternions: \",len(quats) \n return quats,rot_points", "title": "" }, { "docid": "0295ab84e67b7afcea16bacaf65f712e", "score": "0.55645245", "text": "def make_corner_seq(n):\n seq = [1]\n step_size = 2\n while len(seq) < n:\n for i in range(4):\n seq.append(seq[-1] + step_size)\n step_size += 2\n return seq", "title": "" }, { "docid": "92851d156b088fe7335f99405bfd890b", "score": "0.5559808", "text": "def Towers(n,fr,to,spare):\n if n ==1:\n printMove(fr,to)\n else:\n Towers(n-1,fr,spare,to)\n Towers(1,fr,to,spare)\n Towers(n-1,spare,to,fr)", "title": "" }, { "docid": "9da986e885ae9cea60daca210fb2b528", "score": "0.5547893", "text": "def randpoints(n, hw=1000., hh=1000.):\n P = [(i, uniform(-hw,-hw/4), uniform(-hh,hh)) for i in range(n//2)]\n Q = [(i, uniform(hw/4, hw), uniform(-hh,hh)) for i in range(n//2,n//1)]\n \n return P,Q", "title": "" }, { "docid": "1451e624529b62f6e70ee9c2303e3859", "score": "0.54642177", "text": "def generate_tri_seq(N, t):\n for i in range(1, N + 1):\n t.add(i * (i + 1) / 2)", "title": "" }, { "docid": "d0c470728e0f4965d142190c20f437da", "score": "0.54572886", "text": "def get_spline_curve(points, n=11):\n order = len(points) - 1\n coef = Utils.pascal_triangle_row(order)\n offset = 1.0 / (n-1)\n curve_points = []\n 
curve_points.append(points[0]) # add start point as first curve point\n # add n-2 curve points in middle\n for factor in range(1, n-1):\n t = offset * factor\n x, y = 0.0, 0.0\n for i in range(order+1):\n x += coef[i] * (1-t)**(order-i) * t**(i) * points[i][0]\n y += coef[i] * (1-t)**(order-i) * t**(i) * points[i][1]\n curve_points.append((x, y))\n curve_points.append(points[-1]) # add end point as last curve point\n return curve_points", "title": "" }, { "docid": "27395edd4876511e4af24e371086e207", "score": "0.54327834", "text": "def _triangular(n):\n return sum(range(n + 1))", "title": "" }, { "docid": "a1ce280c30a9b4f6c2f41c813b9c2bac", "score": "0.5419874", "text": "def generate_vertices(self):\n self.vertices = []\n self.refinement = np.zeros(self.tri.shape[0],dtype=int)\n for n,tri in enumerate(self.tri):\n limits = []\n k1, k2, k3 = tri\n if np.abs(k1-k2-k3) <= self.dk or np.any(tri == self.dk): # configurations with non-cubical integration volume\n nstep = self.nstep[1]\n else: # cubic integration volume\n nstep = self.nstep[0]\n if k3*1./(k1+k2) < self.squeezed_cut: # highly squeezed configurations\n nstep = self.nstep[2]\n if (k2+k3)*1./k1 <= self.collinear_cut: # collinear (or close to coll.) configurations\n nstep = self.nstep[2]\n self.refinement[n] = nstep\n for x1 in range(nstep):\n for x2 in range(nstep):\n q1 = k1 - self.dk*1./2 + self.dk*1./(nstep-1)*x1\n q2 = k2 - self.dk*1./2 + self.dk*1./(nstep-1)*x2\n q3min = max(k3-self.dk*1./2,abs(q1-q2))\n q3max = min(k3+self.dk*1./2,q1+q2)\n if abs(q3min-q3max) < 1e-4:\n limits.append([q1,q2,q3min])\n elif q3min < q3max:\n limits.append([q1,q2,q3min])\n limits.append([q1,q2,q3max])\n if np.abs(q3max-q3min-self.dk) < 1e-4:\n for i in range(nstep-2):\n limits.append([q1,q2,q3min+(q3max-q3min)*1./(nstep-1)*(i+1)])\n elif np.abs(q3max-q3min-self.dk*1./2) < 1e-4:\n for i in range(nstep-4):\n limits.append([q1,q2,q3min+(q3max-q3min)*1./(nstep-3)*(i+1)])\n if nstep == 2 and self.midpoint:\n limits.append([k1,k2,k3])\n self.vertices.append(np.unique(np.array(limits),axis=0))", "title": "" }, { "docid": "9e8590812ec69b284ce1f880a3e11704", "score": "0.54086983", "text": "def squad(self):\n raise NotImplementedError()", "title": "" }, { "docid": "85f33a9c2f7587a21a1e9f1e447bb1f6", "score": "0.5399124", "text": "def spiral(n):\r\n \r\n total = 1\r\n for n in range(3, spiral_range + 1, 2):\r\n # Every turn in the spiral adds four terms.\r\n ur = n * n # up-right term\r\n ul = n * n - n + 1 # up-left term\r\n dl = n * n - 2 * n + 2 # down-left term\r\n dr = n * n - 3 * n + 3 # down-right term\r\n subtotal = ur + ul + dl + dr\r\n total += subtotal\r\n return total", "title": "" }, { "docid": "1dfbd8a3ff8c4a0a3bd98f5d09863011", "score": "0.5392255", "text": "def quarters(n):\n if n<25:\n return dimes(n)\n else:\n return quarters(n-25)+dimes(n)", "title": "" }, { "docid": "0e0a61ebc75e11715c7eaf71a3efe519", "score": "0.53626245", "text": "def triangular(n):\n triang = n * (n + 1) / 2\n return triang", "title": "" }, { "docid": "1fb3d6e69021477f7cc222bbc4d1f7c8", "score": "0.5330278", "text": "def stepPatternForStar(n, step): # draw platform independent\n\ti = 0\n\tindexList = []\n\t# Skip around the points of the n-gon to create the draw-path for the star.\n\twhile True:\n\t\tbreakAfterThis = False\n\t\tnextIndex = (i*step)%n\n\t\tif nextIndex in indexList:\n\t\t\tbreakAfterThis = True\n\t\tindexList.append(nextIndex)\n\t\tif breakAfterThis:\n\t\t\tbreak\n\t\ti = i + 1\n\treturn indexList", "title": "" }, { "docid": 
"94a2074d913e53f0afa1167ea748bcf2", "score": "0.53275067", "text": "def make_quad(points, row_size, col_size):\n # Start with generating a zig-zag shape in row direction and then take its reverse\n new_points = make_zigzag(points, row_size)\n new_points.reverse()\n\n # Start generating a zig-zag shape in col direction\n forward = True\n for row in range(0, row_size):\n temp = []\n for col in range(0, col_size):\n temp.append(points[row + (col * row_size)])\n if forward:\n forward = False\n else:\n forward = True\n temp.reverse()\n new_points += temp\n\n return new_points", "title": "" }, { "docid": "271fce1869ad21ffd32616edd4bf1529", "score": "0.5291062", "text": "def _generate_segment(self, n: 'int', basis: 'ndarray',\n geometry: 'ndarray'):\n def out(x, y):\n \"\"\"Test if given point is outside the window.\"\"\"\n return x > 1 or x < -1 or y > 1 or y < - 1\n\n delta = 1 / n\n fwd_x, fwd_y = self._init_curve_algorithm(basis, geometry, delta)\n dim = basis.shape[0]\n\n prev_out = True\n prev_point = None\n for _ in range(n+1):\n if out(fwd_x[0], fwd_y[0]):\n if not prev_out: # Leaving the window\n self._cr.line_to(*self.resolution_transform(clip_line(\n [prev_point, (fwd_x[0], fwd_y[0])])[0]))\n self._cr.stroke()\n prev_out = True\n else:\n if prev_out: # Entering the window\n if prev_point is None:\n self._cr.move_to(\n *self.resolution_transform((fwd_x[0], fwd_y[0])))\n else:\n point_at_window = clip_line(\n [(fwd_x[0], fwd_y[0]), prev_point])[0]\n self._cr.move_to(\n *self.resolution_transform(point_at_window))\n self._cr.line_to(\n *self.resolution_transform((fwd_x[0], fwd_y[0])))\n prev_out = False\n else: # Already inside window\n self._cr.line_to(\n *self.resolution_transform((fwd_x[0], fwd_y[0])))\n\n prev_point = (fwd_x[0], fwd_y[0])\n\n for i in range(dim-1):\n fwd_x[i] += fwd_x[i+1]\n fwd_y[i] += fwd_y[i+1]\n self._cr.stroke()", "title": "" }, { "docid": "408ae05ac1207400397e5a51dd26fcd9", "score": "0.52766865", "text": "def _get_quadrature_points(n, a, b):\n x, w = roots_legendre(n)\n x = np.real(x)\n\n # Legendre domain is [-1, 1], convert to [a, b]\n scalar = (b - a) * 0.5\n return scalar * (x + 1) + a, scalar * w", "title": "" }, { "docid": "e1fc684c5ccc1c8f12787a0f053ffc13", "score": "0.5271362", "text": "def Fan(self, n, deg_three_verts=False):\n f = graphs.WheelGraph(n)\n if n>2:\n f.delete_edge(1,n-1)\n if deg_three_verts:\n f.allow_multiple_edges(True)\n f.add_edges([(0,1),(0,n-1)])\n return Sandpile(f,0)\n elif n==1:\n return Sandpile(f,0)\n elif n==2:\n if deg_three_verts:\n return Sandpile({0:{1:3}, 1:{0:3}})\n else:\n return Sandpile(f,0)", "title": "" }, { "docid": "c629243a09bf6213500f9425dff9ec1b", "score": "0.5268188", "text": "def test_qft_dagger(self):\n n = 5\n qsve = QSVE(np.identity(4))\n qreg = QuantumRegister(n)\n circ = QuantumCircuit(qreg)\n qsve._iqft(circ, qreg)\n\n print(circ)", "title": "" }, { "docid": "bce381020721bdaf1324a6be4e842c2d", "score": "0.52361876", "text": "def triangs(self):\n return self.polys(3)", "title": "" }, { "docid": "6243f618cc6855274d65b3b151d1d984", "score": "0.52284604", "text": "def ten_pairs(n):\n \"*** YOUR CODE HERE ***\"\n return ten_pairs_helper(n, [0,0,0,0,0,0,0,0,0,0])", "title": "" }, { "docid": "cf0ab0e6356867b0ac0c52d8570a1c11", "score": "0.52240753", "text": "def triangle(n):\n T = {'sink':{}}\n for i in range(n):\n for j in range(n-i):\n T[(i,j)] = {}\n if i<n-j-1:\n T[(i,j)][(i+1,j)] = 1\n T[(i,j)][(i,j+1)] = 1\n if i>0:\n T[(i,j)][(i-1,j+1)] = 1\n T[(i,j)][(i-1,j)] = 1\n if j>0:\n T[(i,j)][(i,j-1)] = 1\n 
T[(i,j)][(i+1,j-1)] = 1\n d = len(T[(i,j)])\n if d<6:\n T[(i,j)]['sink'] = 6-d\n T = sandpile(T,'sink')\n pos = {}\n for x in T.nonsink_vertices():\n coords = list(x)\n coords[0]+=1/2*coords[1]\n pos[x] = coords\n pos['sink'] = (-1,-1)\n T.set_pos(pos)\n return T", "title": "" }, { "docid": "61d436f0aa397e103d7d1b4d6f6fcd66", "score": "0.5217978", "text": "def squareSpiral(n_squares=60, sep_degrees=5, init_length=5, increment_length=5):\n for s in range(n_squares):\n square(init_length + increment_length*s)\n rt(sep_degrees)", "title": "" }, { "docid": "ec088477b33913dca17a9e20042eb6f6", "score": "0.5209428", "text": "def chain(\n n: int = 4,\n r: float = 1.0,\n) -> tuple[list[str], np.ndarray, str]:\n\n xyz = np.zeros((n, 3))\n atoms = [\"H\"] * n\n\n z = np.arange(-(n - 1) / 2, (n) / 2) * r\n assert len(z) == n\n assert sum(z) == 0.0\n\n xyz[:, 2] = z\n\n description = \"H\" + str(n) + \" 1D chain, \" + str(r) + \" Angstroms\\n\"\n\n return atoms, xyz, description", "title": "" }, { "docid": "a54530c9f4b0259be9edac076d0a4d4c", "score": "0.5197737", "text": "def generate_pascal_triangle_summation(self, n):\n # Write your code here\n row=[]\n pt=[]\n if n>12:\n return pt\n for i in range(n):\n nr = []\n for i in range(len(row)):\n v = row[i]\n if i>0:\n v += row[i-1]\n nr.append(1)\n pt.append(\" \".join([str(x) for x in nr]))\n row = nr\n \n return pt", "title": "" }, { "docid": "92876a73ad5321395d42cd35f7103f1e", "score": "0.51925355", "text": "def get_trapezoid_quadrature_nodes_and_weights(n, a, b):\n if n < 1:\n raise ValueError(\"n must be at least one\")\n\n nodes = np.linspace(a, b, n)\n dx = nodes[1] - nodes[0]\n\n weights = dx * np.ones(n)\n weights[0] *= 0.5\n weights[-1] *= 0.5\n\n return nodes, weights", "title": "" }, { "docid": "01ebe9cf90d673f2c09d8c59cf1370f6", "score": "0.5177878", "text": "def farey_sequence(n) -> Generator[Tuple[int, int], None, None]:\n\ta, b, c, d = 0, 1, 1, n\n\tyield (a, b)\n\twhile c <= n:\n\t\tk = (n + b) // d\n\t\ta, b, c, d = c, d, (k * c - a), (k * d - b)\n\t\tyield (a, b)", "title": "" }, { "docid": "c1a538bb4c69d1fdb1f6228d3615d5f0", "score": "0.51766396", "text": "def get_simpsons_quadrature_nodes_and_weights(n, a, b):\n nodes = np.linspace(a, b, n)\n dx = nodes[1] - nodes[0]\n weights = np.tile([2.0, 4.0], int((n + 1) / 2))\n weights = weights[:n]\n weights[0] = weights[-1] = 1\n weights = (dx / 3.0) * weights\n\n return nodes, weights", "title": "" }, { "docid": "e332b4fde36cdbb9d0c5ebff8f563a9e", "score": "0.51730764", "text": "def newFSPRootGen(pins, n):\n\t#Does so by generating all canonical arrangments with newFullArrGen, and adding the ascending values using newPinPopulate.\n\treturn chain.from_iterable(newPinPopulate(a, pins, n) for a in newFullArrGen(pins))", "title": "" }, { "docid": "1ecbf78672686ecd31b3fce64280a6b9", "score": "0.51591086", "text": "def complete_sandpile(n):\n return Sandpile(graphs.CompleteGraph(n), 0)", "title": "" }, { "docid": "e5761ffa9734706078094d9bb840f183", "score": "0.51550394", "text": "def spine_tupled_SG(n):\n base = list(range(n))\n SG = [base]\n def helper(L):\n if len(L) == 1:\n return\n if len(L) == 2:\n SG.append([L[0]])\n SG.append([L[1]])\n return\n l = random.sample(L, k = 1)\n r = list(set(L) - set(l))\n l = sorted(l)\n r = sorted(r)\n SG.append(l)\n SG.append(r)\n helper(l)\n helper(r)\n\n helper(base)\n SG = (tuple(x) for x in SG)\n return tuple(SG)", "title": "" }, { "docid": "b7f82df5afb3ea6f74bb139c04e88f2e", "score": "0.5148555", "text": "def path_tupled_SG(n):\n base = list(range(n))\n 
SG = [base]\n def helper(L):\n if len(L) == 1:\n return\n if len(L) == 2:\n SG.append([L[0]])\n SG.append([L[1]])\n return\n l = [i for i in L if L.index(i) <= len(L)//2]\n r = list(set(L) - set(l))\n l = sorted(l)\n r = sorted(r)\n SG.append(l)\n SG.append(r)\n helper(l)\n helper(r)\n\n helper(base)\n SG = (tuple(x) for x in SG)\n return tuple(SG)", "title": "" }, { "docid": "8c81b46b3ff341efcca918340de21de5", "score": "0.51460874", "text": "def gen_vertices(n):\n\n k = np.arange(n) + 1\n theta = deg2rad((2*k - 1) * (180.0/n))\n x = np.cos(theta)\n y = np.sin(theta)\n dx = np.roll(x, -1) - x\n dy = np.roll(y, -1) - y\n dtheta = np.roll(theta, -1) - theta\n #return zip(theta, dtheta)\n return zip(theta, dx, dy)", "title": "" }, { "docid": "9c422136d461cce6c612932adf167f25", "score": "0.5142337", "text": "def GenerateRandomQuadrantPointsSurvey(self, *args):\n return _stomp.Map_GenerateRandomQuadrantPointsSurvey(self, *args)", "title": "" }, { "docid": "3ff9680e1d5fa17cf8762debc6ea00bd", "score": "0.5140021", "text": "def spiral_simple(x_range_egu, y_range_egu, dr_egu, nth):\n half_x = x_range_egu / 2\n half_y = y_range_egu / 2\n\n r_max_egu = np.sqrt(half_x ** 2 + half_y ** 2)\n num_ring = 1 + int(r_max_egu / dr_egu)\n\n x_points = []\n y_points = []\n for i_ring in range(1, num_ring + 2):\n radius_egu = i_ring * dr_egu\n angle_step = 2. * np.pi / (i_ring * nth)\n\n for i_angle in range(int(i_ring * nth)):\n angle = i_angle * angle_step\n x_egu = radius_egu * np.cos(angle)\n y_egu = radius_egu * np.sin(angle)\n if abs(x_egu) <= half_x and abs(y_egu) <= half_y:\n x_points.append(x_egu)\n y_points.append(y_egu)\n\n return x_points, y_points", "title": "" }, { "docid": "cb4bc5f233deba4de14f86370ea4263d", "score": "0.51391864", "text": "def __pow__(self, n):\n r = Quaternion()\n for i in range(n):\n r = r * self\n return r", "title": "" }, { "docid": "1924f2ca76e9089b586dab8e9127686f", "score": "0.5116036", "text": "def getcorners(n):\n corners = []\n corners.append(n * n - 3 * n + 3)\n corners.append(n * n -2 * n + 2)\n corners.append(n * n - n + 1)\n corners.append(n * n)\n return corners", "title": "" }, { "docid": "bb3ef0bdd0eab677bdd3df63d96b0899", "score": "0.51098365", "text": "def latin_hypercube(n):\n\n low_lim = np.arange(0, n)/n\n up_lim = np.arange(1, n+1)/n\n\n #random points on grid\n points = np.random.uniform(low=low_lim, high = up_lim, size=[2,n]).T\n\n np.random.shuffle(points[:,1])\n\n print(\"LHS:\")\n print(points)\n\n return points", "title": "" }, { "docid": "a4b9ca0d2c49edd42ebbd4af6e36a41a", "score": "0.51075774", "text": "def gwfs(S, N=6):\n return GridUtil.smooth(S, \"GWFS\", int(N))", "title": "" }, { "docid": "f9a2884e25e2b9b8a82aa503955865ad", "score": "0.51066434", "text": "def compute_srideal(R, triang):\n k = R.ngens()\n n = len(triang[0])\n srideal = []\n for i in range(n):\n for c in map(set, combinations(range(k), i + 1)):\n # SR ideal generators must be minimal\n for prev_c in srideal:\n if prev_c.issubset(c):\n break\n else:\n for t in map(set, triang):\n # 1-cones in a simplex correspond to intersecting divisors\n if c.issubset(t):\n break\n else:\n srideal.append(c)\n X = R.gens()\n return [R.ideal(*[X[i] for i in s]) for s in srideal]", "title": "" }, { "docid": "bce90c54af1dc1f2cca05a535dffbee7", "score": "0.51010764", "text": "def generateMatrix(self, n):\n '''\n 临场能写出\n def generateMatrix(self, n):\n A = [[n*n]]\n while A[0][0] > 1:\n A = [range(A[0][0] - len(A), A[0][0])] + zip(*A[::-1])\n return A * (n>0)\n 就相当了不起了\n '''\n matrix = []\n s = n * n + 
1 # make it starting from 1 rather than 0\n while s > 1:\n s, e = s - len(matrix), s\n matrix = [range(s, e)] + zip(*matrix[::-1])\n return matrix # spiral counter clockwise return zip(*matrix)", "title": "" }, { "docid": "143ee46b1b1eaa4aaec2524edc278990", "score": "0.5097594", "text": "def climbStairs(self, n):\n climb = {}\n climb[1] = 1\n climb[2] = 2\n for i in xrange(3, n + 1):\n climb[i] = climb[i - 1] + climb[i - 2]\n return climb[n]", "title": "" }, { "docid": "25defad497f6dddbb8e427e6dd2a0fde", "score": "0.5091647", "text": "def Wheel(self, n):\n return Sandpile(graphs.WheelGraph(n),0)", "title": "" }, { "docid": "97e6c2168176986341eaf055f142729e", "score": "0.50915474", "text": "def n_step(self,n=1 , plot=False):\n for i in range(n):\n if len(self.x) < self.N_max:\n self.next_point()\n self.model = GPR(kernel=self.kernel, normalize_y=True).fit(np.array(self.x).reshape(-1, 1), np.array(self.y).reshape(-1, 1))\n else:\n print(\"Optimisation terminee\")\n if plot:\n self.plot()", "title": "" }, { "docid": "a0f6bde02e8aed08bc528258321e1a52", "score": "0.50800806", "text": "def sim(self, n):\n return Results(self.draw() for _ in range(n))", "title": "" }, { "docid": "eda1fe42b0b2abaf47752f123840d80e", "score": "0.5056638", "text": "def triangle():\n \n triangle_points = np.linspace(0, 1, 500)\n return signal.sawtooth(10 * np.pi * triangle_points, 0.5)", "title": "" }, { "docid": "46bd68766f6d0df76ba5505d23331eaa", "score": "0.50512314", "text": "def randQuad(n):\n A = np.random.rand(n,n)\n b = np.random.rand(n,1)\n Q = np.matmul(A.transpose(), A)\n return Q,b", "title": "" }, { "docid": "9a70183fbcc523694538fd6831c4a074", "score": "0.50500363", "text": "def generate_random(n):\r\n points = random.Random()\r\n points_list = []\r\n for i in range(n):\r\n points_list.append((round(points.uniform(0, 1), 2), round(points.uniform(0, 1), 2)))\r\n return (points_list)", "title": "" }, { "docid": "9a5f21e6f036819b5e231c9ed6b7f496", "score": "0.5047165", "text": "def outer(self, P, n):\n if n == 0:\n return Point(0, 1)\n Q = self.outer(P, n // 2)\n Q = self.inner(Q, Q)\n if n & 1:\n Q = self.inner(Q, P)\n return Q", "title": "" }, { "docid": "1e473bd0756b4788239e9c5cc9fc1e4e", "score": "0.50384766", "text": "def get_quadratic_data_sample(n, add_noise=False, x_min=-1, x_max=1, noise=0.01, pow=2):\n\n ### Sanity Checks ###\n if type(n) is not int or n <= 0: raise Exception(\"Sample size must be a positive integer\")\n if type(pow) is not int or pow % 2 != 0 or pow <= 0: raise Exception(\"Power must be a positive even integer \")\n if x_min >= x_max: raise Exception(\"Min value ({}) is higher or equal than max value ({})\".format(x_min, x_max))\n\n x = uniform(x_min, x_max, n) # Points along x\n mid = (x_max-x_min)/2 + x_min # Middle point\n y = power((x-mid), pow) # Raise x\n\n if add_noise: y += (2 * random(n) - 1) * noise # Add noise to y\n\n # combine x and y as points in 2D plane\n coordinates = list(zip(x,y))\n # Return coordinates as numpy array\n return array(coordinates, dtype=float32)", "title": "" }, { "docid": "b9863ad3ae6d67a52d606522b04ddb68", "score": "0.5035669", "text": "def twelve_n_minus_ten(B):\n n = B.num_points()\n B14 = steiner_quadruple_system(14)\n r = lambda i,x : i%(n-1)+(x%12)*(n-1)\n\n # Line 1.\n Y = []\n for s in B14._blocks:\n for i in range(n-1):\n Y.append([r(i,x) if x<= 11 else r(n-2,11)+x-11 for x in s])\n\n for s in B._blocks:\n if s[-1] == n-1:\n u,v,w,B = s\n dd = {0:u,1:v,2:w}\n d = lambda x:dd[x%3]\n for b in range(12):\n for bb in range(12):\n bbb = 
-(b+bb)%12\n for h in range(2):\n # Line 2\n Y.append([r(n-2,11)+1+h,r(u,b),r(v,bb),r(w,bbb+3*h)])\n\n for i in range(3):\n # Line 38.3\n Y.append([r(d(i),b+4+i), r(d(i),b+7+i), r(d(i+1),bb), r(d(i+2),bbb)])\n\n for j in range(12):\n for eps in range(2):\n for i in range(3):\n # Line 38.4-38.7\n Y.append([ r(d(i),j), r(d(i+1),j+6*eps ), r(d(i+2),6*eps-2*j+1), r(d(i+2),6*eps-2*j-1)])\n Y.append([ r(d(i),j), r(d(i+1),j+6*eps ), r(d(i+2),6*eps-2*j+2), r(d(i+2),6*eps-2*j-2)])\n Y.append([ r(d(i),j), r(d(i+1),j+6*eps-3), r(d(i+2),6*eps-2*j+1), r(d(i+2),6*eps-2*j+2)])\n Y.append([ r(d(i),j), r(d(i+1),j+6*eps+3), r(d(i+2),6*eps-2*j-1), r(d(i+2),6*eps-2*j-2)])\n\n for j in range(6):\n for i in range(3):\n for eps in range(2):\n # Line 38.8\n Y.append([ r(d(i),j), r(d(i),j+6), r(d(i+1),j+3*eps), r(d(i+1),j+6+3*eps)])\n\n for j in range(12):\n for i in range(3):\n for eps in range(4):\n # Line 38.11\n Y.append([ r(d(i),j), r(d(i),j+1), r(d(i+1),j+3*eps), r(d(i+1),j+3*eps+1)])\n # Line 38.12\n Y.append([ r(d(i),j), r(d(i),j+2), r(d(i+1),j+3*eps), r(d(i+1),j+3*eps+2)])\n # Line 38.13\n Y.append([ r(d(i),j), r(d(i),j+4), r(d(i+1),j+3*eps), r(d(i+1),j+3*eps+4)])\n\n for alpha in [4,5]:\n for ra,sa in P(alpha,6):\n for raa,saa in P(alpha,6):\n for i in range(3):\n for ii in range(i+1,3):\n # Line 38.14\n Y.append([ r(d(i),ra), r(d(i),sa), r(d(ii),raa), r(d(ii),saa)])\n\n for g in range(6):\n for eps in range(2):\n for i in range(3):\n for ii in range(3):\n if i == ii:\n continue\n # Line 38.9\n Y.append([ r(d(i),2*g+3*eps), r(d(i),2*g+6+3*eps), r(d(ii),2*g+1), r(d(ii),2*g+5)])\n # Line 38.10\n Y.append([ r(d(i),2*g+3*eps), r(d(i),2*g+6+3*eps), r(d(ii),2*g+2), r(d(ii),2*g+4)])\n\n else:\n x,y,z,t = s\n for a in range(12):\n for aa in range(12):\n for aaa in range(12):\n aaaa = -(a+aa+aaa)%12\n # Line 3\n Y.append([r(x,a), r(y,aa), r(z,aaa), r(t,aaaa)])\n return IncidenceStructure(12*n-10,Y,check=False,copy=False)", "title": "" }, { "docid": "45030b70ce82664a8b29a7cd4d5e8e60", "score": "0.5024016", "text": "def calculate_squirrels(n, k):\n return k % n", "title": "" }, { "docid": "aab0bdca5c38e6ade2118152e70a3863", "score": "0.5020411", "text": "def turan_graph(n, r):\n\n if not 1 <= r <= n:\n raise NetworkXError(\"Must satisfy 1 <= r <= n\")\n\n partitions = [n // r] * (r - (n % r)) + [n // r + 1] * (n % r)\n G = complete_multipartite_graph(*partitions)\n return G", "title": "" }, { "docid": "fd19bd7c6ed80a258dafc6d099b3a545", "score": "0.50122505", "text": "def sqaure(t, length):\r\n\tpolygon(t, length, 4)", "title": "" }, { "docid": "ed734147745756fdf920f35bee7c4210", "score": "0.5011693", "text": "def spheres(n):\n indices = np.arange(0, n, dtype=float) + 0.5\n golden_angle = np.pi * (1 + 5**0.5)\n phi = np.arccos(1 - 2*indices/n)\n theta = golden_angle * indices\n\n points = np.zeros((n, 3))\n points[:, 0] = np.cos(theta) * np.sin(phi)\n points[:, 1] = np.sin(theta) * np.sin(phi)\n points[:, 2] = np.cos(phi)\n\n return points", "title": "" }, { "docid": "d34dc0411617dd44ecab62b78dc4c80d", "score": "0.500812", "text": "def S(n):\n return n * (n + 1) // 2 # Gauss's formula", "title": "" }, { "docid": "c2c6875ce9e2df714e630f8513f97b70", "score": "0.50018275", "text": "def polys(self, n):\n candidate = 1\n m = 0\n while True:\n yield candidate\n candidate += n - 1 + m*(n - 2)\n m += 1", "title": "" }, { "docid": "6d91aa91ea884915c86013e704300bf3", "score": "0.49799803", "text": "def RCUEWG(n): # Taken from lectures\n \n M = np.random.random((n, n))\n G = nx.Graph()\n for i in range(n):\n # notice we are 
using the upper triangle only\n # that is we discard M_ii and M_ji where j<i\n for j in range(i+1, n):\n G.add_edge(i, j, weight=M[i, j])\n return G", "title": "" }, { "docid": "7c40a8c9b0d434bee08fc2410fb31aca", "score": "0.49780157", "text": "def count_stair_ways(n):\n def repeat(s):\n if s <= 1:\n return s \n return repeat(s - 1) + repeat(s - 2)\n return repeat(n + 1)", "title": "" }, { "docid": "3705f8d01a520d2b16c7fe584d2db08a", "score": "0.49743107", "text": "def NRSur7dq4_samples(i):\n\n assert i in [0,1,2]\n\n if i==0:\n chiA = [-0.2, 0.4, 0.1]\n chiB = [-0.5, 0.2, -0.4]\n precessing_opts = {'init_quat': [1,0,0,0],\n 'return_dynamics': True,\n 'init_orbphase': 0.0}\n #'use_lalsimulation_conventions': True}\n return [2., chiA, chiB], None, precessing_opts\n elif i==1:\n chiA = [-0.2, 0.4, 0.1]\n chiB = [-0.5, 0.2, -0.4]\n precessing_opts = {'init_quat': [1,0,0,0],\n 'return_dynamics': True,\n 'init_orbphase': 1.0}\n #'use_lalsimulation_conventions': False}\n return [3., chiA, chiB], None, precessing_opts\n elif i==2:\n chiA = [-0.2, 0.4, 0.1]\n chiB = [-0.5, 0.2, -0.4]\n precessing_opts = {'init_quat': [1,0,0,0],\n 'return_dynamics': True,\n 'init_orbphase': 0.0}\n #'use_lalsimulation_conventions': True}\n return [5., chiA, chiB], None, precessing_opts", "title": "" }, { "docid": "ec3ff4d653276546d707072f2617cc76", "score": "0.4971743", "text": "def calc_base_spline(n=50):\n\n x = np.linspace(0, 1, 4) # knot points to fit curve onto\n y = [0, 0.05, 0.07, 0]\n xvec = np.linspace(0, 1, n)\n tck = interpolate.splrep(x, y)\n yvec = interpolate.splev(xvec, tck, der=0)\n\n return xvec, yvec", "title": "" }, { "docid": "59a79414a829df3d5e70cb42436b98e5", "score": "0.49713317", "text": "def first_n_cubes(n):\r\n output = []\r\n for i in range(1, (n + 1)):\r\n output = output + [i ** 3]\r\n return output", "title": "" }, { "docid": "dd8be42f6af67ea82f02aa89d4568723", "score": "0.49665156", "text": "def step(self, n):\n return self.slice(step=n)", "title": "" }, { "docid": "fbe638ee595af471754c1d06978ea601", "score": "0.49548364", "text": "def generate_pascal_triangle_binomial(self, n):\n # Write your code here\n row=[]\n pt=[]\n if n>12:\n return pt\n for i in range(n):\n nr = []\n for i in range(len(row)):\n v = row[i]\n if i>0:\n v += row[i-1]\n nr.append(1)\n pt.append(\" \".join([str(x) for x in nr]))\n row = nr\n \n return pt", "title": "" }, { "docid": "95cf0a25e98934dfd769be3e84a14e81", "score": "0.494955", "text": "def triangle(n):\n return sum([n for n in range(1,n+1)])", "title": "" }, { "docid": "9c11810778197f6a56fc6e0c47b3b295", "score": "0.49451405", "text": "def _twospirals(self,n_points, noise=.75):\n n = np.sqrt(np.random.rand(n_points,1)) * 780 * (2*np.pi)/360\n d1x = -np.cos(n)*n + np.random.rand(n_points,1) * noise\n d1y = np.sin(n)*n + np.random.rand(n_points,1) * noise\n return (np.vstack((np.hstack((d1x,d1y)),np.hstack((-d1x,-d1y)))), \n np.hstack((np.zeros(n_points),np.ones(n_points))))", "title": "" }, { "docid": "fa3ab0e585ad5cf37cbb3c9fb2e4738c", "score": "0.4933968", "text": "def Make_Spiral_Matrix(n):\n dx,dy = 1,0 # Starting increments\n x,y = 0,0 # Starting location\n myarray = [[None]* n for j in range(n)]\n for i in range(n**2, 0, -1):\n myarray[x][y] = i\n nx,ny = x+dx, y+dy\n if 0<=nx<n and 0<=ny<n and myarray[nx][ny] == None:\n x,y = nx,ny\n else:\n dx,dy = -dy,dx\n x,y = x+dx, y+dy\n return np.fliplr(np.matrix(myarray).transpose())", "title": "" }, { "docid": "2a9cb1abe54607768f1330ba2c81ade2", "score": "0.49325886", "text": "def 
generate_individual_series(N_points=3600):\n # toggles\n reorientation_chance = 10\n \n state = (0, 0, 0, 0)\n states = [state] \n\n while len(states) < N_points + 2:\n N_seg = int(sts.expon.rvs(scale=100))\n\n #intialize velocity and angular change\n new_v = random.choice([0, 0.01, 0.04])\n d_ang = 0\n if new_v > 0:\n d_ang = np.random.normal(0, 0.01)\n \n\n # before moving. try reorientation chance.\n rando = random.uniform(0, 1) * 100 \n if rando < reorientation_chance and new_v > 0:\n # reorientation make current direction equally randomly face anywhere.\n x, y, v, ang = states[-1]\n states[-1] = (x, y, v, np.random.uniform(- np.pi, np.pi)) \n \n # now generate all changes.\n x, y, v, ang = states[-1]\n for i in range(N_seg):\n x, y, v, ang = (x + v * np.cos(ang),\n y + v * np.sin(ang),\n new_v,\n ang+d_ang)\n states.append((x, y, v, ang)) \n\n\n # remove first point (which was dummy values) and cut down to N_points points\n x, y, vel, thetas = zip(*states[2:N_points +2])\n return x, y, vel, thetas", "title": "" }, { "docid": "8acf3431b2b2c9befc762fae439a990b", "score": "0.4931785", "text": "def triangulate(self, n=None, symmetry=None, centroid=True, printout=False):\n # Inherit class arguments\n if symmetry is None:\n symmetry = self.symmetry\n # Build origin and supremum vectors\n origin = [i[0] for i in self.bounds]\n self.origin = origin\n supremum = [i[1] for i in self.bounds]\n\n self.supremum = supremum\n\n #TODO: Add check that len(symmetry) is equal to len(self.bounds)\n if symmetry is None:\n cbounds = self.bounds\n else:\n cbounds = copy.copy(self.bounds)\n for i, j in enumerate(symmetry):\n if i is not j:\n # pop second entry on second symmetry vars\n cbounds[i] = [self.bounds[symmetry[i]][0]]\n # Sole (first) entry is the sup value and there is no origin\n cbounds[i] = [self.bounds[symmetry[i]][1]]\n if self.bounds[symmetry[i]] is not self.bounds[symmetry[j]]:\n logging.warning(f\"Variable {i} was specified as \"\n f\"symmetetric to variable {j}, however,\"\n f\"the bounds {i} =\"\n f\" {self.bounds[symmetry[i]]} and {j} =\"\n f\" {self.bounds[symmetry[j]]} do not \"\n f\"match, the mismatch was ignored in \"\n f\"the initial triangulation.\")\n cbounds[i] = self.bounds[symmetry[j]]\n\n\n if n is None:\n # Build generator\n self.cp = self.cyclic_product(cbounds, origin, supremum, centroid,\n printout)\n for i in self.cp:\n i\n\n try:\n self.triangulated_vectors.append((tuple(self.origin),\n tuple(self.supremum)))\n except (AttributeError, KeyError):\n self.triangulated_vectors = [(tuple(self.origin),\n tuple(self.supremum))]\n\n else:\n #Check if generator already exists\n try:\n self.cp\n except (AttributeError, KeyError):\n self.cp = self.cyclic_product(cbounds, origin, supremum,\n centroid, printout)\n\n try:\n while len(self.V.cache) < n:\n next(self.cp)\n except StopIteration:\n #TODO: We should maybe append and add the possibility of\n # of starting new triangulated domains on different\n # complexes\n try:\n self.triangulated_vectors.append((tuple(self.origin),\n tuple(self.supremum)))\n except (AttributeError, KeyError):\n self.triangulated_vectors = [(tuple(self.origin),\n tuple(self.supremum))]\n\n\n if printout:\n print(\"=\" * 19)\n print(\"Initial hyper cube:\")\n print(\"=\" * 19)\n # for v in self.C0():\n # v.print_out()\n for v in self.V.cache:\n self.V[v].print_out()\n\n print(\"=\" * 19)\n\n return", "title": "" }, { "docid": "f115e851e2e2de8d14e7ec1a1aff0129", "score": "0.49307755", "text": "def shifted(self, n_lanes: int):\n return StraightLane(self.p + 
self.n * self.w * n_lanes,\n self.q + self.n * self.w * n_lanes, self.w)", "title": "" }, { "docid": "c28a1e2d4174e47918599c899ec2af96", "score": "0.4926417", "text": "def make_series_paired(n_parcels, n_samples, n_cut_samples=40, widths=range(5,6), time_shift=3, seed=None):\n \n decim_factor = 5\n n_parcels_half = n_parcels//2\n remainder = n_parcels%2\n time_shift = (0, time_shift) # Do not shift across parcels, only time.\n \n pairs = list(range(0, n_parcels-remainder))\n if seed != None: random.seed(seed)\n random.shuffle(pairs)\n pairs = np.reshape(pairs, (n_parcels_half, 2))\n \n # Do signals for half of the parcels. Time shift the other half from the first half.\n if seed != None: np.random.seed(seed)\n s = randn(n_parcels_half+remainder, n_samples*decim_factor+2*n_cut_samples)\n \n for i in np.arange(0, n_parcels_half+remainder):\n s[i, :] = signal.cwt(s[i, :], signal.ricker, widths)\n \n # Separate last sample of s in case there is an odd numbered number of parcels.\n if remainder == 1:\n s_rem = s[-1,:]\n s = np.delete(s, -1, 0) # Delete last row from s.\n s_rem = signal.hilbert(s_rem)\n s_rem = s_rem[n_cut_samples:-n_cut_samples]\n s_rem = scipy.signal.decimate(s_rem, decim_factor)\n \n s_shift = shift(s, time_shift, mode='wrap')\n s = signal.hilbert(s)\n s_shift = signal.hilbert(s_shift)\n s = s[:, n_cut_samples:-n_cut_samples]\n s_shift = s_shift[:, n_cut_samples:-n_cut_samples]\n # Decimate the signals separately.\n s = scipy.signal.decimate(s, decim_factor, axis=1)\n s_shift = scipy.signal.decimate(s_shift, decim_factor, axis=1)\n \n # Slice the generated signals to correct indices.\n s_comb = np.zeros((n_parcels, n_samples), dtype=complex)\n s_comb[pairs[:,0],:] = s\n s_comb[pairs[:,1],:] = s_shift\n if remainder == 1:\n s_comb[-1,:] = s_rem\n return s_comb, pairs", "title": "" }, { "docid": "b253bb4e4fd6ca15bb0fa4beaec024d0", "score": "0.49240783", "text": "def row(n):\n return range((n**2-n+2)//2, n*(n+1)//2+1)", "title": "" }, { "docid": "83f7a46f1411b3482bb579fc16a27b68", "score": "0.49209148", "text": "def rotation_matrices_fibonacci_spiral_unit_x(n):\n unit_x = np.array([1, 0, 0])\n points = generate_fibonacci_spiral_hemisphere(n)\n rotation_matrices = [ rotation_matrix(unit_x, np.array(point)) for point in points ]\n return rotation_matrices", "title": "" }, { "docid": "7122126ca33c8a69a05cd615d25c99a0", "score": "0.49208692", "text": "def pis(self):\n\n q, r, t, k, n, l = 1, 0, 1, 1, 3, 3\n while True:\n if 4*q + r - t < n*t:\n yield n\n nr = 10*(r - n * t)\n n = ((10*(3*q + r))//t) - 10*n\n q *= 10\n r = nr\n else:\n nr = (2*q+r)*l\n nn = (q*(7*k)+2+(r*l))//(t*l)\n q *= k\n t *= l\n l += 2\n k += 1\n n = nn\n r = nr", "title": "" }, { "docid": "376a072f2c80c1a1f2ef22d13e381590", "score": "0.49205878", "text": "def exo4():\n q = 30\n Tlist = linspace(2.5, 4, q)*sigma\n err = []\n for i in 1: q:\n fWT = Thresh(fW, Tlist(i))\n f1 = PsiS(fWT)\n err(i) = snr(f, f1)\n plot(Tlist/ sigma, err)\n axis('tight')\n set_label('T/ \\sigma', 'SNR')", "title": "" }, { "docid": "58d1f45a679ea9d0e12b5259b3919caa", "score": "0.49188805", "text": "def generate_separated_square(lx, ly, c, p, n):\n ys = []\n yws = []\n xs = []\n xws = []\n wx = 2*lx/n\n wy = 2*ly/n\n for i in range(n):\n y, w = cheb(p, -ly + i*wy, -ly + (i+1)*wy)\n ys.append(y)\n yws.append(w)\n x, w = cheb(p, -lx + i*wx, -lx + (i+1)*wx)\n xs.append(x)\n xws.append(w)\n x = np.concatenate(xs)\n xw = np.concatenate(xws)\n y = np.concatenate(ys)\n yw = np.concatenate(yws)\n # stitch sides together to get nodes\n lxl = 
np.repeat(lx, p*n)\n lyl = np.repeat(ly, p*n)\n left = np.row_stack([ -lxl + c[0], y + c[1] ])\n right = np.row_stack([ lxl + c[0], y + c[1] ])\n bottom = np.row_stack([ x + c[0], -lyl + c[1] ])\n top = np.row_stack([ x + c[0], lyl + c[1] ])\n\n return left, right, bottom, top", "title": "" }, { "docid": "b0ef0a753696aeffac1916309bedf972", "score": "0.49136946", "text": "def _idx_to_quadruple(cls, idx):\n\n row, col = divmod(idx, cls.get_number())\n id1, k1 = divmod(row-1, cls.k)\n id2, k2 = divmod(col-1, cls.k)\n return id1+1, k1+1, id2+1, k2+1", "title": "" }, { "docid": "8f19be6fde47914afa233def65530b43", "score": "0.49114504", "text": "def get_sample_space(self, n=1000, gridding=False):\n if gridding:\n return self._get_grid_sample_space()\n else:\n return self._get_random_sample_space(n)", "title": "" }, { "docid": "f6ae40f657423b9479569e79cca402ca", "score": "0.49063462", "text": "def triangle(n):\n return (n * (n + 1)) / 2", "title": "" }, { "docid": "7f4507ce36e22217ef55cc6abe9c50b1", "score": "0.49051", "text": "def spiral(n):\n\n step = 20\n direc = \"e\"\n newddict = {\"e\": \"n\",\n \"n\": \"w\",\n \"w\": \"s\",\n \"s\": \"e\"}\n dummy = 1\n \n while n > 0:\n\n turtle.forward(step)\n turn(direc,newddict[direc])\n \n if dummy % 2 == 0:\n step += 20\n\n direc = newddict[direc]\n n += -1\n dummy += 1\n\n turtle.exitonclick()", "title": "" }, { "docid": "cdcc57a3bb5b2480a95f16229931ea05", "score": "0.4898313", "text": "def generate(self, n):\n candidates = []\n scores = []\n nv = len(self.xmin)\n for i in np.arange(1, min(n, self._seed_size) + 1):\n if i < 3:\n # 1/2 points\n seed = delayed(np.arange(1, i + 1)[:, None] * np.ones(shape=(1, nv)))\n else:\n # Larger seeds using recursive division\n seed = LatinHypercube(self.xmin, self.xmax, self.use_logger, seed_size=i - 1).generate(i)\n\n # Create candidate designs and compute inter-site distance\n ns = seed.shape[0]\n x = self._tplhsdesign(n, seed, ns).compute()\n candidates.append(x)\n scores.append(np.min(pdist(x)))\n\n # Select the top-ranked candidate\n lhd = candidates[np.argmax(scores)]\n\n # Scale to [xmin, xmax]\n lhd_scaled = self.scale_to_new_domain(lhd, self.xmin, self.xmax)\n\n if self.use_logger:\n self.logger.info(\"Latin hypercube design: generated {0} points in {1} dimensions\".format(n, nv))\n\n return lhd_scaled", "title": "" }, { "docid": "3e1466fb7c5b344c19a4f07b57f40bf7", "score": "0.48947856", "text": "def generateParenthesis(self, n: int) -> List[str]:\n res = []\n\n def util(cur_str, n_lb, n_rb):\n if n_lb > n or n_rb > n:\n return\n if n_lb == n_rb == n:\n res.append(cur_str)\n if n_lb == n_rb:\n util(cur_str+'(', n_lb+1, n_rb)\n if n_lb > n_rb:\n util(cur_str+'(', n_lb+1, n_rb)\n util(cur_str+')', n_lb, n_rb+1)\n if n_lb < n_rb:\n return\n\n util('', 0, 0)\n return res", "title": "" }, { "docid": "37cbdd59a9813cb0ecab8491be77ea9d", "score": "0.4889421", "text": "def stools3(n, source, temp1, destination):\n \n if n == 1:\n model.move(source, destination)\n if animate:\n time.sleep(delay_btw_moves)\n print(str(model))\n else:\n stools3(n - 1, source, destination, temp1)\n model.move(source, destination)\n if animate:\n time.sleep(delay_btw_moves)\n print(str(model))\n stools3(n - 1, temp1, source, destination)", "title": "" }, { "docid": "1c5ab84df5b008eb97a51ab96b08584e", "score": "0.48801392", "text": "def twospirals(n_points, noise=.5, random_state=920):\n n = np.sqrt(np.random.rand(n_points,1)) * 600 * (2*np.pi)/360\n d1x = -1.5*np.cos(n)*n + np.random.randn(n_points,1) * noise\n d1y = 1.5*np.sin(n)*n + 
np.random.randn(n_points,1) * noise\n return (np.vstack((np.hstack((d1x,d1y)),np.hstack((-d1x,-d1y)))),\n np.hstack((np.zeros(n_points),np.ones(n_points))))", "title": "" }, { "docid": "d081fbe2ce24f2de3f61ccd19032295b", "score": "0.48767608", "text": "def trigrid(self, n=10):\n lam = []\n for lam1 in range(n):\n for lam2 in range(n - lam1):\n lam3 = n - lam1 - lam2\n lam.append([lam1, lam2, lam3])\n return np.array(lam) / float(n)", "title": "" }, { "docid": "daf1a5792ee024d7757c43a80e7fa35c", "score": "0.48743713", "text": "def structure_factor(trj, Q_range=(0.5, 50), n_points=1000, framewise_rdf=False, method='fz'):\n rho = np.mean(trj.n_atoms / trj.unitcell_volumes)\n L = np.min(trj.unitcell_lengths)\n\n top = trj.topology\n elements = set([a.element for a in top.atoms])\n\n compositions = dict()\n form_factors = dict()\n rdfs = dict()\n\n Q = np.logspace(np.log10(Q_range[0]),\n np.log10(Q_range[1]),\n num=n_points)\n S = np.zeros(shape=(len(Q)))\n\n for elem in elements:\n compositions[elem.symbol] = len(top.select('element {}'.format(elem.symbol)))/trj.n_atoms\n form_factors[elem.symbol] = elem.atomic_number\n\n for i, q in enumerate(Q):\n num = 0\n denom = 0\n\n for elem in elements:\n denom += compositions[elem.symbol] * form_factors[elem.symbol]\n\n for (elem1, elem2) in it.product(elements, repeat=2):\n e1 = elem1.symbol\n e2 = elem2.symbol\n\n f_a = form_factors[e1]\n f_b = form_factors[e2]\n\n x_a = compositions[e1]\n x_b = compositions[e2]\n \n try:\n g_r = rdfs['{0}{1}'.format(e1, e2)]\n except KeyError:\n pairs = top.select_pairs(selection1='element {}'.format(e1),\n selection2='element {}'.format(e2))\n if framewise_rdf:\n r, g_r = rdf_by_frame(trj,\n pairs=pairs,\n r_range=(0, L / 2),\n bin_width=0.001)\n else:\n r, g_r = md.compute_rdf(trj,\n pairs=pairs,\n r_range=(0, L / 2),\n bin_width=0.001)\n rdfs['{0}{1}'.format(e1, e2)] = g_r\n integral = simps(r ** 2 * (g_r - 1) * np.sin(q * r) / (q * r), r)\n\n if method == 'al':\n pre_factor = np.sqrt(x_a * x_b) * 4 * np.pi * rho\n partial_sq = (integral*pre_factor) + int(e1==e2)\n num += (f_a*f_b) * (partial_sq+1) * np.sqrt(x_a*x_b)\n elif method == 'fz':\n pre_factor = 4 * np.pi * rho\n partial_sq = (integral*pre_factor) + 1\n num += (x_a*f_a*x_b*f_b) * (partial_sq)\n S[i] = (num/(denom**2))\n return Q, S", "title": "" }, { "docid": "7b75b639d5883703a6a503ff70d28aa3", "score": "0.48729777", "text": "def symmetric_triangle(n: int = None, **kwargs) -> list:\n n = int(fabs(n)) if n is not None else 2\n\n if n == 2:\n triangle = [1, 1]\n\n if n > 2:\n if n % 2 == 0:\n front = [i + 1 for i in range(0, floor(n / 2))]\n triangle = front + front[::-1]\n else:\n front = [i + 1 for i in range(0, floor(0.5 * (n + 1)))]\n triangle = front.copy()\n front.pop()\n triangle += front[::-1]\n\n if kwargs.pop(\"weighted\", False):\n triangle_sum = npSum(triangle)\n triangle_weights = triangle / triangle_sum\n return triangle_weights\n\n return triangle", "title": "" }, { "docid": "be678b1f1a4a5d2a66153e3294cd2525", "score": "0.48695523", "text": "def gen_triangle_nums(limit=None):\n n = 1\n while not limit or n < limit:\n yield n*(n+1)/2\n n += 1", "title": "" }, { "docid": "15cb6d38612b2981bbdf69f27fb66308", "score": "0.48652288", "text": "def simulate(self, n, surv_df=False):\n raise NotImplementedError", "title": "" }, { "docid": "dd4b3852ddd5b550df11103dbbd3184f", "score": "0.48641437", "text": "def ggpl_zstair(dx,dy,dz): \n\tvertex = [[0,0],[0,0.5],[2,0.5],[1,0]]\n\tcells = [[1,2,3,4]]\n\tpolls = None\n\t\"\"\"creation of the first 
step\"\"\"\n\txstep = MKPOL([vertex, cells, 1])\n\tstep = PROD([QUOTE([3]),xstep])\n\t\"\"\"calculation of the height of the ramps\"\"\"\n\trampeHeight = math.ceil(dz/2)\n\t\"\"\"calculation of the number of the steps according to the dimension of the box\"\"\"\n\tnumStep = math.ceil(dx)\n\t\"\"\"creation of the 2 ramps\"\"\"\n\txRampe= step\n\txRampe2= xRampe\n\tfor i in range(int(numStep)-3):\n\t\txRampe = STRUCT([xRampe,T([2,3])([1,0.5]),xRampe])\n\t\txRampe2 = STRUCT([xRampe2,T([2,3])([1,0.5]),xRampe2])\n\t\"\"\"creation of the platform on the last step\"\"\"\n\tfinalStep = CUBOID([dx,2,0.5])\n\t\"\"\"concatenation of the ramps with the platform\"\"\"\n\txRampe = STRUCT([xRampe,T([2,3])([(numStep-2)*1,(numStep-2)*0.5]),finalStep])\n\txRampe2 = STRUCT([xRampe2,T([2,3])([(numStep-3)*1,(numStep-3)*0.5]),finalStep])\n\t\"\"\"rotate the second ramp of 180 degrees\"\"\"\n\txRampe2= R([1,2])(PI)(xRampe2)\n\t\"\"\"create and display the box\"\"\"\n\tbox = SKEL_1(CUBOID([dx,dy,dz]))\n\tresult = STRUCT([box,xRampe])\n\t\"\"\"concatenation of the 2 ramps and display of the result\"\"\"\n\tresult = STRUCT([result,T([1,2,3])([dx,dy-1,(numStep-2)*0.5]),xRampe2])\n\tVIEW(result)\n\treturn result", "title": "" } ]
3a3a2e849b00f11158ef1aeb7e55f162
See the real Accessor for a description.
[ { "docid": "db55e38a64c281fd6e8ab26938b883f2", "score": "0.0", "text": "def fetch_points(self, metric_name, time_start, time_end, step,\n aggregator_func=None, _fake_query_results=None):\n if not _fake_query_results:\n points = self._metric_to_points[metric_name]\n rows = []\n for ts in points.irange(time_start, time_end):\n # A row is time_base_ms, time_offset_ms, value\n row = (ts * 1000.0, 0, float(points[ts]))\n rows.append(row)\n _fake_query_results = [(True, rows)]\n return self._real_accessor.fetch_points(\n metric_name, time_start, time_end, step, aggregator_func, _fake_query_results,\n )", "title": "" } ]
[ { "docid": "9b1f057f9b6b248713bf759651dc2c80", "score": "0.7138444", "text": "def get(self, ):\n\t\tpass", "title": "" }, { "docid": "4560c322427918f793eb8b1980ad174e", "score": "0.71203256", "text": "def _get(self):\n raise NotImplementedError", "title": "" }, { "docid": "4560c322427918f793eb8b1980ad174e", "score": "0.71203256", "text": "def _get(self):\n raise NotImplementedError", "title": "" }, { "docid": "116d39b46b0151afdec97424ca07055e", "score": "0.7103711", "text": "def getter(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "fd29b7f91beb0c1b813cbb854c0ad837", "score": "0.6972076", "text": "def get(self):\r\n raise NotImplemented", "title": "" }, { "docid": "584b5533eedcff65f50d97263b417e04", "score": "0.6880212", "text": "def get(self):\n raise NotImplementedError(\"get should have been implemented.\")", "title": "" }, { "docid": "caa2aa3b7edf4bf733f0d9c702b82957", "score": "0.6811639", "text": "def value(self):", "title": "" }, { "docid": "caa2aa3b7edf4bf733f0d9c702b82957", "score": "0.6811639", "text": "def value(self):", "title": "" }, { "docid": "e5a6cb973d773fc2d2c0edfc9301a023", "score": "0.6799362", "text": "def Value(self) -> object:", "title": "" }, { "docid": "e5a6cb973d773fc2d2c0edfc9301a023", "score": "0.6799362", "text": "def Value(self) -> object:", "title": "" }, { "docid": "7696d6f4a2f087ca81c6a76bd6d630f7", "score": "0.6743941", "text": "def __get__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "10b3fd49f537277d9f5b9be39f6dc847", "score": "0.672471", "text": "def get(self):", "title": "" }, { "docid": "10b3fd49f537277d9f5b9be39f6dc847", "score": "0.672471", "text": "def get(self):", "title": "" }, { "docid": "2297141bdf3fcc25feae8a0e1435d07f", "score": "0.6699795", "text": "def get(self):\n ...", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.6695477", "text": "def Get(self):", "title": "" }, { "docid": "67a4ba360513253a76eddd5044798cb4", "score": "0.6689475", "text": "def get(self):\n raise MethodNotDefined()", "title": "" }, { "docid": "93844b9f79edf46512427d6223db171a", "score": "0.6682644", "text": "def get(self):\n pass", "title": "" }, { "docid": "93844b9f79edf46512427d6223db171a", "score": "0.6682644", "text": "def get(self):\n pass", "title": "" }, { "docid": "93844b9f79edf46512427d6223db171a", "score": "0.6682644", "text": "def get(self):\n pass", "title": "" }, { "docid": "93844b9f79edf46512427d6223db171a", "score": "0.6682644", "text": "def get(self):\n pass", "title": "" }, { "docid": "9b6866e04e46a65fd1f437802bf9498e", "score": "0.66704756", "text": "def Get(self):\n pass", "title": "" }, { "docid": "9b6866e04e46a65fd1f437802bf9498e", "score": "0.6670471", "text": "def Get(self):\n pass", "title": "" }, { "docid": "34d8effbe8936e74eead595d9793dd04", "score": "0.6670356", "text": "def GetValue(self):", "title": "" }, { "docid": "34d8effbe8936e74eead595d9793dd04", "score": "0.6670356", "text": "def GetValue(self):", "title": "" }, { "docid": "a2f3ca330cfb067924f901b45e1ee6d2", "score": "0.6505851", "text": "def get_value(self):", "title": "" }, { "docid": "3fdbd4fd9d31a3aac9fef9b18b68a200", "score": "0.64627343", "text": "def __getitem__(self):", "title": "" }, { "docid": "3fdbd4fd9d31a3aac9fef9b18b68a200", "score": "0.64627343", "text": "def __getitem__(self):", "title": "" }, { "docid": "dcefa20cf12f87220b4df30394928906", "score": "0.64367425", "text": "def get(self):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)", "title": "" }, { "docid": 
"1defc4aac572247f7ac5567ab79a4c84", "score": "0.6375044", "text": "def attributes(self):", "title": "" }, { "docid": "1defc4aac572247f7ac5567ab79a4c84", "score": "0.6375044", "text": "def attributes(self):", "title": "" }, { "docid": "9c7049dbb8ee75c985da127807b7bc61", "score": "0.63655734", "text": "def _GetValue(self, obj, attr_name):\n raise NotImplementedError()", "title": "" }, { "docid": "94d4ea76975b6937137437f466d8f573", "score": "0.6365386", "text": "def Dereference(self):", "title": "" }, { "docid": "1d267e298a58889a7ecc74667e23de92", "score": "0.63451064", "text": "def get(self):\n pass", "title": "" }, { "docid": "3babe54ee1d85a4be1a4e5b70ace171e", "score": "0.63309646", "text": "def get(self) -> object:\n return 0", "title": "" }, { "docid": "0d8a17dc6764aef7191072c9cd06e3d7", "score": "0.6325798", "text": "def value(self):\n pass", "title": "" }, { "docid": "489f64c21aa14dac331367333e77bfa9", "score": "0.6324996", "text": "def GetValue(self):\r\n pass", "title": "" }, { "docid": "489f64c21aa14dac331367333e77bfa9", "score": "0.6324996", "text": "def GetValue(self):\r\n pass", "title": "" }, { "docid": "5a8d8e8fac630a1ff164761499f50011", "score": "0.6316454", "text": "def value(self):\n raise NotImplementedError", "title": "" }, { "docid": "46dcd9dc80ec46d4bfc65b9c112b850e", "score": "0.6279591", "text": "def __get__(self, instance, owner):\n pass", "title": "" }, { "docid": "28eea685fdebe30274b7291fa93a99ed", "score": "0.62328744", "text": "def __index__(self):\n return self.value", "title": "" }, { "docid": "288d61f111c4aef669b89cbb06ea35a4", "score": "0.62171674", "text": "def accessor(self):\n attr = self.attr or self.name\n\n if isinstance(attr, Accessor):\n return attr\n\n return Accessor(getter=attr, setter=attr)", "title": "" }, { "docid": "586449bc2343fea361c6bd35bf525f81", "score": "0.62025666", "text": "def value(self):\n raise Exception('value attribute is read-only') # TODO what is the right exception for this?", "title": "" }, { "docid": "a959245ec0987f719cb7db7b3b1cd620", "score": "0.62014097", "text": "def DataMember(self) -> str:", "title": "" }, { "docid": "90d478c1d9a201b5abeb60164e4688d8", "score": "0.6195045", "text": "def Item(self) -> _n_0_t_17:", "title": "" }, { "docid": "66b0e4f1dd27dcb8ff1c289823da41b9", "score": "0.6180862", "text": "def __getattr__(self, attr):\n return getattr(self.reader, attr)", "title": "" }, { "docid": "856c2224d5909675fa4baa23b44f3e16", "score": "0.6173647", "text": "def ValueMember(self) -> str:", "title": "" }, { "docid": "aaee88ff46d96687184105dbbd6e3e5a", "score": "0.6166939", "text": "def get_value(self):\n raise NotImplementedError", "title": "" }, { "docid": "cda552e0a37764ff2a648560d3279145", "score": "0.61566144", "text": "def Item(self) -> _n_0_t_6:", "title": "" }, { "docid": "aaea567392d958285cadbbd69ce3bf83", "score": "0.6150415", "text": "def Item(self) -> object:", "title": "" }, { "docid": "8304bcaf57f785034e2a639014d35963", "score": "0.6135629", "text": "def __getitem__(self, name): # reliably restored by inspect\n pass", "title": "" }, { "docid": "f1b33df859786a25cd38304e85eb5d94", "score": "0.6128802", "text": "def __getattr__(self, name):\n ...", "title": "" }, { "docid": "3b176e30babec8ed5de54c31963e6014", "score": "0.6122868", "text": "def __getitem__(self, name):\n ...", "title": "" }, { "docid": "b27e8206afa26bba268b66b080c5442a", "score": "0.6116649", "text": "def __getattr__(self, attrname):\n #print attrname\n raise\n return self.cord.__dict__[attrname]", "title": "" }, { "docid": 
"4f10e913586a6e7055febfa7c378af6c", "score": "0.6110326", "text": "def PropertyDescriptor(self) -> PropertyDescriptor:", "title": "" }, { "docid": "4f10e913586a6e7055febfa7c378af6c", "score": "0.6110326", "text": "def PropertyDescriptor(self) -> PropertyDescriptor:", "title": "" }, { "docid": "4f10e913586a6e7055febfa7c378af6c", "score": "0.6110326", "text": "def PropertyDescriptor(self) -> PropertyDescriptor:", "title": "" }, { "docid": "53669388caa9a740e56ec4240f0a8179", "score": "0.6104911", "text": "def _get(self, at):\n raise NotImplementedError", "title": "" }, { "docid": "981d02c8e315fdfdddc358ee4ee65427", "score": "0.6102636", "text": "def detail(self):\n raise NotImplementedError", "title": "" }, { "docid": "944fb2df87fb227ccee6471df3301cdd", "score": "0.6102496", "text": "def __call__(self):\n raise NotImplementedError", "title": "" }, { "docid": "e678fc0d4987c64281943fbf20b611e9", "score": "0.60937124", "text": "def Value(self):\n raise NotImplementedError", "title": "" }, { "docid": "50294053955bd84ebf0c33ca637c8ee7", "score": "0.60913783", "text": "def retrieve(self):\n raise NotImplementedError()", "title": "" }, { "docid": "5cd1a6affda2ac3eec58816bc19a8d71", "score": "0.6087685", "text": "def __getitem__(self, name):", "title": "" }, { "docid": "961421d2e73e188a9d52b08b78717f28", "score": "0.60782784", "text": "def GetProperty(self, name):", "title": "" }, { "docid": "92b848d75b2aff59720ace7f3a46bc9c", "score": "0.6076875", "text": "def get(self):\n raise NotImplementedError(\n 'ValueProvider.get implemented in derived classes')", "title": "" }, { "docid": "bec599f88c5b063e9fff36d4353aaac7", "score": "0.6062502", "text": "def __call__(self):\n return self.value", "title": "" }, { "docid": "7a05aab78b7615cffbfcc5405a94e13d", "score": "0.60552454", "text": "def __aiter__(self):\n pass", "title": "" }, { "docid": "7a05aab78b7615cffbfcc5405a94e13d", "score": "0.60552454", "text": "def __aiter__(self):\n pass", "title": "" }, { "docid": "4344a295f7c8e2fa561e0317ae6b650e", "score": "0.6047716", "text": "def _get(self):\n return self._value", "title": "" }, { "docid": "c701b538396e23eba5372b332513d6b8", "score": "0.60210425", "text": "def properties(self):", "title": "" }, { "docid": "c701b538396e23eba5372b332513d6b8", "score": "0.60210425", "text": "def properties(self):", "title": "" }, { "docid": "c701b538396e23eba5372b332513d6b8", "score": "0.60210425", "text": "def properties(self):", "title": "" }, { "docid": "c701b538396e23eba5372b332513d6b8", "score": "0.60210425", "text": "def properties(self):", "title": "" }, { "docid": "c701b538396e23eba5372b332513d6b8", "score": "0.60210425", "text": "def properties(self):", "title": "" }, { "docid": "77cb6bc301522d34bb54d01c7dc38c1a", "score": "0.6011629", "text": "def details(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "d7d9e8ddbfca2dcd57773c0c45163e03", "score": "0.5982891", "text": "def __getitem__(self, key):\n pass", "title": "" }, { "docid": "95fbbe5c9d3a01a21f1a09f12acb71f1", "score": "0.59764487", "text": "def __getitem__(self, key):\n return self.__getattribute__(key)", "title": "" }, { "docid": "540ebc2af2399a790354af689ba3709b", "score": "0.59499943", "text": "def fields(self):", "title": "" }, { "docid": "07175f9040bf12c04ee46ffd67d0644e", "score": "0.5947179", "text": "def retrieve(self):\n raise NotImplemented()", "title": "" }, { "docid": "2d79fe3a29c6c98758d5372c19158019", "score": "0.594129", "text": "def GetPropertyDouble(self, name):", "title": "" }, { "docid": "1632fe64f0dd2a22fc87f845ee70c33b", 
"score": "0.5940678", "text": "def getData(self, ):", "title": "" }, { "docid": "b2bca5196c0c107ed3e00b05593b76be", "score": "0.593788", "text": "def get(self, name):\n raise NotImplementedError(\"The subclass must implement\")", "title": "" }, { "docid": "aad9269ba4ef5d47753706d32e503abc", "score": "0.5936652", "text": "def Item(self) -> _n_0_t_8:", "title": "" }, { "docid": "790c2aef1d04d5a584af8e8632c0e8c5", "score": "0.5935721", "text": "def __getattr__(self, key):\n return object.__getattribute__(self, key)", "title": "" }, { "docid": "013018e4ed738a13ce1f21990c06ccc3", "score": "0.5930763", "text": "def val(self):\n pass", "title": "" }, { "docid": "3c57498502488ccd416434e4efc46579", "score": "0.59278953", "text": "def __getattr__(self, key: str) -> t.Any:\n return getattr(self.description, key)", "title": "" }, { "docid": "422a22090d1d9b29004906830e34fa45", "score": "0.59277046", "text": "def get(self, key):\n pass", "title": "" }, { "docid": "0684b21394f419d22e84286f4b935186", "score": "0.5924251", "text": "def __getattr__(self, name):\n return self.metadata[name]", "title": "" }, { "docid": "0684b21394f419d22e84286f4b935186", "score": "0.5924251", "text": "def __getattr__(self, name):\n return self.metadata[name]", "title": "" }, { "docid": "c70cb8495f04c15923f6dae858e995ab", "score": "0.59172446", "text": "def __getattr__(self, name):\r\n return getattr(self._data, name)", "title": "" }, { "docid": "0e2a95b6147e54f88911ad7751ec2ad4", "score": "0.59099627", "text": "def data(self):\n return NotImplementedError()", "title": "" }, { "docid": "46cef4a8249e7271328778a86d5c18a9", "score": "0.59029305", "text": "def A(self):\n raise NotImplementedError()", "title": "" }, { "docid": "46fcaac9ed86964f2064282cf3c0bc04", "score": "0.59020084", "text": "def _get_age(self):\n return self.__age", "title": "" }, { "docid": "fa9f047ef9c5d678185f2e0eb34ad467", "score": "0.58975035", "text": "def __getattr__(self, item):\n pass", "title": "" }, { "docid": "ee17aeef2f914527331859da09edf351", "score": "0.58961254", "text": "def Reference(self):", "title": "" }, { "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.5893345", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.5893345", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.5893345", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.5893345", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.5893345", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.5893345", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "87ade92f7f21c7bb164f406d19097906", "score": "0.5893345", "text": "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" } ]
633bb61656b4648540046f3aadfbf43e
main function for local usage
[ { "docid": "a8ec5a21efbccc76636cc0ff1be8f8cc", "score": "0.58221775", "text": "def main():\r\n test()", "title": "" } ]
[ { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.69941837", "text": "def main():", "title": "" }, { "docid": "29e1f6ab065fa2f570f2241972e9c859", "score": "0.6961694", "text": "def\tmain():\r\n\t\r\n\tpass", "title": "" }, { "docid": "6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.669952", "text": "def main() -> None:", "title": "" }, { "docid": "6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.669952", "text": "def main() -> None:", "title": "" }, { "docid": "a1e1239bd97fc0c2dd7ee8ea2003aadd", "score": "0.65233755", "text": "def main_CL():\r\n parser = OptionParser(usage=usage(), version='1.0')\r\n parser.add_option(\"-c\", \"--cmd\", dest=\"cmd\", default=\"unknown\", help=\"What command to run.\")\r\n parser.add_option(\"-w\", \"--walk\", dest=\"walk\", default=\"none\", help=\"Scope of status to walk in the directory.\")\r\n parser.add_option(\"-s\", \"--cs\", dest=\"code\", default=\"4\", help=\"The Code version <4|4.0|4.1|4.2|all> to use.\")\r\n parser.add_option(\"-t\", \"--tcId\", dest=\"tcId\", default=\"a0Q300000000Hj9\", help=\"Tech Campaign ID in SalesForce\")\r\n parser.add_option(\"-n\", \"--num\", dest=\"num\", default=\"4000\", help=\"Max number of items to process\")\r\n parser.add_option(\"-x\", \"--parm\", dest=\"parm\", default=\"\", help=\"Parms used by a method\")\r\n parser.add_option(\"-f\", \"--file\", dest=\"file\", default=\"\", help=\"path to the file to load, current dir if just a filename\")\r\n parser.add_option(\"-q\", \"--quiet\", dest='quiet',default=\"1\", help=\"Show less info in output\")\r\n parser.add_option(\"-d\", \"--debug\", dest='debug',action=\"count\",help=\"The debug level, use multiple to get more.\")\r\n (options, args) = parser.parse_args()\r\n if options.debug > 2:\r\n print ' verbose %s\\tdebug %s' %(options.quiet, options.debug)\r\n print ' command %s\\twalk %s' %(options.cmd, options.walk)\r\n print ' version %s' %options.code\r\n print ' tcId %s' %options.tcId\r\n print ' num %s' %options.num\r\n print ' file %s' %options.file\r\n print ' args: %s' %args\r\n else:\r\n options.debug = 0\r\n crList = []\r\n version = str(options.code)\r\n \r\n if 
options.cmd in ['load']:\r\n loadQOR(options=options)\r\n elif options.cmd in ['dir']:\r\n loadQORDir(options=options)\r\n elif options.cmd in ['rpt']:\r\n walk_list(options=options)\r\n else:\r\n print '%s' %usage()", "title": "" }, { "docid": "7c948d070347c04c157c9bc1a32db75f", "score": "0.6512523", "text": "def main(self):", "title": "" }, { "docid": "b7ec2b67be7af2e3e3f7abf4d1854636", "score": "0.6511092", "text": "def main():\n core.main()", "title": "" }, { "docid": "318cdf2f663487bb0a9d9fa8d2987875", "score": "0.64981115", "text": "def main():\n return 0", "title": "" }, { "docid": "737edbcdda02609a89cb40f455a292fb", "score": "0.6491304", "text": "def main(args):\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.64139795", "text": "def 
main():\n pass", "title": "" }, { "docid": "59a11114512ac46a2a2d976a77d1c6d6", "score": "0.63336045", "text": "def main(self, *args):\n pass", "title": "" }, { "docid": "535c6e7d32028af3cc3d681a0804815d", "score": "0.631539", "text": "def main(args=None):\n return 0", "title": "" }, { "docid": "535c6e7d32028af3cc3d681a0804815d", "score": "0.631539", "text": "def main(args=None):\n return 0", "title": "" }, { "docid": "535c6e7d32028af3cc3d681a0804815d", "score": "0.631539", "text": "def main(args=None):\n return 0", "title": "" }, { "docid": "8795b9ca19d8f12a8d63e1f20619a3ca", "score": "0.6273365", "text": "def lafs():", "title": "" }, { "docid": "5aa4372be4d7b9d84cb0f1e4c22aa424", "score": "0.62235725", "text": "def main():\n if not os.path.exists(\"./data/features\"):\n os.makedirs(\"./data/features\")\n make_cts()\n make_coldata()\n command = \"Rscript ./src/features/r_scripts/lfc.R\"\n os.system(command)\n return", "title": "" }, { "docid": "bdea5fa1d9b0ba3dfa8573d74c9cd641", "score": "0.6218749", "text": "def main(argv):", "title": "" }, { "docid": "2230f3b32896a1d0f35daa753ea7307c", "score": "0.61689687", "text": "def main():\n Main()", "title": "" }, { "docid": "64a1a4261cea121e609f61fd60d0b36f", "score": "0.6127713", "text": "def main():\n return None", "title": "" }, { "docid": "c328a8e3f0c73dfb795255c779d6bb21", "score": "0.6124877", "text": "def main():\n # set up the program to take in arguments from the command line", "title": "" }, { "docid": "04688cd98d9fcf835947139440a6440f", "score": "0.60574085", "text": "def main():\n\n pass\n\n return None", "title": "" }, { "docid": "9849beb2becdf68e2bf5a55ba6c8e058", "score": "0.6054235", "text": "def main():\n\n BASIC.run(PROGRAM)", "title": "" }, { "docid": "9849beb2becdf68e2bf5a55ba6c8e058", "score": "0.6054235", "text": "def main():\n\n BASIC.run(PROGRAM)", "title": "" }, { "docid": "9849beb2becdf68e2bf5a55ba6c8e058", "score": "0.6054235", "text": "def main():\n\n BASIC.run(PROGRAM)", "title": "" }, { "docid": "dc5586b84901eecdd4033d77b1277901", "score": "0.6052881", "text": "def main(args):\r\n\tprint args", "title": "" }, { "docid": "693e66b4f00edb2c4872791ea15557fe", "score": "0.60299456", "text": "def main():\n parser = ArgumentParser(description='추출된 오분석 패치 후보를 검증하는 스크립트')\n parser.add_argument('-c', '--corpus-dir', help='corpus dir', metavar='DIR', required=True)\n parser.add_argument('--rsc-src', help='resource source dir <default: ../rsc/src>',\n metavar='DIR', default='../rsc/src')\n parser.add_argument('--lib-path', help='khaiii shared library path', metavar='FILE', default='')\n parser.add_argument('--rsc-dir', help='resource dir', metavar='DIR', default='')\n parser.add_argument('--input', help='input file <default: stdin>', metavar='FILE')\n parser.add_argument('--output', help='output file <default: stdout>', metavar='FILE')\n parser.add_argument('--debug', help='enable debug', action='store_true')\n args = parser.parse_args()\n\n if args.input:\n sys.stdin = open(args.input, 'r', encoding='UTF-8')\n if args.output:\n sys.stdout = open(args.output, 'w', encoding='UTF-8')\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n run(args)", "title": "" }, { "docid": "25d2156a2c147b735919830a88870675", "score": "0.6025334", "text": "def climain():\n pass", "title": "" }, { "docid": "f5faf6af6fd1d678aa5d61b752570c5b", "score": "0.60207397", "text": "def main():\n CLI.from_command_line()\n exit(0)", "title": "" }, { "docid": "50333a46fe2be3b14f8763c7450461cc", 
"score": "0.5991564", "text": "def main(args=None):\n pass", "title": "" }, { "docid": "2dddb624e75a16297354fe192004dae4", "score": "0.5971752", "text": "def main() -> None:\n pass", "title": "" }, { "docid": "03eab569e1fa019e05e3f17a76f89d8e", "score": "0.5965668", "text": "def main():\n\n\ta = LedSwitcher(sys.argv[1])\n\ta.parseFile()\n\tprint(a.getResult())", "title": "" }, { "docid": "9474d60d1ad14443e46dc45f9d255de4", "score": "0.5958696", "text": "def main():\n args = parse_args()", "title": "" }, { "docid": "ac7d54f6876130c19c424c66f5c633c3", "score": "0.59350556", "text": "def cli():\n\n print(\"Context of the main.\")", "title": "" }, { "docid": "0c44f7b85e1489e30026e387428624fc", "score": "0.59238666", "text": "def EntryPoint(self) -> _n_5_t_1:", "title": "" }, { "docid": "d7b8a92fa493427136b876598a229cee", "score": "0.5900915", "text": "def main_alt():\n print \"Hello, world!\"", "title": "" }, { "docid": "8fa7ebb63b49d9b7b6661663eb1a9990", "score": "0.58937246", "text": "def main():\n ### read dump\n timestep, natoms, lx, ly, xs, ys = read_dump(ifname)\n ### correct pbcs for all molecules\n correct_pbcs(xs, ys, nfil, natoms)\n ### add new beads\n #xsd, ysd, natomsd, nfild = add_beads(xs,ys,nfil,natoms)\n ### add new per/atom information\n tpe, molid = add_per_atom_info(natoms, nfil)\n ### add new bond information\n bid, btpe, b1, b2 = add_bond_information(natoms, nfil)\n ### add new angle information\n aid, atpe, a1, a2, a3 = add_angle_information(natoms, nfil)\n ### scale xs and ys\n lx *= lscale\n ly *= lscale\n xs *= lx\n ys *= ly\n ### write results to xyz file\n write_xyz(xs,ys)\n ### write results to lammps data file\n write_data(natoms, lx,ly, tpe, molid, xs, ys, bid, btpe, b1, b2, aid, atpe, a1, a2, a3,ofname)\n return", "title": "" }, { "docid": "ed20152113aec51270e2db79970f1f50", "score": "0.58698523", "text": "def llc(args=None):\n args = parser.parse_args(args)\n with LogSetup(args) as log_setup:\n march = get_arch_from_args(args)\n ir_module = api.llvm_to_ir(args.source)\n do_compile([ir_module], march, log_setup.reporter, log_setup.args)", "title": "" }, { "docid": "ba8ad90da83a3ae86af26ef07a134e88", "score": "0.5863602", "text": "def test_lc_file():\n lc_from_file(lc, cache, X, y, F, wf, P, wp)", "title": "" }, { "docid": "6b54c30c3c8ce763241d1d10b860290b", "score": "0.58604974", "text": "def main():\n\n read_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"cupp.cfg\"))\n\n parser = get_parser()\n args = parser.parse_args()\n\n if not args.quiet:\n print_cow()\n\n if args.version:\n version()\n elif args.interactive:\n interactive()\n elif args.download_wordlist:\n download_wordlist()\n elif args.alecto:\n alectodb_download()\n elif args.improve:\n improve_dictionary(args.improve)\n else:\n parser.print_help()", "title": "" }, { "docid": "6e57c8a0092568cb746b76b654f5c47f", "score": "0.58221513", "text": "def main() -> None:\n\n new_location_main(sys.argv[1:])", "title": "" }, { "docid": "945a136452b0e0f7d5cc4dc0c0dc227c", "score": "0.5822095", "text": "def main():\n print(\"Welcome to the XY77 Battering Ram\")\n\n state = get_vault_state()\n print(\"State acquired. 
Let's start.\")\n\n print(\"\\n**History Layer**\")\n history_layer(state)\n\n print(\"\\n**Code Layer**\")\n code_layer(state)\n\n print(\"\\n**Switches Layer**\")\n switches_layer(state)\n\n print(\"\\n**Button Layer**\")\n button_layer(state)\n\n print(\"Layers bypassed.\")\n print(\"Wait\", state['suspicion level'],\n \"seconds or more to allow suspicion level to dissipate.\")", "title": "" }, { "docid": "ab7c7f19a627fee85f5b4de482ebcb8d", "score": "0.58100647", "text": "def main():\n ibc_nlp_classification()", "title": "" }, { "docid": "be5ec6cfb53f8bc192c6396a7b0c49a2", "score": "0.5809807", "text": "def main():\n User = os.environ.get(\"USER\", \"unknown\")\n Host = os.environ.get(\"HOSTNAME\",\"unknown\")\n # push User, host and command line on to XALT_Stack\n XALT_Stack.push(\"User: \" + User)\n XALT_Stack.push(\"Host: \" + Host)\n sA = []\n sA.append(\"CommandLine:\")\n for v in sys.argv:\n sA.append('\"'+v+'\"')\n\n s = \" \".join(sA)\n XALT_Stack.push(s)\n\n try:\n uuid = sys.argv[ 1]\n status = sys.argv[ 2]\n wd = sys.argv[ 3]\n syshost = sys.argv[ 4]\n execname = sys.argv[ 5]\n xaltobj = sys.argv[ 6]\n build_epoch = sys.argv[ 7]\n linklineFn = sys.argv[ 8]\n funclist = sys.argv[ 9]\t\t\t# ftrack2015\n resultFn = sys.argv[10] # ftrack2015\n\n if (execname.find(\"conftest\") != -1):\n return 1\n \n hash_line = capture(['sha1sum', execname]) \n if (hash_line.find(\"No such file or directory\") != -1):\n return 1\n hash_id = hash_line.split()[0]\n\n # Step one clean up linkline data\n sA = cleanup(xaltobj, linklineFn)\n sC = cleanfunc(funclist)\t\t\t# ftrack2015\n \n resultT = {}\n resultT['uuid'] = uuid\n resultT['link_program'] = extract_compiler()\n resultT['build_user'] = User\n resultT['exit_code'] = int(status)\n resultT['build_epoch'] = float(build_epoch)\n resultT['exec_path'] = os.path.abspath(execname)\n resultT['hash_id'] = hash_id\n resultT['wd'] = wd\n resultT['build_syshost'] = syshost\n resultT['func_list'] = sC\t\t#ftrack2015\n resultT['linkA'] = sA\n \n xfer = XALT_transmission_factory.build(XALT_TRANSMISSION_STYLE,\n syshost, \"link\", resultFn)\n xfer.save(resultT)\n\n except Exception as e:\n print(\"XALT_EXCEPTION(xalt_generate_linkdata.py): \",e)\n logger.exception(\"XALT_EXCEPTION:xalt_generate_linkdata\"+XALT_Stack.contents())\n\n return 0", "title": "" }, { "docid": "13bc587f5a0ea07889f8b8ab9bd164fb", "score": "0.57808703", "text": "def main(): # everything that exists is coded in the main function\n # 1, 1, 2, 3, 5, 8, 13, 21, 34", "title": "" }, { "docid": "59bf6ae06b4c2b766abb7b8f181776f7", "score": "0.57786876", "text": "def main():\n utils.vip_main(lPCBAgent, \n version=__version__)", "title": "" }, { "docid": "22b3744ce64d4ee5aa8dcf390966664d", "score": "0.5766914", "text": "def main():\n\n # read config files using liftoff\n opt = read_config()\n\n # create paths if not using liftoff\n # liftoff takes care of this otherwise\n opt = create_paths(opt)\n\n # run your experiment\n run(opt)", "title": "" }, { "docid": "9672271c1d73a534226a7ee696d2ba6b", "score": "0.57410455", "text": "def main():\n print(\"main\")", "title": "" }, { "docid": "48738a5d4d3b5d381714ae989cc8e3b8", "score": "0.57282174", "text": "def main():\n parser = apollocaffe.base_parser()\n parser.add_argument('--config', required=True)\n args = parser.parse_args()\n config = json.load(open(args.config, 'r'))\n print (\"Test config file is \" + config[\"data\"][\"test_idl\"] )\n apollocaffe.set_random_seed(config[\"solver\"][\"random_seed\"])\n apollocaffe.set_device(0) # gpu\n 
test(config)", "title": "" }, { "docid": "e605c23ee9e63769d916ba1b761e86b2", "score": "0.5723054", "text": "def main():\n args = cli()\n lines = args.sourcefile.readlines()\n #tuple is used to symbol table and error counter\n symbol_table, error_count = resolve_labels(lines)\n if error_count == 0:\n #increments the address counter and refers back to assembly instructional code\n transform_instructions(lines, symbol_table)\n for line in lines:\n #prints onto the object file being created\n print(line.strip(), file=args.objfile)", "title": "" }, { "docid": "4181124ff189d7d93d38e464da14095c", "score": "0.57198083", "text": "def main(self):\n pass", "title": "" }, { "docid": "60e1e9849df493a9f87b8e4f1fbf6759", "score": "0.57158166", "text": "def main():\n # TODO: 2. Implement and test this function.\n olympic_rings()", "title": "" }, { "docid": "ad5aba213279aa0d3e812b1fef03749f", "score": "0.5710833", "text": "def main() -> int:\n return special_agent_main(parse_arguments, agent_dell_idrac_main)", "title": "" }, { "docid": "4e8734cb90f3573fc922a5271fd4b22a", "score": "0.5708046", "text": "def main():\n\n pp = Sinuca_TracerPinPoints()\n result = pp.Run()\n return result", "title": "" }, { "docid": "ebbac7fd45bc295e9f171e55d632a3ef", "score": "0.57066834", "text": "def main(arguments=None):\n # setup the command-line util settings\n su = tools(\n arguments=arguments,\n docString=__doc__,\n logLevel=\"WARNING\",\n options_first=True,\n projectName=\"panstamps\"\n )\n arguments, settings, log, dbConn = su.setup()\n\n #parser = self.add_options(usage=usagestring)\n #options, args = parser.parse_args()\n #self.options = options\n ra,dec = 0,0\n # unpack remaining cl arguments using `exec` to setup the variable names\n # automatically\n argdict = {}\n for arg, val in arguments.items():\n if arg[0] == \"-\":\n varname = arg.replace(\"-\", \"\") + \"Flag\"\n else:\n varname = arg.replace(\"<\", \"\").replace(\">\", \"\")\n #if isinstance(val, str) or isinstance(val, str):\n argdict[varname] = val\n #exec(\"%s = '%s'\" % (varname,val),globals(),globals())\n #else:\n # argdict[varname] = float(val)\n #exec(\"%s = %s\" % (varname,val,),globals(),globals())\n if arg == \"--dbConn\":\n dbConn = val\n log.debug('%s = %s' % (varname, val,))\n #if varname == 'ra': import pdb; pdb.set_trace()\n #print(ra,dec)\n #import pdb; pdb.set_trace()\n if argdict['ra']:\n try:\n argdict[ra] = float(argdict['ra'])\n except:\n if \":\" not in argdict['ra']:\n log.error(\n \"ERROR: ra must be in decimal degree or sexagesimal format\")\n return\n\n if argdict['dec']:\n try:\n argdict['dec'] = float(argdict['dec'])\n except:\n if \":\" not in argdict['dec']:\n log.error(\n \"ERROR: dec must be in decimal degree or sexagesimal format\")\n return\n\n ## START LOGGING ##\n startTime = times.get_now_sql_datetime()\n log.info(\n '--- STARTING TO RUN THE cl_utils.py AT %s' %\n (startTime,))\n\n # BUILD KEYWORD DICT\n kwargs = {}\n kwargs[\"log\"] = log\n kwargs[\"settings\"] = settings\n kwargs[\"ra\"] = argdict['ra']\n kwargs[\"dec\"] = argdict['dec']\n\n # FITS OPTIONS\n kwargs[\"fits\"] = True # DEFAULT\n if argdict['fitsFlag'] == False and argdict['nofitsFlag'] == True:\n kwargs[\"fits\"] = False\n\n # JPEG OPTIONS\n kwargs[\"jpeg\"] = False # DEFAULT\n if argdict['jpegFlag'] == True and argdict['nojpegFlag'] == False:\n kwargs[\"jpeg\"] = True\n\n # COLOR JPEG OPTIONS\n kwargs[\"color\"] = False # DEFAULT\n if argdict['colorFlag'] == True and argdict['nocolorFlag'] == False:\n kwargs[\"color\"] = True\n\n # WIDTH OPTION\n 
kwargs[\"arcsecSize\"] = 60\n if argdict['widthFlag']:\n kwargs[\"arcsecSize\"] = float(argdict['widthFlag']) * 60.\n\n # CHOOSE A FILTERSET\n kwargs[\"filterSet\"] = 'gri'\n if argdict['filtersFlag']:\n kwargs[\"filterSet\"] = argdict['filtersFlag']\n\n for i in kwargs[\"filterSet\"]:\n if i not in \"grizy\":\n log.error(\n \"ERROR: the requested filter must be in the grizy filter set\")\n return\n\n # WHICH IMAGE TYPE TO DOWNLOAD\n if argdict['stack']:\n kwargs[\"imageType\"] = \"stack\"\n if argdict['warp']:\n kwargs[\"imageType\"] = \"warp\"\n if argdict['closestFlag']:\n kwargs[\"imageType\"] = \"warp\"\n\n # MJD WINDOW\n kwargs[\"mjdStart\"] = argdict['mjdStart']\n kwargs[\"mjdEnd\"] = argdict['mjdEnd']\n kwargs[\"window\"] = False\n\n try:\n kwargs[\"window\"] = int(argdict['closestFlag'])\n except:\n pass\n\n if not kwargs[\"window\"]:\n if argdict['mjd'] and argdict['closestFlag'] == \"before\":\n kwargs[\"mjdEnd\"] = argdict['mjd']\n elif argdict['mjd'] and argdict['closestFlag'] == \"after\":\n kwargs[\"mjdStart\"] = argdict['mjd']\n else:\n if argdict['mjd'] and kwargs[\"window\"] < 0:\n kwargs[\"mjdEnd\"] = mjd\n elif argdict['mjd'] and kwargs[\"window\"] > 0:\n kwargs[\"mjdStart\"] = mjd\n\n # DOWNLOAD LOCATION\n if argdict['downloadFolderFlag']:\n home = expanduser(\"~\")\n downloadFolderFlag = argdict['downloadFolderFlag'].replace(\"~\", home)\n kwargs[\"downloadDirectory\"] = downloadFolderFlag\n\n # xt-kwarg_key_and_value\n\n # DOWNLOAD THE IMAGES\n images = downloader(**kwargs)\n fitsPaths, jpegPaths, colorPath = images.get()\n jpegPaths += colorPath\n\n # POST-DOWNLOAD PROCESS IMAGES\n kwargs = {}\n kwargs[\"log\"] = log\n kwargs[\"settings\"] = settings\n # WIDTH OPTION\n kwargs[\"arcsecSize\"] = 60\n if argdict['widthFlag']:\n kwargs[\"arcsecSize\"] = float(argdict['widthFlag']) * 60.\n\n # ANNOTATE JPEG OPTIONS\n kwargs[\"crosshairs\"] = True # DEFAULT\n kwargs[\"scale\"] = True\n if argdict['annotateFlag'] == False and argdict['noannotateFlag'] == True:\n kwargs[\"crosshairs\"] = False # DEFAULT\n kwargs[\"scale\"] = False\n\n # INVERT OPTIONS\n kwargs[\"invert\"] = False # DEFAULT\n if argdict['invertFlag'] == True and argdict['noinvertFlag'] == False:\n kwargs[\"invert\"] = True\n\n # GREYSCALE OPTIONS\n kwargs[\"greyscale\"] = False # DEFAULT\n if argdict['greyscaleFlag'] == True and argdict['nogreyscaleFlag'] == False:\n kwargs[\"greyscale\"] = True\n\n # TRANSIENT DOT OPTIONS\n kwargs[\"transient\"] = False # DEFAULT\n if argdict['transientFlag'] == True and argdict['notransientFlag'] == False:\n kwargs[\"transient\"] = True\n\n for j in jpegPaths:\n kwargs[\"imagePath\"] = j\n\n # kwargs[\"transient\"] = False\n\n # kwargs[\"invert\"] = False\n # kwargs[\"greyscale\"] = False\n oneImage = image(**kwargs)\n oneImage.get()\n\n # CALL FUNCTIONS/OBJECTS\n\n if \"dbConn\" in locals() and dbConn:\n dbConn.commit()\n dbConn.close()\n ## FINISH LOGGING ##\n endTime = times.get_now_sql_datetime()\n runningTime = times.calculate_time_difference(startTime, endTime)\n log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %\n (endTime, runningTime, ))\n\n return", "title": "" }, { "docid": "9cfc8caf73e0feac3ff58172854bbd68", "score": "0.5703492", "text": "def main(config_location):\n param = af.ParamParser()\n\n # Create artery network\n an = af.ArteryNetwork(param)\n\n # Solve problem and store data\n an.solve()", "title": "" }, { "docid": "b2fbe374c60106763d0f1078a63025e7", "score": "0.5690887", "text": "def main():\n global CONFIG\n CONFIG = 
get_configuration_from_command_line_args()\n try:\n for line in ls_lines():\n print(line)\n except (FileNotFoundError, PermissionError) as e:\n print(e)\n sys.exit(1)\n except Exception as e:\n print(e)\n sys.exit(2)\n sys.exit(0)", "title": "" }, { "docid": "fd0e15a8f4a3b79dd47de2efb306072d", "score": "0.56873715", "text": "def main():\n # start cluster analysis\n clusters = cluster_analysis()\n return", "title": "" }, { "docid": "7d9d103bc66cee870d990d9f22b1764b", "score": "0.5687207", "text": "def main():\n # Process the command-line arguments\n doc = (\n \"Calculate the elastic properties of a fibrous composite laminate. \"\n \"See the manual (lamprop-manual.pdf) for more in-depth information.\"\n )\n opts = argparse.ArgumentParser(prog=\"lamprop\", description=doc)\n group = opts.add_mutually_exclusive_group()\n group.add_argument(\n \"-i\",\n \"--info\",\n action=\"store_true\",\n help=\"show information about source file (the default is not to)\",\n )\n group.add_argument(\n \"-l\",\n \"--latex\",\n action=\"store_true\",\n help=\"generate LaTeX output (the default is plain text)\",\n )\n group.add_argument(\"-H\", \"--html\", action=\"store_true\", help=\"generate HTML output\")\n opts.add_argument(\n \"-e\",\n \"--eng\",\n action=\"store_true\",\n help=\"output only the engineering properties\",\n )\n opts.add_argument(\n \"-m\",\n \"--mat\",\n action=\"store_true\",\n help=\"output only the ABD matrix and stiffness tensor\",\n )\n opts.add_argument(\n \"-f\", \"--fea\", action=\"store_true\", help=\"output only material data for FEA\"\n )\n group = opts.add_mutually_exclusive_group()\n group.add_argument(\n \"-L\", \"--license\", action=LicenseAction, nargs=0, help=\"print the license\"\n )\n group.add_argument(\"-v\", \"--version\", action=\"version\", version=lp.__version__)\n opts.add_argument(\n \"--log\",\n default=\"warning\",\n choices=[\"debug\", \"info\", \"warning\", \"error\"],\n help=\"logging level (defaults to 'warning')\",\n )\n opts.add_argument(\n \"files\", metavar=\"file\", nargs=\"*\", help=\"one or more files to process\"\n )\n args = opts.parse_args(sys.argv[1:])\n logging.basicConfig(\n level=getattr(logging, args.log.upper(), None),\n format=\"%(levelname)s: %(message)s\",\n )\n del opts, group\n if args.mat is False and args.eng is False and args.fea is False:\n args.eng = True\n args.mat = True\n args.fea = True\n # No files given to process.\n if len(args.files) == 0:\n sys.exit(1)\n # Set the output method.\n out = lp.text_output\n if args.latex:\n out = lp.latex_output\n elif args.html:\n out = lp.html_output\n # Force utf-8 encoding for stdout on ms-windows.\n # Because redirected output uses cp1252 by default.\n if os.name == \"nt\":\n sys.stdout.reconfigure(encoding=\"utf-8\")\n for f in args.files:\n logging.info(\"processing file '{}'\".format(f))\n laminates = lp.parse(f)\n if args.info and lp.info:\n print(f'Information for \"{f}\":')\n for ln in lp.info:\n print(ln)\n print()\n if lp.warn:\n print(f'Warnings for \"{f}\":')\n for ln in lp.warn:\n print(ln)\n print()\n for curlam in laminates:\n print(*out(curlam, args.eng, args.mat, args.fea), sep=\"\\n\")", "title": "" }, { "docid": "db84ebe5716040ebd81902fff0b41cb5", "score": "0.5673952", "text": "def main( ):\n parser = argparse.ArgumentParser(description=\"Count major codes\")\n parser.add_argument('majors', type=argparse.FileType('r'),\n help=\"A text file containing major codes, one major code per line.\")\n args = parser.parse_args() # gets arguments from command line\n majors_file = 
args.majors\n count_codes(majors_file)", "title": "" }, { "docid": "3037866dbb4d83e949ffdc3cb3c77656", "score": "0.56514245", "text": "def main():\n return 'daoserver'", "title": "" }, { "docid": "e74896eb1e8ec3f7aadb4933b846d810", "score": "0.564998", "text": "def main():\n parser = argparse.ArgumentParser(\n description='Load data into the Neo4j database for the Alliance of Genome Resources.'\n )\n parser.add_argument(\n '-c',\n '--config', help='Specify the filename of the YAML config. It must reside in the src/config/ directory',\n default='default.yml'\n )\n parser.add_argument('-v',\n '--verbose',\n help='Enable DEBUG mode for logging.',\n action='store_true')\n args = parser.parse_args()\n\n # set context info\n context_info = ContextInfo()\n context_info.config_file_location = os.path.abspath('src/config/' + args.config)\n if args.verbose:\n context_info.env[\"DEBUG\"] = True\n\n debug_level = logging.DEBUG if context_info.env[\"DEBUG\"] else logging.INFO\n\n coloredlogs.install(level=debug_level,\n fmt='%(asctime)s %(levelname)s: %(name)s:%(lineno)d: %(message)s',\n field_styles={\n 'asctime': {'color': 'green'},\n 'hostname': {'color': 'magenta'},\n 'levelname': {'color': 'white', 'bold': True},\n 'name': {'color': 'blue'},\n 'programname': {'color': 'cyan'}\n })\n\n logger = logging.getLogger(__name__)\n logging.getLogger(\"ontobio\").setLevel(logging.ERROR)\n\n AggregateLoader(args, logger, context_info).run_loader()", "title": "" }, { "docid": "c2166c86a540913a01f3dc8c9b3a17cd", "score": "0.5635628", "text": "def main():\n parser = cmdline_parser()\n args = parser.parse_args()\n\n if args.check is True:\n depen_list = [\"samtools\", \"bamtools\", \"R\", \"featurecounts\",\n \"hisat2\", \"star\", \"faqcs\", \"stringtie\",\n \"bedtools\", \"diamond\"]\n depen_list.sort()\n check_depen(depen_list)\n check_pydepen()\n sys.exit()\n\n #TODO: figure out why this is not working\n os.environ[\"LUIGI_CONFIG_PATH\"] = args.config\n\n # setting up logging files\n logger = create_logger(args.WORKDIR)\n\n # get parameters from the config file\n paras = parse_config(args.config)\n\n # check if correct options are selected\n check_input(paras, parser=parser)\n\n # getting absolute path of workdir\n workdir = os.path.abspath(args.WORKDIR)\n\n if check_exp_design(args.EXPDSN) is False: # check if experimental design file is OK\n sys.exit(\"Your experimental design file has formatting problems!\")\n\n # convert exerimental design file to a dictionary object\n samp_dic = CheckDesign(args.EXPDSN).extract_sample_fastqs()\n\n no_of_jobs = 1\n\n # assigns filenames for databases if not specified.\n db = check_aligner(aligner=paras[\"aligner\"],\n hisat_index=paras[\"hisat_index\"],\n workdir=workdir, star_db=paras[\"star_index\"])\n\n # TODO move this to log file\n summarize_run(aligner=paras[\"aligner\"]) # print run information in screen\n\n # assign universal fasta and gff variable\n if paras[\"kingdom\"] in [\"prokarya\", \"eukarya\"]:\n if paras[\"kingdom\"] == \"prokarya\":\n fasta = paras[\"fasta_prok\"]\n gff = paras[\"gff_prok\"]\n elif paras[\"kingdom\"] == \"eukarya\":\n fasta = paras[\"fasta_euk\"]\n gff = paras[\"gff_euk\"]\n single_seq = SingleSeq(qc=paras[\"qc\"],\n fastq_dic=samp_dic,\n aligner=paras[\"aligner\"],\n ref_fasta=fasta,\n num_cpus=paras[\"threads\"],\n local_scheduler=True,\n hisat_index=db,\n workdir=workdir,\n kingdom=paras[\"kingdom\"],\n no_of_jobs=no_of_jobs,\n p_value=paras[\"q_value\"],\n exp_desn_file=args.EXPDSN,\n stardb_dir=db,\n emap_dir=paras[\"emap_db\"],\n 
gff_file=gff,\n pathway=paras[\"pathway\"])\n\n # step 1, run qc\n qc_dic = single_seq.run_faqc()\n\n # step 2, create database, hisat2 or stardb\n single_seq.create_db()\n\n # step 3, map reads, hisat2 or stardb\n single_seq.map_reads(qc_dic=qc_dic)\n\n # step 4, summarize the mapped reads\n single_seq.map_summarize()\n\n # step 5, if needed\n if paras[\"novel\"] is True: # if novel regions are to be reported\n single_seq.extract_pp() # extract properly paired reads\n single_seq.NovelRegions() # find novel regions\n single_seq.create_new_gff() # create a new GFF file that has novel region info\n gff = os.path.join(workdir, \"processes\", \"novel\", \"updated.gff\") # update gff\n # step 6, run read counts\n single_seq.feature_count(new_gff=gff)\n single_seq.merge_stringtie(new_gff=gff)\n\n # find diff expressed gene using edgeR\n if \"edgeR\" in paras[\"method\"]:\n single_seq.run_edger()\n # use DESeq2\n if \"DESeq2\" in paras[\"method\"]:\n single_seq.run_deseq2()\n if \"ballgown\" in paras[\"method\"]:\n single_seq.restringtie()\n single_seq.run_ballgown()\n\n # run opaver pathway analysis\n if paras[\"pathway\"] is True:\n # run emapper\n single_seq.run_emapper(new_gff=gff)\n if \"edgeR\" in paras[\"method\"]:\n single_seq.run_opaver(method=\"edgeR\")\n if \"DESeq2\" in paras[\"method\"]:\n single_seq.run_opaver(method=\"DESeq2\")\n\n # summarize all results into one big json file.\n single_seq.summ_json(new_gff=gff, method=paras[\"method\"],\n NovelRegions=paras[\"novel\"])\n\n elif paras[\"kingdom\"] == \"both\":\n euk_fasta = paras[\"fasta_euk\"]\n euk_gff = paras[\"gff_euk\"]\n prok_fasta = paras[\"fasta_prok\"]\n prok_gff = paras[\"gff_prok\"]\n\n dual_seq = DualSeq(qc=paras[\"qc\"],\n fastq_dic=samp_dic, \n kingdom=paras[\"kingdom\"],\n aligner=paras[\"aligner\"],\n euk_fasta=paras[\"fasta_euk\"],\n prok_fasta=paras[\"fasta_prok\"],\n prok_gff=paras[\"gff_prok\"],\n euk_gff=paras[\"gff_euk\"],\n workdir=workdir,\n p_value=paras[\"q_value\"],\n exp_desn_file=args.EXPDSN,\n hisat_index=db,\n local_scheduler=True,\n pathway=paras[\"pathway\"],\n num_cpus=paras[\"threads\"],\n stardb_dir = paras[\"star_index\"],\n emap_dir=paras[\"emap_db\"])\n\n # step 1, run qc\n qc_dic = dual_seq.run_faqc()\n \n # step 2, create database, hisat2 or stardb\n dual_seq.create_db()\n\n # step 3, map reads, hisat2 or stardb\n dual_seq.map_reads()\n\n # step 4, summarize the mapped reads\n dual_seq.map_summarize()\n dual_seq.split_prokeuk()\n\n # step 5, if needed\n if paras[\"novel\"] is True: # if novel regions are to be reported\n dual_seq.extract_pp() # extract properly paired reads\n dual_seq.NovelRegions() # find novel regions\n dual_seq.create_new_gff() # create a new GFF file that has novel region info\n euk_gff = os.path.join(workdir,\n \"processes\", \"novel\",\n \"euk_updated.gff\") # update gff\n prok_gff = os.path.join(workdir, \"processes\", \"novel\",\n \"prok_updated.gff\") # update gff\n \n # step 6, run read counts\n print(\"Counting eukaryotes\")\n dual_seq.feature_counts(new_gff=euk_gff, kingdom=\"eukarya\")\n dual_seq.feature_counts(new_gff=prok_gff, kingdom=\"prokarya\")\n dual_seq.merge_stringtie(new_gff=prok_gff + \",\" + euk_gff)\n\n # find diff expressed gene using edgeR\n if \"edgeR\" in paras[\"method\"]:\n dual_seq.run_edger(kingdom=\"eukarya\")\n dual_seq.run_edger(kingdom=\"prokarya\")\n if \"DESeq2\" in paras[\"method\"]:\n dual_seq.run_deseq2(kingdom=\"eukarya\")\n dual_seq.run_deseq2(kingdom=\"prokarya\")\n if \"ballgown\" in paras[\"method\"]:\n 
dual_seq.restringtie()\n dual_seq.run_ballgown()\n\n if paras[\"pathway\"] is True:\n # run emapper\n dual_seq.run_emapper(new_gff=prok_gff, kingdom=\"prokarya\", fasta=prok_fasta)\n dual_seq.run_emapper(new_gff=euk_gff, kingdom=\"eukarya\", fasta=euk_fasta)\n if \"edgeR\" in paras[\"method\"]:\n dual_seq.run_opaver(method=\"edgeR\", kingdom=\"prokarya\")\n dual_seq.run_opaver(method=\"edgeR\", kingdom=\"eukarya\")\n if \"DESeq2\" in paras[\"method\"]:\n dual_seq.run_opaver(method=\"DESeq2\", kingdom=\"prokarya\")\n dual_seq.run_opaver(method=\"DESeq2\", kingdom=\"eukarya\")\n\n # summarize all results into one big json file.\n dual_seq.summ_json(new_gff=euk_gff, method=paras[\"method\"],\n NovelRegions=paras[\"novel\"],\n kingdom=\"eukarya\",\n fasta=euk_fasta)\n dual_seq.summ_json(new_gff=euk_gff, method=paras[\"method\"],\n NovelRegions=paras[\"novel\"],\n kingdom=\"prokarya\",\n fasta=prok_fasta)", "title": "" }, { "docid": "e2338876881ec41fc2d19277660f010d", "score": "0.5627442", "text": "def main():\r\n params = get_configuration(print_diagnostics=True, with_neptune=True, inject_parameters_to_gin=True)\r\n LOG_PATH = os.path.join(params.BASE_PATH, 'tests', params.GAME)\r\n runner = RolloutsRunner(LOG_PATH,create_rainbow_rollouts_agent)\r\n runner.run_experiment()", "title": "" } ]
f12dbbd813dce1dfccea81d087209d5a
Create a simple user that we link all the answers to.
[ { "docid": "f841e3174f163c6984457f84a2649161", "score": "0.0", "text": "def begin(request):\n request.session.flush() # \"Logout\" from any previous session\n\n if request.method == \"POST\":\n new_name = request.POST[\"name\"]\n new_motto = request.POST[\"motto\"]\n # Try this again if they didn't put in a name\n if not new_name or not new_motto:\n context = {\"error_message\": \"Please pick a name and motto for yourself!\"}\n return render(request, \"ggpoll/begin.html\", context)\n # Create the new user and store the user info into the Session\n # then redirect to the first question\n new_user = GGUser(name=new_name, motto=new_motto)\n new_user.save()\n request.session['user_id'] = new_user.id # 'log in' with new user\n first_question = GGQuestion.objects.all()[0]\n return HttpResponseRedirect(reverse(\"ggpoll:ask_question\", args=(first_question.id, )))\n return render(request, \"ggpoll/begin.html\")", "title": "" } ]
[ { "docid": "c40661dc617572feba289b687098d62c", "score": "0.7658639", "text": "def create_user(self, user_id: str, user_name: str) -> None:", "title": "" }, { "docid": "32f5b47e07cd7cd2b99bba64fa162310", "score": "0.7454858", "text": "def createUser():\n user_data = fb.get_initial_user_data()\n raise NotImplementedError", "title": "" }, { "docid": "cd8475fe2dd3bc95aa3e4373508424b9", "score": "0.72297966", "text": "def create_user(\n self,\n user: User,\n ) -> None:\n ...", "title": "" }, { "docid": "ae44b8b6f5f40e693bb63ecf8660983e", "score": "0.7228752", "text": "def create_user():\n\n # Make the new user JSON from form data\n new_user = {\n \"username\": request.form.get('username'),\n \"password\": request.form.get('password'),\n \"bio\": request.form.get('bio'),\n }\n\n # Insert into PyMongo database\n db.users.insert_one(new_user)\n\n # Render some HTML\n return \"\"\"\n User created successfully!\n <a href=\"/users\">Back to Home</a>\n \"\"\"", "title": "" }, { "docid": "1bf872781698788db2659a6f9af33942", "score": "0.7165631", "text": "def sample_user():\n return get_user_model()\\\n .objects.\\\n create_user({'email': fake.email(), 'password': fake.password()})", "title": "" }, { "docid": "0121f67127784198ccbe51393bd12f5a", "score": "0.7164468", "text": "def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "bc9a94573866bc16c3c7a11df748f484", "score": "0.7090674", "text": "def sample_user(email='test@recipeapi.com', password='somethingrandom'):\n return get_user_model().objects.create_user(email=email, password=password)", "title": "" }, { "docid": "71a6d34f87b4fec35f175451a28a63d8", "score": "0.7038464", "text": "def entry():\n new_user = User(username='myuser',\n email='myuser@example.com',\n created=dt.now(),\n bio=\"In West Philadelphia born and raised, on the playground is where I spent most of my days\",\n admin=False\n )\n db.session.add(new_user)\n db.session.commit()\n return make_response(\"User created!\")", "title": "" }, { "docid": "7144ec956d7748793fd7ac99e6255e41", "score": "0.7023137", "text": "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "7144ec956d7748793fd7ac99e6255e41", "score": "0.7023137", "text": "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "d1030726edadce9706609bd1b1f868d6", "score": "0.6992834", "text": "def sample_user(email='test@londonappdev.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "d1030726edadce9706609bd1b1f868d6", "score": "0.6992834", "text": "def sample_user(email='test@londonappdev.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "31ae0d2f6eca9e31861496894c1e1462", "score": "0.6927966", "text": "def sample_user(email='test@testdomain.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "3cde5f7c1a7227f69a76666cd92abbdd", "score": "0.69101703", "text": "def create_user(self, name='Some name'):\n return self.database.insert(name)", "title": "" }, { "docid": "9580115af87da74f76e86883ee807932", "score": "0.68891996", "text": "def sample_user(phone_number='4086432477', password='testpass'):\n return 
get_user_model().objects.create_user(phone_number, password)", "title": "" }, { "docid": "da914aa06d2faf6057f3473728314bac", "score": "0.6849761", "text": "def create_sample_user(email=\"test@firstapp.com\", password=\"test1\"):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "d310e4ae45928ccb60c82c9572fb57da", "score": "0.68457526", "text": "def _create_user(self):\n\n msg = QMessageBox()\n user = kks.create_user(self._username_line.text(), self._password_line.text())\n \n if not user:\n msg.setText('Käyttäjätunnus on jo käytössä')\n msg.exec_()\n else:\n msg.setText('Uusi käyttäjä luotu')\n msg.exec_()", "title": "" }, { "docid": "0aa13ed1e1136f754de8e5124000d701", "score": "0.684187", "text": "def admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n return reply", "title": "" }, { "docid": "58d462e89e1fa02990ce29cdeb72043a", "score": "0.68353564", "text": "def create_new_user():\n\n return render_template('create_new_user.html')", "title": "" }, { "docid": "ee862f60b59a9532b3352aeeaf7509f9", "score": "0.68202496", "text": "def create_user(context,galaxy,email,public_name,password,only_check,\n message_template):\n # Check message template is a .mako file\n if message_template:\n if not message_template.endswith(\".mako\"):\n logger.critical(\"Message template '%s' is not a .mako file\"\n % message_template)\n sys.exit(1)\n # Get a Galaxy instance\n gi = context.galaxy_instance(galaxy)\n if gi is None:\n logger.critical(\"Failed to connect to Galaxy instance\")\n sys.exit(1)\n # Sort out email and public name\n if public_name:\n if not users.check_username_format(public_name):\n logger.critical(\"Invalid public name: must contain only \"\n \"lower-case letters, numbers and '-'\")\n sys.exit(1)\n else:\n # No public name supplied, make from email address\n public_name = users.get_username_from_login(email)\n # Create user\n print(\"Email : %s\" % email)\n print(\"Name : %s\" % public_name)\n sys.exit(users.create_user(gi,email,public_name,password,\n only_check=only_check,\n mako_template=message_template))", "title": "" }, { "docid": "bad2afd7b85622a4f1501681f04a73bd", "score": "0.68134016", "text": "def new_user(self):\n username = self.gui.username_entry.get()\n salt = bcrypt.gensalt()\n while not self.gui.client.dbcom.check_salt(salt):\n salt = bcrypt.gensalt()\n password = bcrypt.hashpw(self.gui.password_entry.get().encode(), salt)\n email = self.gui.mail_entry.get()\n phone_number = self.gui.number_entry.get()\n self.gui.client.dbcom.add_user(username, salt, password, email, phone_number)\n self.gui.build_log_in_ui()", "title": "" }, { "docid": "df065ae1bb66009f3880b761a8b871a4", "score": "0.68031585", "text": "def create_new_user_node(i):\n pass", "title": "" }, { "docid": "d7d099ce2dc3b0105b83c17b30d3a70c", "score": "0.67799646", "text": "def new_user(self, **kwargs):\n num = self._user_numbers.pop()\n kwargs.setdefault(\"first_name\", \"FIRST%04d\" % num)\n kwargs.setdefault(\"last_name\", \"LAST%04d\" % num)\n kwargs.setdefault(\"username\", \"user%04d\" % num)\n user = User(**kwargs)\n user.save()\n self._users.append(user)\n return user", "title": "" }, { "docid": 
"2ffefdc305737cdba44876e77df1a479", "score": "0.67655015", "text": "def new_user():\n\n return render_template(\"user_form.html\")", "title": "" }, { "docid": "cf022b77565799d4b87c4c2cf45bfc05", "score": "0.6746994", "text": "def create_askbot_user(zd_user):\n if zd_user.email is None:\n username = zd_user.name.replace(\" \", \"_\").lower()\n email = ''\n else:\n username = zd_user.email\n email = zd_user.email\n username = ensure_unique_username(username[:30])\n\n # last_seen cannot be null\n last_seen = zd_user.last_login\n if not last_seen:\n last_seen = zd_user.created_at\n\n # lookup organization name (todo: cache this)\n about = \"\"\n if zd_user.organization_id:\n try:\n org = zendesk_models.Organization.objects.get(organization_id=zd_user.organization_id)\n about = org.name\n except zendesk_models.Organization.DoesNotExist:\n pass\n\n ab_user = askbot_models.User(\n username = username,\n first_name = zd_user.name.rpartition(' ')[0].strip()[:30],\n last_name = zd_user.name.rpartition(' ')[2].strip()[:30],\n real_name = zd_user.name[:100],\n email = email,\n email_isvalid = zd_user.is_verified,\n date_joined = zd_user.created_at,\n last_seen = last_seen,\n is_active = zd_user.is_active,\n about = about,\n )\n ab_user.save()\n return ab_user", "title": "" }, { "docid": "c8fee5047f2c9525534ff0983cc091b4", "score": "0.67408246", "text": "def _create_user(username,telefone,nome,endereço,data_de_nascimento):", "title": "" }, { "docid": "2cdd0f157081e922b9a16a8f132fb790", "score": "0.67324775", "text": "def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")", "title": "" }, { "docid": "e18f170f90740889ed20b07a68c1c9d5", "score": "0.673237", "text": "def create_new_user(self, redditor):\n print(\"Creating user: \" + str(redditor))\n\n new_user_item = {\n 'user_id' : redditor.id,\n 'username' : redditor.name,\n 'submission_score' : 0,\n 'distribution_score' :0,\n 'total_score' : 0\n }\n if self.put_item(DataAccess.Tables.USERS, new_user_item):\n print(\"Created user: \" + str(redditor))\n return True\n else:\n print(\"Failed to create user: \" + str(redditor))\n return False", "title": "" }, { "docid": "68f8a28bf3ec4ce6a4527fd2d38973b9", "score": "0.6729003", "text": "def create_sample_user(email='test@webapps.agency', password='password123'):\n return get_user_model().objects.create_user(email, password)", "title": "" }, { "docid": "3bdaafe6c0ff683d4f193ba80886bedf", "score": "0.6703305", "text": "def _create_user(self, email, password):\n self.parent.browser.find_element_by_id('create-user-button').click()\n time.sleep(DELAY)\n self.parent.browser.find_element_by_id('input-username').send_keys(email)\n time.sleep(DELAY)\n self.parent.browser.find_element_by_id('input-password').send_keys(password)\n time.sleep(DELAY)\n self.parent.browser.find_element_by_id('create-user-commit').click()\n print(\"user with [{}], [{}] created\".format(email, password))", "title": "" }, { "docid": "88baf383dc76ab16135271232abd4d35", "score": "0.6692833", "text": "def create_user(fname,sname,number,password):\n new_user=User(fname,sname,number,password)\n return new_user", "title": "" }, { "docid": "d92155b00dce49485f1c3abed4f2715c", "score": "0.6691482", "text": "def create_user(name, passw):\n new_user = User(name, passw)\n return new_user", "title": "" }, { "docid": "9a9befb3540c8eb46cba6f293ba5302f", "score": "0.66777927", "text": "def create_user(fname, lname, email, password, consent=False):\n\n user = User(fname=fname, lname=lname, email=email, 
password=password, consent=False) # is consent default value added here as well as in arguments?\n\n db.session.add(user)\n db.session.commit()\n\n return user", "title": "" }, { "docid": "2794fd3617db7cb79400305de31a6aae", "score": "0.66645825", "text": "def testCreate1User(self):\n request = self.factory.post(\"/api/user/create\", json.dumps({ 'username' : 'Tom',\n 'password' : '123456',\n 'user_type' : 'customer',\n 'email' : 'tommeng@berkeley.edu',\n }), content_type='application/json')\n response = views.create_user(request);\n respCreate = self.getDataFromResponse(response)\n\n self.assertSuccessResponse(respCreate)\n self.assertEquals(\"Tom\", respCreate['username'], \"Username wrong\")\n self.assertEquals(\"customer\", respCreate['user_type'])", "title": "" }, { "docid": "f0fa4f9181e412ccd094cbc58c9909aa", "score": "0.6648792", "text": "def test_create_user(self):\n pass", "title": "" }, { "docid": "537ad424e96fdd408cadd158c2e07b49", "score": "0.66471165", "text": "def new_user(cls, user_name, email):\n user = User(name=user_name, email=email)\n user.put()\n score = Score(parent=user.key, date=datetime.now(), attempts=0)\n score.put()\n return 'User {} created!'.format(user_name)", "title": "" }, { "docid": "619dd5bad8b29b3c3dd02f094a3dcefa", "score": "0.6639765", "text": "async def create_user_open(user_in: schemas.UserCreateMe):\n user = await crud.user.get_by_email(email=user_in.email)\n if user:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"The user with this username already exists in the system\",\n )\n user = await crud.user.create_me(obj_in=user_in)\n return await schemas.UserPydantic.from_tortoise_orm(user)", "title": "" }, { "docid": "a5ec0e6bd78a0b648535eb20644f0512", "score": "0.6637918", "text": "def create_user(fullname, username, password):\n new_user = User(fullname, username, password)\n return new_user", "title": "" }, { "docid": "c65acb9091cd72fd493502f9140f9208", "score": "0.6635635", "text": "def userCreate(self, request, tag):\n userCreator = liveform.LiveForm(\n self.createUser,\n [liveform.Parameter(\n \"localpart\",\n liveform.TEXT_INPUT,\n unicode,\n \"localpart\"),\n liveform.Parameter(\n \"domain\",\n liveform.TEXT_INPUT,\n unicode,\n \"domain\"),\n liveform.Parameter(\n \"password\",\n liveform.PASSWORD_INPUT,\n unicode,\n \"password\")])\n userCreator.setFragmentParent(self)\n return userCreator", "title": "" }, { "docid": "adb20631bd7cc235e7e2c9e07961b275", "score": "0.66353905", "text": "def create_empty_user():\n return User(email=\"\", nick=\"\", level=User.Level.Client)", "title": "" }, { "docid": "fd85e2af2da79e7577fb232ded1fcf6a", "score": "0.6633111", "text": "def create_valid_user_test(self):", "title": "" }, { "docid": "e7354e58a00fcbc8760e89d183e74283", "score": "0.6626516", "text": "def new_user(self, request):\r\n user = get_user_model()()\r\n return user", "title": "" }, { "docid": "1a4e86afd86ebfb8fc145040f6fc99d0", "score": "0.6608983", "text": "def create_initial_user():\n\n # check if there is any user in the db\n if User.query.count() == 0:\n # create a user\n user = User(username=\"admin\", password=hashlib.sha256(\"admin\".encode(\"utf8\")).hexdigest(),\n email=\"webmaster@example.org\")\n user.is_admin = True\n db.session.add(user)\n db.session.commit()", "title": "" }, { "docid": "d3fcc43785b4dee883cfe7dea5f13e95", "score": "0.66077644", "text": "def create_user():\n # check the token\n token = request.form['token']\n if token != ACCEPT_TOKEN:\n return 'Error: not authorized'\n\n u = User(es)\n 
return u.create_from_req(request)", "title": "" }, { "docid": "2025ed72a4512000278c999195da4cdc", "score": "0.65977323", "text": "def create_user(fname, lname, password):\n new_user = User(fname, lname,password)\n return new_user", "title": "" }, { "docid": "193ef5d2be5026820e55cbc4addeeae2", "score": "0.6583698", "text": "def post(self, user_id=None):\n name = self.get_argument(\"name\", \"\")\n email = self.get_argument(\"email_address\", \"\")\n password = self.get_argument(\"password\", \"\")\n new_user = self.create_user(name, email, password)\n self.write(new_user.to_dict())", "title": "" }, { "docid": "200135420c1600bd526ff1fc6c262b37", "score": "0.6582868", "text": "def create_user(user_data: dict) -> User:\n user = User()\n user.admin = False\n user.name = user_data[\"display_name\"]\n user.external_id = user_data[\"id\"]\n return user", "title": "" }, { "docid": "0a696796bc8685cb6ac69ad4887af3ce", "score": "0.6565887", "text": "def create(self, data):\n return User.objects.create_user(**data)", "title": "" }, { "docid": "6e9db6bc9ce7f3781b7540ca82fbe10e", "score": "0.6561448", "text": "def create_user(backend, details, response, uid, username, user=None, *args, **kwargs):\n if user:\n return {'user': user}\n if not username:\n return None\n\n request = kwargs['request']\n user = UserSocialAuth.create_user(username = username)\n# profile = UserProfile.objects.create_user(user = user)\n\n return {\n 'user': user,\n 'is_new': True\n }", "title": "" }, { "docid": "96e671b4273c7888f75bd6fcc1cd1d33", "score": "0.65509975", "text": "def _create_user(self, data):\n username = data[\"username\"]\n self._signup_user(data)\n validation_mail = mail.outbox[-1]\n self.assertTrue(\n \"signup\" in validation_mail.subject,\n \"There was no email\"\n \" sent which had 'signup' in the subject line\",\n )\n # validate the user with the link that was emailed\n pattern = \"/testserver(.*)\" + PI_LINE_END_REGEX\n validationlink_result = re.search(\n pattern, validation_mail.body, re.IGNORECASE\n )\n self.assertTrue(\n validationlink_result,\n \"could not find any link in\"\n \"registration email. Tried to match pattern '{}' but found no match in\"\n \"this email: {}{}\".format(\n pattern, PI_LINE_END_REGEX, validation_mail.body\n ),\n )\n validationlink = validationlink_result.group(1).strip()\n response = self.client.get(validationlink)\n self.assertEqual(\n response.status_code,\n 302,\n \"Could not load user validation link. Expected\"\n \" a redirect (HTTP 302), got HTTP {} instead\".format(\n response.status_code\n ),\n )\n resp = self.client.get(\"/accounts/\" + username + \"/\")\n self.assertEqual(\n resp.status_code,\n 200,\n \"Could not access user account after using\"\n \"validation link! 
Expected 200, got {} instead\".format(\n resp.status_code\n ),\n )\n User = get_user_model() # noqa: N806\n query_result = User.objects.filter(username=username)\n return query_result[0]", "title": "" }, { "docid": "a9f718fd99fa61731669d3ecf918ee3c", "score": "0.65470034", "text": "def _create_user(self):\n self.user = User.objects.create_user('test', 'test@test.test', 'test')\n self.api_client.login(\n username='test',\n password='test',\n )", "title": "" }, { "docid": "108f120df0fa6ca83aafd9bc0c87f356", "score": "0.6538275", "text": "def test_create_user(self):\n new_user = {\n \"_id\": ObjectId(\"000000000000000000000002\"),\n \"name\": \"Polly O'Donovan\",\n \"username\": \"pollypocket\",\n \"my_recipes\": [],\n \"favourite_recipes\": []\n }\n app.add_user(new_user)\n self.assertIn(new_user, list(app.get_users()))\n app.delete_user('000000000000000000000002')", "title": "" }, { "docid": "e839f4f52e4b71bd3fdca345790076bb", "score": "0.65374136", "text": "def createuser(username=None, password=None, admin=None):\n u_name = username\n pw = password\n make_admin = admin\n\n if u_name is None:\n while True:\n u_name = prompt(\"User Name\")\n user = User.query.filter(User.name==u_name).first()\n if user is not None:\n print(\"{}: USER ALREADY EXISTS!\".format(u_name))\n else:\n break\n\n if pw is None:\n pw = prompt_pass(\"Password\")\n while True:\n pw_again = prompt_pass(\"Password Again\")\n if pw != pw_again:\n print(\"PASSWORDS DO NOT MATCH!\")\n else:\n break\n\n if make_admin is None:\n make_admin = prompt_bool(\"Do you wish to make {} an administrator?\".\\\n format(u_name))\n\n user = User(name=u_name, password=\"\", admin=make_admin, enabled=True)\n user.set_password(pw)\n\n db.session.add(user)\n db.session.commit()\n print(\"Successfully created '{}' with ID: {}.\".format(user.name, user.id))", "title": "" }, { "docid": "a6f2d85cfa166855cd5d540d43452f9f", "score": "0.6529134", "text": "def user():\n return User()", "title": "" }, { "docid": "36da2f39ee6de223831a14181e018ddd", "score": "0.6527107", "text": "def create(usr, level):\n toret = User()\n\n toret.email = usr.email()\n toret.nick = usr.nickname()\n toret.level = level\n\n return toret", "title": "" }, { "docid": "a1dd381de65afc8bc914d2885f946173", "score": "0.6525508", "text": "def create_new_user( self, trans, **kwd ):\n return trans.response.send_redirect( web.url_for( controller='user',\n action='create_non_feide_user_in_GOLD',\n cntrller='project_admin' ) )", "title": "" }, { "docid": "9ec00f3dae6522b9afe610072c8fcc76", "score": "0.6521141", "text": "def create_example_users(self):\n # TODO: implement the method to add the example users by using \"add_user\" function and return a string that\n # shows it has been added successfully write your code below\n # write your code below\n pass", "title": "" }, { "docid": "3412d63f0dc16ecb34f5599cd9866ddb", "score": "0.6511392", "text": "def create_user(self):\n data = {\n \"username\": \"TestUserdsa\" + str(random.randrange(1, 1000000)),\n \"password\": \"Aa12Aa12\",\n \"email\": \"benharushtomer@gmail.com\"\n }\n response = requests.post(self.urls['create_user'],\n data=data)\n print(response.text)\n return self.get_token(data['username'], data['password'],)", "title": "" }, { "docid": "df31516aaae7ab80e40fd0b6e0cae62c", "score": "0.65057933", "text": "def create_user() -> bool:\n\n # TODO: verify len, number, lower and upper\n\n name = input(\"Insert username: \")\n password = Encrypt.encrypt_data(key=constant.KEY, data=input(\"Insert Password: \"))\n email = 
input(\"Insert email: \")\n number = input(\"Insert phone number: \")\n\n # Creating user\n new_user = User(name=name, email=email, number=number)\n\n try:\n # Insert the user into the database\n user_id = conn.create_user(user=new_user, password=password)\n except Exception as e:\n print(\"Error in the Database, \", e)\n return False\n\n if user_id == -1:\n return False\n\n return True", "title": "" }, { "docid": "47b8963597772187399b99c9fae530c7", "score": "0.65010655", "text": "def createUser(self):\r\n json = request.get_json()\r\n user = User(None, json[\"email\"], EncryptionService.hashPassword(json[\"password\"]), json[\"name\"],\r\n None, None, json[\"group_id\"], json[\"waterschap_id\"])\r\n user.addUser()\r\n return make_response(jsonify(user.json()))", "title": "" }, { "docid": "d49772e9654759f4ebfbf1944b6ecb66", "score": "0.64808685", "text": "def create_user():\n return render_template('create_page.html')", "title": "" }, { "docid": "6cae7e0251b9be9a04b2b84c43a7817f", "score": "0.64792997", "text": "def add_user():\n # get form variables from register_form.html\n fname = request.form['fname']\n lname = request.form['lname']\n email = request.form['email']\n password = request.form['password']\n mobile = request.form['mobile']\n\n user_id = add_to_users(fname, lname, email, password, mobile)\n flash('Wanda welcomes you! Please set up your preferences.')\n # add user's first name and user_id to session\n session['fname'] = fname\n session['user_id'] = user_id\n # send confirmation text to user's mobile using Twilio\n send_welcome_text(mobile, user_id)\n return render_template('/preferences.html')", "title": "" }, { "docid": "954db34bc27ceea91f04ef40dff73708", "score": "0.64756787", "text": "def add_user(form):\n username = ws.get_cgi_parameter_str(form, PARAM.USERNAME)\n password_1 = ws.get_cgi_parameter_str(form, PARAM.PASSWORD_1)\n password_2 = ws.get_cgi_parameter_str(form, PARAM.PASSWORD_2)\n must_change_password = ws.get_cgi_parameter_bool(\n form, PARAM.MUST_CHANGE_PASSWORD)\n\n may_use_webviewer = ws.get_cgi_parameter_bool(\n form, PARAM.MAY_USE_WEBVIEWER)\n may_view_other_users_records = ws.get_cgi_parameter_bool(\n form, PARAM.MAY_VIEW_OTHER_USERS_RECORDS)\n view_all_patients_when_unfiltered = ws.get_cgi_parameter_bool(\n form, PARAM.VIEW_ALL_PTS_WHEN_UNFILTERED)\n may_upload = ws.get_cgi_parameter_bool(form, PARAM.MAY_UPLOAD)\n superuser = ws.get_cgi_parameter_bool(form, PARAM.SUPERUSER)\n may_register_devices = ws.get_cgi_parameter_bool(\n form, PARAM.MAY_REGISTER_DEVICES)\n may_use_webstorage = ws.get_cgi_parameter_bool(\n form, PARAM.MAY_USE_WEBSTORAGE)\n may_dump_data = ws.get_cgi_parameter_bool(form, PARAM.MAY_DUMP_DATA)\n may_run_reports = ws.get_cgi_parameter_bool(form, PARAM.MAY_RUN_REPORTS)\n may_add_notes = ws.get_cgi_parameter_bool(form, PARAM.MAY_ADD_NOTES)\n\n user = User(username, False)\n if user.user:\n return user_management_failure_message(\n \"User already exists: \" + username)\n if not is_username_permissible(username):\n return user_management_failure_message(\n \"Invalid username: \" + ws.webify(username))\n if password_1 != password_2:\n return user_management_failure_message(\"Passwords don't mach\")\n if len(password_1) < MINIMUM_PASSWORD_LENGTH:\n return user_management_failure_message(\n \"Password must be at least {} characters\".format(\n MINIMUM_PASSWORD_LENGTH\n ))\n\n user = User(username, True)\n user.set_password(password_1)\n\n user.may_use_webviewer = may_use_webviewer\n user.may_view_other_users_records = 
may_view_other_users_records\n user.view_all_patients_when_unfiltered = view_all_patients_when_unfiltered\n user.may_upload = may_upload\n user.superuser = superuser\n user.may_register_devices = may_register_devices\n user.may_use_webstorage = may_use_webstorage\n user.may_dump_data = may_dump_data\n user.may_run_reports = may_run_reports\n user.may_add_notes = may_add_notes\n\n user.save()\n if must_change_password:\n user.force_password_change()\n\n audit(\n (\n \"User created: {}: \"\n \"may_use_webviewer={}, \"\n \"may_view_other_users_records={}, \"\n \"view_all_patients_when_unfiltered={}, \"\n \"may_upload={}, \"\n \"superuser={}, \"\n \"may_register_devices={}, \"\n \"may_use_webstorage={}, \"\n \"may_dump_data={}, \"\n \"may_run_reports={}, \"\n \"may_add_notes={}, \"\n \"must_change_password={}\"\n ).format(\n user.user,\n may_use_webviewer,\n may_view_other_users_records,\n view_all_patients_when_unfiltered,\n may_upload,\n superuser,\n may_register_devices,\n may_use_webstorage,\n may_dump_data,\n may_run_reports,\n may_add_notes,\n must_change_password\n )\n )\n return user_management_success_message(\"User \" + user.user + \" created\")", "title": "" }, { "docid": "4657cee1d7f3ac5630d6da2adf6c7007", "score": "0.64742255", "text": "def create_user(args):\n args = args_cleaner(args)\n \n # check if username has been passed.\n if args.get('username'): username = args.get('username')\n else: username = input_(msg='username: ')\n \n # check if username already exists.\n if auth.get_user(username):\n print (f\"'{username}' already exists.\")\n return\n \n password = input_(msg=f\"password for '{username}' (hidden): \", pswd=True)\n \n # create the user.\n created = auth.create_user(username, password)\n if not created:\n print ('Something went wrong. 
please try again.')\n return\n \n print (f\"user '{username}' has been created.\")", "title": "" }, { "docid": "ef3e5841194e3b43517076b80d243866", "score": "0.646342", "text": "def create_user(self, **attrs):\n return self._create(user.User, **attrs)", "title": "" }, { "docid": "217e2594f051ecb05875327043984191", "score": "0.6452617", "text": "def create_user(request, user, details, backend, *args, **kwargs):\n # If user already exists, don't create a user.\n if user is not None:\n return None\n\n is_new = False\n user = None\n\n username = '{0}-{1}'.format(details['username'], backend.name)\n email = details.get('email') or details['username']\n user = User.objects.create_user(username=username, email=email,\n password='!')\n is_new = True\n\n return {\n 'user': user,\n 'is_new': is_new\n }", "title": "" }, { "docid": "593c75839170b6f9031a25f6dff92b11", "score": "0.6444597", "text": "def fake_single(self, fake, approved=True):\n user = self.create_user(\n email=fake.email(),\n password=fake.password(),\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n )\n user.save()\n user.memberprofile.new_notify = not approved\n user.memberprofile.date_of_birth = fake.date_time_between(start_date='-99y', end_date='now').date()\n user.memberprofile.gender = random.choice(('m', 'f'))\n user.memberprofile.address = fake.address()\n user.memberprofile.postcode = fake.postcode()\n user.memberprofile.home_phone = fake.phone_number()\n user.memberprofile.mobile_phone = fake.phone_number()\n user.memberprofile.next_of_kin_name = fake.name()\n user.memberprofile.next_of_kin_relation = fake.first_name()\n user.memberprofile.next_of_kin_phone = fake.phone_number()\n user.memberprofile.save()\n return user", "title": "" }, { "docid": "1d7839379860b0c5c49eb89942d1fb3d", "score": "0.6443251", "text": "def create_test_user(id = \"\"):\n username = f\"test_user{id}\"\n email = f\"example{id}@example.com\"\n password = \"password1\"\n return User.objects.create_user(username, email, password)", "title": "" }, { "docid": "ee1dea61eab82c8a780938aa4afd8a2e", "score": "0.6438962", "text": "def test_create_users():\n User(username=\"\", password=\"\", last_trained_on=None, tbl_rating_user_id=0)", "title": "" }, { "docid": "0b46bf67876d91a14d78d8c98a6164a9", "score": "0.64366597", "text": "def create_user(self, name, admin=False):\n return self.create_users([name], admin)", "title": "" }, { "docid": "a442f9396c678d021a49a30354550049", "score": "0.64342165", "text": "async def create_user(self, username, hashed_password, role_id):\n pass", "title": "" }, { "docid": "83feac2cec19518f591aff2932c27929", "score": "0.641548", "text": "def admin_create_user2(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Paul Walker',\n username='walker',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n return reply", "title": "" }, { "docid": "aa9f39ba54fadcc24634c32b9b2d74d3", "score": "0.6414347", "text": "async def create_user(\n context: CommandContext,\n username: str,\n password: str,\n email: str,\n first_name: str,\n last_name: str,\n):\n user = User.objects.create_user(\n username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name,\n )\n runtime_user = RuntimeUser.from_persistent_user(user)\n context.runtime_user 
= runtime_user\n return {'user': {'id': user.id}}", "title": "" }, { "docid": "f34a0bc3f8f643ae1275df5664b79b48", "score": "0.6404594", "text": "def create_user(db: Session, user: schemas.UserCreate) -> None:\n new_user = models.User(name=user.name)\n new_user.password = bcrypt.hashpw(user.password, bcrypt.gensalt())\n db.add(new_user)\n db.commit()", "title": "" }, { "docid": "56b27a1e6d45c0527c10288003954da2", "score": "0.6402819", "text": "def add_one_user(p_user):\n print(' Adding user \"{}\" now.'.format(p_user))\n User._do_user_create(p_user)\n User._update_sudoers(p_user)\n User._create_workspace(p_user)", "title": "" }, { "docid": "65bfc46101a35217ae2cec654f26bd8d", "score": "0.6393311", "text": "def create_a_user(self, username='test', email='info@test.co',\n password='Test123.'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "title": "" }, { "docid": "dc2d4bb691278192512a3bc28da3c758", "score": "0.63884735", "text": "def _create_user(self, email, password, first_name, last_name, **extra_fields):\n if not email:\n raise ValueError('The email cannot be empty.')\n email = self.normalize_email(email)\n user = self.model(email=email, first_name=first_name, last_name=last_name, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n # Send email to newly created user\n subject = 'Welcome to lifTUe!'\n message = 'Welcome to lifTUe!\\n\\n You can start using the application using the following credentials:\\n\\n' \\\n 'Email: ' + email + '\\n Password: ' + password + '\\n\\n You will need to change your password the ' \\\n 'first time you log in.'\n\n send_mail(subject, message, 'noreply@gelift.win.tue.nl', (user.email,), email)\n return user", "title": "" }, { "docid": "683b900e23f13eb37ac73b63c8465e8d", "score": "0.63817245", "text": "def new_user():\n\n return render_template(\"register.html\")", "title": "" }, { "docid": "3cbff842e8fbfe8f9b4609cd812eec10", "score": "0.63801634", "text": "def create_users(self):\n self.user = User._default_manager.create_user(\n email=\"test@example.com\",\n password=\"test\",\n )\n self.superuser = User._default_manager.create_superuser(\n email=\"test2@example.com\",\n password=\"test\",\n )", "title": "" }, { "docid": "de2b5da051d57dad0f4e9c38badc901d", "score": "0.6377397", "text": "def create_user(cls, pseudonym_unique_id, sis_user_id, user_real_first, user_real_last):\n\n payload = {\n 'user[name]': user_real_first + \" \" + user_real_last,\n 'user[short_name]': user_real_first + \" \" + user_real_last,\n 'user[sortable_name]': user_real_last + \",\" + \" \" + user_real_first,\n 'pseudonym[unique_id]': pseudonym_unique_id + '@clark.edu', # ID that will be used to log into the site\n 'pseudonym[sis_user_id]': sis_user_id, # Unique employee identifier. PersonID in prometheus\n\n \"\"\" Due to how often these accounts are potentially being reactivated, I am going to disable the \n notification about account creation. 
We can probably get around this by tracking the latest CCID value \n and, if the value is higher, send confirmation:true else false.\n\n \"\"\"\n # Sends user an email letting them know of account creation (even upon SIS reactivation)\n # 'pseudonym[send_confirmation': 'True', \n \n # Automatically marks user as registered, making their account established and allowing notifications to be \n # sent, even without logging in\n 'user[skip_registration]': 'True',\n 'communication_channel[type]': 'email',\n\n # The email address the user is contacted at\n 'communication_channel[address]': pseudonym_unique_id + '@clark.edu',\n\n # Allows notifications to be sent to the user even if they don't log in\n 'communication_channel[skip_confirmation]': 'True',\n 'enable_sis_reactivation': 'True' # Reactivates a deleted account if the SIS IDs match\n }\n\n try:\n creation_request = super().request(\n 'POST', str(super().base_url+'api/v1/accounts/'+str(super().canvas_account))+'/users', params=payload)\n if 200 != creation_request.status_code:\n super().error_dump('Failed to create user {} {} with status code {}'.format(\n user_real_first, user_real_last, creation_request.status_code))\n return None\n #/// add logging here\n return creation_request.json()\n\n except Exception as e:\n super().error_dump(\n 'Encountered exception <{}> during user creation for {} {}'.format(e, user_real_first, user_real_last))\n return None", "title": "" }, { "docid": "54e5edabd84ecd188d4137012ad0374b", "score": "0.63752645", "text": "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exist!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(request.user_name))", "title": "" }, { "docid": "ed2d5150c5eabfd5ce74506f708b5ef7", "score": "0.6369612", "text": "def create_user():\n username = custom_input(\"Enter user name: \\n\")\n position = custom_input(\"Enter user position (Manager/Salesman): \\n\")\n if position == \"Salesman\" or position == \"Manager\":\n exec_insert_query(\n \"INSERT OR IGNORE INTO users VALUES (NULL,'{}','{}','{}','{}')\".format(username, position, 0, 0))\n else:\n print(\"Incorrect position '{}'.\\nYou should specify position 'Manager' or 'Salesman'\".format(position))", "title": "" }, { "docid": "b27a6767be48b03a35074f2b8a3299e9", "score": "0.6366573", "text": "def create_user(admin=False):\n username = prompt(\"Enter username\")\n email = prompt(\"Enter email\")\n password1 = prompt_pass(\"Enter password\")\n password2 = prompt_pass(\"Re-type password\")\n if password1 == password2:\n new_user = User(\n username=username,\n password=password1,\n email=email\n )\n new_user.is_admin = admin\n db.session.add(new_user)\n db.session.commit()\n print('User {0} successfully created'.format(username))\n else:\n print(\"Error: Passwords don't match\")", "title": "" }, { "docid": "e50e626d184d5046733f5c9e53eca221", "score": "0.6366382", "text": "def create_unverified_user(self):\n self.client.post(\n '/api/users/',\n self.testUser2,\n format='json'\n )", "title": "" }, { "docid": "b211b3d1a0cbc231c4f238413153f14f", "score": "0.63663054", "text": "def create_user() -> ci_demo.User:\n\n u = ci_demo.User.query.filter(ci_demo.User.id == BaseTestCase.user_id).first()\n if u is None:\n # Create user with name test and password test\n u = ci_demo.User(id=BaseTestCase.user_id, name=BaseTestCase.user_name)\n 
u.update_password(BaseTestCase.user_password)\n ci_demo.db.session.add(u)\n ci_demo.db.session.commit()\n\n return u", "title": "" }, { "docid": "b2c80185f0b0718b3b3a025901232e20", "score": "0.6365039", "text": "def create_user():\n \n categories = crud.get_category_entries()\n countries = crud.get_country_entries()\n\n if request.method == 'POST':\n user = crud.create_user(\n #['string'] = the \"name\" field within the form\n request.form['fname'],\n request.form['lname'],\n request.form['email'],\n request.form['username'],\n request.form['password'],\n request.form['category_id'],\n request.form['country_id'])\n if user:\n #testing\n print(\"User has been created!\")\n return redirect('/login')\n else:\n flash(\"User exists. Please try again.\")\n\n\n return render_template(\n 'registration.html',\n categories=categories,\n countries=countries\n )", "title": "" }, { "docid": "2d62fa365c43c1d33a31b0bf7358c80e", "score": "0.6357508", "text": "def test_user_can_create_new_user(self):\n\n # Edith goes to the register page\n self.browser.get(self.server_url + reverse('register'))\n\n # She fills in the inputs\n username_field = self.browser.find_element_by_name('username')\n username_field.send_keys('netuser')\n password_field = self.browser.find_element_by_name('password1')\n password_field.send_keys('password')\n password_confirmation_field = self.browser.find_element_by_name('password2')\n password_confirmation_field.send_keys('password')\n email_field = self.browser.find_element_by_name('email')\n email_field.send_keys('netuser@edith.com')\n\n password_field.send_keys(Keys.RETURN)\n\n body = self.browser.find_element_by_tag_name('body')\n\n # Edith sees a congratulations message\n self.assertIn('Congratulations! You are now registered and logged in!', body.text)\n\n # After successfully logged in she goes to her applications\n # self.browser.get(self.server_url + reverse('tokens:list'))\n\n # body = self.browser.find_element_by_tag_name('body')\n # self.assertIn('Your token', body.text)", "title": "" }, { "docid": "ff0089320fd0b296b242ef5509e68354", "score": "0.63571775", "text": "def test_new_user():\n new_user = User('Test User', 'testuser')\n assert new_user.username == 'testuser'\n assert new_user.name == 'Test User'\n assert not new_user.name == 'Sooper User'", "title": "" }, { "docid": "85f2db83c56894b3a800f0c609c26994", "score": "0.63510406", "text": "def create_user():\n\n # TODO: Validate server-side before adding to DB\n first_name = request.form[\"first_name\"]\n last_name = request.form[\"last_name\"]\n image_url = request.form[\"image_url\"]\n\n # TODO: Error handling\n user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(user)\n db.session.commit()\n\n flash(f\"New User Created: {first_name} {last_name}\")\n return redirect(\"/users\")", "title": "" }, { "docid": "3dbdedc0ab8fd2c47fdc5e9dd7a7c448", "score": "0.6350954", "text": "def new_user(self, request, sociallogin):\r\n return get_account_adapter().new_user(request)", "title": "" }, { "docid": "90b98f229d839e3c25d8c09cbbdcf520", "score": "0.63480407", "text": "def _create_user(user):\n login = 'osf.' 
+ user._id\n pw = str(uuid.uuid4())[:8]\n\n response = requests.post(\n url=settings.PIWIK_HOST,\n data={\n 'module': 'API',\n 'method': 'UsersManager.addUser',\n 'format': 'json',\n 'token_auth': settings.PIWIK_ADMIN_TOKEN,\n 'userLogin': login,\n 'password': pw,\n 'email': user._id + '@osf.io',\n 'alias': 'OSF User: {}'.format(user._id),\n }\n )\n\n if json.loads(response.content)['result'] == 'error':\n raise PiwikException('Piwik user not updated')\n\n user.piwik_token = md5(login + md5(pw).hexdigest()).hexdigest()\n\n user.save()", "title": "" }, { "docid": "456863e96ce85ba2b0952c7e7b8555ed", "score": "0.6346355", "text": "def create_users(self):\n user_fields = ('user_name TEXT PRIMARY KEY', 'password TEXT', 'email TEXT',\\\n 'gender TEXT', 'in_calories_goal INT', 'out_calories_goal INT',\\\n 'budget INT', 'pts INT', 'height INT', 'weight INT', 'steps_goal INT'\\\n 'total_sleep_acquired INT', 'auth_token TEXT', 'user_id TEXT')\n self.tableCreator('users', *user_fields)\n return True", "title": "" }, { "docid": "d9f20f91edcbc4d55ba4e0e405ea0607", "score": "0.63422924", "text": "def test_create_default_user_options():\n\n runner = CliRunner()\n result = runner.invoke(\n create_default_user, [\"--username\", \"Bob\", \"--password\", \"myprecious\"]\n )\n\n user = User.query.one()\n success_string = \"Default user 'Bob' created.\"\n\n assert not result.exception\n assert success_string in result.output\n assert isinstance(user, User)\n assert user.username == \"Bob\"\n assert bool(user.check_password(\"myprecious\"))\n assert bool(user.is_admin)", "title": "" }, { "docid": "768783c8a671f57bd8e6e443e3a990ae", "score": "0.63366306", "text": "def test_create_default_user():\n\n runner = CliRunner()\n result = runner.invoke(create_default_user)\n\n user = User.query.one()\n success_string = \"Default user 'admin' created.\"\n\n assert not result.exception\n assert success_string in result.output\n assert isinstance(user, User)\n assert user.username == \"admin\"\n assert bool(user.check_password(\"farm_monitor\"))\n assert bool(user.is_admin)", "title": "" }, { "docid": "8e60eac3d902fcc201a1a19b54341d25", "score": "0.63360894", "text": "def create_user(email, password, first, last, div, role, tag, is_sup, sup, budget_code=None, object_code=None,\n object_name=None):\n role_obj = Role.query.filter_by(name=role).first()\n sup_obj = User.query.filter_by(id=sup).first()\n u = User(email=email, first_name=first, last_name=last, password=password,\n division=div, role=role_obj, tag_id=tag, is_supervisor=is_sup, supervisor=sup_obj, budget_code=budget_code,\n object_code=object_code, object_name=object_name)\n db.session.add(u)\n db.session.commit()\n u.password_list.update(u.password_hash)\n _send_new_user_email(u, password)\n current_app.logger.info('{} successfully registered user with email {}'.format(current_user.email, u.email))\n return u.email", "title": "" }, { "docid": "3534126b14a4d00d13a3962b8ed5800d", "score": "0.63334095", "text": "def create_user(self, body=None, *args, **_params):\n return self.post(self.user_create_path, body=body)", "title": "" }, { "docid": "a1c629f7d52b34581a6b7fe845277225", "score": "0.63300997", "text": "def create_user(self, device):\r\n ip_address = self.device_id.ip_address\r\n port = self.device_id.port\r\n device_password = self.device_id.device_password\r\n user_id = str(self.device_user_id)\r\n\r\n with c.ConnectToDevice(ip_address, port, device_password) as conn:\r\n\r\n device_users = conn.get_users()\r\n device_user_ids = [int(x.user_id) for x in 
device_users]\r\n if self.device_user_id not in device_user_ids:\r\n conn.set_user(uid=self.device_user_id, name=self.name, privilege=const.USER_DEFAULT, user_id=user_id)\r\n self.device_uid = self.device_user_id\r\n return {\r\n \"type\": \"ir.actions.act_window\",\r\n \"res_model\": \"device.users\",\r\n \"views\": [[False, \"form\"]],\r\n \"res_id\": self.id,\r\n \"target\": \"main\",\r\n \"context\": {'show_message1': True},\r\n }\r\n\r\n else:\r\n return {\r\n \"type\": \"ir.actions.act_window\",\r\n \"res_model\": \"device.users\",\r\n \"views\": [[False, \"form\"]],\r\n \"res_id\": self.id,\r\n \"target\": \"main\",\r\n \"context\": {'show_message2': True},\r\n }", "title": "" }, { "docid": "0db4070cf034fd0b0e8721137404bdb2", "score": "0.6329164", "text": "def _create_user(self, username, email, password, **extra_fields):\n # print(\"hi hassan\")\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "title": "" }, { "docid": "e8779f53a68011cb36078e2ffa3d7643", "score": "0.63256544", "text": "def create_user(email, password, pen_name):\n\n signup_user = User()\n signup_user.email = email\n signup_user.password_hash = argon2.hash(password)\n signup_user.pen_name = pen_name\n\n try:\n signup_user.save()\n return signup_user\n except QueryException:\n return None", "title": "" }, { "docid": "aac4513f33f85e64ac280edbb208cd56", "score": "0.6303878", "text": "def _do_user_create(p_user):\n print(' Creating user: \"{}\"'.format(p_user))\n try:\n pwd.getpwnam(p_user)\n print(' *** User \"{}\" already exixts! Skipping Add'.format(p_user))\n except KeyError:\n User._add_user(p_user)", "title": "" }, { "docid": "a5c674d8e2c82809d20a57b6d7c51766", "score": "0.63029075", "text": "def create_new_user(user: UserRegistrationSchema, db: Session):\n\n user.password = generate_password_hash(user.dict()['password'], method='sha256')\n user_obj = User(**user.dict(exclude_unset=True))\n db.add(user_obj)\n db.commit()\n db.refresh(user_obj)\n\n token = generate_token(user.email, 'activate')\n url = f\"{USER_ACTIVATION_URL}?token={token}\"\n context = {\n 'url': url,\n 'email': user.email\n }\n template = 'activate_user.html'\n send_email.delay(\n subject=\"Activate your ChooseOne account\",\n template=template,\n recipients=[user.email],\n context=context\n )\n\n return UserSchema.from_orm(user_obj)", "title": "" } ]
96e071f315bfe84fab392617e3d43f4e
Sets the signup_url of this GoogleEnterprise.
[ { "docid": "c6aaf918a064923c95e551ecbc6b8ad3", "score": "0.782311", "text": "def signup_url(self, signup_url):\n if signup_url is not None and len(signup_url) < 1:\n raise ValueError(\"Invalid value for `signup_url`, length must be greater than or equal to `1`\")\n\n self._signup_url = signup_url", "title": "" } ]
[ { "docid": "78bdf485adf3c2eb3ffcc3a06ee8dbac", "score": "0.6903089", "text": "def signup_url(self):\n return self._signup_url", "title": "" }, { "docid": "c927cb649a708861b0d0105a9309bbd9", "score": "0.57198805", "text": "def ___set_url(self, url):\n self.__set_url(url)", "title": "" }, { "docid": "23b6e2941c090cc70773e4471659f3e2", "score": "0.5717212", "text": "def __set_url(self, url):\n self._url = url", "title": "" }, { "docid": "8e6af63622a10851db6c3007ac016719", "score": "0.5499965", "text": "def docu_sign_landing_url(self, docu_sign_landing_url):\n\n self._docu_sign_landing_url = docu_sign_landing_url", "title": "" }, { "docid": "9bc70489089401fa77013aebca15d6b1", "score": "0.54041976", "text": "def set_storage_url(self, url):\n self.storage_url = url", "title": "" }, { "docid": "4cc977f891f66f23bc523be078b47ac2", "score": "0.53637964", "text": "def url(self, url):\n if url is None:\n raise ValueError(\"Invalid value for `url`, must not be `None`\")\n\n self._url = url", "title": "" }, { "docid": "fece6533edaf108438c9c814cd1d2a4c", "score": "0.5357118", "text": "def url(self, url: str):\n\n self._url = url", "title": "" }, { "docid": "fece6533edaf108438c9c814cd1d2a4c", "score": "0.5357118", "text": "def url(self, url: str):\n\n self._url = url", "title": "" }, { "docid": "fece6533edaf108438c9c814cd1d2a4c", "score": "0.5357118", "text": "def url(self, url: str):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "20e3a9485495f1e40d3d764079aeacf9", "score": "0.5348516", "text": "def url(self, url):\n\n self._url = url", "title": "" }, { "docid": "eb281553a5b535dc7300c8d5706075df", "score": "0.53339446", "text": "def set_server_url(self, new_url):\n self.url = new_url", "title": "" }, { "docid": "e4cfff9ff6662eb4c9d73eb746c72c8a", "score": "0.52958584", "text": "def signup(self, request, user):\n pass", "title": "" }, { "docid": "e4cfff9ff6662eb4c9d73eb746c72c8a", "score": "0.52958584", "text": "def signup(self, request, user):\n pass", "title": "" }, { "docid": "3120b0d9e8036220fea0f9f452a5bd5f", "score": "0.5294734", "text": "def url_auth(self, url_auth):\n self._url_auth = url_auth", "title": "" }, { "docid": "62590cc4de164a249fd15624b1b7f86c", "score": "0.5209565", "text": "def presigned_url(self, presigned_url):\n\n self._presigned_url = presigned_url", "title": "" }, { "docid": 
"965592fef39f62a38b6b76fdc9053481", "score": "0.5183946", "text": "def callback_url(self, callback_url):\n if callback_url is not None and len(callback_url) < 1:\n raise ValueError(\"Invalid value for `callback_url`, length must be greater than or equal to `1`\")\n\n self._callback_url = callback_url", "title": "" }, { "docid": "388e758bcf9850434523d1bfd3015941", "score": "0.518234", "text": "def add_url(self, url):\n self.resource_url = url\n self.put()", "title": "" }, { "docid": "acaad552cd6bc5fc64e318018f113c84", "score": "0.5175278", "text": "def _create_url(self, endpoint):\r\n self.url = '%s://%s%s%s%s' % (self.PROTOCOL, self.auth.server,\r\n self.PORT, self.DEFAULT_URL, endpoint)", "title": "" }, { "docid": "01b320942a5b4462eef3636e4ff181fe", "score": "0.51287323", "text": "def url(self, url) :\n\t\ttry :\n\t\t\tself._url = url\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "d779486e1040b316a3e0e44da8610c75", "score": "0.5109642", "text": "async def signup(self, request: web.Request, *args, **kwargs) -> web.Response:\n\n pass", "title": "" }, { "docid": "0d8c9ff022541da43d2029ba43314a8f", "score": "0.50804883", "text": "def update_url(self, new_url):\n self.url = new_url", "title": "" }, { "docid": "0d8c9ff022541da43d2029ba43314a8f", "score": "0.50804883", "text": "def update_url(self, new_url):\n self.url = new_url", "title": "" }, { "docid": "0c451a2ee30b00e5463e717f1af34e39", "score": "0.5079066", "text": "def setURL(self,URL):\n\t\tpass", "title": "" }, { "docid": "ffa536be882093e7a4ac8dacbb4898f9", "score": "0.5042235", "text": "def set_haskoin_url(self, haskoin_url:str):\n self.haskoin_url = haskoin_url", "title": "" }, { "docid": "f5ec90bef968f3d2e81e183480de8686", "score": "0.5032812", "text": "def setPageURL(self, page_url):\n\t\tself.page_url = page_url", "title": "" }, { "docid": "046100d80c7667415461f3991afd3ddd", "score": "0.5015592", "text": "def auth_url(self):\n raise NotImplementedError('Implement in subclass')", "title": "" }, { "docid": "c6729f47877ed31629c67e3e73520528", "score": "0.5010457", "text": "def _set_url(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"url\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:ietf:params:xml:ns:yang:nfvo:vnfd', defining_module='vnfd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"url must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"url\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:ietf:params:xml:ns:yang:nfvo:vnfd', defining_module='vnfd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__url = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "c6729f47877ed31629c67e3e73520528", "score": "0.5010457", "text": "def _set_url(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = 
v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"url\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:ietf:params:xml:ns:yang:nfvo:vnfd', defining_module='vnfd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"url must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"url\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:ietf:params:xml:ns:yang:nfvo:vnfd', defining_module='vnfd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__url = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "c6729f47877ed31629c67e3e73520528", "score": "0.5010457", "text": "def _set_url(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"url\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:ietf:params:xml:ns:yang:nfvo:vnfd', defining_module='vnfd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"url must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"url\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:ietf:params:xml:ns:yang:nfvo:vnfd', defining_module='vnfd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__url = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "c38f6f1c3960d9e9d40671bcfa21bcb7", "score": "0.5009564", "text": "def url(self, new: str) -> None:\n self._url = new", "title": "" }, { "docid": "35623e01ad5716cb1934a5684350e5c8", "score": "0.50074023", "text": "def updateURL(self):\n self.url = \"http://\" + self.hostname\n if self.port:\n self.url += \":\" + str(self.port)\n return self.url", "title": "" }, { "docid": "4adb3ff35786cba3ae7d5ea231a160f5", "score": "0.5001295", "text": "def url(self, data):\n self._url = data", "title": "" }, { "docid": "0ae3056c5b8fb72ac9192598ec277ebf", "score": "0.49833047", "text": "def redirect_url(self, redirect_url):\n\n self._redirect_url = redirect_url", "title": "" }, { "docid": "947be940d274994907e17d5b9db21f03", "score": "0.4976722", "text": "def user_url(self):\n return self.properties.get('UserUrl', None)", "title": "" }, { "docid": "b9651072d6bbba352af622b7c7a9f159", "score": "0.4972217", "text": "def set_link(self, url):\n self.URL = url", "title": "" }, { "docid": "ec3a9187fc0f6b38d70fb663507487c2", "score": "0.4956822", "text": "def _meetup_post_url(self):\n # self.client.post(self.register_url, data = json.dumps(self.users[0]), content_type=\"application/json\")\n # self.client.post(self.login_url, data = json.dumps(self.loging_data), content_type=\"application/json\") \n \n self.post_meetup_url='api/v2/auth/meetups'\n\n return self.post_meetup_url", "title": "" }, { "docid": "00a7e9b2ff9496acca80064708ed89b4", "score": 
"0.49486363", "text": "def signup_user(self):\n\n self.testuser3 = User.signup(\n username=\"testuser3\",\n email=\"abc@abc.com\",\n password=\"123456\",\n image_url=\"/test.jpg\")\n\n db.session.commit()", "title": "" }, { "docid": "c977c0ff520e6ce2c9e864c98374c973", "score": "0.4935947", "text": "def vestorly_url(self, vestorly_url):\n\n self._vestorly_url = vestorly_url", "title": "" }, { "docid": "e2044a5e897aeb77bfb9b1fbd9921ad9", "score": "0.49208295", "text": "def set_base_url(self, base_url):\n self.base_url = base_url", "title": "" }, { "docid": "cce6b5ab8e66df700014b47cd319a682", "score": "0.49193943", "text": "def sign_up(self,event):\n sign_in_frame = Sign_up(None,self.client_actions)\n sign_in_frame.Show()\n self.Close()", "title": "" }, { "docid": "83ca93c68bacb59fbe283a37f92f385f", "score": "0.49059686", "text": "def set_url(self):\n url_input = raw_input(\n 'Enter address of the CatDV Sever (eg. \\'localhost:8080\\'): ')\n self.url = 'http://' + url_input\n return self.url", "title": "" }, { "docid": "c9f7b1322be03b9cd8523ed4b6e91217", "score": "0.48371604", "text": "def notify_url(self, notify_url):\n\n self._notify_url = notify_url", "title": "" }, { "docid": "c43eb1ec2331a77a397b718461fa123b", "score": "0.48288852", "text": "def signUpEvent(self):\n if False in self.isFormValid.values():\n self.confirmButton.config(fg = self.errorColor, highlightbackground=self.errorColor, highlightcolor=self.errorColor, highlightthickness=1)\n else:\n self.confirmButton.config(fg = \"black\", highlightthickness=0)\n ret = self.client.register(self.usernameEntry.get(), self.passwordEntry.get(), self.nameEntry.get(), self.surnameEntry.get(), self.emailEntry.get())\n if ret == 1:\n self.cancelEvent()\n self.loginWindow.showMessage(\"Succefully Registered\", \"#4bf442\")\n elif ret == 0:\n self.showErrorLabel()", "title": "" }, { "docid": "bac88bf8a2724a01b7c385c76c70db01", "score": "0.4808926", "text": "def url(self, value):\r\n self.logger.warn(\"Setting values on url will NOT update the remote Canvas instance.\")\r\n self._url = value", "title": "" }, { "docid": "bac88bf8a2724a01b7c385c76c70db01", "score": "0.4808926", "text": "def url(self, value):\r\n self.logger.warn(\"Setting values on url will NOT update the remote Canvas instance.\")\r\n self._url = value", "title": "" }, { "docid": "bac88bf8a2724a01b7c385c76c70db01", "score": "0.4808926", "text": "def url(self, value):\r\n self.logger.warn(\"Setting values on url will NOT update the remote Canvas instance.\")\r\n self._url = value", "title": "" }, { "docid": "bac88bf8a2724a01b7c385c76c70db01", "score": "0.4808926", "text": "def url(self, value):\r\n self.logger.warn(\"Setting values on url will NOT update the remote Canvas instance.\")\r\n self._url = value", "title": "" }, { "docid": "4a4fe7c8f6880ff27f92d630bccad029", "score": "0.4807739", "text": "def signup():\n params = {\n 'response_type': 'code',\n 'redirect_uri': get_redirect_uri(request),\n 'scopes': ','.join(config.get('scopes')),\n }\n url = generate_oauth_service().get_authorize_url(**params)\n return redirect(url)", "title": "" }, { "docid": "4a4fe7c8f6880ff27f92d630bccad029", "score": "0.4807739", "text": "def signup():\n params = {\n 'response_type': 'code',\n 'redirect_uri': get_redirect_uri(request),\n 'scopes': ','.join(config.get('scopes')),\n }\n url = generate_oauth_service().get_authorize_url(**params)\n return redirect(url)", "title": "" }, { "docid": "ea1c2ee634162a1621f2849e09a02314", "score": "0.48050377", "text": "def url(self, url):\n if url is None:\n 
raise ValueError(\"Invalid value for `url`, must not be `None`\")\n if url is not None and len(url) > 4096:\n raise ValueError(\"Invalid value for `url`, length must be less than or equal to `4096`\")\n if url is not None and len(url) < 1:\n raise ValueError(\"Invalid value for `url`, length must be greater than or equal to `1`\")\n\n self._url = url", "title": "" }, { "docid": "438201b718bb4de98cef40e5d4838c73", "score": "0.48035112", "text": "def reset_url(self, url):\n\n self.url = url", "title": "" }, { "docid": "9a848323656031d636a5b11ed5e9dbd8", "score": "0.4796168", "text": "def signup_user(self, username, email):\n\n self.testuser3 = User.signup(\n username=username,\n email=email,\n password=\"123456\",\n image_url=\"/test.jpg\")\n\n db.session.commit()", "title": "" }, { "docid": "c335fe590c9cabff54ea15090ac9306f", "score": "0.47913858", "text": "async def sheet_url(self, ctx, url):\n try:\n server = ctx.message.server.id\n if server not in self.settings:\n self.settings[server] = {}\n is_pm = False\n except KeyError:\n is_pm = True\n\n if is_pm:\n await self.bot.say(\"I cannot set this in PM because it's\"\n \" a per-server value\")\n else:\n self.settings[server][\"project_url\"] = url\n dataIO.save_json(\"data/untappd/settings.json\", self.settings)\n await self.bot.say(\"The project endpoint URL has been set\")", "title": "" }, { "docid": "04925737afcbcfdbe51c1e46dc3897bd", "score": "0.47835475", "text": "def set_notify_url( self, notify_url ):\n\t\tif (notify_url is None) or (len(notify_url) == 0): return\n\t\tif len(notify_url) > 2048:\n\t\t\traise ValueError( 'notify_url cannot exceed 2048 characters' )\n\n\t\tself._nvp_request['NOTIFYURL'] = notify_url", "title": "" }, { "docid": "5d393caa2d5e4b334315e7a5a3713a4d", "score": "0.47824097", "text": "def after_signup(self, user, **kwargs):\n pass", "title": "" }, { "docid": "b78ba70615347e953b95a660735f3cbb", "score": "0.4779945", "text": "def external_url(self, external_url):\n\n self._external_url = external_url", "title": "" }, { "docid": "a5a8002ae398d7c8c5f85261eb421e5e", "score": "0.47782236", "text": "def google_sign_in(self):\n self.generate_state_token()\n auth_url = f\"\"\"\n {AuthClient.GOOGLE_AUTH_ENDPOINT}?\n scope=email%20profile&\n response_type=code&\n state={self.state_token}&\n redirect_uri={self.local_web_server.auth_url}&\n client_id={AuthClient.GOOGLE_CLIENT_ID}\n \"\"\".replace('\\n', '').replace(' ', '')\n webbrowser.open(auth_url)", "title": "" }, { "docid": "987028da2420cd005bb619a49b8ba62c", "score": "0.4756313", "text": "def jwks_url(self, jwks_url):\n\n self._jwks_url = jwks_url", "title": "" }, { "docid": "7706185723e510d6bdda0c3fc545a85b", "score": "0.4754741", "text": "def upload_url(self):\n if \"uploadUrl\" in self._prop_dict:\n return self._prop_dict[\"uploadUrl\"]\n else:\n return None", "title": "" }, { "docid": "2d768ecc2ce66be3c874ff9342038b69", "score": "0.47324818", "text": "def base_url(self, base_url):\n if base_url is None:\n raise ValueError(\"Invalid value for `base_url`, must not be `None`\")\n\n self._base_url = base_url", "title": "" }, { "docid": "8d5acd35e119f1841774f25fae548ce9", "score": "0.47320354", "text": "def auth_url(self):\n pass", "title": "" }, { "docid": "72365cac79e54c1ba9f9411b9661534b", "score": "0.47051036", "text": "def redirecturl(self, redirecturl) :\n\t\ttry :\n\t\t\tself._redirecturl = redirecturl\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "332d1ac33f18c953218d6ca93df35173", "score": "0.46979165", "text": "def url(self, url):\n 
if (self.local_vars_configuration.client_side_validation and\n url is not None and len(url) > 2000):\n raise ValueError(\"Invalid value for `url`, length must be less than or equal to `2000`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n url is not None and len(url) < 1):\n raise ValueError(\"Invalid value for `url`, length must be greater than or equal to `1`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n url is not None and not re.search(r'[A-Za-z0-9\\-._~:\\/?#[]@!$&\\'()+,;=]*', url)): # noqa: E501\n raise ValueError(r\"Invalid value for `url`, must be a follow pattern or equal to `/[A-Za-z0-9\\-._~:\\/?#[]@!$&'()+,;=]*/`\") # noqa: E501\n\n self._url = url", "title": "" }, { "docid": "343605401d1ee7b31e2c874892488a1f", "score": "0.46773842", "text": "def user_email(self, user_email):\n\n self._user_email = user_email", "title": "" }, { "docid": "343605401d1ee7b31e2c874892488a1f", "score": "0.46773842", "text": "def user_email(self, user_email):\n\n self._user_email = user_email", "title": "" }, { "docid": "1cc56d89ecabcebd22c82a1a1930f430", "score": "0.4646073", "text": "def __init__(__self__, *,\n google_group_email: str,\n user_email: str):\n pulumi.set(__self__, \"google_group_email\", google_group_email)\n pulumi.set(__self__, \"user_email\", user_email)", "title": "" }, { "docid": "b4600091271a888357aed3c106dff623", "score": "0.4645619", "text": "def bot_url(self, bot_url):\n\n self._bot_url = bot_url", "title": "" }, { "docid": "0498b587bd94f54e3d2a1a2f8bac7c9a", "score": "0.46399748", "text": "def set_auth_url(self, testing_on=False):\n # Update which URL to use for auth depending on\n if testing_on == False and use_active_directory == True:\n # If using AD and not testing, use live AD tenant\n self.cip_auth_url = live_100K_auth_url\n elif testing_on == True and use_active_directory == True:\n # If using AD and testing, use beta AD tenant\n self.cip_auth_url = beta_testing_auth_url\n elif testing_on == False and use_active_directory == False:\n # If using LDAP and not testing, use live CIPAPI get-token url\n self.cip_auth_url = live_100k_data_base_url + 'get-token/'\n elif testing_on == True and use_active_directory == False:\n raise ValueError(\n \"LDAP login no longer supported for testing. 
Please set use_active_directory to True in config.py\"\n )", "title": "" }, { "docid": "f9b9623473fde2bad45ccdb7e9f69b20", "score": "0.46297565", "text": "def request_url(self, request_url):\n\n self._request_url = request_url", "title": "" }, { "docid": "61366bd35588f6ab95d86460c2184664", "score": "0.4627043", "text": "def avatar_url(self, avatar_url: object):\n\n self._avatar_url = avatar_url", "title": "" }, { "docid": "61366bd35588f6ab95d86460c2184664", "score": "0.4627043", "text": "def avatar_url(self, avatar_url: object):\n\n self._avatar_url = avatar_url", "title": "" }, { "docid": "2814604b4cb5e057c8655682e88f81b3", "score": "0.46224588", "text": "def scm_url(self, scm_url):\n self._scm_url = scm_url", "title": "" }, { "docid": "804f6967a2d252ce9c559227bd77b30f", "score": "0.46167585", "text": "def redeem_at_sign_up(self, invitation):\n self.request.session.update(\n {\n \"invitation:pk\": invitation.pk,\n # Auto-verify EmailAddress via django-allauth.\n \"account_verified_email\": invitation.to_email,\n }\n )\n url = reverse(\"account_signup\")\n\n obj = invitation.object\n organization = None\n if isinstance(obj, Team):\n organization = obj.organization\n elif isinstance(obj, Organization):\n organization = obj\n if organization and AdminPermission.has_sso_enabled(organization):\n url += f\"?organization={organization.slug}\"\n return HttpResponseRedirect(url)", "title": "" }, { "docid": "5c2b398a13201cc7aa35b6d3e9f32770", "score": "0.46138364", "text": "def build_url(self):\n return \"https://api.meetup.com/{}/events\".format(self.group_url_name)", "title": "" }, { "docid": "3edda176cb95d00051c3d2ccb6af96ba", "score": "0.460809", "text": "def email(self, email):\n self._email = email", "title": "" }, { "docid": "c7b4c5287de8e4785a754b02f6d436d4", "score": "0.46065032", "text": "def authorise_url(self, callback_url: str = ''):\n authorise_url_endpoint = '/api/v4/oauth/authorise/?oauth_token={0}'.format(\n self.credentials.token\n )\n\n if callback_url:\n authorise_url_endpoint = '{0}&callback={1}'.format(authorise_url_endpoint, callback_url)\n\n return urljoin(self.base_url, authorise_url_endpoint)", "title": "" }, { "docid": "584e6e574999bcbc408b09cb781502c7", "score": "0.45981118", "text": "def click_sign_in_up(self):\n self.browser.click_on_element(self.locator_login.SIGNIN)", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": 
"0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" }, { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.45980984", "text": "def email(self, email):\n\n self._email = email", "title": "" } ]
850b14adc74914000ecee267cb5a171e
Replaces 'pattern' in 'string' with 'sub' if 'pattern' starts 'string'.
[ { "docid": "ebf3a6bfd34aa2c3d8d9c5914ab345ea", "score": "0.77272403", "text": "def lreplace(pattern, sub, string):\n return re.sub('^%s' % pattern, sub, string)", "title": "" } ]
[ { "docid": "e224a45e914a1459828b24848b832fbe", "score": "0.65532935", "text": "def sub(self, pattern: str, repl: Union[str, callable], string: str, strict: bool = True, case_sensitive: bool = False) -> str:\n if case_sensitive:\n result = re.sub(pattern, repl, string)\n else:\n result = re.sub(pattern, repl, string, flags=re.IGNORECASE)\n if result == string: # No substitution happened!\n self._unexpected_behavior(\"sub\", pattern, \"did not yield any substitutions\", string, strict)\n return result # Returns only if sub happened or non strict behavior", "title": "" }, { "docid": "4e8e907314ef0f2c22ab45f6a9aa1ead", "score": "0.64997524", "text": "def replacePattern( pattern, str ):\r\n\t\r\n\t# Do the thing~\r\n\ttry:\r\n\t\treturn str.replace(pattern[0],pattern[1])\r\n\texcept:\r\n\t\tprint(\"Error(replacePattern): Invalid transform syntax on pattern,\",pattern[0],\"! Skipping...\")\r\n\t\treturn str", "title": "" }, { "docid": "8e5b1f2fe62bdfa29f306874ac2fb011", "score": "0.6368721", "text": "def __preg_replace_str(pat, rep, subject):\n reg = __preg_setup(pat)\n # function call\n if callable(rep):\n def __callback(match):\n match_list = list(match.groups())\n match_list.insert(0, subject)\n match_list = tuple(match_list)\n o = rep(match_list)\n return o\n return reg.sub(__callback, subject)\n # string\n else:\n return reg.sub(rep, subject)", "title": "" }, { "docid": "54055af0b96e09ef37f942309440a4a4", "score": "0.63533336", "text": "def re_sub(pattern, repl, string):\n groups = []\n result = re.sub(pattern, lambda m: re_collect_groups(m, groups, repl), string)\n return result, groups", "title": "" }, { "docid": "c485b5025d7e144d120a118135a08431", "score": "0.6350744", "text": "def worker(string, pattern, replacement, f):\n matches = []\n for i in range(pattern.__len__()):\n matches.extend(gregexpr(pattern[i], string, i, f))\n matches.sort(key=itemgetter(2), reverse=True)\n for i in range(matches.__len__()-1, -1, -1):\n if any([filter_matches(matches[i], m) for m in matches[:i]]):\n matches.pop(i)\n matches.sort(key=itemgetter(1))\n for i in range(matches.__len__()-1, -1, -1):\n s = matches[i][1]\n e = matches[i][3]\n p = pattern[matches[i][0]]\n r = replacement[matches[i][0]]\n pre = string[:s]\n r0 = re.sub(p, r, string[s:e], flags=f)\n end = string[e:]\n string = pre+r0+end\n return string", "title": "" }, { "docid": "079b657a060e2f1b8a6990f6340e2d01", "score": "0.61693805", "text": "def regex_replacer(pattern, repl):\n return lambda x: sub(pattern, repl, x)", "title": "" }, { "docid": "ac2ea56ed410bc70b3e467eedf93d37c", "score": "0.611818", "text": "def mgsub(string, pattern, replacement, flags=0):\n\n assert isinstance(pattern, list), \"Input pattern must be a list\"\n assert ((isinstance(replacement, list) & (replacement.__len__() == pattern.__len__())) |\n isinstance(replacement, str)), \"Input replacement must be a list of equal length to pattern or a string\"\n\n if isinstance(replacement, str):\n replacement = [replacement for p in pattern]\n\n if not isinstance(string, str) and isinstance(string, collections.Sequence):\n output = [worker(s, pattern, replacement, flags) for s in string]\n else:\n output = worker(string, pattern, replacement, flags)\n return output", "title": "" }, { "docid": "1f19eebf28660f4a375263912b9d1aad", "score": "0.60709614", "text": "def replace_patterns(string, patterns):\n for old, new in patterns:\n string = string.replace(old, new)\n return string", "title": "" }, { "docid": "d87f1611d7ec81d74916c5bc22b7f856", "score": "0.6036524", "text": "def 
sub(pattern,replacement,*,count=0,flags=0):\n regex = re.compile(pattern,flags)\n def _sub(input):\n yield from (regex.sub(repl=replacement,string=line,count=count) for line in input)\n return _sub", "title": "" }, { "docid": "1ef2091d3319c9ea1f8815732a5d2143", "score": "0.6011735", "text": "def replace(pattern, replacement, value):\n return re.sub(pattern, replacement, value)", "title": "" }, { "docid": "b6babf01def905307b7e6472748bc9c4", "score": "0.5923076", "text": "def replace(s, pattern, replacement):\n # the replacement string may contain invalid backreferences (like \\1 or \\g)\n # which will cause python's regex to blow up. Since this should emulate\n # the jam version exactly and the jam version didn't support\n # backreferences, this version shouldn't either. re.sub\n # allows replacement to be a callable; this is being used\n # to simply return the replacement string and avoid the hassle\n # of worrying about backreferences within the string.\n def _replacement(matchobj):\n return replacement\n return re.sub(pattern, _replacement, s)", "title": "" }, { "docid": "6e80f6038cbf2f1f2303b671015b43c7", "score": "0.58642614", "text": "def match(pattern, string):\n return re.match(pattern, string)", "title": "" }, { "docid": "6e9e69048806c5de93bc1a8bb52a17ea", "score": "0.5853369", "text": "def substitution(master_string, replace_string, sub_string):\n length = len(replace_string)\n escape_character = \"+\"\n for index in xrange(len(master_string) - length + 1):\n window = master_string[index: length + index]\n if window == replace_string:\n if master_string[:index].count(escape_character) % 2 == 1:\n # means the replace_string is in the middle of a already\n # replaced string\n continue\n new_string = master_string[:index] + escape_character + \\\n sub_string + escape_character + \\\n master_string[index + length:]\n return new_string\n else:\n return None", "title": "" }, { "docid": "953cfbca525341a9e52621b4e8b55c88", "score": "0.585088", "text": "def replacePatterns( patterns, str ):\r\n\t\r\n\t# Make a copy of the string so we don't get weird behavior~\r\n\ts = str\r\n\t\r\n\t# For each pattern,\r\n\tfor pattern in patterns:\r\n\t\ts = replacePattern(pattern,s)\t# Replace the pattern\r\n\treturn s", "title": "" }, { "docid": "f3fc268e872fdb1594c804010a684a3a", "score": "0.5785354", "text": "def update_word_pattern(word, pattern, letter):\r\n for i in range(len(word)):\r\n if word[i] == letter:\r\n pattern = pattern[:i] + letter + pattern[i + 1:]\r\n return pattern", "title": "" }, { "docid": "801a6ccdf4b3be3e87a685727dd17546", "score": "0.5764851", "text": "def __str_replace_str(pat, rep, subject):\n return subject.replace(pat, rep)", "title": "" }, { "docid": "3424580fb0bc73f4bc07ee3e0e7b39da", "score": "0.57075423", "text": "def apply_and_get_result(self, string):\n if self.is_multiline:\n compiled_pattern = re.compile(self.pattern, re.MULTILINE)\n else:\n compiled_pattern = re.compile(self.pattern)\n\n result = re.sub(compiled_pattern, self.repl, string)\n return result", "title": "" }, { "docid": "e80b0687a31ef2ead799c97a9a359d58", "score": "0.57067484", "text": "def update_word_pattern(word,pattern,letter):\n new_pattern = list(pattern)\n for i in range(len(word)):\n if word[i] == letter:\n new_pattern[i] = letter\n return ''.join(new_pattern)", "title": "" }, { "docid": "5ea8315953fc05cb799dc15312beca51", "score": "0.56539804", "text": "def update_word_pattern(word,pattern,letter):\n\n pattern=list(pattern) # convert the pattern from a string to list\n\n # checks if the the 
secret word contain the letter\n for j in range(len(word)):\n if letter==word[j]:\n pattern[j]=letter # updates the appropiate place in the pattern\n\n pattern=\"\".join(pattern)\n\n return pattern", "title": "" }, { "docid": "8fd71418c1c1e58945cf6c5b57cfe7ee", "score": "0.5649985", "text": "def _substitute(self, str):\r\n\r\n return self._regex.sub(\r\n lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower()\r\n )", "title": "" }, { "docid": "69ad2964f41789690a6b699425d9826d", "score": "0.5643551", "text": "def replace_pattern(x, pattern_b, replacement):\n\n try:\n res = x.strip().replace(pattern_b,replacement)\n except:\n res = x\n # insert logging\n return res", "title": "" }, { "docid": "88748eb2bc90038658882ab3ebc926aa", "score": "0.56325567", "text": "def lsub(self, directory,pattern):\n\t\tpass", "title": "" }, { "docid": "c5e87494bf848cb94c4b29f650566e79", "score": "0.5608165", "text": "def sub(\n self, pattern: Union[bytes, str_scalars], repl: Union[bytes, str_scalars], count: int = 0\n ) -> Strings:\n if isinstance(repl, bytes):\n repl = repl.decode()\n return self._get_matcher(pattern).sub(repl, count)", "title": "" }, { "docid": "8e1bf2f7f80d79d450ed5eba39fa0104", "score": "0.5583376", "text": "def sub_re(self, callback, msg):\n self.sub(callback, re.compile(msg))", "title": "" }, { "docid": "8c59dac29c863aaf6a1e1be819df2b2c", "score": "0.557107", "text": "def replace(self, pattern, repl, *args, **kwargs):\n c = MessageMixin.replace(self, pattern, repl, *args, **kwargs)\n self.path, pc = utils.safe_subn(\n pattern, repl, self.path, *args, **kwargs\n )\n c += pc\n return c", "title": "" }, { "docid": "cd6cf86e775803c0b243e67c1759a6d5", "score": "0.5551831", "text": "def _substitute(self, str):\n\n return self._regex.sub(\n lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower()\n )", "title": "" }, { "docid": "90e4557340c33caefa614d3d9fbf3169", "score": "0.5439628", "text": "def test_pattern_exists_in_string(self):\n self.assertTrue(self.pattern1 in self.string1)\n self.assertEqual(2, search(self.pattern1, self.string1))", "title": "" }, { "docid": "5148954c882450b26ed80e12cabd6fbe", "score": "0.54316485", "text": "def preg_replace(pat, rep, subject):\n out = subject\n if isinstance(pat, list):\n repIsStr = isinstance(rep, list)\n for i in pat:\n if repIsStr:\n repStr = rep\n else:\n repStr = rep[i]\n out = __preg_replace_str(pat[i], repStr, out)\n else:\n out = __preg_replace_str(pat, rep, subject)\n return out", "title": "" }, { "docid": "19209707837832cf55fec527bb92eca2", "score": "0.54167587", "text": "def _pattern(self, pattern, that, topic):\n\n if len(pattern) == 0:\n return None\n # Mutilate the input. 
Remove all punctuation and convert the\n # text to all caps.\n input = string.upper(pattern)\n input = re.sub(self._brain._puncStripRE, \" \", input)\n if that.strip() == u\"\": that = u\"ULTRABOGUSDUMMYTHAT\" # 'that' must never be empty\n thatInput = string.upper(that)\n thatInput = re.sub(self._brain._puncStripRE, \" \", thatInput)\n thatInput = re.sub(self._brain._whitespaceRE, \" \", thatInput)\n if topic.strip() == u\"\": topic = u\"ULTRABOGUSDUMMYTOPIC\" # 'topic' must never be empty\n topicInput = string.upper(topic)\n topicInput = re.sub(self._brain._puncStripRE, \" \", topicInput)\n \n\n # Pass the input off to the recursive call\n patMatch, template = self._brain._match(input.split(), thatInput.split(), topicInput.split(), self._brain._root)\n\n return patMatch, template", "title": "" }, { "docid": "475358eea83883f9a01f694fadb79842", "score": "0.5416043", "text": "def regex_replace(regex, replacement, input):\n\treturn get_regex(regex).sub(replacement, input)", "title": "" }, { "docid": "2391f2b16cc4985f21d010ffd4d168a2", "score": "0.5412871", "text": "def find(string, pattern):\n # implement find_iterative and find_recursive\n # assert isinstance(string, str)\n return find_iterative(string, pattern)\n # return find_recursive(string, pattern)", "title": "" }, { "docid": "c17ec25f1ee5f4fee7c623b4f25e0047", "score": "0.53997946", "text": "def test_pattern_exists_in_string_and_returns_first_occurrence(self):\n self.assertTrue(self.pattern2 in self.string2)\n self.assertTrue(self.string2.count(self.pattern2) > 1)\n self.assertEqual(3, search(self.pattern2, self.string2))", "title": "" }, { "docid": "2ec6bee06bba45068a8db99bd7f0ba1d", "score": "0.53962237", "text": "def _sub(self, reg, result):\n self._text = re.sub(reg, result, self._text)\n return self", "title": "" }, { "docid": "c598f8fb80df0df95d821614b48d601b", "score": "0.5372047", "text": "def search(pattern, s):\n if pattern not in _regexp_compile_cache:\n _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n return _regexp_compile_cache[pattern].search(s)", "title": "" }, { "docid": "01e0257a6af1edc45edfbf89f12292c7", "score": "0.53491944", "text": "def make_replacement(\n self, pattern: str, replacement: str, regex: bool = True\n ) -> None:\n if regex:\n self.body = re.sub(pattern, replacement, self.body)\n else:\n self.body = self.body.replace(pattern, replacement)\n for part in self.parts:\n part.make_replacement(pattern, replacement, regex=regex)", "title": "" }, { "docid": "a27fd2ce055258eac810d4c73202901d", "score": "0.5329081", "text": "def regex_replace(s, find, replace):\n return re.sub(find, replace, s)", "title": "" }, { "docid": "149a7f5cc43bbe1b0029e91ad8660c47", "score": "0.53118736", "text": "def match_tokenized_regex_pattern(pattern, string):\n\tif not pattern and not string: # if reach the end of both pattern and string\n\t\treturn True\n\tif not pattern or not string: # if reach the end of either pattern and string\n\t\treturn False\n\n\tp = pattern[0]\n\tc = string[0]\n\n\t# removes cases with (letter-symbol vs. 
letter) and (non-matching single letters)\n\tif p == c or p == '.':\n\t\treturn match_tokenized_regex_pattern(pattern[1:], string[1:])\n\n\t# X* --> X can appear 0 times, once, or more (3 cases)\n\telif len(pattern) > 1:\n\t\tif pattern[0] == c:\n\t\t\tis_match = match_tokenized_regex_pattern(pattern, string[1:]) # appear >= 1 times\n\t\t\tif is_match:\n\t\t\t\treturn True\n\t\treturn match_tokenized_regex_pattern(pattern[1:], string) # appear 0 times in string\n\n\t# non-matching single letters\n\telse:\n\t\treturn False", "title": "" }, { "docid": "131b0595bca047d5615610417ed1ea8e", "score": "0.53036594", "text": "def _group(s, groups):\n for pattern, replacement in groups:\n if pattern.match(s):\n return replacement\n return s", "title": "" }, { "docid": "45ca4a249418bea4c1a3520fe7d089a7", "score": "0.5294655", "text": "def __call__(self, value, pattern, replacement, data=None, engine=None):\n return sub(pattern, replacement, value)", "title": "" }, { "docid": "6816948b2976bbf9428f34c2de76aa48", "score": "0.5293176", "text": "def match(pattern, s):\n # The regexp compilation caching is inlined in both Match and Search for\n # performance reasons; factoring it out into a separate function turns out\n # to be noticeably expensive.\n if pattern not in _regexp_compile_cache:\n _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n return _regexp_compile_cache[pattern].match(s)", "title": "" }, { "docid": "405978c54777ac5b96280e88315f4a15", "score": "0.52822506", "text": "def replace(self, pattern, content):\n pass", "title": "" }, { "docid": "32ae4555466b2f80bb2c191daeb6f875", "score": "0.52768046", "text": "def replace(\n self,\n pattern: StringValue,\n replacement: StringValue,\n ) -> StringValue:\n return ops.StringReplace(self, pattern, replacement).to_expr()", "title": "" }, { "docid": "8372720021e963f8558a77960bdd1568", "score": "0.527449", "text": "def sub(self, text):\n if self._regexIsDirty:\n self._update_regex()\n return self._regex.sub(self, text)", "title": "" }, { "docid": "6129eecad220c679072d1adcdba09ddb", "score": "0.52710116", "text": "def rfind_str(text, sub, start=None, end=None):\n assert isinstance(text,str), '%s is not a string' % text\n return text.rfind(sub,start,end)", "title": "" }, { "docid": "d50782465a645681d1d5c092b9f0f291", "score": "0.52637935", "text": "def sub (cls, regex_, replace, query):\n\n cregex = cls.regex_cache.setdefault (\n regex_, regex.compile (regex_, regex.UNICODE | regex.VERSION1))\n return cregex.sub (replace, query)", "title": "" }, { "docid": "419dd68d3f63e92821b6384617b21de8", "score": "0.5258351", "text": "def findfirst(pattern, string, flags=0):\n string_list = re.findall(pattern, string, flags)\n if string_list:\n result = string_list[0]\n else:\n result = \"\"\n return result", "title": "" }, { "docid": "75d906ffab3bcb539473a45f01812a29", "score": "0.5249003", "text": "def make_one_sub(value, sub_list):\n for s in sub_list:\n formatted, n = s['regex'].subn(s['repl'], value, count = 1)\n if n > 0:\n value = formatted\n break\n return value", "title": "" }, { "docid": "5bdffd5466b58d18a78b847bab12f217", "score": "0.5246227", "text": "def replace( self, pattern, replacement ) :\n if self._map == None : self._map = {}\n self._map[pattern] = replacement", "title": "" }, { "docid": "2bee3285b8ddd9d123d27c819b34a4ed", "score": "0.5211044", "text": "def find_str(text, sub, start=None, end=None):\n assert isinstance(text,str), '%s is not a string' % text\n return text.find(sub,start,end)", "title": "" }, { "docid": 
"61169a7f5e595c7af9e3b1f7c9eaa031", "score": "0.5208249", "text": "def _search_regex(pattern, string, group=None):\n\t\tif isinstance(pattern, (str, compat_str, type(re.compile('')))):\n\t\t\tmobj = re.search(pattern, string, 0)\n\t\telse:\n\t\t\tfor p in pattern:\n\t\t\t\tmobj = re.search(p, string, 0)\n\t\t\t\tif mobj:\n\t\t\t\t\tbreak\n\t\tif mobj:\n\t\t\tif group is None:\n\t\t\t\t# return the first matching group\n\t\t\t\treturn next(g for g in mobj.groups() if g is not None)\n\t\t\telse:\n\t\t\t\treturn mobj.group(group)\n\t\telse:\n\t\t\tprint('[YouTubeVideoUrl] unable extract pattern from string!')\n\t\t\treturn ''", "title": "" }, { "docid": "ceb06e77d428b69be4e1c30f6da9abaa", "score": "0.5192851", "text": "def replace_line(s, start, replace):\n lines = s.split('\\n')\n found = False\n for i in range(len(lines)):\n if lines[i].startswith(start):\n lines[i] = replace\n found = True\n break\n if not found:\n raise ValueError('could not match string %s' % start)\n return '\\n'.join(lines)", "title": "" }, { "docid": "9e8223743c7212570623c42f4bd4fe17", "score": "0.5191779", "text": "def re_replace(\n self,\n pattern: str | StringValue,\n replacement: str | StringValue,\n ) -> StringValue:\n return ops.RegexReplace(self, pattern, replacement).to_expr()", "title": "" }, { "docid": "2a412fd90e17fb182f6ca3fbf6eebb78", "score": "0.5191737", "text": "def addPattern(self, text):\n def ms(s):\n if options.ignore_case:\n return s.lower()\n return s\n\n flags = 0\n if options.ignore_case:\n flags = re.I\n t = \"s\"\n isRegExp = False\n if text.startswith(\"d:\"):\n t = \"d\"\n text = text[2:]\n if text.startswith(\"s:\"):\n text = text[2:]\n if text.startswith(\"~\"):\n text = text[1:]\n isRegExp = True\n if isRegExp:\n try:\n p = re.compile(text, flags)\n except re.error as exc:\n sys.stderr.write(\"%r is not a valid regular expression\\n\" % text)\n sys.stderr.write(\" %s\\n\" % exc)\n sys.exit(1)\n if t == \"d\":\n self.descriptionPatterns.append(p)\n else:\n self.summaryPatterns.append(p)\n else:\n if t == \"d\":\n self.descriptionText.append(ms(text))\n else:\n self.summaryText.append(ms(text))", "title": "" }, { "docid": "fe657592f8b634bacfdb5208ac2c2cd4", "score": "0.5184157", "text": "def genPattern(self,patternStr,ignoreCase=True):\n\t\tif ignoreCase:\n\t\t\treturn re.compile(patternStr,re.IGNORECASE)\n\t\telse:\n\t\t\treturn re.compile(patternStr)", "title": "" }, { "docid": "12a2468e6e0f4532bb6c5050eb3120e3", "score": "0.51750845", "text": "def remove(sub, strng):\n if sub not in strng:\n return strng\n else:\n index = strng.find(sub)\n result = ''\n for i in range(index):\n result += strng[i]\n for i in range(index+len(sub), len(strng)):\n result += strng[i]\n return result", "title": "" }, { "docid": "7167b5980a90c5a3ddd18f814fcfcdd3", "score": "0.5170874", "text": "def parse(expression, pattern, replacer):\n regex = [p[\"regexp\"] for p in Airbot.patterns if p[\"name\"] == pattern][0]\n return re.sub(regex, replacer, expression)", "title": "" }, { "docid": "24eda24a2728378fb019effe84dcbfbf", "score": "0.51561236", "text": "def regexp_replace(\n string: \"ColumnOrName\", pattern: Union[str, Column], replacement: Union[str, Column]\n) -> Column:\n if isinstance(pattern, str):\n pattern_col = _create_column_from_literal(pattern)\n else:\n pattern_col = _to_java_column(pattern)\n if isinstance(replacement, str):\n replacement_col = _create_column_from_literal(replacement)\n else:\n replacement_col = _to_java_column(replacement)\n return _invoke_function(\"regexp_replace\", 
_to_java_column(string), pattern_col, replacement_col)", "title": "" }, { "docid": "d6b82b230389afbd8225eaa562fef108", "score": "0.51526624", "text": "def make_all_subs(value, sub_list):\n for s in sub_list:\n value = s['regex'].sub(s['repl'], value, count = 1)\n return value", "title": "" }, { "docid": "0ec4f45bfc2b348fc4314ad9eb4f903f", "score": "0.514604", "text": "def regex_replace(string, expr, repl):\n r = re.compile(raw(expr))\n return r.sub(repl, string)", "title": "" }, { "docid": "4bae5403d87001cbc1a63dfc50496790", "score": "0.51357526", "text": "def replace_pattern(\n df: pd.DataFrame,\n column: str,\n *,\n pat: str,\n repl: str,\n new_column: Optional[str] = None,\n case: bool = True,\n regex: bool = True,\n) -> pd.DataFrame:\n new_column = new_column or column\n df.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex)\n return df", "title": "" }, { "docid": "753503338cf964a7b9a2760b6771b28f", "score": "0.5132225", "text": "def set_name_from_pattern_xref(base_addr, end, name, pattern):\r\n pattern_offset = ida_search.find_binary(\r\n base_addr, end, pattern, 16, ida_search.SEARCH_DOWN\r\n )\r\n if pattern_offset == ida_idaapi.BADADDR or pattern_offset is None:\r\n return ida_idaapi.BADADDR\r\n\r\n xref = list(idautils.XrefsTo(pattern_offset))\r\n if len(xref) == 0:\r\n return ida_idaapi.BADADDR\r\n\r\n function = idaapi.get_func(xref[0].frm)\r\n if function is None:\r\n return ida_idaapi.BADADDR\r\n idc.set_name(function.start_ea, name, idc.SN_CHECK)\r\n print(f\"[+] {name} : {hex(function.start_ea)}\")\r\n return function.start_ea", "title": "" }, { "docid": "7737a91fd8d061bd93c4dd0cabb7ae85", "score": "0.5100343", "text": "def regex_pattern(stem_str):\n str_list = stem_str.split('-')\n # add word boundary\n if len(str_list) == 2:\n pos = 2 - str_list.index('')*2\n pos2 = 3 - str_list.index('')*3\n str_list.insert(pos, '\\\\b')\n str_list.insert(pos2, '.*')\n # replace '-' with '.*'\n pat_str = ''.join([i if len(i) > 0 else '.*' for i in str_list])\n return pat_str", "title": "" }, { "docid": "ac00ec9da264d03817acf850279af23a", "score": "0.5071816", "text": "def sub_cdn(self, s, m):\r\n res_fn = m.group(self.file_index)\r\n for sub in self.subs:\r\n pat = sub[\"pattern\"]\r\n rep = sub[\"replacement\"]\r\n m2 = re.search(pat, res_fn)\r\n if m2:\r\n s1 = s[:m.start(self.file_index)] + rep + s[m.end(self.file_index):]\r\n return s1\r\n return False", "title": "" }, { "docid": "8d0a2c1561d25b5144a81e4d4db9e227", "score": "0.5058605", "text": "def replaceStrs(s, *args):\n if args == (): return s\n mapping = dict([(frm, to) for frm, to in args])\n return re.sub(\"|\".join(map(re.escape, mapping.keys())),\n lambda match:mapping[match.group(0)], s)", "title": "" }, { "docid": "a63a679edf1e3b6acca19cc5d27d47b8", "score": "0.5044197", "text": "def find_replace_regex(chunk, out, find, sub=\"\"):\n util.write_annotation(out, ((key, re.sub(find, sub, val)) for (key, val) in util.read_annotation_iteritems(chunk)))", "title": "" }, { "docid": "9ed960bf1ec5ca795855a2c463b33e55", "score": "0.5035251", "text": "def search(pattern, text, flags=0):\r\n\tmatch_obj = re.search(pattern, text, flags)\r\n\tdump_results(match_obj, text)", "title": "" }, { "docid": "f69ece641595895143a6e65f4ce110c7", "score": "0.50253934", "text": "def match(pattern,word):\n if not pattern or pattern == '*':\n # No pattern/wildcard, matches everything\n return True\n # Only simple patterns considered for now\n if pattern.endswith('*'):\n # Match the start\n return 
word.startswith(pattern[:-1])\n else:\n # Match the whole word exactly\n return (word == pattern)", "title": "" }, { "docid": "1f1d0bfab84c4d59b7c6302586f0ab24", "score": "0.50040156", "text": "def _matches(self, pattern: str, string: str) -> bool:\n regex_pattern = '^' + pattern.replace('*', '.*') + '$'\n return bool(re.match(regex_pattern, string))", "title": "" }, { "docid": "14fd9e45f65f1aae8512785edab38951", "score": "0.49961507", "text": "def case_insensitive_replace(haystack: str, needle: str, replacement: str) -> str:\n regex = re.compile(re.escape(needle), re.IGNORECASE)\n return regex.sub(replacement, haystack)", "title": "" }, { "docid": "6078d34c11aa9f1096ba5219677497b9", "score": "0.4988578", "text": "def strlookup(pattern, space):\n return fnmatch.filter(space, pattern)", "title": "" }, { "docid": "d9e5964f7cda67bdebb29d995ba22ce1", "score": "0.49839634", "text": "def kmp(string, pattern):\n\n # either string or pattern is empty, return -1\n if len(string) * len(pattern) == 0:\n return -1\n\n # build table of shifts\n pattern = list(pattern)\n lp = len(pattern)\n shifts = [1] * (lp + 1)\n shift = 1\n for p in range(lp):\n while shift <= p and pattern[p] != pattern[p - shift]:\n shift += shifts[p - shift]\n shifts[p + 1] = shift\n\n # search\n start_pos = 0\n match_len = 0\n for c in string:\n while match_len == lp or \\\n match_len >= 0 and pattern[match_len] != c:\n start_pos += shifts[match_len]\n match_len -= shifts[match_len]\n\n match_len += 1\n\n if match_len == lp:\n return start_pos\n\n return -1", "title": "" }, { "docid": "70c50827d50fb849904fbd45806d753c", "score": "0.49817824", "text": "def pattern(word, pat):\n matches = {}\n if len(word) < len(pat):\n return False\n for (w,p) in zip(word, pat):\n if p not in matches:\n matches[p] = w\n elif matches[p] != w:\n return pattern(word[1:], pat)\n return True", "title": "" }, { "docid": "cef40f328bd2c914cd5377895fe02ac7", "score": "0.4973586", "text": "def replace_wildcard_with_time(aa,string1):\n # Set wild card string\n if aa.outfile_frequency=='year':\n x2=str(aa.year)\n elif aa.outfile_frequency=='month':\n x2=str(aa.year)+str(aa.month).zfill(2)\n else:\n raise ToDoError('Need to code up for different outfile_frequency')\n # Replace wild card characters\n string2=string1.replace(aa.wildcard,x2)\n\n return string2", "title": "" }, { "docid": "a44739ef656aabd86cb9d4d050030768", "score": "0.49441954", "text": "def index_str(text, sub, start=None, end=None):\n assert isinstance(text,str), '%s is not a string' % text\n return text.index(sub,start,end)", "title": "" }, { "docid": "6dd5b1ef8f5df62b3e3a077acbd1a7db", "score": "0.493214", "text": "def is_sublist_of(pattern, text):\n\n if len(pattern) > len(text):\n return False\n\n if not pattern:\n return True\n\n for possible_start in ifindall(pattern[0], text):\n if text[possible_start : possible_start+len(pattern)] == pattern:\n return True\n\n return False", "title": "" }, { "docid": "ac8e594ba0b916545501499a10f85757", "score": "0.49176946", "text": "def __init__(self, pattern):\n self.idx = 0\n self.prefix, replace, self.suffix = re.split(\"(#+)\", pattern)\n self.sublen = len(replace)", "title": "" }, { "docid": "9be0c1e60a1843a85548877ef24ff01d", "score": "0.4915615", "text": "def replace(self, pattern, repl, *args, **kwargs):\n c = self.request.replace(pattern, repl, *args, **kwargs)\n if self.response:\n c += self.response.replace(pattern, repl, *args, **kwargs)\n return c", "title": "" }, { "docid": "7470cbf0d604dcc72e67e4ebdfb06de9", "score": "0.49127653", 
"text": "def similar_to_pattern(word,pattern):\r\n flag = True\r\n for i in range(len(word)):\r\n if pattern[i] == '_':\r\n continue\r\n elif word[i] != pattern[i]:\r\n flag = False\r\n return flag", "title": "" }, { "docid": "1a916db5fd339a67adefb0a4fbe9ba4f", "score": "0.49120873", "text": "def test_replace(self):\n\n regex = \"th\"\n expected = \"Hello, htis is Fun Ilrys. I just wanted to know how htings goes around hte tests.\" # pylint: disable=line-too-long\n actual = Regex(regex).replace_match(self.data, \"ht\")\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "dfc257944cf1d6166c9f0f2fbbf3bbdf", "score": "0.4909576", "text": "def match(pattern, text):\n if pattern == '':\n return True\n elif pattern == '$':\n return (text == '')\n elif len(pattern) > 1 and pattern[1] in '*?':\n p, op, pat = pattern[0], pattern[1], pattern[2:]\n if op == '*':\n return match_star(p, pat, text)\n elif op == '?':\n if match1(p, text) and match(pat, text[1:]):\n return True\n else:\n return match(pat, text)\n else:\n return (match1(pattern[0], text) and\n match( pattern[1:],text[1:])) # fill in this line", "title": "" }, { "docid": "3d377255580b54a0c740f2659da53268", "score": "0.49065128", "text": "def rindex_str(text, sub, start=None, end=None):\n assert isinstance(text,str), '%s is not a string' % text\n return text.rindex(sub,start,end)", "title": "" }, { "docid": "b6997956d36c0ca6461c6cabbb12ae95", "score": "0.49021098", "text": "def substitute_chars(self, column, pattern, val_sub,\n val_exception=np.nan, val_none=np.nan,\n inplace=True, return_series=False,\n target_column=None):\n series = self.df[column].copy().astype(str).apply(\n lambda x: regex_sub_value(\n val=x,\n pattern=pattern,\n val_sub=val_sub,\n val_exception=val_exception,\n val_none=val_none,\n )\n )\n return inplace_return_series(self.df, column, series,\n inplace, return_series, target_column)", "title": "" }, { "docid": "f8a98be8f3e0db7b718392cfab9fef5d", "score": "0.48936597", "text": "def prefix_sub (match_object):\n s = match_object.group (0)\n return cls.prefix_to_prefix.get (s, s)", "title": "" }, { "docid": "f57227a1432322ff07f027d4a6c17e78", "score": "0.48791716", "text": "def apply_pattern(view, pattern, func):\n result = parameterNameRegex.sub(lambda m: func(getparameters_byname(view, m.group(1)),\n m.group(1)),\n pattern)\n result = parameterGuidRegex.sub(lambda m: func(getparameter(view, m.group()),\n m.group()),\n result)\n result = parameterBipRegex.sub(lambda m: func(getparameter(view, \"BuiltInParameter.\" + m.group(1)),\n m.group(1)),\n result)\n return result", "title": "" }, { "docid": "38bb9fb5b3ce6bdf98661652978f5736", "score": "0.48766243", "text": "def matchPat(pre,str):\n if pre=='':\n return True\n pre=duplicateAsteriskRemover(pre)\n if len(pre)>len(str) and pre[0]!='*':\n return False\n elif len(pre) == 1:\n if pre[0]==str[0] or pre[0]=='*':\n return True\n else:\n return False\n elif pre[0]=='*':\n a=1\n c=0\n while c<len(str):\n if pre[a]==str[c]:\n return matchPat(pre[a:],str[c:])\n else:\n c+=1\n if pre[0]!=str[0]:\n return False\n else:\n return matchPat(pre[1:],str[1:])", "title": "" }, { "docid": "729f5921190b2c2612d8932a3096adb0", "score": "0.48736763", "text": "def _glob_match(self, pattern, string):\r\n # regex flags Multi-line, Unicode, Locale\r\n return bool(re.match(fnmatch.translate(pattern), string,\r\n re.M | re.U | re.L))", "title": "" }, { "docid": "25ad35abc2841933328d7853d2ca02bb", "score": "0.4857707", "text": "def generateRegexFromPattern( pattern ):\r\n\t\r\n\treturn 
re.compile(pattern)", "title": "" }, { "docid": "47cb09b4813e5dfe57c4181928117855", "score": "0.4857365", "text": "def replace_identifiers(string):\n return re.sub(string=string, **RE['identifiers'])", "title": "" }, { "docid": "d93ef63dd421280b5a7f218d356a710c", "score": "0.4850763", "text": "def contains(text, pattern):\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n # TODO: Implement contains here (iteratively and/or recursively)\n num = 0 #index to keep track of which letter in pattern you're comparing to you\n if len(pattern) == 0:\n return True \n for letter in text:\n #if they arent equal and you're part of the way into comapring to the pattern\n if num>0 and letter != pattern[num]:\n num = 0\n # check if your current letter matches a letter in pattern\n if letter == pattern[num]:\n num+=1\n #if you're finished with the pattern then finish ahead of time\n if num > len(pattern)-1:\n return True\n \n return False", "title": "" }, { "docid": "e09fcfc543e3aae86084548ae081e6c8", "score": "0.48504576", "text": "def match(word, pattern):\n # if the pattern is empty, only match empty word\n if len(pattern) == 0: return len(word) == 0\n # if the word is empty, only match \"*\"\n if len(word) == 0: return pattern == \"*\"\n \n # otherwise try to match first char in pattern\n if pattern[0] == \"?\" or pattern[0] == word[0]:\n # try to match pattern and word without first chars\n return match(word[1:], pattern[1:])\n elif pattern[0] == \"*\":\n # skip chars and try to match with rest of pattern\n for i in range(len(word)+1):\n if match(word[i:], pattern[1:]):\n return True\n return False\n else:\n return False", "title": "" }, { "docid": "b7822c9b72166c429f17ed848a7d058a", "score": "0.4846589", "text": "def resolve(unresolved_pattern, pattern_dictionary, regex_pat=re.compile(\"\"\"%\\{(.+?)\\}\"\"\")):\n #Get all referenced patterns into set\n pattern_set = set(regex_pat.findall(unresolved_pattern))\n #Return original pattern if it does not reference any other patterns\n if not pattern_set:\n return \"(?:\"+unresolved_pattern+\")\"\n for unres_pat in pattern_set:\n #If a pattern is known:\n if unres_pat.split(\":\")[0] in pattern_dictionary.keys():\n #Resolve it\n unresolved_pattern = unresolved_pattern.replace(encaps(unres_pat), resolve(pattern_dictionary[unres_pat.split(\":\")[0]], pattern_dictionary))\n return unresolved_pattern", "title": "" }, { "docid": "4ce31eca01966b34b22296f17c252384", "score": "0.48435923", "text": "def match(pattern='', priority=PRIORITY_LOW):\n return Match(pattern, priority=priority).decorate", "title": "" }, { "docid": "97eb9a2ccbad49e876d552d19ca9b833", "score": "0.48368943", "text": "def convertPattern(pattern, sign):\n\n # Check for include vs exclude patterns.\n if pattern[:2] == \"+ \":\n pattern = pattern[2:]\n sign = \"+\"\n elif pattern[:2] == \"- \":\n pattern = pattern[2:]\n sign = \"-\"\n\n # Express windows, mac patterns in unix patterns (rsync.py extension).\n separator = os.path.normpath(\"/\")\n if separator != \"/\":\n pattern = re.sub(re.escape(separator), \"/\", pattern)\n\n # If pattern contains '/' it should match from the start.\n temp = pattern\n if pattern[0] == \"/\":\n pattern = pattern[1:]\n if temp[-1] == \"/\":\n temp = temp[:-1]\n\n # Convert pattern rules: ** * ? 
to regexp rules.\n pattern = re.escape(pattern)\n pattern = string.replace(pattern, \"\\\\?\", \".\")\n pattern = string.replace(pattern, \"\\\\*\\\\*\", \".*\")\n pattern = string.replace(pattern, \"\\\\*\", \"[^/]*\")\n pattern = string.replace(pattern, \"\\\\*\", \".*\")\n\n if \"/\" in temp:\n # If pattern contains '/' it should match from the start.\n pattern = \"^\\\\/\" + pattern\n else:\n # Else the pattern should match the all file or folder name.\n pattern = \"\\\\/\" + pattern\n\n if pattern[-2:] != \"\\\\/\" and pattern[-2:] != \".*\":\n # File patterns should match also folders.\n pattern = pattern + \"\\\\/?\"\n\n # Pattern should match till the end.\n pattern = pattern + \"$\"\n return (sign, pattern)", "title": "" }, { "docid": "70beee47c206720b1c48a8cf9a4a2245", "score": "0.48351344", "text": "def reduce_lengthening(self, data, pattern):\n data.loc[:, self.tweets_col] = data.loc[:, self.tweets_col].apply(lambda tweet: pattern.sub(r\"\\1\\1\", tweet))\n return data", "title": "" }, { "docid": "33a995c13cde0ca51549c28ee9104686", "score": "0.48343244", "text": "def assertInString(self, conn_string, pattern):\r\n found = conn_string.find(pattern) != -1\r\n self.assertTrue(found,\r\n \"pattern \\\"%s\\\" was not found in connection string \\\"%s\\\"\" % (pattern, conn_string))", "title": "" }, { "docid": "00df529494eaee98878f970301328f02", "score": "0.48266596", "text": "def compile_pcre_pattern(pattern, additional_flags=0):\n try:\n if pattern[0] != '/':\n return re.compile(pattern, additional_flags)\n\n parts = pattern.split('/')\n modifiers = parts[-1]\n newpattern = pattern[1:-1 - len(modifiers)]\n flags_lut = {'i': re.I, 's': re.S, 'm': re.M, 'u': re.U}\n flags = re.U\n\n for i in modifiers:\n if i in flags_lut:\n flags |= flags_lut[i]\n\n flags |= additional_flags\n return re.compile(newpattern, flags)\n except Exception as e:\n raise type(e)(str(e) + \"\\nFailed pattern: %s, %s\" % (pattern, newpattern)) \\\n .with_traceback(sys.exc_info()[2])", "title": "" }, { "docid": "f1612c4925ca8887d11f9b90513d286c", "score": "0.48177585", "text": "def nth_repl(s, sub, repl, nth):\n find = s.find(sub)\n # if find is not p1 we have found at least one match for the substring\n i = find != -1\n # loop util we find the nth or we find no match\n while find != -1 and i != nth:\n # find + 1 means we start at the last match start index + 1\n find = s.find(sub, find + 1)\n i += 1\n # if i is equal to nth we found nth matches so replace\n if i == nth:\n return s[:find]+repl+s[find + len(sub):]\n return s", "title": "" }, { "docid": "6bbe3889ec0e5ff85302ce4d907f84a8", "score": "0.48167387", "text": "def copy_replace(src, dst, pattern=None, replace_value=None):\n file1 = open(src, 'r') if isinstance(src, str) else src\n file2 = open(dst, 'w') if isinstance(dst, str) else dst\n pattern = (\n [pattern] if isinstance(pattern, str)\n else pattern\n )\n replace_value = (\n [replace_value] if isinstance(replace_value, str)\n else replace_value\n )\n if replace_value and pattern:\n if len(replace_value) != len(pattern):\n raise Exception(\"Invalid parameters: pattern and replace_value\"\n \" have different sizes.\")\n rules = [\n (re.compile(regex, re.IGNORECASE), value)\n for regex, value in zip(pattern, replace_value)\n ]\n else:\n rules = []\n for line in file1:\n if rules:\n for rule in rules:\n line = re.sub(rule[0], rule[1], line)\n file2.write(line)\n if isinstance(src, str):\n file1.close()\n if isinstance(dst, str):\n file2.close()", "title": "" }, { "docid": 
"d13cf393775558f61af4a4060f68f59c", "score": "0.481368", "text": "def substitute(self,command,callsign):\n if command.find(' *')>0:\n # placeholder not at the beginning\n command.replace('*',callsign.lower())\n elif command.find('*')>0:\n # placeholder at the beginning\n command.replace('*',callsign)\n else:\n # no placeholder, prepend callsign\n command = callsign + '; ' + command\n return command", "title": "" }, { "docid": "4e3b1f3e4e7036edcadb41c14581bc2f", "score": "0.48039755", "text": "def transformPattern(self, pattern):\n raise NotImplementedError(\"Should have implemented this\")", "title": "" } ]
6b82f7758ded5229235eb1ea75beaed1
Show specific post comment
[ { "docid": "e37729c20a16f0bc02a8c8d3290c90c5", "score": "0.59195065", "text": "def comment(post_id, comment_id):\n check_comment(post_id, comment_id, False)\n\n db = get_db()\n comment_sql = get_comment_of_post(db, post_id, comment_id)\n\n comment = dict(comment_sql)\n return jsonify(comment)", "title": "" } ]
[ { "docid": "90b5fce23163cc1947185be27a583845", "score": "0.7372231", "text": "def post_detail(request, id):\n post = Post.objects.get(id=id)\n return render(request, 'comment/post_detail.html', {'post': post})", "title": "" }, { "docid": "c7bd1ff09b7932f489e77bb54fe2afb7", "score": "0.7263986", "text": "def view_post(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n comment_form = CommentForm()\n return render(request, 'view_post.html', {\n 'post': post,\n 'comment_form' : comment_form\n })", "title": "" }, { "docid": "6d1c6f3636bd133d4ec45f8d970aa820", "score": "0.71852803", "text": "def view_post(post_id):\n post = dbmodel.Post.query.get_or_404(post_id)\n comments = dbmodel.Comment.query.filter_by(parent_post_id=post_id)\n return render_template('post.html', post=post, comments=comments)", "title": "" }, { "docid": "8bc9074f80fa2da6473f3f6d35397ef8", "score": "0.7105406", "text": "def get(self, post):\n if not self.user:\n return self.redirect('/login')\n\n post_comments = post.get_all_comments()\n if not post_comments:\n post_comments = []\n return self.render('post.html', post=post, post_comments=post_comments)", "title": "" }, { "docid": "38e1e370a77939dffb733075ce0e8821", "score": "0.6943954", "text": "def get(self, post_id):\n if self.user:\n self.render(\"blog_newcomment_page.html\")\n else:\n self.redirect(\"/login\")", "title": "" }, { "docid": "4a50712a72eba1764a71c5ca05e82be2", "score": "0.68175966", "text": "def show_post(post_id):\n\n post = Post.query.get(post_id)\n\n return render_template('post.html', post=post)", "title": "" }, { "docid": "2746b2faffaf877560839a959cd41301", "score": "0.6796878", "text": "def show_post(post_id):\n post = Post.query.get_or_404(post_id)\n return render_template(\"/post_details.html\", post=post)", "title": "" }, { "docid": "441de05e103f9c735b95d721a066f112", "score": "0.6696225", "text": "def get(self, post_key):\n form = CommentForm()\n is_liked = Like.is_liked_by_user(self.user, self.post)\n\n self.render_viewpost(form, is_liked)", "title": "" }, { "docid": "6981d4e2937bc56aa985e2bbca20128e", "score": "0.66875017", "text": "def show_post_details(post_id):\n\n post = Post.query.get_or_404(post_id)\n\n return render_template('posts/post.html', post= post)", "title": "" }, { "docid": "5bbe9542b66267bf480f0dbbe09cb86c", "score": "0.6674287", "text": "def post(self, post_id):\n\n logging.info(\"comment\")\n if not self.user:\n self.redirect('/blog')\n return\n\n content = self.request.get(\"content\")\n user = self.user.name\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n # blogpost = db.get(key)\n logging.info(user)\n\n #escaping the html to avoid xss\n escaped_cont = cgi.escape(content)\n\n if content and user:\n comment = Comments(content=escaped_cont, user=user, post=int(post_id))\n comment.put()\n\n self.redirect('/blog/%s' % str(post_id))\n else:\n error = \"Comment not included\"\n self.render(\"blog_newcomment_page.html\", content=content, error=error)", "title": "" }, { "docid": "3e04684dcce46a0dee36a9db311807e5", "score": "0.6658497", "text": "def post_comment_on_fly(request):\n if request.method == 'GET':\n post_pk = request.GET['post_pk']\n comment_details = request.GET['comment_details']\n post = get_object_or_404(models.Post, pk=post_pk)\n comment = models.Comment.objects.create(post=post, detail=comment_details)\n date_time = comment.created_date.strftime(\"%B %d, %Y %H:%M %p\")\n body = \"\"\"\n <div class=\"comment\">\n <div class=\"date\">\n %s\n </div>\n <p>%s : by 
<strong>%s</strong></p>\n \"\"\" % (date_time, comment.detail, comment.post.author)\n return HttpResponse(body)", "title": "" }, { "docid": "ae991de82c3276d932448bd15b943e45", "score": "0.6642535", "text": "def post_detail(request, pk):\n\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n\n post_comments = PostComment.objects.filter(\n post_id=post.id).order_by('-date_commented')\n\n if request.method == 'POST':\n create_comment_form = PostComment(\n comment_detail=request.POST.get('comment_detail'),\n user=request.user,\n post=post\n )\n create_comment_form.save()\n messages.success(request, 'Comment added to Post')\n return redirect('post_detail', pk)\n\n context = {\n 'post': post,\n 'form': AddComment,\n 'comments': post_comments\n }\n return render(request, \"blog/postdetail.html\", context)", "title": "" }, { "docid": "5b22f4522434446a6820d638817bf23d", "score": "0.6613093", "text": "def comment(request, blog_id):\n blog = get_object_or_404(Blog, pk=blog_id)\n return render_to_response('comment.html', \n {'comments': blog.comment_set.all(), 'blog': blog})", "title": "" }, { "docid": "ebc602237fbd1157a569f561a09da587", "score": "0.6592421", "text": "def detail(request, post_id):\n post = get_object_or_404(Post, pk=post_id)\n comments = post.comments.all()\n new_comment = None\n\n if request.method == 'POST':\n comment_form = CommentForm(request.POST)\n if comment_form.is_valid() and request.user.is_authenticated:\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.author = request.user\n new_comment.save()\n messages.success(request, 'Comment added successfully!')\n return redirect(reverse('article-detail', args=[post.id]))\n else:\n messages.error(request, 'Please check the form for errors. 
\\\n Comment failed to post.')\n else:\n comment_form = CommentForm()\n\n template = 'article_details.html'\n\n context = {\n 'post': post,\n 'comments': comments,\n 'comment_form': comment_form,\n 'new_comment': new_comment,\n }\n\n return render(request, template, context)", "title": "" }, { "docid": "42ddd5c1a8cc3be0762e3a3bc4f193d6", "score": "0.6575711", "text": "def post_detail(request, year, month, day, post):\n post = get_object_or_404(Post, slug=post,\n status='published',\n publish__year=year,\n publish__month=month,\n publish__day=day)\n\n # lista de comentarios para este post\n comments = post.comments.filter(active=True) # recupera todos comentários ativos\n\n new_comment = None\n\n if request.method == \"POST\":\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.save()\n\n else:\n comment_form = CommentForm()\n\n #lista de posts similares\n post_tags_id = post.tags.values_list('id', flat=True)\n\n similar_posts = Post.published.filter(tags__in=post_tags_id)\\\n .exclude(id=post.id)\n similar_posts = similar_posts.annotate(same_tags=Count('tags'))\\\n .order_by('-same_tags','-publish')[:4]\n\n\n return render(request, path_detalhe, {'post':post,\n 'comments':comments,\n 'new_comment':new_comment,\n 'comment_form':comment_form,\n 'similar_posts':similar_posts})", "title": "" }, { "docid": "86c1a78f0f800b7a252a3375b7267dfd", "score": "0.65154254", "text": "def show_post(index):\r\n requested_post = None\r\n for blog_post in post_objects:\r\n if blog_post.id == index:\r\n requested_post = blog_post\r\n return render_template(\"post.html\", post=requested_post)", "title": "" }, { "docid": "9803868473e014f9b0bd143b9b92a24b", "score": "0.65096605", "text": "def post_comment(request, post_pk):\n post = get_object_or_404(models.Post, pk=post_pk)\n form = forms.CommentForm()\n if request.method == 'POST':\n form = forms.CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n\n return HttpResponseRedirect(post.get_absolute_url())", "title": "" }, { "docid": "eb428f9e3fa1c8e15115e616a7be65d2", "score": "0.6491254", "text": "def post_show(request, pid):\n user = request.user\n\n # populate the session data\n sess = middleware.Session(request)\n tab = \"posts\"\n pill = sess.get_tab()\n \n auth = user.is_authenticated()\n layout = settings.USER_PILL_BAR if auth else settings.ANON_PILL_BAR\n \n params = html.Params(tab=tab, pill=pill, layout=layout)\n \n query = get_post_manager(request)\n\n try:\n root = query.get(id=pid)\n if not root.top_level:\n return html.redirect( root.get_absolute_url() )\n\n # update the views for the question\n models.update_post_views(post=root, request=request, minutes=const.POST_VIEW_UPDATE)\n counts = sess.get_counts()\n \n except models.Post.DoesNotExist, exc:\n messages.warning(request, 'The post that you are looking for does not exists. 
Perhaps it was deleted!')\n return html.redirect(\"/\")\n \n # get all answers to the root\n children = models.Post.objects.filter(root=root).exclude(type=POST_COMMENT, id=root.id).select_related('author', 'author__profile').order_by('-accepted', '-score', 'creation_date')\n \n # comments need to be displayed by creation date\n comments = models.Post.objects.filter(root=root, type=POST_COMMENT).select_related('author', 'author__profile').order_by('creation_date')\n\n all = [ root ] + list(children) + list(comments)\n # add the various decorators\n \n models.decorate_posts(all, user)\n \n # may this user accept answers on this root\n accept_flag = (user == root.author)\n \n # these are all the answers\n answers = [ o for o in children if o.type == POST_ANSWER ]\n for a in answers:\n a.accept_flag = accept_flag\n \n # get all the comments\n tree = defaultdict(list)\n for comment in comments: \n tree[comment.parent_id].append(comment)\n \n # generate the tag cloud\n #tags = models.Tag.objects.all().order_by('-count')[:50]\n \n return html.template( request, name='post.show.html', root=root, answers=answers, tree=tree, params=params, counts=counts)", "title": "" }, { "docid": "2c8969424c554f552f3d0fff1277bd13", "score": "0.64574575", "text": "def post_comment(self):\n slug = str(self.article.slug)\n url = reverse(\"articles:comment\", args=[slug])\n return self.client.post(\n url, self.comment\n )", "title": "" }, { "docid": "3da2d9f2dc3faae054ae941f4b649911", "score": "0.64555997", "text": "def display_post(post_id):\n\n post = Post.query.get(post_id)\n\n return render_template('post_details.html', post=post)", "title": "" }, { "docid": "c260ed4702864ab81be87afb457f3b19", "score": "0.6418798", "text": "def show_wall_post(request, username, post_id):\n\n try:\n wall_post = UserActivity.objects.select_related().get(\n short_review__id = post_id,\n username__iexact = username,\n status = UserActivity.PUBLIC_STATUS)\n except UserActivity.DoesNotExist:\n raise Http404\n\n try:\n user = User.objects.get(username__iexact=username)\n except User.DoesNotExist:\n raise Http404\n\n data = {\n 'activity' : wall_post,\n 'user_profile': user\n }\n\n return render(\n request,\n templates['WALL_POST'],\n data,\n )", "title": "" }, { "docid": "43416164e422f0829d7c89d459a53d3a", "score": "0.6390816", "text": "def view_post(post_id):\n post = mongo.db.reviews.find_one({\"_id\": ObjectId(post_id)})\n if post:\n comments = list(mongo.db.comments.find(\n {\"location_id\": post['location_id']}))\n if 'user' in session:\n fav_user = list(mongo.db.favourites.find(\n {\"favourite_user\": session['user']}))\n else:\n fav_user = None\n return render_template(\n \"view_post.html\", post=post, comments=comments, fav_user=fav_user)\n flash(\"Post does not exist\")\n return redirect(url_for('get_locations'))", "title": "" }, { "docid": "7b7848bded87fe53154eefa999865b45", "score": "0.6345191", "text": "def show_post(self, request: foundation.Request) -> foundation.Response:\n\n id = request.params['id']\n\n orm_session = orm.sessionmaker(bind=self.get_engine())()\n\n post = orm_session.query(models.Post).filter_by(id=id).one()\n\n return self.render2response(request, 'blog/show.html', post=post,\n blog=self, url_for=self.url_for)", "title": "" }, { "docid": "1dd4dd9dbb263f246a41cb257d4d2210", "score": "0.6344819", "text": "def view_post(request, post_id):\n\n def can_view_post(user, pid):\n post_ = models.Post.objects.get(pk=pid)\n for member in user.member_set.all():\n if member.Organization_id == 
post_.Creator.Organization_id:\n return True\n return False\n\n post = models.Post.objects.get(pk=post_id)\n if not post.Visible:\n if can_view_post(request.user, post_id):\n return render(request, 'view_post.html',\n {'title': post.Title, 'Content': post.Content,\n 'user': request.user,\n 'loginable': request.user.is_authenticated()})\n else:\n return render(request, 'Error.html',\n {'error_summary': 'Not visible',\n 'error_details': 'You must have permission to view '\n 'this post'})\n else:\n return render(request, 'view_post.html',\n {'title': post.Title, 'Content': post.Content,\n 'user': request.user,\n 'loginable': request.user.is_authenticated()})", "title": "" }, { "docid": "ae89532ae37ecf3280171bbcd3b8dcf1", "score": "0.6330918", "text": "def post_comment(source_post, parent):\n try:\n askbot_comment = source_post.get_author().post_comment(\n parent_post = parent,\n body_text = source_post.get_body_text(),\n timestamp = source_post.created_at\n )\n return askbot_comment\n except Exception, e:\n msg = unicode(e)\n print \"Warning: post %d skipped: %s\" % (source_post.post_id, msg)", "title": "" }, { "docid": "7f3c284c4abc8719b29277d1027512a0", "score": "0.62737817", "text": "def post_comment(request, post_id):\n if request.method == 'POST':\n post = get_object_or_404(Post, id=post_id)\n comment_form = CommentForm(request.POST)\n if comment_form.is_valid():\n comment = Comment(\n body = comment_form.cleaned_data['body'],\n date = datetime.now(),\n post = post,\n user = request.user\n )\n comment.save()\n messages.info(request, 'Comment Posted!')\n else:\n messages.error(request, 'Invalid comment')\n return HttpResponseRedirect(reverse('view_post', args=[post_id]))", "title": "" }, { "docid": "29f2bee9380e862dd2ae637eb7322945", "score": "0.6212544", "text": "def detail(id):\n post = get_post(id, check_author=False)\n comments = get_comments_for_post(post_id=id)\n users_likes = get_users_liking_post(post_id=id)\n\n # Post existence is already checked in get_post\n return render_template(\n \"blog/detail.html\",\n post=post,\n comments=comments,\n users_likes=users_likes\n )", "title": "" }, { "docid": "8085d8f8ff6981e0109333af03ef8f2c", "score": "0.6203446", "text": "def blogDetailView(request, pk):\n user = request.user\n post_detail = get_object_or_404(BlogPost, pk=pk)\n blogPostType = ContentType.objects.get(app_label='blog', model='blogpost')\n res = LikesAndDislikes.objects.filter(content_type=blogPostType, object_id=pk)\n num_of_likes = res.filter(like_type=False).count()\n num_of_dislikes = res.filter(like_type=True).count()\n\n comments = Comment.objects.filter(content_type=blogPostType, object_id=pk)\n\n initial_data = {\n 'user': user,\n 'content': \"leave your comment here ~\",\n 'content_type': blogPostType,\n 'object_id': pk,\n }\n form = CommentForm(initial=initial_data)\n\n context = {\n 'post_detail' : post_detail,\n 'num_of_likes' : num_of_likes,\n 'num_of_dislikes': num_of_dislikes,\n 'liked': False,\n 'disliked': False,\n 'form': form,\n 'comments': comments,\n }\n\n is_done = LikesAndDislikes.objects.filter(\n user=user,\n content_type=blogPostType,\n object_id=pk\n )\n\n if is_done:\n if is_done.get().like_type == False:\n context['liked'] = True\n else:\n context['disliked'] = True\n\n return render(request, 'blog/post.html', context)", "title": "" }, { "docid": "ba490370835895b27530e850d9cc9ab6", "score": "0.618974", "text": "def get(self, post_id):\n blogpost_key = db.Key.from_path('BlogPost', int(post_id))\n blogpost_post = db.get(blogpost_key)\n content = 
blogpost_post.content\n title = blogpost_post.title\n self.render('edit.html', title=title, content=content)", "title": "" }, { "docid": "2c92b6ae7b0d67da9e0626231a22a6cc", "score": "0.6186363", "text": "def view_post(request, post_id):\n post = get_object_or_404(BlogPost, pk=post_id)\n\n template = 'blog/blog_post.html'\n context = {\n 'post': post\n }\n return render(request, template, context)", "title": "" }, { "docid": "8c58e5ee5f08f1af57fda658f347fe71", "score": "0.6102145", "text": "def post_detail(request, id):\n\n post = get_object_or_404(Post, pk=id)\n return render(request, \"postdetail.html\", {'post': post})", "title": "" }, { "docid": "7c76e9439806fbd7aff6ace5cb7c0ab9", "score": "0.6096827", "text": "def showPost(self):\n print(\"{} ──────────────────────────\".format(self.owner.name))\n print(\"{:%d, %b %Y}\".format(self.timestamp))\n print(\"{}\".format(self.content), end=\"\\n\\n\")", "title": "" }, { "docid": "a3abb50f740a2a601c2d51d2ed9ea94e", "score": "0.6087277", "text": "def blogpost(request, parametr):\n assert isinstance(request, HttpRequest)\n post_1 = Blog.objects.get(id=parametr)\n comments = Comment.objects.filter(post=parametr)\n \n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment_f = form.save(commit=False)\n comment_f.author = request.user\n comment_f.date = datetime.now()\n comment_f.post = Blog.objects.get(id=parametr)\n comment_f.save()\n return redirect('blogpost', parametr=post_1.id) \n else:\n form = CommentForm() \n \n return render(\n request,\n 'app/blogpost.html',\n \n {\n 'post_1': post_1,\n 'comments': comments,\n 'form': form,\n 'year':datetime.now().year,\n \n }\n \n )", "title": "" }, { "docid": "b9197dec82e3d1cde8819f6695d820d9", "score": "0.6073559", "text": "def _show_comments(self):\n headers = {\"Authorization\": \"Token %s\" % cfg.koodous.token}\n\n try:\n content_file = __sessions__.current.file.data\n sha256 = hashlib.sha256(content_file).hexdigest()\n except Exception:\n self.log('error', 'You have no file loaded')\n return\n try:\n url = '%s/%s/comments' % (cfg.koodous.base_url, sha256)\n response = requests.get(url=url, headers=headers, proxies=cfg.koodous.proxies,\n verify=cfg.koodous.verify, cert=cfg.koodous.cert)\n\n if response.json().get('count') == 0:\n self.log('info', 'This sample has no comments.')\n return\n for result in response.json().get('results'):\n self.log('info', \"[{}]: {}\".format(result['author']['username'], result['text']))\n except Exception as err:\n self.log('error', 'Network problem, please try again.')\n log.error(\"Network problem, please try again: \\n{}\".format(err))", "title": "" }, { "docid": "2d785282de48b6a4e8ec49022b1ca09a", "score": "0.60732067", "text": "def edit_comment(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n feed = comment.feed\n \n # Protecting the edit feed page.\n if comment.user != request.user:\n raise Http404\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry\n form = CommentForm(instance=comment)\n else:\n # POST data submitted; process data\n form = CommentForm(instance=comment, data=request.POST)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, 'Comment updated')\n return redirect('feed:feed_detail', pk=feed.pk)\n\n context = {'feed': feed, 'comment': comment, 'form': form}\n return render(request, 'feed/edit_comment.html', context)", "title": "" }, { "docid": "00bbd84ada5ba6fa7732c1e6788dedd5", "score": "0.6059708", "text": "def 
post(self, post_id, comment_id):\n comment = Comment.by_id(comment_id)\n if self.user and self.user.name == comment.username:\n save_clicked = self.request.get(\"save\")\n cancel_clicked = self.request.get(\"cancel\")\n delete_clicked = self.request.get(\"delete\")\n\n edited_comment = self.request.get(\"comment\")\n\n if save_clicked:\n self.save_edit(edited_comment, comment, post_id)\n elif cancel_clicked:\n self.cancel_edit(post_id)\n elif delete_clicked:\n self.delete_edit(comment, post_id)\n else:\n self.redirect(\"/blog\")\n else:\n self.redirect(\"/blog\")", "title": "" }, { "docid": "449041abfd9c0c35e65163dd4b2448b4", "score": "0.60335106", "text": "def post(self):\n\n # keep reference to the post page for redirection later\n referrer = self.request.referer\n post_id = referrer.split(\"/\")[-1]\n\n post = Post.by_id(post_id)\n\n comment_text = self.request.get(\"comment\")\n self.write(comment_text)\n if comment_text:\n new_comment = Comment(post_id=post_id, username=self.user.name,\n comment=comment_text)\n new_comment.put()\n self.redirect(referrer)\n else:\n # TODO should render page with error\n self.redirect(referrer)", "title": "" }, { "docid": "5da1fa25ec79aacc2972e2f5780268f4", "score": "0.60311145", "text": "def manage_comment():\n page = request.args.get('page', 1, type=int)\n logger.info('Visiting comment manage page {}.'.format(page))\n pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(page,\n per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],\n error_out=False)\n comments = pagination.items\n return render_template('manage_comment.html', comments=comments, pagination=pagination)", "title": "" }, { "docid": "77153d75906691e4cb26e341e493587e", "score": "0.6026122", "text": "def post(self):\n if self.authenticated():\n post_id = self.request.get('post_id')\n post = Post.get_by_id(int(post_id))\n comment_txt = self.request.get('comments')\n comment = Comment(comment = comment_txt, commenter = self.user, post = post)\n comment.put()\n self.redirect('/blog/%s' % (post_id))\n else:\n self.login_redirect()", "title": "" }, { "docid": "435adb6c92bdce29612c05447271c8be", "score": "0.6018611", "text": "def show_edit_post_page(post_id):\n\n post = Post.query.get(post_id)\n\n return render_template('edit_post.html', post=post)", "title": "" }, { "docid": "8a254a746c56282352dcc4740a1155ba", "score": "0.601529", "text": "def commentsindex(topic_id):\n post = get_post(topic_id)\n db = get_db()\n postcoms = query_db(\n \"SELECT * FROM postcomments p JOIN user u ON p.author_id=u.id WHERE topic_id = ?\",[topic_id]\n )\n return render_template(\"blog/commentsindex.html\", postcoms=postcoms, post=post)", "title": "" }, { "docid": "6f7420a7dbd5f2dc7191672e8039fd45", "score": "0.60079324", "text": "def get_comment_by_id(self):\n parent_comment = self.create_comment()\n id = parent_comment.data['id']\n return self.client.get(reverse(\n \"articles:commentdetail\", args=[\"this-is-mine\", id]))", "title": "" }, { "docid": "e9afdbc32d51a0bcf0c94198463f1105", "score": "0.6005923", "text": "def user_comment_post(sender, instance, *args, **kwargs):\n comment = instance\n post = comment.post\n text_preview = comment.body[:90] #take only first 90 chars as a preview.\n sender = comment.user\n\n notify = Notification(post=post, sender=sender, user=post.user, text_preview=text_preview, notification_type=2)\n notify.save()", "title": "" }, { "docid": "c6af3cdc773174518d36eb7596da5090", "score": "0.6001438", "text": "def show(request, slug):\n\ttry:\n\t\tpage = 
Content.objects.get(slug=slug)\n\texcept Content.DoesNotExist:\n\t\treturn render_to_response('pages/bug.html',\n\t\t\t{'bug': _('Page does not exist')},\n\t\t\tcontext_instance=RequestContext(request))\n\t\n\tadd_topic = False\n\tif page.coment_forum:\n\t\trequest.forum_id = page.coment_forum.id\n\t\tcoment_forum_id = page.coment_forum.id\n\t\tperms = forumContext(request)\n\t\tif perms['perms']['add_topic']:\n\t\t\tadd_topic = True\n\telif page.place and page.place.coment_forum:\n\t\trequest.forum_id = page.place.coment_forum.id\n\t\tcoment_forum_id = page.place.coment_forum.id\n\t\tperms = forumContext(request)\n\t\tif perms['perms']['add_topic']:\n\t\t\tadd_topic = True\n\t\n\tif add_topic and not page.coment_topic:\n\t\tform = diamandaModelwrappers.AddTopicForm()\n\telif add_topic and page.coment_topic:\n\t\tform = diamandaModelwrappers.AddPostForm()\n\telse:\n\t\tform = False\n\t\n\tcomments = False\n\t# check if user wants to add a comment - show the form if possible\n\tif 'c' in request.GET:\n\t\tshow_comment = True\n\t\tif form and page.coment_topic:\n\t\t\tcomments = page.coment_topic.post_set.all().order_by('-id')[:10]\n\telse:\n\t\tshow_comment = False\n\t\n\tif request.POST and add_topic and not page.coment_topic:\n\t\tCOMMENT_POST = _('This is a discussion about article') + ': [url=/w/p/%s/]%s[/url].' % (page.slug, page.title)\n\t\tREDIRECT = (\"/w/p/\" + slug +\"/?c=ok\", _('Comment added succesfuly.'))\n\t\tTITLE = _('Comments for: %s') % page.title\n\t\treturn diamandaModelwrappers.add_topic(request, coment_forum_id, inject_post=COMMENT_POST, inject_title=TITLE, redirect_link=REDIRECT, content_obj=page)\n\telif request.POST and add_topic and page.coment_topic:\n\t\tREDIRECT = (\"/w/p/\" + slug +\"/?c=ok\", _('Comment added succesfuly.'))\n\t\treturn diamandaModelwrappers.add_post(request, page.coment_topic.id, redirect_link=REDIRECT, content_obj=page)\n\t\n\tif page.current_book:\n\t\tcb = page.current_book\n\t\n\tif page.content_type == 'news':\n\t\treturn render_to_response(\n\t\t\t'pages/show_news.html',\n\t\t\t{'page': page, 'add_topic': add_topic, 'form': form, 'show_comment': show_comment, 'comments': comments},\n\t\t\tcontext_instance=RequestContext(request, {'current_book': cb}))\n\treturn render_to_response(\n\t\t'pages/show.html',\n\t\t{'page': page, 'add_topic': add_topic, 'form': form, 'show_comment': show_comment, 'comments': comments},\n\t\tcontext_instance=RequestContext(request, {'current_book': cb}))", "title": "" }, { "docid": "fea07b70f2ea2874392c5226c75175b9", "score": "0.5996286", "text": "def comments(game_id, post_id):\n\n db = get_db()\n\n if (request.method == 'POST') and (g.user != None):\n #If there is a post request and the user is logged in, insert their comment into the database\n\n comment = request.form['comment']\n\n db.execute(\"INSERT INTO comments(user_id, post_id, comment_id, content, hidden) VALUES (?, ?, NULL, ?, 0)\", (g.user['id'], post_id, comment))\n db.commit()\n\n\n #Retrieve all of the necessary data for the page\n post = db.execute(\"SELECT username, users.id, title, body, game_id FROM posts JOIN users ON user_id=users.id WHERE posts.id = ?\", str(post_id)).fetchone()\n\n game = db.execute(\"SELECT id, title FROM games WHERE id = ?\", str(post['game_id'])).fetchone()\n\n comments = db.execute(\"SELECT content, username, users.id FROM comments JOIN users ON user_id=users.id WHERE post_id = ?\", str(post_id)).fetchall()\n\n return render_template(\"layouts/comments.html\", post=post, comments=comments, game=game)", "title": "" 
}, { "docid": "461d41775f2f75bd350a598908a50f08", "score": "0.5989866", "text": "def comment():\n if request.method == 'POST':\n if 'username' not in session:\n return render_template('login.html')\n\n message = request.form['message']\n post_id = request.form['post_id']\n username = session['username']\n user_id = session['user_id']\n\n print(post_id)\n print(user_id)\n\n # Use backend post api - Post\n body = {'user_id': user_id,\n 'username': username, 'message': message}\n\n # url = '{}/posts'.format(app.config[\"POST_BASE_URI\"])\n url = '{}/posts/{}/comments'.format(app.config[\"POST_BASE_URI\"], post_id)\n print(url)\n response = requests.post(url=url,\n json=body,\n headers={'content-type': 'application/json'},\n timeout=3)\n\n app.logger.info('Get %s with response status code %s',\n url, response.status_code)\n app.logger.info('Get %s with response text %s', url, response.text)\n\n return redirect('/viewpost/' + post_id)\n return render_template('login.html')", "title": "" }, { "docid": "133c58bdaad85c0efe4c270497a91ae3", "score": "0.59606856", "text": "def post_comment(request, next=None):\r\n # Fill out some initial data fields from an authenticated user, if present\r\n data = request.POST.copy()\r\n if request.user.is_authenticated():\r\n if not data.get('name', ''):\r\n data[\"name\"] = request.user.get_full_name() or request.user.username\r\n if not data.get('email', ''):\r\n data[\"email\"] = request.user.email\r\n\r\n # Check to see if the POST data overrides the view's next argument.\r\n next = data.get(\"next\", next)\r\n\r\n # Look up the object we're trying to comment about\r\n ctype = data.get(\"content_type\")\r\n object_pk = data.get(\"object_pk\")\r\n if ctype is None or object_pk is None:\r\n return CommentPostBadRequest(\"Missing content_type or object_pk field.\")\r\n try:\r\n model = models.get_model(*ctype.split(\".\", 1))\r\n target = model._default_manager.get(pk=object_pk)\r\n except TypeError:\r\n return CommentPostBadRequest(\r\n \"Invalid content_type value: %r\" % escape(ctype))\r\n except AttributeError:\r\n return CommentPostBadRequest(\r\n \"The given content-type %r does not resolve to a valid model.\" % \\\r\n escape(ctype))\r\n except ObjectDoesNotExist:\r\n return CommentPostBadRequest(\r\n \"No object matching content-type %r and object PK %r exists.\" % \\\r\n (escape(ctype), escape(object_pk)))\r\n\r\n # Do we want to preview the comment?\r\n preview = \"preview\" in data\r\n\r\n # Construct the comment form\r\n form = comments.get_form()(target, data=data)\r\n\r\n # Check security information\r\n if form.security_errors():\r\n return CommentPostBadRequest(\r\n \"The comment form failed security verification: %s\" % \\\r\n escape(str(form.security_errors())))\r\n\r\n # If there are errors or if we requested a preview show the comment\r\n if form.errors or preview:\r\n template_list = [\r\n \"comments/%s_%s_preview.html\" % tuple(str(model._meta).split(\".\")),\r\n \"comments/%s_preview.html\" % model._meta.app_label,\r\n \"comments/preview.html\",\r\n ]\r\n return render_to_response(\r\n template_list, {\r\n \"comment\" : form.data.get(\"comment\", \"\"),\r\n \"form\" : form,\r\n \"next\": next,\r\n },\r\n RequestContext(request, {})\r\n )\r\n\r\n # Otherwise create the comment\r\n comment = form.get_comment_object()\r\n comment.ip_address = request.META.get(\"REMOTE_ADDR\", None)\r\n if request.user.is_authenticated():\r\n comment.user = request.user\r\n\r\n # Signal that the comment is about to be saved\r\n responses = 
signals.comment_will_be_posted.send(\r\n sender = comment.__class__,\r\n comment = comment,\r\n request = request\r\n )\r\n\r\n for (receiver, response) in responses:\r\n if response == False:\r\n return CommentPostBadRequest(\r\n \"comment_will_be_posted receiver %r killed the comment\" % receiver.__name__)\r\n\r\n # Save the comment and signal that it was saved\r\n comment.save()\r\n signals.comment_was_posted.send(\r\n sender = comment.__class__,\r\n comment = comment,\r\n request = request\r\n )\r\n\r\n return next_redirect(data, next, comment_done, c=comment._get_pk_val())", "title": "" }, { "docid": "91818ccd34b7b916f67ef9df2bcc1735", "score": "0.59462464", "text": "def view_comments(request, product_id):\n reviews = Review.objects.filter(product=product_id)\n product = get_object_or_404(Product, pk=product_id)\n\n context = {\n \"reviews\": reviews,\n \"product\": product,\n }\n return render(request, \"products/reviews.html\", context)", "title": "" }, { "docid": "6b06e3c34b4f68e6649bfaf23b58ac6b", "score": "0.5928355", "text": "def post(self, post_key):\n form = CommentForm(self.request.POST)\n is_liked = Like.is_liked_by_user(self.user, self.post)\n\n if form.validate():\n comment_key = Comment.new_comment(form.content.data, self.user,\n self.post)\n\n if comment_key:\n self.redirect_to(\"viewpost\", post_key=post_key,\n _fragment=str(comment_key))\n else:\n self.abort(500)\n\n self.render_viewpost(form, is_liked)", "title": "" }, { "docid": "1573a05a8c90223786c44481e85adf69", "score": "0.59153986", "text": "def pickRepostComment(repostComments): \n return None", "title": "" }, { "docid": "55ebbd6cb68713797bf7ff40c6db7f78", "score": "0.591078", "text": "def post_comment(request, next=None):\n # Fill out some initial data fields from an authenticated user, if present\n data = request.POST.copy()\n print data\n if request.user.is_authenticated():\n if not data.get('name', ''):\n data[\"name\"] = request.user.get_full_name() or request.user.username\n if not data.get('email', ''):\n data[\"email\"] = request.user.email\n\n # Look up the object we're trying to comment about\n ctype = data.get(\"content_type\")\n object_pk = data.get(\"object_pk\")\n print ctype\n if ctype is None or object_pk is None:\n return CommentPostBadRequest(\"Missing content_type or object_pk field.\")\n try:\n model = models.get_model(*ctype.split(\".\", 1))\n target = model._default_manager.get(pk=object_pk)\n except TypeError:\n return CommentPostBadRequest(\n \"Invalid content_type value: %r\" % escape(ctype))\n except AttributeError:\n return CommentPostBadRequest(\n \"The given content-type %r does not resolve to a valid model.\" % \\\n escape(ctype))\n except ObjectDoesNotExist:\n return CommentPostBadRequest(\n \"No object matching content-type %r and object PK %r exists.\" % \\\n (escape(ctype), escape(object_pk)))\n\n # Do we want to preview the comment?\n preview = \"preview\" in data\n\n # Construct the comment form\n form = comments.get_form()(target, data=data)\n\n # Check security information\n if form.security_errors():\n return CommentPostBadRequest(\n \"The comment form failed security verification: %s\" % \\\n escape(str(form.security_errors())))\n\n # If there are errors or if we requested a preview show the comment\n if preview:\n template_list = [\n \"comments/%s_%s_preview.html\" % tuple(str(model._meta).split(\".\")),\n \"comments/%s_preview.html\" % model._meta.app_label,\n \"comments/preview.html\",\n ]\n return render_to_response(\n template_list, {\n \"comment\" : 
form.data.get(\"comment\", \"\"),\n \"form\" : form,\n },\n RequestContext(request, {})\n )\n \n if form.errors:\n (view_func, view_args, view_kwargs) = resolve(target.get_absolute_url())\n view_kwargs.update({\n 'extra_context': {'comment_form': form}\n })\n return view_func(request, *view_args, **view_kwargs)\n\n # Otherwise create the comment\n comment = form.get_comment_object()\n comment.ip_address = request.META.get(\"REMOTE_ADDR\", None)\n if request.user.is_authenticated():\n comment.user = request.user\n\n # Signal that the comment is about to be saved\n responses = signals.comment_will_be_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n\n for (receiver, response) in responses:\n if response == False:\n return CommentPostBadRequest(\n \"comment_will_be_posted receiver %r killed the comment\" % receiver.__name__)\n\n # Save the comment and signal that it was saved\n comment.save()\n signals.comment_was_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n\n return next_redirect(data, next, comment_done, c=comment._get_pk_val())", "title": "" }, { "docid": "c85bb1e58b7a038fd261394cc45b50da", "score": "0.5903895", "text": "def edit_comment(self, comment_object, pid):\n if self.user:\n comment_content = self.request.get('post_comment')\n if comment_content:\n comment_object.comment = comment_content\n CommentEntity.commit_comment(comment_object)\n return self.redirect('/{0}/{1}'.format('post',\n pid))\n\n self.redirect('/login')", "title": "" }, { "docid": "b710bbc470070b1bc2d25ad3b1bcc0d3", "score": "0.58985364", "text": "def post(self, post_id):\n data = request.json\n comment = create_comment(data=data, post_id=post_id)\n return comment", "title": "" }, { "docid": "4df99ef651670e5cb343ca4e9f7bc594", "score": "0.5895393", "text": "def get_blog_post(request, pk):\n return render(request, 'oikos/blog.html', {'post' : Post.objects.get(pk=pk)})", "title": "" }, { "docid": "6e400562d59bb225386fd3713b126bb9", "score": "0.58818877", "text": "def comment_detail(request, company_slug, idea_id, comment_id): \n comment = get_object_or_404(Comment, idea__id=idea_id, pk=comment_id)\n\n if request.method == 'GET':\n serializer = CommentSerializer(comment)\n return Response(serializer.data)\n\n elif request.method == 'DELETE':\n if not has_admin_permission(request.user, comment.idea.company) and comment.created_by != request.user:\n return HttpResponseForbidden() \n\n try:\n member = Member.objects.get(company=comment.idea.company, user=request.user)\n if member.blocked:\n return HttpResponseForbidden() \n except:\n pass\n\n comment.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "0168b1f8b91b78e798e21abeca66e931", "score": "0.58523023", "text": "def comment_was_posted(request):\r\n obj = None\r\n if request.GET.has_key('c'):\r\n content_type_id, object_id = request.GET['c'].split(':')\r\n try:\r\n content_type = ContentType.objects.get(pk=content_type_id)\r\n obj = content_type.get_object_for_this_type(pk=object_id)\r\n except ObjectDoesNotExist:\r\n pass\r\n return render_to_response('comments/posted.html', {'object': obj}, context_instance=RequestContext(request))", "title": "" }, { "docid": "8f00f0a3ee6137af6c5ae0af11863826", "score": "0.583153", "text": "def comment(self, content):", "title": "" }, { "docid": "e4e9567f1b024e56a73e06b4e09111ea", "score": "0.5820659", "text": "def post(self, request, post_id):\n post = Post.object.filter(pk=post_id).first()\n title = 
self.request.POST.get(\"title\", \"\")\n comment = self.request.POST.get(\"comment\", \"\")\n user = self.request.user\n if not title or not comment:\n return JsonResponse({\"error\": \"Datos incompletos\"})\n obj = Comment.objects.create(\n title=title,\n comment=comment,\n user=user,\n post=post\n )\n obj.save()\n return redirect(\"view_post\", (self.post.pk,))", "title": "" }, { "docid": "04b7a8c332af03fd5359469648dce53c", "score": "0.5808202", "text": "def email(self, comment, content_object, request):\r\n if comment.is_public:\r\n context = {\r\n 'comment': comment,\r\n 'entry': content_object,\r\n }\r\n email_body = render_to_string('blog/new_comment_email.txt', context)\r\n mail_managers(u'Comment on \"%s\"' % content_object, email_body)", "title": "" }, { "docid": "9a016602aa842096c58b9ab3e09e7d05", "score": "0.57942635", "text": "def item_show(item_id):\n item = items.find_one({'_id': ObjectId(item_id)})\n # Add the below line:\n item_comments = comments.find({'item_id': ObjectId(item_id)})\n # Edit the return statement to be the following:\n return render_template('show.html', item=item, comments=item_comments)", "title": "" }, { "docid": "78fef74fa042889e57ba462adab90e7e", "score": "0.5741248", "text": "def viewpost(post_id):\n # if request.method == 'POST':\n # if 'username' not in session:\n # return render_template('login.html')\n\n # message = request.form['message']\n # username = session['username']\n # user_id = session['user_id']\n\n # # Use backend post api - Post\n # body = {'user_id': user_id, 'username': username, 'message': message}\n # url = '{}/posts'.format(app.config[\"POST_BASE_URI\"])\n # response = requests.post(url=url,\n # json=body,\n # headers={'content-type': 'application/json'},\n # timeout=3)\n\n # app.logger.info('Get %s with response status code %s',\n # url, response.status_code)\n # app.logger.info('Get %s with response text %s', url, response.text)\n\n # return redirect(url_for('home'))\n\n url = '{}/posts/{}'.format(app.config[\"POST_BASE_URI\"], post_id)\n response = requests.get(url, timeout=3)\n\n app.logger.info('Get %s with response status code %s',\n url, response.status_code)\n # app.logger.info('Get %s with response text %s', url, response.text)\n\n post = json.loads(response.text)['data']['post']\n\n # comments = [{'message': 'Hello'}, {\n # 'message': 'Hello'}, {'message': 'Hello'}]\n comments = json.loads(response.text)['data']['comments']\n\n return render_template('postdetail.html', post=post, comments=comments)", "title": "" }, { "docid": "842f86990df5806e6ca4e3b00c71c9ff", "score": "0.57406366", "text": "def post_detail(request, pk=None, slug=None):\n if slug:\n post = get_object_or_404(Post, slug=slug)\n elif pk:\n post = get_object_or_404(Post, pk=pk)\n\n return render(request, \"blog/blog_detail.html\", {\n \"page\": \"blog\",\n \"post\": post\n })", "title": "" }, { "docid": "38d910e5a710ded2629a50f3d5a1e7c0", "score": "0.5735533", "text": "def post_comment():\n content = request.json['content']\n parent = request.json['parent']\n post_id = request.json['post_id']\n user_id = get_jwt_identity()\n comment = Comment(user_id, content, post_id, parent_id=parent)\n db.session.add(comment)\n db.session.commit()\n return respond(plain_response(comment.id))", "title": "" }, { "docid": "45778fc0137795d32597f6ccbd8cbfbf", "score": "0.57330495", "text": "def feature_comment(request, pk):\n feature = get_object_or_404(Feature, pk=pk)\n if request.method == 'POST':\n comment_form = FeatureCommentForm(request.POST)\n if 
comment_form.is_valid():\n comment = comment_form.save(commit=False)\n comment.commenter = request.user\n comment.feature = feature\n comment.save()\n return redirect(feature_details, feature.pk)\n else:\n comment_form = FeatureCommentForm()\n return render(request, 'feature_comment.html', {'feature': feature, 'comment_form': comment_form})", "title": "" }, { "docid": "99b506f7fd1b6539d847e8123bef16a3", "score": "0.57282954", "text": "def edit_comment(request, comment_id):\n user = community_helpers.get_logged_user(request)\n \n if user:\n comment = models.Comment.objects.get(id = comment_id)\n EditCommentForm = forms.build_edit_comment_form(comment)\n if request.POST:\n form = EditCommentForm(request.POST, request.FILES)\n if form.is_valid():\n form.handle_edit(user, comment)\n return http.HttpResponseRedirect(reverse('content-redirect-by-id', args=[comment.content_type.id, comment.object_id]))\n else:\n form = EditCommentForm()\n return shortcuts.render_to_response(\n 'comments/edit_comment.html', \n {'form':form, 'comment':comment, 'node_content_type':comment.content_type.id, 'node_object_id':comment.object_id}, \n context_instance = RequestContext(request)\n )", "title": "" }, { "docid": "3960db6180e18800a9a24dc751dfddda", "score": "0.5716266", "text": "def post_comment(request, next=None, using=None):\r\n # Fill out some initial data fields from an authenticated user, if present\r\n data = request.POST.copy()\r\n if request.user.is_authenticated():\r\n if not data.get('name', ''):\r\n data[\"name\"] = request.user.get_full_name() or request.user.get_username()\r\n if not data.get('email', ''):\r\n data[\"email\"] = request.user.email\r\n\r\n # Look up the object we're trying to comment about\r\n ctype = data.get(\"content_type\")\r\n object_pk = data.get(\"object_pk\")\r\n if ctype is None or object_pk is None:\r\n return CommentPostBadRequest(\"Missing content_type or object_pk field.\")\r\n try:\r\n model = models.get_model(*ctype.split(\".\", 1))\r\n target = model._default_manager.using(using).get(pk=object_pk)\r\n except TypeError:\r\n return CommentPostBadRequest(\r\n \"Invalid content_type value: %r\" % escape(ctype))\r\n except AttributeError:\r\n return CommentPostBadRequest(\r\n \"The given content-type %r does not resolve to a valid model.\" % \\\r\n escape(ctype))\r\n except ObjectDoesNotExist:\r\n return CommentPostBadRequest(\r\n \"No object matching content-type %r and object PK %r exists.\" % \\\r\n (escape(ctype), escape(object_pk)))\r\n except (ValueError, ValidationError) as e:\r\n return CommentPostBadRequest(\r\n \"Attempting go get content-type %r and object PK %r exists raised %s\" % \\\r\n (escape(ctype), escape(object_pk), e.__class__.__name__))\r\n\r\n # Do we want to preview the comment?\r\n preview = \"preview\" in data\r\n\r\n # Construct the comment form\r\n form = django_comments.get_form()(target, data=data)\r\n\r\n # Check security information\r\n if form.security_errors():\r\n return CommentPostBadRequest(\r\n \"The comment form failed security verification: %s\" % \\\r\n escape(str(form.security_errors())))\r\n\r\n # If there are errors or if we requested a preview show the comment\r\n if form.errors or preview:\r\n template_list = [\r\n # These first two exist for purely historical reasons.\r\n # Django v1.0 and v1.1 allowed the underscore format for\r\n # preview templates, so we have to preserve that format.\r\n \"comments/%s_%s_preview.html\" % (model._meta.app_label, model._meta.module_name),\r\n \"comments/%s_preview.html\" % 
model._meta.app_label,\r\n # Now the usual directory based template hierarchy.\r\n \"comments/%s/%s/preview.html\" % (model._meta.app_label, model._meta.module_name),\r\n \"comments/%s/preview.html\" % model._meta.app_label,\r\n \"comments/preview.html\",\r\n ]\r\n return render_to_response(\r\n template_list, {\r\n \"comment\": form.data.get(\"comment\", \"\"),\r\n \"form\": form,\r\n \"next\": data.get(\"next\", next),\r\n },\r\n RequestContext(request, {})\r\n )\r\n\r\n # Otherwise create the comment\r\n comment = form.get_comment_object()\r\n comment.ip_address = request.META.get(\"REMOTE_ADDR\", None)\r\n if request.user.is_authenticated():\r\n comment.user = request.user\r\n\r\n # Signal that the comment is about to be saved\r\n responses = signals.comment_will_be_posted.send(\r\n sender=comment.__class__,\r\n comment=comment,\r\n request=request\r\n )\r\n\r\n for (receiver, response) in responses:\r\n if response == False:\r\n return CommentPostBadRequest(\r\n \"comment_will_be_posted receiver %r killed the comment\" % receiver.__name__)\r\n\r\n # Save the comment and signal that it was saved\r\n comment.save()\r\n signals.comment_was_posted.send(\r\n sender=comment.__class__,\r\n comment=comment,\r\n request=request\r\n )\r\n\r\n return next_redirect(request, fallback=next or 'comments-comment-done',\r\n c=comment._get_pk_val())", "title": "" }, { "docid": "1bee138e51bed7498a8c981fb91a695f", "score": "0.5714569", "text": "def quick_url(comment):\n\n def to_id(fullname):\n return fullname.split(\"_\", 1)[1]\n\n return \"http://www.reddit.com/r/{}/comments/{}/_/{}?context=3\".format(\n comment.subreddit.display_name, to_id(comment.link_id), comment.id\n )", "title": "" }, { "docid": "fae50ed823edcc7a7d1d6ae900d5b806", "score": "0.57140905", "text": "def get(self, post_id):\r\n if not self.user:\r\n self.redirect('/blog/login?error=You need to be logged')\r\n return\r\n else:\r\n key = db.Key.from_path('Blog', int(post_id))\r\n post = db.get(key)\r\n if not post:\r\n self.error(404)\r\n return\r\n user = post.user\r\n loggedUser = self.user\r\n if user == loggedUser:\r\n key = db.Key.from_path('Blog', int(post_id))\r\n post = db.get(key)\r\n error = \"\"\r\n self.render(\"editpost.html\", subject=post.subject,\r\n content=post.content, post_id=post_id,\r\n error=error)\r\n else:\r\n error = \"You can't edit this post\"\r\n self.render(\"error.html\", error=error)", "title": "" }, { "docid": "663c1d8648303fcd021c55e8a906cf30", "score": "0.5698372", "text": "def add_comment_to_post(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return redirect('blog-post', pk=post.id)\n else:\n form = CommentForm()\n return render(request, 'add_comment_to_post.html', {'form': form})", "title": "" }, { "docid": "af2a8d546f001eba74bd1bee97d966ed", "score": "0.56928205", "text": "def detail_post_view(session: Session, post_id: IdType) -> typing.List[PostType]:\n post = session.query(Post).get(post_id)\n\n if not post:\n return Response({'message': 'Post does not exist'}, status=204)\n else:\n post_data = dict(\n id=post.id,\n author_id=post.author_id,\n title=post.title,\n content=post.content,\n created_at=post.created_at.strftime(format='%Y-%m-%d %H:%M'),\n )\n return {'data': post_data}", "title": "" }, { "docid": "7fdbe1ea50ca2ca3968768f0fe5590c4", "score": "0.56762093", "text": "def feature_details(request, pk):\n feature = 
get_object_or_404(Feature, pk=pk)\n comments = FeatureComment.objects.filter(feature=feature)\n return render(request, \"feature_details.html\", {'feature': feature, 'comments': comments})", "title": "" }, { "docid": "ea10d8ed339a3bd1192b51cb0137c990", "score": "0.5670916", "text": "def check_comment(post_id, comment_id, check_author=True):\n\n db = get_db()\n\n comment = get_comment_of_post(db, post_id, comment_id)\n if comment is None:\n abort(404, \"Comment id {0} doesn't exist.\".format(comment_id))\n\n current_user = get_user_by_username(db, request.authorization[\"username\"])\n if check_author and comment[\"author_id\"] != current_user[\"id\"]:\n abort(403)\n\n return comment", "title": "" }, { "docid": "74b91df995a745dd9214f66b529be32e", "score": "0.5634377", "text": "def post_detail(request, pk):\n post = get_object_or_404(Feedback, pk=pk)\n post.save()\n return render(request, \"feedbackdetail.html\", {\"post\":post})", "title": "" }, { "docid": "ad72f193d51a44406eef966f0d461d8d", "score": "0.5633675", "text": "def add_comment(self, post):\n if self.user:\n comment_content = self.request.get('comment')\n if comment_content:\n comment = CommentEntity.register(user=self.user,\n post=post,\n comment=comment_content)\n CommentEntity.commit_comment(comment)\n return self.redirect('/{0}/{1}'.format('post',\n post.key().id()))\n\n self.redirect('/login')", "title": "" }, { "docid": "c2b8f4fc62acdeaae105bce80552942d", "score": "0.56218356", "text": "def get(self, post_id):\n if self.user_id:\n params, user_info = disclaim(self)\n else:\n params = {} \n params['captchahtml'] = captchaBase(self)\n blog = models.BlogPost.get_by_id(long(post_id))\n if blog is not None:\n params['title'] = blog.title\n params['subtitle'] = blog.subtitle\n params['blob_key'] = blog.blob_key\n params['author'] = blog.author\n params['content'] = blog.content\n return self.render_template('materialize/landing/blogpost.html', **params)\n else:\n return self.error(404)", "title": "" }, { "docid": "e6e46599cb7cdbfc08739013f522a6bb", "score": "0.56055665", "text": "def post(self, post_id):\n comment = request.form.get('comment')\n\n post = Post.get_by_id(post_id)\n post.comments.append(Comment(session.get('user_id'), post.id, comment))\n\n db.session.commit()\n\n return redirect(url_for('blog.post', post_id=post_id))", "title": "" }, { "docid": "5357134e62722ca8f07e6e42f10dd1b0", "score": "0.5603172", "text": "def GetComment(self):\r\n pass", "title": "" }, { "docid": "3073a414630f0449af65e90de18c7401", "score": "0.55794376", "text": "def representative_comment(self):\n p = self.root_posts\n if len(p) == 0:\n return None\n return p[0]", "title": "" }, { "docid": "0156caa4dcd7416014144af7a52818b7", "score": "0.55773926", "text": "def show_movie(request, movie_slug):\n context_dict = {}\n \n\t# Attempt to retrieve the data to be displayed on the movies page\n try:\n movie = Movie.objects.get(slug=movie_slug)\n comments = Comment.objects.filter(movie=movie)\n\n context_dict['movie'] = movie\n\n context_dict['comments'] = sort_comments(comments)\n movie.views = movie.views + 1\n movie.save()\n\n\t\t# If the user is logged in allow them to comment and reply to comments\n if request.user.is_authenticated:\n author = UserProfile.objects.filter(\n user=request.user)[0]\n\n form = CommentForm(\n author=author,\n parent=None,\n movie=movie)\n\n context_dict['form'] = form\n\n else:\n context_dict['form'] = None\n\n\t# if the user posts the comment form\n if request.method == \"POST\":\n form = CommentForm(\n request.POST,\n 
author=author,\n parent=None,\n movie=movie)\n\t\t\t\n if form.is_valid():\n comment = form.save(commit=False)\n comment.save()\n request.method=\"GET\"\n return show_movie(request, movie_slug)\n\n else:\n print(form.errors)\n \n except Movie.DoesNotExist:\n context_dict['movie'] = None\n context_dict['comments'] = None\n context_dict['form'] = None\n\n return render(request, \n 'rate_my_movie_app/movie.html',\n context_dict)", "title": "" }, { "docid": "d1d3c7fde4f8927ee58438241c2b6b73", "score": "0.5576925", "text": "def show_comments(item_format=' - {timestamp} -> {text} .::. {_ts}', admin_fmt=True, **kwargs):\n print('\\n'.join(get_comments(item_format=item_format, admin_fmt=admin_fmt, **kwargs)))", "title": "" }, { "docid": "f1e860f91b2b7b1f367cf1d0028de4fa", "score": "0.55709225", "text": "def post_details(request, slug, post_pk):\n post, post_url = get_redirected(models.Post, {'pk': post_pk}, {'slug': slug})\n # return render(request, 'blog/post_details.html', {'post': post })\n if post_url:\n return HttpResponseRedirect(post_url)\n\n return render(request, 'blog/post_details.html', {'post': post})", "title": "" }, { "docid": "747affcc5d5fbc52191a99b7a9ffa0b4", "score": "0.5570872", "text": "def get(self):\n\t\t# getting the numbers of all post comment\n\t\tpost_numdata_comments = numdata_comments_all_post()\n\t\t# getting the hottest comment\n\t\thottest_posts = hottest_dic(post_numdata_comments, 5)\n\t\t# getting all the numbers of comments by the category\n\t\t# ordering\n\t\t# data structure of this behavior\n\t\t# idpost\t\t\t\tnumber of comments, postentity\n\t\t# [('5710932114145280', [6, <models.blog_model.Blog object at 0x04428110>])\n\t\thottest_posts = sort_dictionary_desc(hottest_posts)\n\t\thottest_category = count_comments_by_category(post_numdata_comments)\n\t\thottest_category = hottest_dic(hottest_category, 5)\n\t\thottest_category = sort_dictionary_desc(hottest_category)\n\t\tlogging.error(hottest_category)\n\t\t\n\t\tself.render(\"hottest.html\", hottest_posts = hottest_posts, hottest_category = hottest_category)", "title": "" }, { "docid": "18bd0b6db5005190a4b039255612e138", "score": "0.5568248", "text": "def show_article(request, article_id):\n posts = Post.objects.filter(article=article_id).order_by(\"created\")\n article = Article.objects.get(pk=article_id)\n return render(request, 'articles/article.html', {'posts': posts, 'article': article})", "title": "" }, { "docid": "2cc3362e9e62a7119e7aa873d03193d1", "score": "0.55588794", "text": "def blogpost(request, slug):\n assert isinstance(request, HttpRequest)\n\n post_current = Post.objects.get(slug=slug) # запрос на выбор конкретной статьи по параметру\n comments = Comment.objects.filter(post=post_current)\n\n if request.method == \"POST\": # после отправки данных формы на сервер методом POST\n commentform = CommentForm(request.POST)\n\n if commentform.is_valid():\n comment_f = commentform.save(commit=False)\n comment_f.author = request.user # добавляем (так как этого поля нет в форме) в модель Комментария (Comment) в поле автор авторизованного пользователя\n comment_f.date = datetime.now() # добавляем в модель Комментария (Comment) текущую дату\n comment_f.post = post_current # Blog.objects.get(id=parametr) # добавляем в модель Комментария (Comment) статью, для которой данный комментарий\n comment_f.save() # сохраняем изменения после добавления полей\n\n return redirect('blogpost', slug=post_current.slug) # переадресация на ту же страницу статьи после отправки комментария\n\n else:\n commentform = 
CommentForm() # создание формы для ввода комментария\n\n data = {\n \"title\": post_current.title,\n 'post_current': post_current, # передача конкретной статьи в шаблон веб-страницы\n 'comments': comments,\n \"src\": '/media',\n 'image': post_current.image,\n 'commentform': commentform, # передача формы добавления комментария в шаблон веб-страницы\n 'year':datetime.now().year,\n }\n\n return render(request, 'blog/blogpost.html', data)", "title": "" }, { "docid": "1fb0fba1fb39c954587f4c1878fdff2c", "score": "0.5558202", "text": "def render_post(id):\n return render_template(\"blog/post.html\", post=_get_post(id))", "title": "" }, { "docid": "c12e42a1c60651c6789835aa21da038d", "score": "0.5544281", "text": "def add_comment(request, post_id, username):\n form = CommentForm(request.POST or None)\n if form.is_valid():\n post = get_object_or_404(Post, author__username=username, id=post_id)\n comment = form.save(commit=False)\n comment.post = post\n comment.author = request.user\n comment.save()\n return redirect(\"post\", username, post_id)", "title": "" }, { "docid": "5ef6fa3b926f7136631aabd7573a6f76", "score": "0.55422664", "text": "def extract_random_comment(post, min_length=None):\n url = '%s.json?sort=random' % post['permalink'].rstrip('/')\n r = get(url)\n r.raise_for_status()\n j = r.json()\n comments = [c for c in j[1]['data']['children'] if 'body' in c['data']]\n if min_length:\n comments = [c for c in comments if len(c['data']['body']) >= min_length]\n if len(comments) > 0:\n global last_url\n last_url = normify_url(url)\n return comments[0]\n else:\n raise APIError('The selected post doesn\\'t have any comments')", "title": "" }, { "docid": "5750a522894d6c3ca658308f7dfc74e8", "score": "0.5538344", "text": "def printPost(pid):\n query = ''' SELECT * FROM posts WHERE lower(pid)=?'''\n cursor.execute(query, (pid,))\n post = cursor.fetchone()\n connection.commit()\n\n Type = typeOfPost(pid)\n print('-' * 25 + 'Post Details' + '-' * 24)\n print('<'+Type.upper()+'Post>')\n print('Post ID: {}'.format(pid))\n print('Post date: {}'.format(post[1]))\n print('Post title: {}'.format(post[2]))\n print('Post body: {}'.format(post[3]))\n print('Poster: {}'.format(post[4]))\n print('-' * 60)", "title": "" }, { "docid": "b805d7da964562b118dba51972d0a22a", "score": "0.5532694", "text": "def get_context_data(self, **kwargs):\n context = super(CommentCreate, self).get_context_data(**kwargs)\n # Call the base implementation first to get a context\n context['post'] = get_object_or_404(Post, slug = self.kwargs['slug'])\n # Get the post from id and add it to the context\n return context", "title": "" }, { "docid": "173e1acdc2329e24cf58692ed1234c12", "score": "0.55315", "text": "def add_comment(request, slug):\n #print(\"looool\")\n p = request.POST\n\n if p[\"content\"]:\n author = \"Anonymous\"\n if p[\"author\"]: author = p[\"author\"]\n\n comment = Comment(post=Post.objects.get(slug=slug))\n cf = CommentForm(p, instance=comment)\n cf.fields[\"author\"].required = False\n\n comment = cf.save(commit=False)\n comment.author = author\n comment.save()\n return HttpResponseRedirect(reverse(\"blog.views.post\", args=[slug]))", "title": "" }, { "docid": "bb932d93b732c09857387552fb26fea6", "score": "0.55157614", "text": "def comment(self, comment_text, email=\"\", name=\"\", pseud=None):\n \n if self.id is None:\n return self._work.comment(comment_text, email, name, pseud)\n \n if not self.loaded:\n raise utils.UnloadedError(\"Chapter isn't loaded. 
Have you tried calling Chapter.reload()?\")\n \n if self._session is None:\n raise utils.AuthError(\"Invalid session\")\n \n if self.id is not None:\n return utils.comment(self, comment_text, self._session, False, email=email, name=name, pseud=pseud)", "title": "" }, { "docid": "e3e5d0eff498c13ff561e3f4e63a32cf", "score": "0.55125606", "text": "async def meme(self, ctx: commands.Context):\n subreddit = await self.config.guild(ctx.guild).subreddit()\n async with self.session.get(\n f\"https://www.reddit.com/{subreddit}/top.json?sort=new\"\n ) as resp:\n data = await resp.json()\n data = data[\"data\"]\n children = data[\"children\"]\n post = random.choice(children)[\"data\"]\n title = post[\"title\"]\n url = post[\"url_overridden_by_dest\"]\n link_url = f'https://reddit.com{post[\"permalink\"]}'\n ups = post[\"ups\"]\n comnts = post[\"num_comments\"]\n\n if post[\"over_18\"] is True:\n return await ctx.send(\n \"Cannot show content because it is nsfw,\"\n \" try changing the subreddit lol.\"\n )\n\n embed = (\n discord.Embed(title=title, url=link_url)\n .set_image(url=url)\n .set_footer(text=\"👍 {} | 💬 {}\".format(ups, comnts))\n )\n await ctx.send(embed=embed)", "title": "" }, { "docid": "ed82cab84b16e1b0dfb3e9b6189891fa", "score": "0.55006695", "text": "def post_detail(request, year, month, day, slug):\r\n tt = time.strptime('-'.join([year, month, day]), '%Y-%b-%d')\r\n date = datetime.date(*tt[:3])\r\n try:\r\n post = Post.objects.published().get(slug=slug, pub_date__year=date.year, \r\n pub_date__month=date.month, pub_date__day=date.day)\r\n except Post.DoesNotExist:\r\n raise Http404\r\n \r\n return render_with_context(request, 'blog/post_detail.xhtml', {\r\n 'post': post,\r\n 'title': post.title, \r\n })", "title": "" }, { "docid": "f9692cbb2827b959e5f57229a876b723", "score": "0.54891664", "text": "async def set_comment(self, ctx: commands.Context, *, comment: str = None):\n await sql.execute(\"UPDATE servers SET comment=? 
WHERE serverid=?\", comment, str(ctx.message.guild.id))\n em = discord.Embed(colour=discord.Colour.dark_green())\n if comment:\n em.title = f\"Successfully changed comment symbol to `{comment}`.\"\n else:\n em.title = \"Successfully removed comment symbol.\"\n await ctx.send(embed=em)", "title": "" }, { "docid": "5a4539cc9e588fc2cfdf4ecca0054286", "score": "0.54857767", "text": "def post_comment(request, next=None):\n\n # Require POST\n if request.method != 'POST':\n return http.HttpResponseNotAllowed([\"POST\"])\n\n is_ajax = request.POST.get('is_ajax') and '_ajax' or ''\n\n # Fill out some initial data fields from an authenticated user, if present\n data = request.POST.copy()\n\n if request.user.is_authenticated():\n if not data.get('name', ''):\n data[\"name\"] = request.user.get_full_name()\n if not data.get('email', ''):\n data[\"email\"] = request.user.email\n\n # Look up the object we're trying to comment about\n ctype = data.get(\"content_type\")\n object_pk = data.get(\"object_pk\")\n parent_pk = data.get(\"parent_pk\")\n parent_comment = None\n if ctype is None or object_pk is None:\n return CommentPostBadRequest(\"Missing content_type or object_pk field.\")\n try:\n model = models.get_model(*ctype.split(\".\", 1))\n target = model._default_manager.get(pk=object_pk)\n if parent_pk:\n parent_comment = MpttComment.objects.get(pk=parent_pk)\n except TypeError:\n return CommentPostBadRequest(\n \"Invalid content_type value: %r\" % escape(ctype))\n except AttributeError:\n return CommentPostBadRequest(\n \"The given content-type %r does not resolve to a valid model.\" % \\\n escape(ctype))\n except MpttComment.DoesNotExist:\n return CommentPostBadRequest(\n \"Parent comment with PK %r does not exist.\" % \\\n escape(parent_pk))\n except ObjectDoesNotExist:\n return CommentPostBadRequest(\n \"No object matching content-type %r and object PK %r exists.\" % \\\n (escape(ctype), escape(object_pk)))\n\n # Do we want to preview the comment?\n preview = data.get(\"submit\", \"\").lower() == \"preview\" or \\\n data.get(\"preview\", None) is not None\n\n # Construct the comment form\n form = MpttCommentForm(target, parent_comment=parent_comment, data=data)\n\n # Check security information\n if form.security_errors():\n return CommentPostBadRequest(\n \"The comment form failed security verification: %s\" % \\\n escape(str(form.security_errors())))\n\n # If there are errors or if we requested a preview show the comment\n if form.errors or preview:\n template_list = [\n \"comments/%s_%s_preview%s.html\" % tuple(str(model._meta).split(\".\") + [is_ajax]),\n \"comments/%s_preview%s.html\" % (model._meta.app_label, is_ajax),\n \"comments/preview%s.html\" % is_ajax\n ]\n return render_to_response(\n template_list, {\n \"comment\" : form.data.get(\"comment\", \"\"),\n \"form\" : form,\n \"allow_post\": not form.errors\n },\n RequestContext(request, {})\n )\n\n # Otherwise create the comment\n comment = form.get_comment_object()\n comment.ip_address = request.META.get(\"REMOTE_ADDR\", None)\n if request.user.is_authenticated():\n comment.user = request.user\n\n # Signal that the comment is about to be saved\n responses = signals.comment_will_be_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n\n for (receiver, response) in responses:\n if response == False:\n return CommentPostBadRequest(\n \"comment_will_be_posted receiver %r killed the comment\" % receiver.__name__)\n\n # Save the comment and signal that it was saved\n comment.save()\n 
signals.comment_was_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n\n if is_ajax:\n data['next'] = None\n\n return next_redirect(data, next, 'comments_comment_done%s' % (is_ajax and '_ajax' or ''), c=comment._get_pk_val())", "title": "" }, { "docid": "c3393ccc0ff923e37df4edc8880205e0", "score": "0.5463447", "text": "def test_issues_id_comments_issue_comment_id_post(self):\n pass", "title": "" }, { "docid": "8eaeb2d2cfb921f90c66a034872c0b6f", "score": "0.54462004", "text": "def show_article(request, username, permalink):\n\n try:\n user = User.objects.get(username__iexact=username)\n except User.DoesNotExist:\n raise Http404\n\n try:\n # there should be always only one of those\n article = UserActivity.objects.select_related().get(\n username__iexact = username,\n post__permalink = permalink,\n status = UserActivity.PUBLIC_STATUS)\n except UserActivity.DoesNotExist:\n # check for draft articles\n articles = UserActivity.objects.select_related().filter(\n username__iexact = username, \n post__permalink = permalink,\n status = UserActivity.DRAFT_STATUS)\n if len(articles)==0:\n raise Http404\n else:\n article = articles[0] \n # show draft only if currently logged in user is viewing\n if (not request.user.is_superuser) and (request.user != user):\n raise Http404\n\n data = {\n 'activity' : article,\n 'user_profile': user,\n }\n\n return render(\n request,\n templates['ARTICLE'],\n data,\n )", "title": "" }, { "docid": "b461cd019185f21f700becac53a9d450", "score": "0.5431777", "text": "def comment_handler(sender, instance, created, **kwargs):\n recipients = []\n if instance.parent and instance.parent.author != instance.author:\n # comment is part of a thread\n parent_comment_author = instance.parent.author\n recipients.append(parent_comment_author)\n comment_author = instance.author\n article = instance.article\n desc_string = \"{} posted a comment to {} on {}\"\n article_author = article.author\n if article_author.id != comment_author.id:\n recipients.append(article_author)\n url = reverse(\n \"articles:article-details\", args=[article.slug])\n resource_url = f\"{settings.DOMAIN}{url}\"\n\n notify.send(comment_author,\n recipient=recipients,\n description=desc_string.format(comment_author.username,\n article or instance,\n instance.created_at.strftime('%d-%B-%Y %H:%M')), # noqa\n verb=verbs.COMMENT_CREATED,\n target=article or instance,\n action_object=instance,\n resource_url=resource_url)", "title": "" }, { "docid": "2eb07e6bfbb75ae544755f38237d11c4", "score": "0.5428052", "text": "def test_post_view_with_permalink(self):\n reset_db()\n p = create_post_with_comments()\n c = Client()\n response = c.get(reverse('blog.views.post_view', args=[p.permalink] ) )\n self.assertEqual(response.status_code, 200, 'Expected 200.')\n self.assertEqual(response.context['post'].id, p.id, 'Mismatch on post ids.')", "title": "" }, { "docid": "41805d8194b675b04493a8233966397e", "score": "0.5425533", "text": "def test_single_post_view(self):\n reset_db()\n p = create_post_with_comments()\n c = Client()\n response = c.get('/post/%s/%s' % (p.id, p.permalink) )\n self.assertEqual(response.status_code, 200, 'HTTP error.')\n self.assertIsNotNone(response.context['post'], 'No post returned.')", "title": "" } ]
d892852aa3e7a6ea40c368be4d467adc
Convert value to boolean.
[ { "docid": "62d72b52a5d57f1f68f69a09dd84d2d0", "score": "0.80972165", "text": "def toBool(self, val: Any) -> bool:\n if type(val) == bool:\n return val\n elif type(val) == int:\n return val != 0\n elif type(val) == str:\n v = val.lower()\n return not (v == \"no\" or v == \"false\" or v == \"0\")\n _LOGGER.warning(\n \"Visonic unable to decode boolean value %s type is %s\", val, type(val)\n )\n return False", "title": "" } ]
[ { "docid": "5d4bd8517d78334032a46f042455bfcc", "score": "0.863882", "text": "def to_boolean(value: Any) -> bool:\n return BooleanConverter.to_boolean_with_default(value, False)", "title": "" }, { "docid": "fee659ff6685b59b558ebd84f3dfcd34", "score": "0.85566324", "text": "def _asBool(self):\n return bool(self.value)", "title": "" }, { "docid": "5549e9521f472bef0c2e79ce6fadbfee", "score": "0.8505888", "text": "def Boolean(value):\n return asbool(value)", "title": "" }, { "docid": "152a19f4ccb3cf31bcd5e49f3215af9f", "score": "0.8140877", "text": "def ToBool(value):\n if str(value).lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\"): return True\n if str(value).lower() in (\"no\", \"n\", \"false\", \"f\", \"0\", \"0.0\", \"\", \"none\", \"[]\", \"{}\"): return False\n raise Exception('Invalid value for boolean conversion: ' + str(value))", "title": "" }, { "docid": "aa3afed99a944fa7ea8ee3c511dfb7dd", "score": "0.8091099", "text": "def to_bool(value):\n return str(value).lower() in ['1', 't', 'true', 'y', 'yes']", "title": "" }, { "docid": "5b6a6f43f74198e575baa21b4b31dc46", "score": "0.805445", "text": "def _cast_boolean(self, value):\n value = str(value)\n if value == '':\n return False\n else:\n return value.strip() in ['True', 'true', '1']", "title": "" }, { "docid": "5d0b1f098787a4c1345d5ddbc97bff83", "score": "0.79466623", "text": "def Bool(value):\n return get_env().formula_manager.Bool(value)", "title": "" }, { "docid": "4b2de0974683f4b14a70e9eac5686603", "score": "0.7910972", "text": "def to_bool(val) -> bool:\n\n if type(val) == bool:\n return val\n else:\n return bool(strtobool(val))", "title": "" }, { "docid": "5ecc9b7699d7f12da4a3338cfffcc4c9", "score": "0.7887335", "text": "def to_bool(value):\n if str(value).lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\"): return True\n if str(value).lower() in (\"no\", \"n\", \"false\", \"f\", \"0\", \"0.0\", \"\", \"none\", \"[]\", \"{}\"): return False\n raise Exception('Invalid value for boolean conversion: ' + str(value))", "title": "" }, { "docid": "0b7e2fd26e6ba7ad0241181964ee8c83", "score": "0.78812224", "text": "def to_bool(value):\n if value is None:\n return False\n if str(value).lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\"):\n return True\n if str(value).lower() in (\"no\", \"n\", \"false\", \"f\", \"0\", \"0.0\", \"\", \"none\", \"[]\", \"{}\"):\n return False\n raise Exception('Invalid value for boolean conversion: ' + str(value))", "title": "" }, { "docid": "a05ebade6b6931beb40494e0aed19ddc", "score": "0.778305", "text": "def boolean_argument(value):\n return bool(strtobool(value))", "title": "" }, { "docid": "a05ebade6b6931beb40494e0aed19ddc", "score": "0.778305", "text": "def boolean_argument(value):\n return bool(strtobool(value))", "title": "" }, { "docid": "b9b5c3ec5dea158c5f87928bc5a796a5", "score": "0.77716255", "text": "def asbool(value):\n if value is None:\n return False\n\n if isinstance(value, bool):\n return value\n\n return value.lower() in (\"true\", \"1\")", "title": "" }, { "docid": "f2a9974c3e0df6f237cf448194d5193d", "score": "0.77589124", "text": "def cast_value(self, value: str) -> typing.Any:\n if value.lower() in ['true', '1']:\n return True\n elif value.lower() in ['false', '0']:\n return False\n else:\n return bool(value)", "title": "" }, { "docid": "b822c642ed923e6f3ef73368f40c4a98", "score": "0.77273583", "text": "def convert_to_boolean(value):\n if isinstance(value, string_types):\n if value.lower() in ['t', 'true', 'on', 'yes', '1']:\n return True\n elif value.lower() in ['f', 'false', 
'off', 'no', '0']:\n return False\n\n return value", "title": "" }, { "docid": "d904a4d5d3cfc3ef0854072941be1289", "score": "0.77238077", "text": "def to_bool(value=\"\"):\n # type: (str) -> bool\n return str(value).lower() in {\"true\", \"on\", \"1\", \"yes\", \"oui\"}", "title": "" }, { "docid": "c82741facae2627581e676a65edd8a5b", "score": "0.7716701", "text": "def __bool__(self):\n return bool(self._value)", "title": "" }, { "docid": "dd3be86b0e8a070111f687720078ae91", "score": "0.7668287", "text": "def _convert_to_boolean(value):\n if value.lower() not in Config.BOOLEAN_STATES:\n raise ValueError('Not a boolean: %s' % value)\n return Config.BOOLEAN_STATES[value.lower()]", "title": "" }, { "docid": "ddcba74d8e55d97272a649011032af42", "score": "0.766755", "text": "def _bool(value):\r\n if value == 0 or isinstance(value, (str, native_str)):\r\n return True\r\n return bool(value)", "title": "" }, { "docid": "e9ad4b790d01e118633f57d689acb3ea", "score": "0.7658754", "text": "def boolean_converter(val):\n\n boolean_vals = ('true', '1', 'yes', 'y')\n\n if val and (str(val).lower() in boolean_vals):\n return True\n return False", "title": "" }, { "docid": "90f61bd8b0e8a39d176557a4ef303050", "score": "0.7608571", "text": "def to_python(self, value):\r\n if value in (True, 'True', '1'):\r\n return True\r\n elif value in (False, 'False', '0'):\r\n return False\r\n else:\r\n return None", "title": "" }, { "docid": "f740c93fce8cfd419b3ff77dff114296", "score": "0.7589277", "text": "def to_boolean(value):\n _BOOL_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,\n '0': False, 'no': False, 'false': False, 'off': False}\n\n value = str(value).lower().strip()\n if value not in _BOOL_STATES:\n raise ValueError(value)\n return _BOOL_STATES[value]", "title": "" }, { "docid": "b6ab2da8d9699ed2a31aa3a953649393", "score": "0.7586434", "text": "def __bool__(self) -> bool:\n return bool(self._value)", "title": "" }, { "docid": "f34b30f231730d7f6c9bf972c49e7f72", "score": "0.75436604", "text": "def get_boolean_value(self):\n if self._type != Value.BOOLEAN:\n raise Exception(\"Illegal boolean\")\n return self._value", "title": "" }, { "docid": "ce57f426af424934d2deeabfb2412a22", "score": "0.7542982", "text": "def asBool(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "ce57f426af424934d2deeabfb2412a22", "score": "0.7542982", "text": "def asBool(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "ecad8791f970f3723fd59de75ffc284f", "score": "0.7542822", "text": "def test_to_value(self, data, value):\n assert value == Boolean.to_value(data)", "title": "" }, { "docid": "6599dd8409758766cc99868194981aa4", "score": "0.75231975", "text": "def is_true(value):\n return value is True", "title": "" }, { "docid": "ad565e81ac837d09fd25a83fd2ac3e97", "score": "0.7481766", "text": "def _getValueAsBoolean(self, node):\n value = node.attrib[\"value\"]\n assert value in (\"True\", \"False\",), \\\n \"Unexpected boolean value '%s' in tag <%s>\" % (value, node.tag)\n return value == \"True\"", "title": "" }, { "docid": "df59fb4f07aad74e40eebe0be6f83683", "score": "0.7478464", "text": "def tobool( val ):\n\n if val.lower() in [ \"y\", \"yes\", \"t\", \"true\", \"1\", 1 ]:\n return True\n\n else:\n return False", "title": "" }, { "docid": "bc8bfe7d7d2c500b9c511901b45d15bb", "score": "0.74751365", "text": "def to_bool(value: str) -> bool:\n if value in [\"True\", \"true\", True]:\n return True\n if value in [\"False\", \"false\", False]:\n return False\n raise ValueError(f\"Value {value} cannot be converted into a 
bool\")", "title": "" }, { "docid": "3a863b5224795c599adf7234f7f187cf", "score": "0.7473932", "text": "def __bool__(self) -> bool:\n return bool(self.value())", "title": "" }, { "docid": "f480182054800cbca996c685b76317db", "score": "0.7471968", "text": "def _convert_bools(self, name: str, value) -> bool:\n if isinstance(value, bool):\n return value\n\n if value == None:\n return None\n\n value = str(value).lower()\n # Try to convert values to the positive.\n if value == '1' or value == 'true':\n return True\n # Try to convert values to the negative.\n elif value == '0' or value == 'false':\n return False\n else:\n AttributeError('%s field %s should be type bool.' % (\n __class__.__name__, name))\n logging.error('%s field %s should be type bool.' % (\n __class__.__name__, name))", "title": "" }, { "docid": "ad80c7e306973716e36fac67f06192a9", "score": "0.74677265", "text": "def convert(self, value, db=None):\n if value is UNASSIGNED:\n return value\n elif value == self.false_label:\n return False\n elif value == self.true_label:\n return True\n return bool(value)", "title": "" }, { "docid": "b163d580bcb1c750caf0b71030346de0", "score": "0.74324596", "text": "def is_true(value):\n\n return (value is True)", "title": "" }, { "docid": "047685bb875f18d9a673013d8ded321a", "score": "0.7405042", "text": "def cast(self, val):\n if isinstance(val, bool):\n return BoolVal(val, self.ctx)\n if z3_debug():\n if not is_expr(val):\n msg = \"True, False or Z3 Boolean expression expected. Received %s of type %s\"\n _z3_assert(is_expr(val), msg % (val, type(val)))\n if not self.eq(val.sort()):\n _z3_assert(self.eq(val.sort()), \"Value cannot be converted into a Z3 Boolean value\")\n return val", "title": "" }, { "docid": "fd7c1a085118afb1096d5f8cacbabeed", "score": "0.73866117", "text": "def convert_tobooleanvalue(i: bytes) -> bool:\n return bool(int(i))", "title": "" }, { "docid": "58d8cf9c812e8cb9aa79f282a7264d58", "score": "0.7379311", "text": "def int_to_bool(value: int) -> bool:\n return False if value == 0 else True", "title": "" }, { "docid": "b560a25aefc9503e378b1a25d83d3cf4", "score": "0.73696303", "text": "def ToBool(value, default=_DEFAULT):\n value = str(value).lower()\n if value in constants.TRUE_VALUES:\n return True\n if value in constants.FALSE_VALUES:\n return False\n if default is _DEFAULT:\n raise ConversionError('Cannot convert %r to bool' % value)\n return default", "title": "" }, { "docid": "18924c11bd08910462a714d3aadd9ffa", "score": "0.73639184", "text": "def _isBool(self):\n return isinstance(self.value, bool)", "title": "" }, { "docid": "303968e630cce419da26a43e15ad30b2", "score": "0.73460144", "text": "def str_to_bool(value):\n\n\treturn bool(value) and value.lower() != 'false'", "title": "" }, { "docid": "fa7f24fa1a2b7dba88d9ea2f63cfd825", "score": "0.73320675", "text": "def str2bool(value: str):\r\n if type(value) == bool:\r\n return_val = value\r\n else:\r\n return_val = value.lower() in (\"yes\", \"true\", \"t\", \"1\")\r\n\r\n return return_val", "title": "" }, { "docid": "308d960b2394d3cf356386255060a57d", "score": "0.72950953", "text": "def coerceBoolean(value, mode = \"best\"):\n\n # convert true values\n if value in [ 1, \"true\", u\"true\", \"yes\", u\"yes\" ]:\n return True\n\n # convert false values\n if value in [ 0, \"false\", u\"false\", \"no\", u\"no\" ]:\n return False\n\n return _handleMode(value, \"boolean\", mode)", "title": "" }, { "docid": "af5884d6985969a1ae66b7b768681847", "score": "0.728786", "text": "def str_to_bool(value):\n value = value.lower()\n return 
value in ['true', '1', 'up', 'on']", "title": "" }, { "docid": "b63c7737c56ecb65287daf11935571ce", "score": "0.72775364", "text": "def _to_bool(x):\n return x.astype(bool)", "title": "" }, { "docid": "d1fc6eee2a24c6d115f05d08b17b3fe4", "score": "0.72260445", "text": "def get_as_boolean(self, index):\n value = self[index]\n return BooleanConverter.to_boolean(value)", "title": "" }, { "docid": "1b0710f5c864160a2ee4636c9df306ca", "score": "0.7219768", "text": "def isBool(self, input_val):\n if isinstance(input_val, str):\n if input_val.upper() in ['FALSE', '0', 'NO']:\n input_val = False\n elif input_val.upper() in ['TRUE', '1', 'YES']:\n input_val = True\n else:\n input_val = None\n elif isinstance(input_val, int):\n if input_val == 0:\n input_val = False\n if input_val == 1:\n input_val = True\n return input_val", "title": "" }, { "docid": "10ed659e88d76c16a8c08cf0686aa763", "score": "0.71981543", "text": "def bool_value(self):\n if self.value:\n if self.value.lower() in self.MEANS_YES:\n return True\n return False\n return None", "title": "" }, { "docid": "85517e0e1cc5c6324b0a30b20d2927e4", "score": "0.71761125", "text": "def check_value(self, value: typing.Any) -> bool:\n return isinstance(value, bool)", "title": "" }, { "docid": "19f9780982e16e4347626a635cf3a936", "score": "0.71621436", "text": "def coerce_boolean(value):\n if is_string(value):\n normalized = value.strip().lower()\n if normalized in ('1', 'yes', 'true', 'on'):\n return True\n elif normalized in ('0', 'no', 'false', 'off', ''):\n return False\n else:\n msg = \"Failed to coerce string to boolean! (%r)\"\n raise ValueError(format(msg, value))\n else:\n return bool(value)", "title": "" }, { "docid": "0d0e9305d874b730505cb9d61668baf6", "score": "0.7159789", "text": "def getValue(self) -> \"bool\" :\n return _quickfix.BoolField_getValue(self)", "title": "" }, { "docid": "aa9b9533dd98cb4c8e0be397fe7e88ae", "score": "0.71549124", "text": "def __nonzero__(self):\n return bool(self.value)", "title": "" }, { "docid": "17b96aafb0ade443469d31af099aea25", "score": "0.7142359", "text": "def boolean(value, context=None):\n try:\n return bool(value)\n except (TypeError, ValueError):\n raise ValidationException('This field is suppose to be boolean')", "title": "" }, { "docid": "9fb1ff055a4a07904cc53e5b4ccbe379", "score": "0.71229684", "text": "def toBool(x):\n if x.lower() == 'true':\n return True\n elif x.lower() == 'false':\n return False\n else:\n raise ValueError", "title": "" }, { "docid": "3152c4dc4abd610c93e6df0d969d5a85", "score": "0.71187764", "text": "def boolean(val):\n\n return str(val).lower() in ('1', 'yes', 'true', 'on')", "title": "" }, { "docid": "5ff118df1008664795ea658297365504", "score": "0.7100943", "text": "def return_true(value):\n\n return True", "title": "" }, { "docid": "53a0bd3c59e574e293c4f3dbb6e9abc2", "score": "0.70947546", "text": "def test_boolean_true_returns_true(self):\n self.assertEqual(to_bool(True), True)", "title": "" }, { "docid": "9fc411687c2f88283d0a4a9acb423ad3", "score": "0.7082532", "text": "def create_boolean(value):\n return jpath4.query.data.StandardSequence([jpath4.query.data.StandardBoolean(value)])", "title": "" }, { "docid": "88492eeb0a75a5a12a18e2982043c974", "score": "0.7072428", "text": "def as_bool(self, key):\n val = self[key]\n if val == True:\n return True\n elif val == False:\n return False\n else:\n try:\n if not isinstance(val, StringTypes):\n raise KeyError\n else:\n return self.main._bools[val.lower()]\n except KeyError:\n raise ValueError('Value \"%s\" is neither True nor 
False' % val)", "title": "" }, { "docid": "694b34e0e0a4e600e9f4259b9eac7dcc", "score": "0.70704985", "text": "def _createBool(newvalue):\n return CableValue(bool(newvalue))", "title": "" }, { "docid": "7deb4fa56286f58f427fb660228af857", "score": "0.7061925", "text": "def to_nullable_boolean(value: Any) -> Optional[bool]:\n # Shortcuts\n if value is None:\n return None\n if type(value) is type(True):\n return value\n\n str_value = str(value).lower()\n # All true values\n if str_value in ['1', 'true', 't', 'yes', 'y']:\n return True\n # All false values\n if str_value in ['0', 'false', 'f', 'no', 'n']:\n return False\n\n # Everything else:\n return None", "title": "" }, { "docid": "8b803f187ea1c634faafc873dd25c193", "score": "0.7034297", "text": "def process_value(self, value):\n if value == self.TRUE_VALUE:\n return True\n elif value == self.FALSE_VALUE:\n return False\n else:\n raise QueryParameterInvalidError(self.name, value)", "title": "" }, { "docid": "ac3ef8af0471644195044b0f76e57e09", "score": "0.70269805", "text": "def _isValueBoolean(actuator):\n return actuator.value == 0 or \\\n actuator.value == 1 or \\\n actuator.value.upper == \"TRUE\" or \\\n actuator.value.upper == \"FALSE\"", "title": "" }, { "docid": "7083dc2fa0d3116edfc60c91e3d3867b", "score": "0.702437", "text": "def str2bool(val):\n if val not in [u\"True\", u\"False\"]:\n # Not using ValueError intentionally: all config errors are RuntimeError\n raise RuntimeError(\"Found %s instead of 'True' of 'False'\" % val)\n elif val == u\"True\":\n return True\n else:\n return False", "title": "" }, { "docid": "efa7c3dcdb41fd829b58f2086e05dd18", "score": "0.702407", "text": "def str2bool(value: Union[bool, str]) -> bool:\n\n value2 = False\n\n if isinstance(value, bool):\n value2 = value\n else:\n value2 = value.lower() in ('yes', 'true', 't', '1', 'on')\n\n return value2", "title": "" }, { "docid": "5bb497518166dcd48c2db5cf81d5ed56", "score": "0.70188636", "text": "def boolean(value):\n def _boolean():\n return value\n return _boolean", "title": "" }, { "docid": "9d2d9cd5b0141f91dd51756100b0f12c", "score": "0.7016583", "text": "def boolval(b):\n b = str(b).lower()\n if b in [ 'true', '1', 'yes']:\n return True\n elif b in [ 'false', '0', 'no']:\n return False\n return False", "title": "" }, { "docid": "c30fd04719265bc7eba35aa0d9638f19", "score": "0.7000714", "text": "def create_boolean(b):\n return Value(Value.BOOLEAN, b)", "title": "" }, { "docid": "202d7a8ae17ab603e07755240e67ebf3", "score": "0.698797", "text": "def to_python(self, value):\n # Explicitly check for the string 'False', which is what a hidden field\n # will submit for False. Also check for '0', since this is what\n # RadioSelect will provide. 
Because bool(\"True\") == bool('1') == True,\n # we don't need to handle that explicitly.\n\n if isinstance(value, six.string_types) and value.lower() in ('false', '0'):\n value = False\n\n elif value is None:\n value = False\n\n elif value is False:\n value = False\n\n elif isinstance(value, six.string_types) and value.lower() in ('true', '1'):\n value = True\n\n elif value is True:\n value = True\n\n else:\n raise ValidationError(u'%s is not boolean' % value)\n\n return value", "title": "" }, { "docid": "65e141b751c949981991292048b86e74", "score": "0.6986076", "text": "def booleanize(value):\n true_values = (\"yes\", \"true\", \"1\")\n false_values = (\"no\", \"false\", \"0\")\n if isinstance(value, bool):\n return value\n if value.lower() in true_values:\n return True\n elif value.lower() in false_values:\n return False\n raise TypeError(\"Cannot booleanize ambiguous value '%s'\" % value)", "title": "" }, { "docid": "4eaaaeb9cf23123e283c45ddd5f97da4", "score": "0.69842726", "text": "def b(value):\n return 'true' if value else 'false'", "title": "" }, { "docid": "b1c9f2d3469c5d1bd869f6034e393c0d", "score": "0.69831014", "text": "def get_boolean(self, key):\n def convert(val, context):\n if val in [True, False, None]:\n return val\n elif str(val).lower() in [\"y\", \"yes\", \"true\", \"on\", \"1\"]:\n return True\n elif str(val).upper() in [\"n\", \"no\", \"false\", \"off\", \"0\"]:\n return False\n else:\n raise ConfigConversionError(\n \"must be y/n, yes/no, true/false, on/off, or 0/1\")\n return self.get(key, convert)", "title": "" }, { "docid": "3afa7882bea0c527b43a43bf1c02b506", "score": "0.6974749", "text": "def convert_config_value(self, value, label):\n if isinstance(value, six.string_types):\n value = value.lower()\n\n if value in self.TRUTHY_VALUES:\n return True\n elif value in self.FALSY_VALUES:\n return False\n else:\n raise YapconfValueError(\n \"Cowardly refusing to interpret \"\n \"config value as a boolean. 
Name: \"\n \"{0}, Value: {1}\".format(self.name, value)\n )", "title": "" }, { "docid": "242c4390159ee9604ee6a46601eada41", "score": "0.695988", "text": "def tgread_bool(self):\n value = self.read_int(signed=False)\n if value == 0x997275b5: # boolTrue\n return True\n elif value == 0xbc799737: # boolFalse\n return False\n else:\n raise RuntimeError('Invalid boolean code {}'.format(hex(value)))", "title": "" }, { "docid": "6c06cf052cbb81a503c348c4594111ef", "score": "0.6923948", "text": "def read_boolean(self):\n s = self.read_typed(str).lower()\n if s == 'true':\n return True\n elif s == 'false':\n return False\n else:\n raise ValueError('Cannot convert \\'%s\\' to a boolean' % s)", "title": "" }, { "docid": "d65e63e8967b9ebdda6dc4700d43058c", "score": "0.69234395", "text": "def is_bool(value: int) -> bool:\n return TypeRanges.BOOL.inf <= remove_base_prefix(value) <= TypeRanges.BOOL.max", "title": "" }, { "docid": "f571247350ec7d632b26ddf7a1996965", "score": "0.69179946", "text": "def parse_bool(value):\n try:\n return bool(int(value))\n except ValueError:\n lowercase = value.lower()\n if lowercase == 'true':\n return True\n elif lowercase == 'false':\n return False\n raise ValueError('Unable to parse {} as a bool'.format(value))", "title": "" }, { "docid": "3160f070a603119490211288df8f978b", "score": "0.69152445", "text": "def bool_from_header_value(value):\n if isinstance(value, bool):\n return value\n elif isinstance(value, (basestring, unicode)):\n if str(value).lower() == 'true':\n return True\n return False", "title": "" }, { "docid": "30ff6569861b4352f1e780a8026ff96e", "score": "0.69151366", "text": "def value_to_bool(value: Optional[OptionValue]) -> Optional[bool]:\n retval: Optional[bool]\n\n if value is None:\n retval = None\n else:\n if isinstance(value, bool):\n retval = value\n elif isinstance(value, (int, float)):\n retval = bool(value)\n elif isinstance(value, str):\n if value in TRUE_VALUES:\n retval = True\n elif value in FALSE_VALUES:\n retval = False\n else:\n raise ValueError\n else:\n raise ValueError\n\n return retval", "title": "" }, { "docid": "7de6fbe87543b2a089fff9fe43df98c8", "score": "0.68959", "text": "def BoolVal(val, ctx=None):\n ctx = _get_ctx(ctx)\n if val:\n return BoolRef(Z3_mk_true(ctx.ref()), ctx)\n else:\n return BoolRef(Z3_mk_false(ctx.ref()), ctx)", "title": "" }, { "docid": "ee062ebe39a4b56b955fe272b71e5e2b", "score": "0.68944204", "text": "def to_bool_arg(self, arg):\n if arg == 1:\n return True\n elif arg == -1:\n return False\n else:\n Exception(\n f\"Unknown int value for the trarnsformation to bool: {arg}\")", "title": "" }, { "docid": "8620873d239787c909fd657f33a36606", "score": "0.6894205", "text": "def _booleanConverter(self, input):\n if input in [True, False]:\n return input\n\n if input in [1, '1']:\n return True\n\n if input in [0, '0']:\n return False\n\n try:\n input_lower = input.lower()\n except:\n raise ValueError('Not a boolean value: %s' % (input))\n\n if input_lower in ['yes', 'true']:\n return True\n if input_lower in ['no', 'false']:\n return False\n\n raise ValueError('Not a boolean value: %s' % (input))", "title": "" }, { "docid": "a917be036c9e4971b80a0a83c05a4cd0", "score": "0.68868613", "text": "def _bool(v):\n try:\n if isinstance(v, basestring):\n return v.lower() in ['t', 'true', '1']\n return bool(v)\n except Exception:\n return False", "title": "" }, { "docid": "678b468f331a8cdc82c7cd764d3eac5e", "score": "0.6871099", "text": "def as_bool(self, key):\r\n val = self[key]\r\n if val == True:\r\n return True\r\n elif val 
== False:\r\n return False\r\n else:\r\n try:\r\n if not isinstance(val, basestring):\r\n # TODO: Why do we raise a KeyError here?\r\n raise KeyError()\r\n else:\r\n return self.main._bools[val.lower()]\r\n except KeyError:\r\n raise ValueError('Value \"%s\" is neither True nor False' % val)", "title": "" }, { "docid": "2bb29530594fcdb50b95db11ead3166f", "score": "0.6866402", "text": "def get_bool(value: str) -> Union[bool, None]:\n lowered = str(value).lower()\n\n if lowered in (\"yes\", \"y\", \"true\", \"t\", \"1\", \"enable\", \"on\"):\n val = True\n elif lowered in (\"no\", \"n\", \"false\", \"f\", \"0\", \"disable\", \"off\"):\n val = False\n else:\n val = None\n\n return val", "title": "" }, { "docid": "18c821be7834ef5744dbb6c01b41fc2b", "score": "0.6851357", "text": "def bool(self, python_bool_var):\n return \"true\" if python_bool_var else \"false\"", "title": "" }, { "docid": "ad5815754f4fdf1824bb472f7daccf41", "score": "0.684874", "text": "def str_to_bool(value):\n if isinstance(value, bool):\n return value\n if value.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif value.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "0b5cafaf8828264b59281d1e57772aa0", "score": "0.68455356", "text": "def encode_boolean(self, b):\r\n if bool(b):\r\n return 'true'\r\n return 'false'", "title": "" }, { "docid": "c17a19290ec6accd24b975d73ebc2c0e", "score": "0.68455225", "text": "def to_python(self, value):\r\n # Explicitly check for the string 'False', which is what a hidden field\r\n # will submit for False. Also check for '0', since this is what\r\n # RadioSelect will provide. Because bool(\"True\") == bool('1') == True,\r\n # we don't need to handle that explicitly.\r\n if value in ('False', '0'):\r\n value = False\r\n else:\r\n value = bool(value)\r\n value = super(BooleanField, self).to_python(value)\r\n if not value and self.required:\r\n raise ValidationError(self.error_messages['required'])\r\n return value", "title": "" }, { "docid": "ffbce9bfcfe147945be164c5bb1fa622", "score": "0.68400574", "text": "def to_be_a_bool(self, message: Optional[str] = \"\") -> bool:\n return cast(bool, self.to_be_a(ExpectedType.BOOLEAN, message))", "title": "" }, { "docid": "5f0c34b070eec24419d7041290065b0a", "score": "0.6823493", "text": "def str_to_bool(value):\n if not value:\n return False\n if value.lower() in STR_FALSE:\n return False\n if value.lower() in STR_TRUE:\n return True\n raise ValueError(\"Invalid truth value '%s'\" % value)", "title": "" }, { "docid": "17ca97bc6a0078427e1e55826c4b9d5e", "score": "0.6804988", "text": "def to_bool(var):\n if isinstance(var, str):\n if var == '' or var.lower() == 'false':\n return False\n else:\n return True\n return bool(var)", "title": "" }, { "docid": "35a5b9c43e8c6805dec62e2fbe010672", "score": "0.6801289", "text": "def convert_to_boolean(obj):\n istrue = ('true', 'yes', 'ok', '1', 'on', '+', 'True', 'Yes', 'Ok', 'On', 'TRUE', 'YES', 'OK', 'ON', 1, 1.0)\n isfalse = ('false', 'no', '0', '-', 'off', 'False', 'No', 'Off', 'FALSE', 'NO', 'OFF', 0, 0.0)\n\n if isinstance(obj, (str, int, float)):\n if obj in istrue:\n return True\n elif obj in isfalse:\n return False\n else:\n raise TypeError('could not convert to boolean')\n\n elif hasattr(obj, '__bool__'):\n return bool(obj)\n\n raise TypeError('could not convert to boolean')", "title": "" }, { "docid": "58f18e3557b8c79019f8017bfa25cf8b", "score": "0.68012357", "text": "def str2bool(v):\n 
if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise NameError('Boolean value expected.')", "title": "" }, { "docid": "5936cac0641299577fb3f7276714bea4", "score": "0.68003225", "text": "def boolean_attribute(value: bool) -> bool:\n return attr.ib(default=value, type=bool, kw_only=True)", "title": "" }, { "docid": "753c94461979fb53b449a8a06d3cedef", "score": "0.6795977", "text": "def test_to_data(self, value, data):\n assert data == Boolean.to_data(value)", "title": "" }, { "docid": "ed23ee9b97a79f52093d9773eb6a3ecb", "score": "0.67912906", "text": "def str2bool(v:Union[str, bool]) -> bool:\n if isinstance(v, bool):\n b = v\n elif v.lower() in ('yes', 'true', 't', 'y', '1'):\n b = True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n b = False\n else:\n raise ValueError('Boolean value expected.')\n return b", "title": "" }, { "docid": "db971e7830655710e22e31b611ef5080", "score": "0.67856514", "text": "def to_boolean_with_default(value: Any, default_value: bool) -> bool:\n result = BooleanConverter.to_nullable_boolean(value)\n return result if not (result is None) else bool(default_value)", "title": "" }, { "docid": "a911ead5ace1e02ccb1b9eb40e9ce23f", "score": "0.6782127", "text": "def boolify(var):\n\n if isinstance(var, str):\n var = strtobool(var)\n if not isinstance(var, bool):\n var = bool(var)\n return var", "title": "" }, { "docid": "e2d502d114c71f9348dda3ad98f23769", "score": "0.6767168", "text": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise ValueError('Boolean value expected.')", "title": "" } ]
5f2376e139bf9216035dd5e8721f8c77
Get a 2d numpy array of obstacles in the form [[x, y], [x, y], ...]
[ { "docid": "74d1869550c2ceeb24977a200a7bdbae", "score": "0.7013155", "text": "def get_obstacles(self):\n return np.argwhere(self.obstacles == True) * self.config.grid_reso", "title": "" } ]
[ { "docid": "3e0ccacd4db9e4bc1fcb1726c1a0fa96", "score": "0.7382825", "text": "def obstacles(self):\n return list(self._obstacles)", "title": "" }, { "docid": "6586c95dc5ce60c31e82d33802373a2e", "score": "0.7301217", "text": "def get_obstacles_positions(obstacles: []) -> List[Position]:\n obstacles_positions: List[Position] = []\n # Iterate over two-dimensional array\n for obstacle in obstacles:\n # Create a position object to append to a list\n position = Position(obstacle[0], obstacle[1])\n obstacles_positions.append(position)\n \n return obstacles_positions", "title": "" }, { "docid": "9cfec27324a94fe3b91b1970dca4f753", "score": "0.66640806", "text": "def get_static_obstacles(self):\n # gap = 0.20/self.difficulty\n #\n # # Vertical bars\n # self.O1 = (0.47, 0, 0.47 + self.wall_width, 0.17 - gap)\n # self.O2 = (0.47, 0.15 + gap, 0.47 + self.wall_width, 0.85 - gap)\n # self.O3 = (0.47, 0.85 + gap, 0.47 + self.wall_width, 1)\n #\n # # Top horizontal bars\n # self.O4 = (0, 0.65, 0.25 - gap, 0.65 + self.wall_width)\n # self.O5 = (0.25 + gap, 0.65, 0.75 - gap, 0.65 + self.wall_width)\n # self.O6 = (0.75 + gap, 0.65, 1, 0.65 + self.wall_width)\n #\n # # Bottom Horizontal bars\n # self.O7 = (0, 0.3, 0.75 - gap, 0.3 + self.wall_width)\n # self.O8 = (0.75 + gap, 0.3, 1, 0.3 + self.wall_width)\n #\n # obstacles = [self.O1, self.O2, self.O3, self.O4, self.O5, self.O6, self.O7, self.O8]\n #\n\n self.O1 = (0, 0.25, 0 + self.wall_width + 0.45, 0.25 + self.wall_width) # (0, 0.25, 0.5, 0.3)\n self.O2 = (0.5, 0.25, 0.5 + self.wall_width, 0.25 + self.wall_width + 0.5) # (0.5, 0.25, 0.55, 0.8)\n obstacles = [self.O1, self.O2]\n\n # obstacles = []\n return obstacles", "title": "" }, { "docid": "02b235591e00832cd6400ac079a719e0", "score": "0.64403796", "text": "def generate_obstacles(self, num_obs):\n obs_list = []\n while len(obs_list) < num_obs:\n obs = [random.randint(1, self.x_axis_max - 1), random.randint(1, self.y_axis_max - 1)]\n if obs in obs_list:\n continue\n elif obs[0] == self.goal[0] and obs[1] == self.goal[1]:\n continue\n elif obs[0] == self.start[0] and obs[1] == self.start[1]:\n continue\n else:\n obs_list.append(obs)\n\n # set grid map boundary as obstacles as well\n for x in range(1, self.x_axis_max):\n obs_list.append([x, 0])\n obs_list.append([x, self.y_axis_max])\n for y in range(1, self.y_axis_max):\n obs_list.append([0, y])\n obs_list.append([self.x_axis_max, y])\n obs_list.append([0, 0])\n obs_list.append([0, self.y_axis_max])\n obs_list.append([self.x_axis_max, 0])\n obs_list.append([self.x_axis_max, self.y_axis_max])\n\n # print(obs_list)\n # print(len(obs_list))\n return obs_list", "title": "" }, { "docid": "efdc794cce2a38b3c931f0fe59dd7a6e", "score": "0.618513", "text": "def get_1D_neighbors(self):\n adjacents = [(-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1)]\n \n nb = np.full((self.obstacle_map.size,8), -1, dtype=np.int)\n \n for [x,y] in self.visible_cells:\n index_1D = self.indexFromXY(x,y,self.obstacle_map.shape[1])\n for i, (xpos,ypos) in enumerate(adjacents):\n if 0 <= x+xpos < self.obstacle_map.shape[0] and 0 <= y+ypos < self.obstacle_map.shape[1]:\n if not self.obstacle_map[x+xpos, y+ypos] < 0:\n nb[index_1D, i] = self.indexFromXY(xpos+x, ypos+y, self.obstacle_map.shape[1])\n return nb", "title": "" }, { "docid": "b338602d20022ae37fde0d07b8fba14c", "score": "0.61557966", "text": "def get_neighbors(x, y):\n return[\n (x, y - 1), (x + 1, y), (x - 1, y), (x - 1, y - 1),\n (x, y + 1), (x + 1, y + 1), (x - 1, y + 1), (x + 1, y - 1)\n ]", "title": "" }, { 
"docid": "a48168b025bfb05d13efde67a68fabbb", "score": "0.61122984", "text": "def createObstacles():\n for ob in obs_list:\n pygame.draw.rect(screen, red, pygame.Rect(ob))", "title": "" }, { "docid": "665be9bda3cf36bfb24363aec0b888fd", "score": "0.59382457", "text": "def GetPositions2D(self):\r\n return np.resize(self.positions,(4,2))", "title": "" }, { "docid": "08860687d915a45eacd0be063a273d76", "score": "0.5890589", "text": "def add_obstacles(self, obstacles):\n for obstacle in obstacles:\n x1, y1, x2, y2 = (np.array(obstacle) / self.grid_res).astype(int)\n self.grid_data[x1:x2, y1:y2] = 1.0", "title": "" }, { "docid": "a8b2155e725bf652598d49cae4c975f8", "score": "0.58401555", "text": "def draw_obstacles(self, obstacles, robot_state):\n # type: (list, RobotState) -> None\n points = []\n for position in obstacles:\n point = _global2local_point(position, robot_state.exact_position, robot_state.exact_rotation)\n points.append(point)\n\n self._draw_sphere_list(0, OBSTACLE_NAMESPACE, OBSTACLE_SCALE, points, color=OBSTACLE_COLOR)", "title": "" }, { "docid": "28be91ac12ce86ef03461bb658876b7f", "score": "0.581147", "text": "def neighbor_coords(self,x,y):\n neighbors = []\n if self.in_bounds(x+1, y+1): neighbors.append([x+1,y+1])\n if self.in_bounds(x+1, y): neighbors.append([x+1,y])\n if self.in_bounds(x-1, y-1): neighbors.append([x-1,y-1])\n if self.in_bounds(x-1, y): neighbors.append([x-1,y])\n if self.in_bounds(x-1, y+1): neighbors.append([x-1,y+1])\n if self.in_bounds(x, y+1): neighbors.append([x,y+1])\n if self.in_bounds(x+1, y-1): neighbors.append([x+1,y-1])\n if self.in_bounds(x, y-1): neighbors.append([x,y-1])\n return neighbors", "title": "" }, { "docid": "36f12c6bae039b83fd995eb5b47e89ff", "score": "0.58062243", "text": "def findNeighbours(x,y,img):\n n_img = img\n x_1, y_1, x1, y1 = x-1, y-1, x+1, y+1;\n return [ n_img[x_1][y], n_img[x_1][y1], n_img[x][y1], n_img[x1][y1], n_img[x1][y], n_img[x1][y_1], n_img[x][y_1], img[x_1][y_1] ]", "title": "" }, { "docid": "8d7b14dedce791e218b0a273888c34c2", "score": "0.5802935", "text": "def _neighbours(self, y, x):\r\n neighbours = []\r\n for dy in range(-1, 2):\r\n for dx in range(-1, 2):\r\n if dy == 0 and dx == 0:\r\n continue\r\n else:\r\n if self._valid_position(y+dy, x+dx):\r\n neighbours.append((y+dy, x+dx))\r\n return neighbours", "title": "" }, { "docid": "722c12cb2d88e712f700a6b9785402c6", "score": "0.579285", "text": "def getNeighborhood(self):\n neighborhood = \"[\"\n for ry in xrange(-self.radius,self.radius+1,1):\n for rx in xrange(-self.radius,self.radius+1,1):\n if ((ry != 0) or (rx != 0)):\n if (self.neighborhood_select):\n if ((abs(rx) + abs(ry)) <= self.radius):\n arg = \"\".join((\"scratchgrid[rr+\",str(ry),\",cc+\",str(rx),\"],\"))\n neighborhood += \"\".join(arg)\n else:\n pass\n elif (not self.neighborhood_select):\n arg = \"\".join((\"scratchgrid[rr+\",str(ry),\",cc+\",str(rx),\"],\"))\n neighborhood += \"\".join(arg) \n neighborhood = neighborhood[:-1]\n neighborhood += \"\".join(\"]\")\n return neighborhood", "title": "" }, { "docid": "6ce0f2960ed091d59184bd0553c62d73", "score": "0.5735862", "text": "def find_obstacles(puzzle) -> List[List[int]]:\n for subset in puzzle:\n global side1, side2, side3\n side1 = []\n side2 = []\n side3 = []\n solution = solve(0, subset)\n if not solution:\n return subset\n return []", "title": "" }, { "docid": "3a5855ca4bde28425315368df460d98d", "score": "0.57353014", "text": "def voisins(pos, obstacles, taille):\r\n res = []\r\n x, y = pos\r\n for i, j in [(0,1),(0,-1),(1,0),(-1,0)]:\r\n if 
((x + i,y + j) not in obstacles) and (x + i) >= 0 and (x + i) < taille[0] and (y + j) >= 0 and (y + j) < taille[1]:\r\n res.append((x+i, y + j))\r\n return res", "title": "" }, { "docid": "8acd5edd26f033a0f8bbfcc6513de961", "score": "0.57002795", "text": "def coord2d(x,y):\n return np.array([x,y])", "title": "" }, { "docid": "7e6a568b8e5248964709a075088d1786", "score": "0.5696858", "text": "def obstacles_between(self, ship, target, ignore=()):\n obstacles = []\n excl = [ship, target]\n entities = ([] if issubclass(Planet, ignore) else self.all_planets(exclude=excl)) \\\n + ([] if issubclass(Ship, ignore) else self.all_ships(exclude=excl))\n for foreign_entity in entities:\n if collision.intersect_segment_circle(ship, target, foreign_entity, fudge=ship.radius + 0.1):\n obstacles.append(foreign_entity)\n return obstacles", "title": "" }, { "docid": "1bbbe8597e7f7a2affd14d80f986ec10", "score": "0.56554735", "text": "def return_neighborhoods_coordinates():\r\n return neighborhoods_coordinates", "title": "" }, { "docid": "3a98c3a6c5a23dc3583693ae58219517", "score": "0.56311774", "text": "def create_grid(width, height):\n return [[0 for _x in range(width)] for _y in range(height)]", "title": "" }, { "docid": "9eb3e2e6703bb0657c870969a6124a53", "score": "0.5616334", "text": "def get_neighbours(self, coordinates, img):\n nrows = len(img)\n ncols = len(img[0])\n r = coordinates[0]\n c = coordinates[1]\n to_return = [\n [r - 1, c],\n [r + 1, c],\n [r, c - 1],\n [r, c + 1]\n ]\n return [t for t in to_return if 0 <= t[0] < nrows and 0 <= t[1] < ncols]", "title": "" }, { "docid": "96ff6d6f33926296012fa87e1f8870f4", "score": "0.56038684", "text": "def add_obstacles(self, obs) -> None:\n for o in obs:\n # Convert each bound to xy coordinate\n # Forward converts from latlon to cartesian\n x, y, *_ = self.cartesian.forward(o[\"latitude\"], o[\"longitude\"])\n\n # Add each point to the list and the graph\n obi = Obstacle(Point(x, y, self.alt_bounds[0]), o[\"radius\"], o[\"height\"])\n self.obstacles.append(obi)\n for n in obi.points():\n p = Point(*n)\n if self.boundary_poly.contains(p):\n self.graph.add_node(n)", "title": "" }, { "docid": "7fcf712b9bbeef93e45556a43338e66f", "score": "0.5597686", "text": "def visible_vertices(p, visible_obstacles):\n V = []\n\n for obstacle in visible_obstacles:\n for vertex in [v[0] for v in obstacle.lines]:\n if line_is_unblocked(p, vertex, visible_obstacles):\n V.append(vertex)\n\n return V", "title": "" }, { "docid": "950907efec3f54bb248e0cd42ad73f30", "score": "0.55852616", "text": "def array_coords(shape):\n\ty = shape[0]\n\tx = shape[1]\n\tout = scipy.empty((2,y,x))\n\tt = scipy.arange(y,dtype='f8')\n\tout[0] = scipy.tile(t,(x,1)).T\n\tt = scipy.arange(x,dtype='f8')\n\tout[1] = scipy.tile(t,(y,1))\n\treturn out", "title": "" }, { "docid": "bc6a82d54045e6261a88309960cef190", "score": "0.5583301", "text": "def living_cells(self):\n cells = [(i,j,self.grid[i][j]) for i in range(self.size) for j in range(self.size) if self.grid[i][j] != 0]\n return zip(*cells)", "title": "" }, { "docid": "eebb92d0e81d3bd242ae8a0fa66cb64d", "score": "0.55758065", "text": "def get_neighbours(self,x_coord, y_coord):\n\t\treturn [(x_coord - 1, y_coord - 1), (x_coord, y_coord - 1), (x_coord + 1, y_coord - 1), \\\n\t\t\t(x_coord - 1, y_coord), (x_coord + 1, y_coord), \\\n\t\t\t(x_coord - 1, y_coord + 1), (x_coord, y_coord + 1), (x_coord + 1, y_coord + 1)]", "title": "" }, { "docid": "7b1fb962f32356f524ad2d0879e3619b", "score": "0.5570699", "text": "def get_new_position(obstacles, x_bounds, 
y_bounds):\n while True:\n x = randint(0, x_bounds)\n y = randint(0, y_bounds)\n\n if not geom.point_in_any_obstacle(geom.Point(x, y), obstacles):\n return geom.Point(x, y)", "title": "" }, { "docid": "21e6a90c346db6d339f62ea5fb6f6838", "score": "0.55528706", "text": "def _determine_cell_neighbours(self, pos):\n x = pos[0]\n y = pos[1]\n\n return [\n (x + 1, y), (x + 1, y + 1), (x, y + 1), (x - 1, y + 1), (x - 1, y),\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1)\n ]", "title": "" }, { "docid": "a966d58caeb8a7aaeac8f264ce79f89b", "score": "0.55523187", "text": "def get_available_tiles(self):\r\n self._coordinates = []\r\n available_tiles = []\r\n row = -1\r\n \r\n for segment in self._grid:\r\n temp_coordinates = []\r\n col = -1\r\n row += 1\r\n \r\n for tile in segment:\r\n col += 1\r\n temp_coordinates.append([row,col])\r\n \r\n if tile == 0:\r\n available_tiles.append([row,col])\r\n self._coordinates.append(temp_coordinates)\r\n \r\n return available_tiles", "title": "" }, { "docid": "39d473ea60e1e7374f6108e1507f799c", "score": "0.55498344", "text": "def neighbors(board, i, j):\n return [board[j - 1][i], board[j + 1][i], board[j][i + 1], board[j][i - 1]]", "title": "" }, { "docid": "99391287badbe6eea36e36d0fa8724c3", "score": "0.5535299", "text": "def get_dead_board(self):\n return [[0 for x in range(self.width)]\n for x in range(self.height)]", "title": "" }, { "docid": "b3c2ddf5f7225ebfe07107ac53f9dd7b", "score": "0.5525028", "text": "def find_path(self, obstacles, start, end):\n # type: (list, (float, float), (float, float)) -> list\n return self._a_star(obstacles, position2grid(start), position2grid(end))", "title": "" }, { "docid": "9be7b4b935e618b98c64dd49dc065a68", "score": "0.5506782", "text": "def draw_map(self):\n for i in range(self.N + 1):\n self.cells.append([])\n for j in range(1, self.N):\n rect(self.screen, (0, 0, 0), (i * self.cell_size, j * self.cell_size, self.cell_size, self.cell_size),\n 2)\n self.cells[i].append([i * self.cell_size, j * self.cell_size, 0])\n self.draw_walls()\n return self.cells", "title": "" }, { "docid": "176ef192ec4ebd700a29c8026d7d83b6", "score": "0.54961437", "text": "def new_game():\n return np.array([[None] * 3] * 3)", "title": "" }, { "docid": "de42eaec6c23445a3c1603bfe3b63ed5", "score": "0.54925996", "text": "def detect_obstacles_while_moving(self):\n if self.state == self.STATE_EXPLORATION_RIGHT:\n if any(np.array(self.front_list < self.SCANNER)):\n\n # Get ids in list where obstacle near\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n # convert ids into y coordinate\n y_obstacles = self.y_before_step - idx_list[0]*9/100*self.STEP\n scan = np.array(self.front_list)\n # calculate x coordinate of obstacles basted on sensor value\n x_obstacles = self.x + scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n\n elif self.state == self.STATE_EXPLORATION_LEFT:\n if any(np.array(self.front_list < self.SCANNER)):\n\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n y_obstacles = self.y_before_step + idx_list[0]*9/100*self.STEP\n scan = np.array(self.front_list)\n x_obstacles = self.x + scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n\n elif self.state == self.STATE_FORWARD:\n if any(np.array(self.left_list < self.SCANNER)):\n\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n x_obstacles = self.x_before_step + idx_list[0] * 9 / 100 * self.STEP # step when going forward\n scan = np.array(self.front_list)\n y_obstacles = self.x + scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n\n if 
any(np.array(self.right_list < self.SCANNER)):\n\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n x_obstacles = self.x_before_step + idx_list[0] * 9 / 100 * self.STEP # step when going forward\n scan = np.array(self.front_list)\n y_obstacles = self.x - scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n else:\n pass", "title": "" }, { "docid": "89848869c8b927f7d71b7b67f98859c1", "score": "0.548818", "text": "def obstacles_between(ship, target,game_map, ignore_ships = False, ignore_planets= False):\n obstacles = []\n entities = []\n if not ignore_planets :\n entities.extend(game_map.all_planets())\n if not ignore_ships :\n entities.extend(game_map.all_ships())\n for foreign_entity in entities:\n if foreign_entity == ship or foreign_entity == target:\n continue\n if intersect_segment_circle(ship, target, foreign_entity, fudge=ship.radius + 0.1):\n return True\n return False", "title": "" }, { "docid": "715d765c4754e1b6264f6f829777e1e0", "score": "0.5478086", "text": "def uniquePathsWithObstacles(self, obstacleGrid):\n m = len(obstacleGrid)\n n = len(obstacleGrid[0])\n for i in range(n):\n if obstacleGrid[0][i] == 1:\n for j in range(i, n):\n obstacleGrid[0][j] = 0\n break\n else:\n obstacleGrid[0][i] = 1\n\n for i in range(1, m):\n for j in range(n):\n if obstacleGrid[i][j] == 1:\n obstacleGrid[i][j] = 0\n else:\n if j == 0:\n obstacleGrid[i][j] = obstacleGrid[i-1][j]\n else:\n obstacleGrid[i][j] = obstacleGrid[i-1][j] + obstacleGrid[i][j-1]\n return obstacleGrid[m-1][n-1]", "title": "" }, { "docid": "2b216f471f2998ae4ec097e921ba15be", "score": "0.5475305", "text": "def matrix_neighbors(row, col, width, height):\n return [(row + i, col + j) for i in (-1, 0, 1) for j in (-1, 0, 1) if 0 <= row + i < width and 0 <= col + j < height]", "title": "" }, { "docid": "00e590855875db5cb6f9c291004724d1", "score": "0.54677457", "text": "def is_hit_obstacle(self, x, y):\n x = (x / self.config.grid_reso).astype(int)\n y = (y / self.config.grid_reso).astype(int)\n # assuming [0, 0] is always an obstacle because of the boundary\n return self.obstacles[x * ~self.is_out_of_boundary(x, y), y * ~self.is_out_of_boundary(x, y)]", "title": "" }, { "docid": "df392214fec4134c6fe7720b4fff92e4", "score": "0.545878", "text": "def get_locations(percepts, obstacles):\n possible_locations = []\n possible_starting_points = []\n unique_points = []\n\n # TODO We should add a check to make sure we don't consider\n # any points inside an obstacle\n for obstacle in obstacles:\n for line in obstacle.lines:\n possible_starting_points.append(geom.Point((line[0].x - percepts[0].x),\n (line[0].y - percepts[0].y)))\n unique_points.append(line[0])\n\n for v in possible_starting_points:\n # Create a new point from v (possible agent start position) and percepts.\n # If all new points are actual vertice in on obstacle, v is valid\n valid_point = True\n\n for percept in percepts:\n vertex = geom.Point((v.x + percept.x), (v.y + percept.y))\n\n if not any([vertex == p for p in unique_points]):\n valid_point = False\n break\n\n if valid_point:\n possible_locations.append(v)\n\n return possible_locations", "title": "" }, { "docid": "6796c680ff8ce7cb0ec393e40c6b9f02", "score": "0.5443998", "text": "def game_board():\r\n board = np.zeros((rows, columns))\r\n return board", "title": "" }, { "docid": "c6bd69a1de3d69b37e1914b0fa0b070f", "score": "0.5442009", "text": "def nested_coordinates_list(self):\n self._nested_list = []\n map_size = self.rossumoya.map_size\n x, y = map_size\n for x_index in range(x):\n 
self._nested_list.append([])\n for _ in range(y):\n self._nested_list[x_index].append(None)\n return self._nested_list", "title": "" }, { "docid": "d23198ec76574ba6bee9d5055f462599", "score": "0.5438812", "text": "def new_board():\n return [[EMPTY for _ in range(NROWS)] for _ in range(NCOLS)]", "title": "" }, { "docid": "e36f51ee15182f0618f30d545e4dada7", "score": "0.54108936", "text": "def neighbor_cells(arr, coords):\n ndim = arr.ndim\n shape = arr.shape\n return [arr[coords[:d] + (coords[d] + 1,) + coords[d + 1:]]\n for d in range(ndim) if coords[d] + 1 < shape[d]] + \\\n [arr[coords[:d] + (coords[d] - 1,) + coords[d + 1:]]\n for d in range(ndim) if coords[d] > 0]", "title": "" }, { "docid": "f9b9846c09ba117cab379e74958eb571", "score": "0.54068756", "text": "def get_2D_grid(start_point, end_point, spacing):\n \n x, y = numpy.mgrid[start_point.x:end_point.x:spacing,\n start_point.y:end_point.y:spacing]\n\n x = x.ravel(); y = y.ravel()\n\n return x, y", "title": "" }, { "docid": "818d68e149ce40fccc04bebc7596afc1", "score": "0.54047805", "text": "def visited_board_16x16() -> List[List[int]]:\r\n return [\r\n [-1, -1, -1, -1],\r\n [-1, -1, -1, -1],\r\n [-1, -1, -1, -1],\r\n [-1, -1, -1, -1]\r\n ]", "title": "" }, { "docid": "2a36047ab1aa9e6215234dd8ba20f20e", "score": "0.5374832", "text": "def actions(state):\n return visible_vertices(state, env.visible_obstacles)", "title": "" }, { "docid": "9322f5312cc3ec3fedbc43255e6234fa", "score": "0.537227", "text": "def coords():\n return np.array(\n [[39, 6, -32], [29, 40, 1], [-20, -74, 35], [-29, -59, -37]]\n )", "title": "" }, { "docid": "38a994bc7e87de1533f5ae67549f7432", "score": "0.5362488", "text": "def get_traffic(self, format=0):\n \n trafficMap = np.zeros(self.obstacle_map.size,dtype=np.int)\n\n # Ground cells\n vCells = np.argwhere(self.obstacle_map.flatten()==0).flatten()\n \n # Update traffic map\n for k in vCells:\n self.get_cell_traffic(k, trafficMap)\n \n if format == 0:\n return trafficMap\n elif format == 1:\n return np.reshape(trafficMap, self.obstacle_map.shape)", "title": "" }, { "docid": "2d9c61b5f44275466c9ad168f53d417f", "score": "0.5361387", "text": "def get_neighbours(coords):\n\n dxdy = [(-1, -2), (0, -2), (1, -2), (-2, -1), (-1, -1), (0, -1), (1, -1), (2, -1),\n (-2, 0), (-1, 0), (1, 0), (2, 0), (-2, 1), (-1, 1), (0, 1), (1, 1), (2, 1),\n (-1, 2), (0, 2), (1, 2), (0, 0)]\n\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours", "title": "" }, { "docid": "8e361cd5d0ef02f166bc75ee3e1d8650", "score": "0.53555095", "text": "def get_neighbours(self, src):\r\n directions = [(0, -1), (1, 0), (0, 1), (-1, 0)]\r\n neighbours = []\r\n for dx,dy in directions:\r\n x = src[0] + dx\r\n y = src[1] + dy\r\n if x in range(self.size) and y in range(self.size):\r\n neighbours.append((x, y))\r\n return sorted(neighbours)", "title": "" }, { "docid": "c76b37f610a3509150e9518a964ef7fa", "score": "0.5353335", "text": "def create_vertical_roadmap(self, from_: Position, to: Position) -> list[Cell]:\n\n y_shift = to.y - from_.y\n assert (y_shift > 0 or y_shift < 0), \"unchecked path\"\n y_step = 1 if y_shift > 0 else -1\n x = from_.x\n roadmap = [self._board[y][x]\n for y in 
range(from_.y, to.y + y_step, y_step)]\n return roadmap[1:]", "title": "" }, { "docid": "5d9a4eefee39433d9de8c4c66c649f52", "score": "0.5353084", "text": "def get_tile_neighbours(self, x, y):\n res = list()\n for key in cons.DIRECTIONS:\n tup = cons.DIRECTIONS.get(key)\n res.append(self.get_tile_neighbour(x, y, tup))\n return res", "title": "" }, { "docid": "bd3ec523a398d964588e22580ac6bf84", "score": "0.53426754", "text": "def neighbours(self, currTile):\n X = currTile[0]\n Y = currTile[1]\n l = [(x,y) for x in range(X-1, X+2) for y in range(Y-1, Y+2) if 0 <= x < self.config.N if 0 <= y < self.config.N if (x != X or y != Y)]\n return l", "title": "" }, { "docid": "9c1efdc4c72eb7fd2e76ca851d377d97", "score": "0.5339586", "text": "def get_neighbours(self, square: int) -> [int]:\n x, y = square % self.cols, square // self.rows\n neighbours = [(p[0] + x, p[1] + y) for p in [(0, -1), (-1, 0), (0, 1), (1, 0)]] \n return [n[0] + n[1] * self.cols for n in neighbours if 0 <= n[0] < self.cols and 0 <= n[1] < self.rows]", "title": "" }, { "docid": "ad4d5a806800463852f6b8986ce47f30", "score": "0.5322932", "text": "def create_grid_2_5d(data, safe_distance):\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min + 1)))\n east_size = int(np.ceil((east_max - east_min + 1)))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n obstacle = [\n int(np.clip(north - d_north - safe_distance - north_min, 0, north_size - 1)),\n int(np.clip(north + d_north + safe_distance - north_min, 0, north_size - 1)),\n int(np.clip(east - d_east - safe_distance - east_min, 0, east_size - 1)),\n int(np.clip(east + d_east + safe_distance - east_min, 0, east_size - 1)),\n ]\n obs = grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1]\n np.maximum(obs, np.ceil(alt + d_alt + safe_distance), obs)\n\n return grid, int(north_min), int(east_min)", "title": "" }, { "docid": "ad2632666a072c8d24eda89c5292e835", "score": "0.5317036", "text": "def create_knight_roadmap(self, from_: Position, to: Position) -> list[Cell]:\n\n return [self._board[to.y][to.x]]", "title": "" }, { "docid": "daecb0b845e234930686b2e49d98639a", "score": "0.5313709", "text": "def __init__(self):\n\n ##2D array of Tile objects\n self.board = []", "title": "" }, { "docid": "c935410154d1f2d0f466f0d6f730fcc9", "score": "0.53093815", "text": "def make_locs(n, m) -> NDArray:\n return np.array([[i, j] for i in range(n) for j in range(m)])", "title": "" }, { "docid": "f84cecad7f66235e3f432dfc9567d760", "score": "0.5304875", "text": "def _emptyMaze(self, width: int, height: int) -> list:\n return [[Cell() for x in range(width)] for y in range(height)]", "title": "" }, { "docid": "1c47c86de7666745948015c58bc5dc49", "score": "0.5303785", "text": "def getNeighbours(image,i,j):\n nbg = []\n for k in np.arange(i-1, i+2):\n for l in np.arange(j-1, j+2):\n try:\n nbg.append(image[k,l])\n except Exception as e:\n pass\n return np.array(nbg)", "title": "" }, { "docid": "e33c6c29202fc171158fb68e028ea698", "score": "0.5295464", "text": "def 
extracted_array_2d_from(\r\n array_2d: np.ndarray, y0: int, y1: int, x0: int, x1: int\r\n) -> np.ndarray:\r\n\r\n new_shape = (y1 - y0, x1 - x0)\r\n\r\n resized_array = np.zeros(shape=new_shape)\r\n\r\n for y_resized, y in enumerate(range(y0, y1)):\r\n for x_resized, x in enumerate(range(x0, x1)):\r\n if (\r\n y >= 0\r\n and x >= 0\r\n and y <= array_2d.shape[0] - 1\r\n and x <= array_2d.shape[1] - 1\r\n ):\r\n resized_array[y_resized, x_resized] = array_2d[y, x]\r\n\r\n return resized_array", "title": "" }, { "docid": "34e282ff6a2c1986891b60289effe7e1", "score": "0.5289425", "text": "def neighbors(self, cell):\n i, j = self._world_to_map(cell[0], cell[1])\n neigh = []\n if self._is_open(i-1,j):\n neigh.append((self._map_to_world(i-1,j)))\n if self._is_open(i+1,j):\n neigh.append((self._map_to_world(i+1,j)))\n if self._is_open(i,j-1):\n neigh.append((self._map_to_world(i,j-1)))\n if self._is_open(i,j+1):\n neigh.append((self._map_to_world(i,j+1)))\n # rounding done in map_to_world too but this seems to help\n neigh = [(round(x,2), round(y,2)) for x,y in neigh]\n return neigh", "title": "" }, { "docid": "31772bf4307dd3eb29112032613d6497", "score": "0.5288454", "text": "def neighbours(self, board, pos):\n res = []\n x,y = pos[0], pos[1]\n I = x-1\n J = y-1\n\n for i in range(I, I+3):\n for j in range(J, J+3):\n if i < 0 or j < 0 or i > 4 or j > 4 or (i,j) == pos:\n continue\n else:\n if self.canCapture(board, (x, y), (i,j)) :\n res.append((i, j))\n return res", "title": "" }, { "docid": "fa1d04456022e8275a042b12c1d5e236", "score": "0.5269025", "text": "def cell_list(self):\n final_lst = [] # this will be the returned list\n for row in range(0, len(self.board)):\n for col in range(0, len(self.board)):\n final_lst.append((row, col))\n final_lst.append(self.target_location()) # adds the target location\n # In this board, returns a list containing the cells in the square\n # from (0,0) to (6,6) and the target cell (3,7)\n return final_lst", "title": "" }, { "docid": "41bf0badd86c90949f8db0909f893c21", "score": "0.52671003", "text": "def neighborhood(arr: np.ndarray, pos: tuple, n: int):\n sx, sy = max(0, pos[0] - n), max(0, pos[1] - n)\n ex, ey = min(pos[0] + n + 1, arr.shape[0]), min(pos[1] + n + 1, arr.shape[1])\n return arr[sx: ex, sy: ey]", "title": "" }, { "docid": "5c86b099b34e3cb435e1a5ed81cdcfc3", "score": "0.52669704", "text": "def vector_2d():\n return [0.0, 1.0]", "title": "" }, { "docid": "4f17838913ff5b81f4fad731e6248563", "score": "0.5266566", "text": "def get_neighbouring_coordinates(position):\n row = position[0]\n col = position[1]\n\n neighbours = [\n (row - 1, col),\n (row + 1, col),\n (row, col - 1),\n (row, col + 1)\n ]\n\n return [n for n in neighbours if 0 <= n[0] < len(current_image) and 0 <= n[1] < len(current_image[1])]", "title": "" }, { "docid": "6fcb2315866d9203149f9f134993ed6e", "score": "0.5264142", "text": "def _get_obs(self) -> np.array:\n return self.board", "title": "" }, { "docid": "1e798af90fde4507b5bb756d2d67a5f0", "score": "0.5260177", "text": "def getAvailableTiles(self):\n emptyPos = []\n for x in range(self.size):\n for y in range(self.size):\n if self.tile[x][y] == 0:\n emptyPos.append([x, y])\n return emptyPos", "title": "" }, { "docid": "5b2122bd7efda1395a803bb6b9c9df07", "score": "0.52600324", "text": "def getNeighbors(self):\n return [self.north, self.south, self.east, self.west]", "title": "" }, { "docid": "3a278765f36da44fe426d3790dc0f556", "score": "0.52579004", "text": "def cell_list(self):\n # In this __board, returns a list containing the 
cells in the square\n # from (0,0) to (6,6) and the __target cell (3,7)\n res = [(i, j) for i in range(len(self.__grid))\n for j in range(len(self.__grid))]\n res.append(self.target_location())\n return res", "title": "" }, { "docid": "31d4cb2e911fd013c438d9d9106e8faf", "score": "0.5257545", "text": "def __init__(self, contamination):\n self.rows = len(contamination) + 2\n self.columns = len(contamination[0]) + 2\n self.robots = []\n for i in range(self.rows):\n temp = []\n for j in range(self.columns):\n temp.append(0)\n self.robots.append(temp)\n # everything below this line surrounds the 2-D array with a border of 0's for the bases\n tempList = contamination\n tempList.insert(0, [0] * (self.columns - 2))\n tempList.append([0] * (self.columns - 2))\n for i in range(len(tempList)):\n l = tempList[i]\n l.insert(0, 0)\n l.append(0)\n tempList[i] = l \n self.contamination = tempList", "title": "" }, { "docid": "b2d2955b07ebd0434edc32b5e3d324cd", "score": "0.5249533", "text": "def get_neighbours(self, coordinate, nrows, ncols):\n row = coordinate[0]\n col = coordinate[1]\n\n neighbours = [\n (row - 1, col),\n (row + 1, col),\n (row, col - 1),\n (row, col + 1)\n ]\n\n return [n for n in neighbours if 0 <= n[0] < nrows and 0 <= n[1] < ncols]", "title": "" }, { "docid": "f636ff6ad12395e5c66a551e71881178", "score": "0.52467364", "text": "def make_obstacles_scan(scan_list):\n\t#Convert the scan to a numoy arra of points\n\tpt_ang = np.arange(0,2*np.pi,np.pi/180)\n\tpt_scan = np.array(scan_list)\n\tpts = [] #To be returned cartesian form of scan\n\tpt_x = np.multiply(pt_scan,np.cos(pt_ang))\n\tpt_y = np.multiply(pt_scan,np.sin(pt_ang))\n\n\tfor a,b in zip(pt_x,pt_y):\n\t\tpts.append((a,b))\n\n\tpt_scan = np.array(scan_list)\n\t#Shifting the scan values\n\tpt_scan_prev = np.append(pt_scan[1:],pt_scan[0])\n\t# Taking the absolute difference and comparing with Threshold\n\tline_obst = abs(pt_scan_prev - pt_scan)>2*THRESHOLD\n\tind=np.argwhere(line_obst==True)\n\tind = np.append(0 , ind)\n\tind = np.append(ind, len(scan_list))\n\n\tline_obstacles = []\n\tpt_scan_enum = list(enumerate(scan_list))\n\tfor i in range(len(ind)-1):\n\t\tline = [(pt[0] , pt[1]) for pt in pts[ind[i]+1:ind[i+1]+1]]\n\t\tline_obstacles.append(line)\n\n\treturn (line_obstacles , pts)", "title": "" }, { "docid": "948878e944fe674097ffbb433226a4a1", "score": "0.5244176", "text": "def find_neighbourhood(row, col, layers):\n neighbourhood = []\n for i in range(6):\n if check_boundary(row - 1, col, layers) and i == 0:\n neighbourhood.append((row - 1, col))\n elif check_boundary(row + 1, col, layers) and i == 1:\n neighbourhood.append((row + 1, col))\n elif check_boundary(row, col - 1, layers) and i == 2:\n neighbourhood.append((row, col - 1))\n elif check_boundary(row, col + 1, layers) and i == 3:\n neighbourhood.append((row, col + 1))\n elif check_boundary(row - 1, col + 1, layers) and i == 4:\n neighbourhood.append((row - 1, col + 1))\n elif check_boundary(row + 1, col - 1, layers) and i == 5:\n neighbourhood.append((row + 1, col - 1))\n\n return neighbourhood", "title": "" }, { "docid": "5d448636e69d1bd5815974889382f7ae", "score": "0.5243242", "text": "def return_bounding_box_2d(x, y, xsize, ysize):\n if xsize <= 0 or ysize <= 0:\n print(\"ERROR: can't compute bounding box, xsize or height has no positive value\")\n return []\n return [x-xsize/2, y-ysize/2, x+xsize/2, y+ysize/2]", "title": "" }, { "docid": "047c1262949f27f69e90db231cbfeb0f", "score": "0.52294093", "text": "def get_random_board(self):\n return [[random.randint(0,1) 
for x in range(self.width)]\n for x in range(self.height)]", "title": "" }, { "docid": "6291f073381e88c37e33c35ead9aaa53", "score": "0.5227485", "text": "def get_grid_observation(self):\n cursor_bit = np.zeros(self.board.grid_size + (1,))\n cursor_bit[tuple(self.cursor)] = 1\n\n n_grid = np.prod(self.board.grid_size)\n population_normalized = np.full(cursor_bit.shape, self.population / n_grid)\n room_normalized = np.full(cursor_bit.shape, self.room / n_grid)\n\n obs = np.concatenate([\n self.board.to_one_hot(self.player_id),\n cursor_bit,\n population_normalized,\n room_normalized\n ], axis=-1)\n return obs", "title": "" }, { "docid": "84302bb268aeb0b1fad6ee1a26958faa", "score": "0.521875", "text": "def detectorResponseMatrix():\n vec = []\n for i in range(GRID_RESOLUTION):\n vec2 = []\n for j in range(GRID_RESOLUTION):\n vec2.append(detectorResponseToCell(i, j))\n vec.append(vec2)\n return vec", "title": "" }, { "docid": "71c8c609dc184d69ba3adf1adb6d50f9", "score": "0.5208937", "text": "def getNeighbors(row,col,i,j):\n neigb = []\n\n for m in [i-1,i,i+1]:\n for n in [j-1,j,j+1]:\n if (not (m == i and j == n)) and row > m >= 0 and col > n >= 0: #within bound\n neigb.append((m,n))\n\n return neigb", "title": "" }, { "docid": "0df036c31880816a2463961d5fe62bf5", "score": "0.51994836", "text": "def createObstacles():\r\n # Select a random number, 0 - 4\r\n r = random.randrange(4)\r\n\r\n # If the number is 0, create a spike obstacle\r\n if r == 0:\r\n obstacles.append(Spike(1200, 415, .25, \"assets/images/obstacles/Spikes.png\", window))\r\n\r\n # If the number is 1, create a creepy crawly obstacle\r\n elif r == 1:\r\n obstacles.append(CreepyCrawly(1200, 415, .25,\"assets/images/obstacles/CreepyCrawly.png\", window))\r\n\r\n # If the number is 2, create a rocket obstacle\r\n elif r == 2:\r\n obstacles.append(Rocket(1200, 340, .25, \"assets/images/obstacles/Rocket.png\", window))\r\n\r\n # If the number is 3, create a space invader obstacle\r\n elif r == 3:\r\n obstacles.append(SpaceInvader(1200, 340, .3, \"assets/images/obstacles/SpaceInvader.png\", window))\r\n\r\n # If the number is, create a monkey obstacle\r\n else:\r\n obstacles.append(Monkey(1200, 310, .5, \"assets/images/obstacles/Monkey.png\", window))", "title": "" }, { "docid": "f9b58c38ae81397e941384fc7abd8210", "score": "0.5197234", "text": "def get_boundary_coords():\n coords = []\n for x in range(NUM_PIX_X):\n for y in range(NUM_PIX_Y):\n if get_chip_coords(x,y)[0] == -1:\n coords.append((x,y))\n \n return coords", "title": "" }, { "docid": "3299d834a1418a142ebdabb82650d850", "score": "0.519159", "text": "def findNeighbours(self, x, y, board=None):\n board, _ = self.setBoardAndPlayer(board)\n\n neighbours = []\n # min/max take care of the edge cases\n for i in range(max(0, x-1), min(x+2, self.spanX)):\n for j in range(max(0, y-1), min(y+2, self.spanY)):\n if board[i][j] != BLANK:\n neighbours.append((i, j))\n return neighbours", "title": "" }, { "docid": "4e5a9d3eb5d66cef6ca767308b8b31d2", "score": "0.5182894", "text": "def getBoardArray(self):\r\n a = zeros(self.outdim)\r\n for i, p in enumerate(self._iterPos()):\r\n if self.b[p] == self.WHITE:\r\n a[2 * i] = 1\r\n elif self.b[p] == self.BLACK:\r\n a[2 * i + 1] = 1\r\n return a", "title": "" }, { "docid": "4e5a9d3eb5d66cef6ca767308b8b31d2", "score": "0.5182894", "text": "def getBoardArray(self):\r\n a = zeros(self.outdim)\r\n for i, p in enumerate(self._iterPos()):\r\n if self.b[p] == self.WHITE:\r\n a[2 * i] = 1\r\n elif self.b[p] == self.BLACK:\r\n a[2 * i + 1] = 1\r\n return 
a", "title": "" }, { "docid": "80e8c7eb525fadb65254ebc6c96c60c9", "score": "0.5179019", "text": "def to_arrays(self):\n players = range(self.num_players)\n maps = [self.halite_map]\n for player_id in players:\n ship_dict = self.ships[player_id]\n ship_indicators = np.zeros_like(self.halite_map)\n ship_energy = np.zeros_like(self.halite_map)\n for (x, y), energy in ship_dict.items():\n ship_energy[x, y] = energy\n ship_indicators[x, y] = 1\n maps.extend([ship_energy, ship_indicators])\n for player_id in players:\n factory_loc = self.factory_locs[player_id]\n factory_loc_map = np.zeros_like(self.halite_map)\n factory_loc_map[factory_loc[0], factory_loc[1]] = 1\n maps.append(factory_loc_map)\n for player_id in players:\n dropoff_locs = self.dropoff_locs[player_id]\n dropoff_loc_map = np.zeros_like(self.halite_map)\n for dropoff_loc in dropoff_locs:\n dropoff_loc_map[dropoff_loc[0], dropoff_loc[1]] = 1\n maps.append(dropoff_loc_map)\n map_arr = np.array(maps)\n\n other_state = [self.turns_remaining]\n for player_id in players:\n other_state.append(self.scores[player_id])\n other_arr = np.array(other_state, dtype=np.float64)\n map_arr = np.moveaxis(map_arr, 0, -1)\n assert map_arr.shape == (\n self.map_width,\n self.map_width,\n 9 if self.num_players == 2 else 17,\n )\n return map_arr, other_arr", "title": "" }, { "docid": "d9ccab57a18b890f6947ed0794694f93", "score": "0.517024", "text": "def neigh(Node, step):\n\tx, y = Node[0], Node[1]\n\treturn [[x, y+step], [x, y-step], [x+step, y], [x-step, y]]", "title": "" }, { "docid": "67c0d9a0c0ae6642e6f982423d2b8518", "score": "0.5164656", "text": "def as_list(self, key=True):\n grid_list = []\n\n for x in range(self.width):\n for y in range(self.height):\n if self[x][y] == key:\n grid_list.append((x, y))\n\n return grid_list", "title": "" }, { "docid": "f4b993679c5f5fa463f39a6cea142bd8", "score": "0.51613635", "text": "def _grid(self, coords):\n rows = max(r for (r, _, _) in coords) + 1 if coords != [] else 0\n unpadded_grid = [[p for (r, _, p) in coords if r == row] for row in\n range(rows)]\n return unpadded_grid", "title": "" }, { "docid": "92f43c3d510b9cb2ee21c92cddc1550e", "score": "0.5159394", "text": "def loadObstacles(self,obstaclefile):\n\n # Initiate data structures.\n data = open(obstaclefile,\"r\")\n obs_sizes = [];\n obs_positions = [];\n obs_masses = [];\n reading = \"None\"\n\n # Parse the obstacle text file.\n for line in data:\n linesplit = line.split()\n if linesplit != []:\n if linesplit[0] != \"#\":\n obs_sizes.append([float(linesplit[0]),float(linesplit[1]),float(linesplit[2])])\n obs_positions.append([float(linesplit[3]),float(linesplit[4]),float(linesplit[5])])\n obs_masses.append(float(linesplit[6])) \n\n # Go through all the obstacles in the list and spawn them.\n for i in range(len(obs_sizes)):\n\n obs_size = obs_sizes[i]\n obs_pos = obs_positions[i]\n obs_mass = obs_masses[i]\n \n # Create the obstacle.\n body = ode.Body(self.world)\n geom = ode.GeomBox(space=self.space, lengths=obs_size )\n geom.setBody(body)\n geom.setPosition(obs_pos)\n M = ode.Mass()\n M.setBox(obs_mass,obs_size[0],obs_size[1],obs_size[2])\n body.setMass(M)\n\n # Append all these new pointers to the simulator class.\n self._geoms.append(geom)", "title": "" }, { "docid": "63d42b555366d584f5d111f988865943", "score": "0.5158857", "text": "def get_neighbors(self, x, y):\n neighbors = []\n for y_ in [y - 1, y, y + 1]:\n if y_ < 0 or y_ > self.height - 1: # If Y coordinate is invalid\n continue\n for x_ in [x - 1, x, x + 1]:\n if x_ == x and y_ == y: # If 
center\n continue\n if x_ < 0 or x_ > self.width - 1: # If X coordinate is invalid\n continue\n neighbors.append((x_, y_))\n return neighbors", "title": "" }, { "docid": "1af892c93d788ccbb26b8892dcf935ae", "score": "0.5157673", "text": "def _generate_grid(self):\n return [[self.make_square(row, col) for col in range(self.dim_col)]\n for row in range(self.dim_row)]", "title": "" }, { "docid": "bb6468015a52974d7a263795bb81f899", "score": "0.5153086", "text": "def cell_list(self):\n lst = [(i, j) for i in range(self.BOARD_LENGTH) for j in\n range(self.BOARD_LENGTH)]\n lst.append(self.GOAL_LOCATION)\n return lst", "title": "" }, { "docid": "593493a9e5c93e5ebd28b476e91ad340", "score": "0.51493853", "text": "def neighbors(self, pos, diagonals=False):\r\n x, y = pos\r\n neighbors = [(x + 1, y), (x - 1, y), (x, y - 1), (x, y + 1)]\r\n if diagonals:\r\n neighbors += [(x + 1, y + 1), (x - 1, y + 1), (x + 1, y - 1), (x - 1, y - 1)]\r\n if (x + y) % 2 == 0: neighbors.reverse()\r\n results = filter(self.in_bounds, neighbors)\r\n results = filter(self.no_obstacle, results)\r\n return results", "title": "" }, { "docid": "926358c52e9b65966852b514fcceb712", "score": "0.5146556", "text": "def neighbors8(self, coord):\n (x,y) = coord\n results = [(x+1, y+1), (x+1, y), (x+1, y-1),\n (x, y+1), (x, y-1),\n (x-1, y+1), (x-1, y), (x-1, y-1)]\n results = list(filter(self.in_bounds, results))\n results = list(filter(self.passable, results))\n return results", "title": "" }, { "docid": "bb38c47eec0f3ca2ad2715da8c36114c", "score": "0.51445603", "text": "def generate(self):\n newBoards = []\n\n # For each point\n for p in self.board:\n # Generate all neighboring points\n for newy in range(self.N):\n if newy == p[1]:\n continue\n newp = (p[0], newy)\n if newp not in self.board:\n newboard = self.board[:]\n newboard[newboard.index(p)] = newp\n newBoards.append(newboard)\n\n return newBoards", "title": "" }, { "docid": "e68169fb1fe6eedc1ca858b065a8d956", "score": "0.51392007", "text": "def get_nozzle_positions():\n nozzle_positions = [(0.0)] * n_nozzles\n d_between = boom_length / (n_nozzles - 1)\n for i in range(n_nozzles):\n nozzle_positions[i] = (boom_offset, i * d_between - boom_length / 2, inp['H'] - boom_height)\n return nozzle_positions", "title": "" }, { "docid": "969fa007597b157725a2a56afbb51451", "score": "0.51356924", "text": "def get_movables_tiles(board, player_value=0):\n movable = []\n for x in range(len(board)):\n for y in range(len(board[x])):\n value = board[x][y]\n if is_movable_tile(x, y) and (value == 0 or value == player_value):\n movable.append((x, y))\n\n return movable", "title": "" }, { "docid": "3268ff3c8861c2fa7c275a12b6c862db", "score": "0.5132038", "text": "def hit_box(self) -> list:\n hit_box = []\n\n x_max = self.largeur_screen // LARGEUR_TUILE\n y_max = self.hauteur_screen // HAUTEUR_TUILE\n\n for i in range(0, y_max):\n for j in range(0, x_max):\n x = j * LARGEUR_TUILE\n y = i * HAUTEUR_TUILE\n item = self.niveau[i][j]\n\n if item != \"V\":\n hit_box.append((x, y))\n\n return hit_box", "title": "" } ]
d9b703dfa0713d148772683e9d85d349
Reduce the items in this list to identifiers that can be used to recreate them from scratch. This adds each item to the cache too.
[ { "docid": "b0417d0cd78b920baa1079318656fc4c", "score": "0.69286984", "text": "def _pack_items(self):\n identifiers = tuple(self.identify_items(self))\n cache_keys = self.make_cache_keys(identifiers)\n cache_items = dict(izip(cache_keys, self))\n self.cache.set_many(cache_items, self.cache_timeout)\n return identifiers", "title": "" } ]
[ { "docid": "04f3496398ff04367f08c593ebc8f7c4", "score": "0.71066976", "text": "def _unpack_items(self):\n\n # Prevent the unpack operation from occurring more than once.\n if hasattr(self, '_unpack'):\n delattr(self, '_unpack')\n else:\n return\n\n # The list contains identifiers that will be unpacked into real items.\n # Copy them so they won't be lost when the list values are altered.\n identifiers = self[:]\n\n cache_keys = dict(izip(identifiers, self.make_cache_keys(identifiers)))\n cached_items = self.cache_backend.get_many(cache_keys.values())\n\n items = {}\n missed = []\n for identifier, cache_key in cache_keys.items():\n try:\n item = cached_items[cache_key]\n assert item is not None\n except (AssertionError, KeyError):\n missed.append(identifier)\n else:\n items[identifier] = item\n\n if missed:\n\n # Rebuild the missing items using their identifiers and\n # replace the contents of this list with those new items.\n self[:] = self.rebuild_items(missed)\n\n # Use the pack_items method to add them to the cache and also\n # get back their identifiers. Finally, put the new items into\n # the items dict to be returned at the end.\n found_identifiers = self._pack_items()\n items.update(izip(found_identifiers, self))\n\n # Replace the value of this list with the final result.\n del self[:]\n for identifier in identifiers:\n item = items.get(identifier)\n if item is not None:\n self.append(item)", "title": "" }, { "docid": "fd95c30102e1eb9b54c5b5913727ada8", "score": "0.65366846", "text": "def rebuild_items(self, identifiers):\n raise NotImplementedError", "title": "" }, { "docid": "89d5610230e58fa085046f10e4c06c5f", "score": "0.6319896", "text": "def UpdateIds(self):\r\n removed = set(self.item_itemId.keys()) - set(self.data.keys())\r\n for item in removed:\r\n itemId = self.item_itemId[item]\r\n del self.item_itemId[item]\r\n del self.itemId_item[itemId]", "title": "" }, { "docid": "8aac78c071b4804a50c8441963bb9100", "score": "0.6208331", "text": "def __reduce__(self):\n init_args = (\n self.__class__,\n self._pack_items(),\n )\n if self.cache_timeout:\n init_kwargs = {'cache_timeout': self.cache_timeout}\n else:\n init_kwargs = {}\n return (_unpickle_cached_list, init_args, init_kwargs)", "title": "" }, { "docid": "719aae5ffbb29c6b379af986fa5e37a5", "score": "0.61230433", "text": "def learn_ids(self, item_list):\n self._reset_sequence()\n for item in item_list:\n key = self.nondup_key_for_item(item)\n self.ids[key] = item[self.id_key]", "title": "" }, { "docid": "405476beaecdf6d402cdcc71a0f9e9f5", "score": "0.6076083", "text": "def set_ids(self, item_list):\n self._reset_sequence()\n for item in item_list:\n key = self.nondup_key_for_item(item)\n item[self.id_key] = self.ids.get(key) or self._get_next_id()", "title": "" }, { "docid": "70624459dc155a8c7515bf792e28e123", "score": "0.5991048", "text": "def fillCache(self):\n items = self.source.getRecent()\n items.reverse() # make sure the most recent ones are added last to the cache\n for item in items:\n self.cache.append(item.title)", "title": "" }, { "docid": "0029a129d6ea9529f86fea0bf93e576f", "score": "0.5884281", "text": "def rehash(self):\n old = list()\n # use iteration to record existing items\n for i in range(self.capacity // 2):\n if self.table[i] is not None:\n old.append(self.table[i])\n self.table = self.capacity * [None] # then reset table to desired capacity\n self.size = 0\n for i in old:\n index = self.quadratic_probe(i.key)\n self.table[index] = i\n self.size += 1", "title": "" }, { "docid": 
"d4a8cd859e3895cb5b72bb40991911e7", "score": "0.58541334", "text": "def testRemovingDuplicates(self):\n\n item1 = {KEY: 'one', 'name': 'foo'}\n item2 = {KEY: 'two', 'name': 'bar'}\n item3 = {KEY: 'three', 'name': 'baz'}\n dup_item1 = {KEY: 'one', 'name': 'foo'}\n dup_item2 = {KEY: 'two', 'name': 'qux'}\n\n list_with_duplicates = [item1, item2, item3, dup_item1, dup_item2]\n # duplicate items should not be present in the cached list\n expected_list = [item1, item2, item3]\n\n cached_list_logic.setCacheItems('test_list', list_with_duplicates)\n cached_list = cached_list_model.CachedList.get_by_id('test_list')\n self.assertListEqual(cached_list.list_data, expected_list)", "title": "" }, { "docid": "ec79bf68b3cc9ade46e621262956863b", "score": "0.5630425", "text": "def remove_duplicates_in_items(items: list, id_key: str) -> list:\n ids = {}\n new_items = []\n for item in items:\n item_id = item.get(id_key)\n if item_id not in ids:\n ids[item_id] = True\n new_items.append(item)\n\n return new_items", "title": "" }, { "docid": "2cf41babfc15555765cec21ea4efcf47", "score": "0.5608932", "text": "def Deduplicate(items):\n seen = set()\n for it in items:\n if it not in seen:\n seen.add(it)\n yield it", "title": "" }, { "docid": "6b3f058576245b08a98c4e29c0dbfd46", "score": "0.5596204", "text": "def _hashed_items(self):\n return self", "title": "" }, { "docid": "362eebce24f40328b63fa390e70c0c79", "score": "0.55942404", "text": "def _reset_track_lst(self):\n del self._track_item\n self._track_item = {key : [] for key in self._track_lst}\n return self._track_item", "title": "" }, { "docid": "700cb8953104865f413da2221aa61eb3", "score": "0.55431926", "text": "def OldItems(self) -> _n_1_t_7:", "title": "" }, { "docid": "9cf9511193f5fcbf4050f362f6a20828", "score": "0.55386627", "text": "def collate(items):\n # return batch items as a list\n return items", "title": "" }, { "docid": "981de5cb377c8c0713c7fdd006f2296d", "score": "0.5511309", "text": "def _cache(item_label, item_list):\n id_label = item_label + '_id'\n mbid_label = item_label + '_mbid'\n echonest_id_label = item_label + '_echonest_id'\n items = {}\n for item in item_list:\n key = '/%s/%s' % (item_label, item[id_label])\n items[key] = item\n musicbrainz_id = item.get(mbid_label, None)\n if musicbrainz_id:\n items['/musicbrainz/%s/%s' % (item_label, musicbrainz_id)] = key\n # echonest_id = item.get(echonest_id_label, None)\n # if echonest_id:\n # items['/echonest/%s/%s' % (item_label, echonest_id)] = key\n application.config.get('CACHE').set_many(items)", "title": "" }, { "docid": "c81a69b566689e9502235bf06ce8ab51", "score": "0.54834604", "text": "def clean_up_map(self):\n self.items = [i for i in self.items if i.quantity != 0]", "title": "" }, { "docid": "037eb212aeb81bbc51022678d212b6c4", "score": "0.546661", "text": "def cache_all(self):\n if not self._cached_all:\n poss = range(len(self))\n uuids = self.vars['uuid']\n\n cls_names = self.variables['cls'][:]\n samples_idxss = self.variables['samples'][:]\n subchanges_idxss = self.variables['subchanges'][:]\n mover_idxs = self.variables['mover'][:]\n details_idxs = self.variables['details'][:]\n try:\n input_samples_vars = self.variables['input_samples']\n except KeyError:\n # BACKWARD COMPATIBILITY: REMOVE IN 2.0\n input_samples_idxss = [[] for _ in samples_idxss]\n else:\n input_samples_idxss = input_samples_vars[:]\n\n [self._add_empty_to_cache(*v) for v in zip(\n poss,\n uuids,\n cls_names,\n samples_idxss,\n input_samples_idxss,\n mover_idxs,\n details_idxs)]\n\n 
[self._load_partial_subchanges(c, s) for c, s in zip(\n self,\n subchanges_idxss)]\n\n self._cached_all = True", "title": "" }, { "docid": "768778ac6290dd31ad58bda94e7f950a", "score": "0.5451933", "text": "def uniquify_list(seq, idfun=None):\n\n if idfun is None:\n\n def idfun(x):\n return x\n\n seen = {}\n result = []\n for item in seq:\n marker = idfun(item)\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n return result", "title": "" }, { "docid": "27056edd253e5a0fc73c653f8cf72b50", "score": "0.5417399", "text": "def concat_duplicate_ids(self) -> None:\n # Rebuilt list instead of removing duplicated one at a time at the cost of O(n).\n self.data.clear()\n\n # This implementation takes advantage of the ordering of the duplicated in the __init__ method\n\n has_external_ids = set()\n for ext_id, items in self._external_id_to_item.items():\n if not isinstance(items, list):\n self.data.append(items)\n if items.id is not None:\n has_external_ids.add(items.id)\n continue\n concatenated = DatapointsArray.create_from_arrays(*items)\n self._external_id_to_item[ext_id] = concatenated\n if concatenated.id is not None:\n has_external_ids.add(concatenated.id)\n self._id_to_item[concatenated.id] = concatenated\n self.data.append(concatenated)\n\n if not (only_ids := set(self._id_to_item) - has_external_ids):\n return\n\n for id_, items in self._id_to_item.items():\n if id_ not in only_ids:\n continue\n if not isinstance(items, list):\n self.data.append(items)\n continue\n concatenated = DatapointsArray.create_from_arrays(*items)\n self._id_to_item[id_] = concatenated\n self.data.append(concatenated)", "title": "" }, { "docid": "0f465738f7d88199fb3e96d16f08925f", "score": "0.54072", "text": "def unique_residue_ids(self): \n # Convenience abbreviations.\n identifiers = self.identifiers\n res_ids = self.res_ids\n res_cnt = self.res_cnt \n # Preparing the list of unique residue identifiers.\n # In the end it should be: res_cnt == len(res_ids)-1.\n # The 'elif' line is controlling that only unique\n # identifiers are collected.\n for identifier in identifiers:\n if len(res_ids) == 0:\n # Require 'deepcopy', otherwise constant change\n # of 'res_ids[res_cnt]' with 'identifier'.\n res_ids.append(deepcopy(identifier))\n elif identifier[1] == res_ids[res_cnt][1]: \n pass\n else:\n res_ids.append(deepcopy(identifier))\n res_cnt += 1 \n # Return assignments to object scope.\n self.res_ids = res_ids\n self.res_cnt = res_cnt", "title": "" }, { "docid": "7887abe496fd6fd9c547ffe44eb6ee68", "score": "0.5397846", "text": "def _expand_priority_order(self, id_list):\n res = self.id_priority_list.copy()\n for key in id_list:\n if key not in self.id_priority_list:\n res.append(key)\n return res", "title": "" }, { "docid": "f9d0ec9d39cfb94b73770923cc6880f8", "score": "0.5388973", "text": "def compactify(self):\n logger.debug(\"rebuilding dictionary, shrinking gaps\")\n\n # build mapping from old id -> new id\n idmap = dict(zip(sorted(itervalues(self.token2id)), range(len(self.token2id))))\n\n # reassign mappings to new ids\n self.token2id = {token: idmap[tokenid] for token, tokenid in iteritems(self.token2id)}\n self.id2token = {}\n self.dfs = {idmap[tokenid]: freq for tokenid, freq in iteritems(self.dfs)}", "title": "" }, { "docid": "15cd6d40f564fe8f6db34d783ab295c7", "score": "0.5363779", "text": "def _update_unused(self, key):\n try:\n item_list = self.parent.dataset[key]\n max_id = max(item['id'] for item in item_list) if item_list else 0\n next_id = max(max_id + 1, len(item_list))\n except 
KeyError:\n # The table doesn't exist, so we can use anything\n next_id = 1\n self.unused[key] = next_id", "title": "" }, { "docid": "3a081b6d2f5907a4cca610f268f2754f", "score": "0.53463596", "text": "def reset(self):\n for i in range(self.k):\n self.list[i] = self.dic[i]\n return self.list", "title": "" }, { "docid": "52319bc27440f01d24b5f7f7d990c9bc", "score": "0.5338179", "text": "def update(self):\r\n self.data = [self.make_item_tuple(i) for i in self.query]\r\n self._fetched = True\r\n query_cache.set(self.iden, self.data)", "title": "" }, { "docid": "4426012ea6e97d7223fbe4ca71c77f83", "score": "0.5335431", "text": "def make_cache_keys(self, identifiers):\n\n raise NotImplementedError", "title": "" }, { "docid": "d0455dbf0a4c0f80171edacef755f2b3", "score": "0.5325638", "text": "def _unpickle_cached_list(cls, *args, **kwargs):\n new_list = cls(*args, **kwargs)\n new_list._unpack = True\n return new_list", "title": "" }, { "docid": "3d0878b3d86025381140c859c9f882f7", "score": "0.53181565", "text": "def remove_duplicates(data):\n already_used_items = {}\n return_data = []\n\n for item in data:\n # Yes, I know that I can find used items in the return_data,\n # but HW requires this logic.\n if not already_used_items.get(item):\n return_data.append(item)\n already_used_items[item] = True\n\n return return_data", "title": "" }, { "docid": "6da9f8f02ea1dab3c752f7bc1f152e37", "score": "0.52880013", "text": "def _increase_size(self) -> None:\n keys_vals_to_move = [item for item in self.HashMap if item]\n self.length = 0\n self.capacity = self.capacity * 2\n self.HashMap = [None] * self.capacity\n for item in keys_vals_to_move:\n while len(item) > 0:\n self.add(item[0], item[1])\n item.pop(0)\n item.pop(0)", "title": "" }, { "docid": "75e5962c6bb6e899a567f9886a797905", "score": "0.5285236", "text": "def GetItemsFromIterablesCached(self):\n items = []\n assert len(self.state_id) == len(self.iterables)\n for iter_num, iter_counter in enumerate(self.state_id):\n item = self.history_items[iter_num].get(iter_counter+1, \"unexplored\")\n if item == \"unexplored\":\n try:\n item = self.iterables[iter_num].next()\n except StopIteration:\n item = None\n self.history_items[iter_num][iter_counter+1] = item\n items.append(item)\n return items", "title": "" }, { "docid": "ce8347208ee33e6fd2c938f0509aedf1", "score": "0.52832246", "text": "def clear_cache(self):\n ida_strlist.clear_strlist()", "title": "" }, { "docid": "0a6692985258457ba9a782cf0a6d9198", "score": "0.5281415", "text": "def remove_duplicates_for_fetch(items: list, last_fetched_ids: list) -> list:\n return [\n item\n for item in items\n if item.get('id') and item.get('id') not in last_fetched_ids\n ]", "title": "" }, { "docid": "21583c27bcaf64ae76a948825eacbf04", "score": "0.52663577", "text": "def fill_from_cache(self):\n move_count = min(\n len(self._replacement_cache),\n constants.K - len(self._contacts)\n )\n\n for _ in range(move_count):\n self.add_contact(self._replacement_cache.pop())", "title": "" }, { "docid": "9fdcda24368d43c68d039a458ccd6446", "score": "0.52637416", "text": "def large_train_collection(train_items: List[JSONDict]) -> TrainCollection:\n items = []\n\n item = train_items[0]\n for i in range(3000):\n copy = item.copy()\n copy[\"url\"] = copy[\"url\"].replace(\"post1\", f\"post{i}\")\n items.append(copy)\n\n collection = TrainCollection(items=items)\n return collection", "title": "" }, { "docid": "990715882cbcb3b4c793a97c0ee772f0", "score": "0.5230081", "text": "def clear_flush_lists(self, keys):\r\n 
cache.delete_many(keys)", "title": "" }, { "docid": "a61766dec7c0c946a54fcb5b1631ac45", "score": "0.5229862", "text": "def trim_items(self, items):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\tif self.transactions:\r\n\t\t\tall_items = set.union(*[self.transactions[u][-1] for u in self.transactions.keys()])\r\n\t\telse:\r\n\t\t\treturn items\r\n\t\t\t\r\n\t\ttmp = items.copy()\r\n\t\t\r\n\t\tfor i in items:\r\n\t\t\tif i in all_items:\r\n\t\t\t\tlogger.debug(\"Removing %r\" % i)\r\n\t\t\t\ttmp.remove(i)\r\n\t\t\t\t\r\n\t\tlogger.debug(\"Exit\")\r\n\t\treturn tmp", "title": "" }, { "docid": "e315ec3a484517b576a5a8b4eb6c6880", "score": "0.52169263", "text": "def evict_or_add (self, item):", "title": "" }, { "docid": "c7a06bcf5829018a8e58a369e038a602", "score": "0.521555", "text": "def cache_db_items(self, key, items, item_key='id'):\n db_items = self._extension_data['db_items'].setdefault(key, {})\n for item in items:\n db_items[item[item_key]] = item", "title": "" }, { "docid": "e02d6bcaaa4b546441050e042af93801", "score": "0.52053124", "text": "def untie_everything(self):\r\n self.tied_indices = []", "title": "" }, { "docid": "80a4055d21c6dd78745d80ec67cba51e", "score": "0.5204175", "text": "def remove_duplicate_items(cls, items_in, prior_batch_ids):\n items_out = []\n item_ids = set(prior_batch_ids)\n for item in items_in:\n if item[\"id\"] not in item_ids:\n item_ids.add(item[\"id\"])\n items_out.append(item)\n else:\n continue\n return items_out", "title": "" }, { "docid": "27b93b9260cc5dce0097053577cd0ac5", "score": "0.51884717", "text": "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "title": "" }, { "docid": "914725600bf52a10842be4db18e10034", "score": "0.5184313", "text": "def prune(self): # HashMap.prune\n for hashval, list in self.contentHash.iteritems():\n newlist=[]\n for entry in list:\n if not entry.deleted:\n newlist.append(entry)\n self.contentHash[hashval]=newlist", "title": "" }, { "docid": "8489e3ee6eb773ff17078793c717ffd4", "score": "0.51667583", "text": "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable))) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "title": "" }, { "docid": "312868e6563227ce5e3cbfe5a72b841e", "score": "0.5164763", "text": "def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))", "title": "" }, { "docid": "681a06ca9b01daaf8a4dd09169de8563", "score": "0.5159341", "text": "def _change_objs_to_IDs(self):\n if self.location:\n self.location = self.location.id\n if self.contents:\n self.contents = [obj.id for obj in self.contents]", "title": "" }, { "docid": "59ee1707485a948778f73bcfd255ec48", "score": "0.51530373", "text": "def _hash(self, item):\r\n pass # TODO\r", "title": "" }, { "docid": "f012f5379aad31fe74cd1eae0b390801", "score": "0.51485205", "text": "def _shrink(self):\n self.capacity = round(self.capacity / self.factor)\n temp = [None] * self.capacity\n for i in range(self.capacity):\n temp[i] = self.store[i]\n self.store = temp", "title": "" }, { "docid": "98a5993a5cee476306e6864df5d4e925", "score": "0.51277685", "text": "def dedupe(items):\n seen = set()\n for item in items:\n if item not in seen:\n yield item\n seen.add(item)", "title": "" }, { "docid": "288d9d2a080beb0d9cc12e0cc0d60fbe", "score": "0.5118063", "text": "def remove_duplicates(self):\n names: Dict[str, int] = dict()\n for step in self.Sequence:\n if isinstance(step, Repeater):\n 
continue\n name = step.Name\n if name != '':\n if name not in names:\n names[name] = 1\n else:\n names[name] += 1\n for step in reversed(self.Sequence):\n if isinstance(step, Repeater):\n continue\n name = step.Name\n if name and (names[name] > 1):\n names[name] -= 1\n step.Name = name + \"_%i\" % names[name]", "title": "" }, { "docid": "e42f9fdac48b633a57e1b1ed05b3313e", "score": "0.5107073", "text": "def _update_use(self, key):\n\t\tif (self._replace_pol == Cache.LRU):\n\t\t\tself.cache[key]= self.hashmap[key]\n\t\tif (self._replace_pol == Cache.LRU_S):\n\t\t\tself.cache[key] = self.hashmap[key]", "title": "" }, { "docid": "066452b6998349caf8cfe8c6554ca6f7", "score": "0.51054615", "text": "def removeOldItems(self):\n pass", "title": "" }, { "docid": "0fbe1bb26e47641f1f92e3af341696b8", "score": "0.51044595", "text": "def shuffled_data_reset(self):\n self.unique_data = {}\n for stock in self.stocks:\n self.unique_data[stock] = []\n for date in self.dates:\n self.unique_data[stock] += [date]", "title": "" }, { "docid": "1261f1cd1f279cb3ea82516fa1d986fb", "score": "0.5082867", "text": "def hash_fieldlist(cls):\n for field in cls.fieldlist:\n cls.fieldhash[field.id] = field", "title": "" }, { "docid": "aa1760328e5c1293629bd24921c1bcbf", "score": "0.5079646", "text": "def mixed_train_items(train_items: List[JSONDict]) -> List[JSONDict]:\n train_items[1][\"categoryid\"] = 9107252648\n return train_items", "title": "" }, { "docid": "707f627a09c9f7f6a0e572774bb508e5", "score": "0.50749475", "text": "def __init__(self):\n self.items = []\n self.indexes: Dict[int, Set] = defaultdict(set)", "title": "" }, { "docid": "5cce20cbf558401cc8c1d179de3b56b6", "score": "0.5070577", "text": "def deduped(items):\n \n return list(set(items))", "title": "" }, { "docid": "cfe0fac80127b37b024e0b8dad1328c3", "score": "0.506829", "text": "def reap():\n\n # This might be more efficient by paginating manually and do memcache.get_multi\n for counter in get_all_counters():\n value = memcache.get(counter.key.string_id(), namespace=NAMESPACE)\n # if value is None:\n # continue\n _increment_db(counter, value)\n memcache.decr(counter.key.string_id(), value, namespace=NAMESPACE)\n print 'memcache: %s => %s' % (counter.key.string_id(), memcache.get(counter.key.string_id(), namespace=NAMESPACE))", "title": "" }, { "docid": "64a232a9c4b886445179f8217d1dbe63", "score": "0.5066847", "text": "def __init__(self):\n self.ids_seen = set()", "title": "" }, { "docid": "37a39cdbeeb3e27bcc1fb4eb342d6fa1", "score": "0.506516", "text": "def potentials(self, potential_list):\n for item in potential_list:\n item.store()\n potential_list_uuids = [item.uuid for item in potential_list]\n self.set_attribute('potentials', potential_list_uuids)", "title": "" }, { "docid": "105ce627edef9178252460b3bdf9e8c1", "score": "0.5062378", "text": "def map_items(self) -> None:\n self.__attribute_columns = list(self.__DataFrame.columns)\n self.__attribute_columns.remove(self.__surv_col_name)\n self.__attribute_columns.remove(self.__status_col_name)\n\n mapped_int = 0\n\n for attribute in self.__attribute_columns:\n for value in self.__DataFrame[attribute].unique():\n item_reference = (attribute, value)\n self.__item_map[item_reference] = mapped_int\n self.items_list.append(item_reference)\n mapped_int += 1", "title": "" }, { "docid": "d1358b98e41139369442a74ad2a3db1d", "score": "0.5051759", "text": "def expand_id_map(id_map, all_ids):\n\n unmapped_ids = list(set(all_ids).difference(id_map.keys()))\n\n for i in unmapped_ids:\n id_map[i] = i\n\n return id_map", 
"title": "" }, { "docid": "b1668ee6e1df57e8925590d2e042e93e", "score": "0.50455356", "text": "def rehash(self):\n new_cap = self._get_new_capacity() # Choose not to handle the ValueError thrown by _get_new_capacity()\n new_table = HashTable(new_cap) # Create a new hash table directly\n for i in range(self.size):\n if self.keys[i] is not None: # Only put() when there exists a key (no Nones)\n new_table[self.keys[i]] = self.values[i] # Rehash and insert into the new table\n\n self.keys = new_table.keys # Update instance variables\n self.values = new_table.values\n self.size = new_cap # Update N\n self.count_rehashes += 1 # Increment total rehashes", "title": "" }, { "docid": "e3d60e32e91c980445d6d1e66f08977e", "score": "0.5032233", "text": "def __fillCache(self):\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add them to cache\n for rawInfo in infoList:\n modelInfo = _NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))", "title": "" }, { "docid": "f774b8e5bed3aa9569453565162825db", "score": "0.50301826", "text": "def _remove_duplicates(input_list):\n return list(OrderedDict.fromkeys(input_list))", "title": "" }, { "docid": "c2b2f4395e0c3820a7e3587e355598ae", "score": "0.50289893", "text": "def mapper_get_items_init(self):\n if int(self.options.iteration) > 1:\n with open(self.options.f, 'r') as fh:\n self.frequent_items = set(fh.read().splitlines())\n else:\n self.frequent_items = {}", "title": "" }, { "docid": "867ffc91a746827266f061a3c0ae9897", "score": "0.50242746", "text": "def test_unsized(self):\n cache = LRUCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "title": "" }, { "docid": "89ed3790f153ec8739030c451e4af4c3", "score": "0.50208384", "text": "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(next(self._iterable)) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "title": "" }, { "docid": "084dc13ea4b269548970835742e2e48d", "score": "0.501621", "text": "def add(self, item):\n self.update(set([item]))", "title": "" }, { "docid": "761ac9d24d795bc18846babfe10ea551", "score": "0.50095797", "text": "def removeDuplicatesInList(self, data):\n newDataList = []\n for i in data:\n if newDataList.count(i) == 0:\n newDataList.append(i)\n data.clear()\n data += newDataList", "title": "" }, { "docid": "89e4d0319dfeac13114d90db0bfe8255", "score": "0.5009236", "text": "def 
duplicate_train_items(train_items: List[JSONDict]) -> List[JSONDict]:\n train_items[1][\"url\"] = train_items[0][\"url\"]\n return train_items", "title": "" }, { "docid": "66f4c343d416d8f4427b63c0ab29a170", "score": "0.50060844", "text": "def item2id(self):\n if self._item2id is None:\n self._item2id = dict(zip(self.item_unique_vals, range(self.n_items)))\n return self._item2id", "title": "" }, { "docid": "27a87ab4211569e026551ad5c47e9c81", "score": "0.5004679", "text": "def customized_retained_list(self, base_day, day_list=[]):\n auBitmap = self.make_bitmap(base_day, 'dau')\n return [(base_day, auBitmap.count())] + \\\n zip(day_list, self._retained_value(base_day, day_list, 'dau'))", "title": "" }, { "docid": "f03f7c7b4a76609a03dd5702e72a26b7", "score": "0.4989525", "text": "def evict_purged(self, entity_ids: Iterable[str]) -> None:\n for entity_id in entity_ids:\n self._id_map.pop(entity_id, None)", "title": "" }, { "docid": "ad5c6f1d98a8bd7ccf01f8feb7706eed", "score": "0.4983754", "text": "def all_reduce(self):\n return {k: reduce_number(v) for k, v in self.items()}", "title": "" }, { "docid": "f52c5d3224a282b90a85cc7d9a214ff1", "score": "0.49834982", "text": "def purify(l):\n return set([item.replace('B-', '').replace('I-', '') for item in l])", "title": "" }, { "docid": "c8ace0f1a644421dec91924729cb3207", "score": "0.4974587", "text": "def remove_duplicate_urls(seq, id_fun=None):\n\n if id_fun is None:\n def id_fun(x):\n return x\n seen = {}\n result = []\n for item in seq:\n marker = id_fun(item)\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n\n return result", "title": "" }, { "docid": "c33aaffbadb012dec2a4c4231ab05208", "score": "0.49692756", "text": "def setA(cls,*items):\n cls.A = sympy.FiniteSet(*items)\n cls.id2items= {}\n num = 0\n for i in cls.A:\n cls.id2items[num] = i\n num = num + 1", "title": "" }, { "docid": "264b7a9220432c6e5decdcd8d48cbb3a", "score": "0.4964221", "text": "def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)", "title": "" }, { "docid": "57508bd0a67b85f81e74c3eeb789319b", "score": "0.49561846", "text": "def frequentOneItem(self):\n\n candidate = {}\n # global finalPatterns, minSup, Database\n # self.minSup = self.minSup\n for i in range(len(self.Database)):\n for j in range(len(self.Database[i])):\n if self.Database[i][j] not in candidate:\n candidate[self.Database[i][j]] = [i]\n else:\n candidate[self.Database[i][j]] += [i]\n self.finalPatterns = {keys: value for keys, value in candidate.items() if len(value) >= self.minSup}\n #print(candidate)", "title": "" }, { "docid": "040beceaed5eaf9953636f961ee2c91d", "score": "0.4951346", "text": "def set_count(items):\n item_count = {}\n for item in items:\n if not item: continue\n if not item_count.has_key(item): item_count[item] = 0\n item_count[item] += 1\n \n items = [(v, k) for k, v in item_count.iteritems()]\n items.sort()\n items.reverse()\n \n return [(k, v) for v, k in items]", "title": "" }, { "docid": "638c68ccc4591aaa2e1196ce95f7d4a1", "score": "0.4945082", "text": "def __init__(self):\n \n self.items = [] \n self.ind = defaultdict(set) # item -> index into the items array", "title": "" }, { "docid": "ff689f32092e5d3a998c3112f1318c1b", "score": "0.49444813", "text": "def add_items_quantity_not_duplicates(request):\n all_items_no_duplicates = []\n\n for loop_index, item in enumerate(all_shopping_items(request)):\n item_dict = {\n 'item': item.item,\n 'quantity': item.quantity,\n 
'category': item.category.category,\n 'id': item.id,\n 'user': {\n 'username': item.user.first_name\n }\n }\n\n if loop_index == 0:\n all_items_no_duplicates.append(item_dict)\n else:\n item_is_not_a_copy = True\n for list_item in all_items_no_duplicates:\n if list_item['item'] == item.item:\n item_is_not_a_copy = False\n list_item['quantity'] += item.quantity\n list_item['user']['username'] += ' / ' + item.user.first_name\n if item_is_not_a_copy:\n all_items_no_duplicates.append(item_dict)\n\n return all_items_no_duplicates", "title": "" }, { "docid": "c3a9248edfc9e0f0a3358d588b252bd1", "score": "0.4931224", "text": "def batchListReNumber(oldList, numberChange, untouchedValues):\n import re\n\n newList = copy.deepcopy(oldList)\n\n ## Append'_batchX' with X the smallest original 'Batch' if none already present\n for i in range(len(newList)):\n if (newList[i] not in untouchedValues) & (newList[i].find('_batch') == -1):\n newList[i] = newList[i] + '_batch' + str(min(numberChange.keys()))\n\n ## Update X in all '_batchX' column names (look for previous batch numbers and replace)\n for batchNum in numberChange.keys():\n # exact match with end of string ($)\n query = '.+?(?=_batch' + str(batchNum) + '$)'\n for j in range(len(newList)):\n if newList[j] not in untouchedValues:\n # string without _batchX\n searchRes = re.search(query, newList[j]) # if no match returns None\n if searchRes:\n newList[j] = searchRes.group() + '_batch' + str(numberChange[batchNum])\n\n return newList", "title": "" }, { "docid": "ce56708f97339c7d4a1da6439468eded", "score": "0.4925782", "text": "def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n self.key_tracker.pop(key)\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.most_recent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: self.count})\n self.count += 1", "title": "" }, { "docid": "34dec93b3a8a58d181cae4a79482b051", "score": "0.4925122", "text": "def transform(self):\n\n transactions = self._get_transactions(self._x_transformed)\n self.frequent_items = [set(item) for item in find_frequent_itemsets(\n transactions, minimum_support=self.support_min) if len(item) > 1]\n return self.frequent_items", "title": "" }, { "docid": "466edc1eb51eb08207a6fda46faf4b81", "score": "0.49242643", "text": "def test_unsized(self):\n cache = FIFOCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "title": "" }, { "docid": "218620dd2e34e606cff1fc47df6e4d5f", "score": "0.49182576", "text": "def recache_matches(self):\n matches = (Match.objects\n .filter(Q(winner = self) | Q(loser = self))\n .order_by('-played_time'))[:CACHED_RATING_LIMIT]\n matches = list(matches)\n matches.reverse()\n\n for match in matches:\n self.add_match(match, include_rank=True)\n\n self.save()", "title": "" }, { "docid": "e2bf8a8a350c7fdef133d9654844f1f4", "score": "0.4914266", "text": "def dedupe_list(input):\n return list(set(input))", "title": "" }, { "docid": "11131adb29b2611b8a23b0d18fcc2938", "score": "0.4905418", "text": "def clone(self):\n clone = super(LongObjectHashMap, self).clone()\n clone.clear()\n clone.initialize()\n for key in keySet():\n value = self.get(key)\n clone.put(key, value)\n return clone", "title": "" }, { "docid": "9c5be1e283d19a5418f64bf679f2d77a", "score": "0.48961446", "text": "def remap_ids(self, id_map: Dict[int, int]) -> None:", 
"title": "" }, { "docid": "58964a17a6901bdd1d37d5b11ca713d7", "score": "0.4895037", "text": "def uniq(seq, idfun=None):\n\n if idfun is None:\n def idfun(x): return x\n\n seen = {}\n result = []\n\n for item in seq:\n marker = idfun(item)\n if marker in seen: continue\n seen[marker] = 1\n result.append(item)\n return result", "title": "" }, { "docid": "fa37fb1e9c6b2adb33919384bb1fe233", "score": "0.48905498", "text": "def removeDups(lst):\n\n return list(dict.fromkeys(lst) )", "title": "" }, { "docid": "575fe653444d834d0a8ff1e8d5032ca0", "score": "0.488914", "text": "def distinct(self):\n memory = set()\n\n def _distinct(iterator):\n while True:\n item = next(iterator)\n if item in memory:\n continue\n memory.add(item)\n return item\n return self.__class__(self, _distinct)", "title": "" }, { "docid": "da853c2a8fa9a853613cd06bd71021c7", "score": "0.48863533", "text": "def get_ids(self):\n return [item.id for item in self.items]", "title": "" }, { "docid": "4e18e44efa58bc99dfc299a10a6e15d4", "score": "0.4885305", "text": "def new(self, name=None):\n if not hasattr(self, 'caches'):\n self.caches = []\n thisCache = deque()\n self.caches.append(thisCache)\n k = len(self.caches) - 1\n if name is None:\n name = str(k)\n self.names.append(name)\n return k", "title": "" }, { "docid": "09da322c4f02ec42b23cd82e968aef0d", "score": "0.4883666", "text": "def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))", "title": "" }, { "docid": "d95eb948e044afcfc507c664ed7956e1", "score": "0.4881793", "text": "def rescan(self):\n self.__artists = {}\n self.__artists_by_name = {}\n self.__albums = {}\n self.__tracks = {}\n self.__playlists = {}\n self.__populate_library()", "title": "" }, { "docid": "872baf49f2d4293229cffd1f8c805ebb", "score": "0.48815057", "text": "def rehash(self, new_len):\n print(\"!!!!!!!!!Rehash\")\n oldtable = self.table\n self.table = [set() for x in range(new_len)]\n for numberset in oldtable:\n for number in numberset:\n self.insert(number)\n\n print(\"Rehash-table-print\")\n print(self.table)", "title": "" }, { "docid": "09d5dc2021d377a0faa58508ba07dc66", "score": "0.48813647", "text": "def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return", "title": "" } ]
95246c749a7299f7107f201320e37e0a
Updates the World Object Grid
[ { "docid": "e215fbe7a36102b1ca32dcd123523225", "score": "0.7229946", "text": "def update_world_object(self, Object, x, y):\n self.WOG[x][y] = Object", "title": "" } ]
[ { "docid": "ccb16c16812bbd50b00fe75b0f0a58a2", "score": "0.6663007", "text": "def update(self, player, obj_list):\n for obj in obj_list:\n obj.update(obj_list)\n player.update(self.map.tile_boxes)", "title": "" }, { "docid": "4251b18b88419fdcb5c977011fe47ba1", "score": "0.6646035", "text": "def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)", "title": "" }, { "docid": "49b7d14b764471d2f66ecf4b41ebcbb8", "score": "0.6583626", "text": "def update_grid(self):\r\n\t\tself.get_sim_cells()\r\n\t\t\"\"\"Two loops are used to stop early death or birth of cells.\r\n\t\tThe grid is meant to evolve as a whole each tick,\r\n\t\tand not cell-by-cell.\r\n\t\t\"\"\"\r\n\t\tfor coord in self.sim_cells:\r\n\t\t\tCell.ref_dict[coord].get_new_state()\r\n\t\tfor coord in self.sim_cells:\r\n\t\t\tCell.ref_dict[coord].update_state()\r\n\t\treturn self", "title": "" }, { "docid": "5fc87997f91cb27a844989ed1c7d632d", "score": "0.6409022", "text": "def update(self):\n if self.winter_tiles:\n for i in range(len(self.winter_tiles)):\n self.winter_tiles[i].show(self.game_display)\n self.grid.show_grid(self.game_display) # Show a grid\n self.show_tile_from_matrix('road')\n\n # Sorting of the objects based on y-coordinates for correct depiction\n tmp_objects = self.sort_objects_by_draw_y()\n if tmp_objects:\n self.draw_objects(tmp_objects)\n\n self.coinMatrix.draw(self.game_display, self.grid_size)\n\n # Snowing\n self.snow.update(self.width, self.height)\n self.snow.show(self.game_display)\n\n # Save and load the game\n if self.game_state == 'Save':\n self.save_json()\n elif self.game_state == 'Load':\n self.load_json()\n self.coinMatrix.updateMatrix(self.road_matrix)\n pygame.display.update() # Update of the screen\n self.clock.tick(60)", "title": "" }, { "docid": "e222f5ab4ad18cba94fed235fcc9199a", "score": "0.6276333", "text": "def update_grid(self):\n for var in self.model.variables():\n if var.name[0:1] == 'x':\n coor = int(var.name[1:])\n xCoor = (coor - 1) % self.cols\n yCoor = math.floor((coor - 1) / self.cols)\n if self.grid[yCoor][xCoor] == 0:\n self.grid[yCoor][xCoor] = \"0\"\n continue\n if var.value() == 1:\n self.grid[yCoor][xCoor] = \"x\"\n else:\n self.grid[yCoor][xCoor] = \"1\"", "title": "" }, { "docid": "e30e397a1218d02fb472bef3ebb01a3e", "score": "0.61028606", "text": "def update_grid(self):\n # List of all occupied/visited cells\n all_occupied_cells = [player, grid_goal] + self.myObstacles + self.myRewards + self.visited_cells\n\n # For every coordinate on the board, check whether it is occupied by any one of the above\n # cells by comparing coordinates and x, y indices. If no cell is found, add 0 to that position\n # in the grid indicating an empty cell\n # If a cell is found, add the cell-type to the position in the grid if\n # the cell is not of \"reward\" type. 
If it is of reward type,\n # add its value with a 'plus' in front of it\n for coords in self.get_cell_coordinates():\n i, j = coords\n required_cell = next((cell for cell in all_occupied_cells if (cell.x, cell.y) == coords), None)\n\n if required_cell:\n self.grid[i][j] = required_cell.type if required_cell.type is not 3 else '+' + str(required_cell.value)\n else:\n self.grid[i][j] = 0", "title": "" }, { "docid": "3244199d97f3394cef022740d39ec56c", "score": "0.61010367", "text": "def update(self):\n self.update_physics()\n\n for game_object in self.object_pool:\n game_object.update()", "title": "" }, { "docid": "dc0989774ffb37aeeed717137cb5d66f", "score": "0.6082684", "text": "def update(self, world):\n self.draw(world)\n if self.render:\n pygame.display.update()", "title": "" }, { "docid": "afec1254648b3c31d8c6402468258f13", "score": "0.6040232", "text": "def update( self):\n#\t\tGame._window.update()\n\t\tself.rect.topleft = self.pos\n\n\t\tself.setGrid(0, self.inactiveGrid())\n\t\tfor row in range( self.numRows - 1):\n\t\t\tfor col in range( self.numCols - 1):\n\t\t\t\tgameState_next = self.check_cellNeighbours(row, col)\n\t\t\t\tself.grids[ self.inactiveGrid()][row][col] = gameState_next\n\n\t\tself.activeGrid = self.inactiveGrid()", "title": "" }, { "docid": "478325dded0df68632170ff1dbd59855", "score": "0.60273534", "text": "def edit_grid() :\r\n\r\n #Loading a global variable\r\n global obstacle_coords\r\n\r\n #Checking if the tile is an obstacle or a free tile\r\n if(((row, col) in obstacle_coords) == False) :\r\n #Changing background colour of button\r\n gui_elements[\"maze_grid\"][row][col].config(bg=\"red\")\r\n\r\n #Updating the list of obstructions\r\n obstacle_coords.append((row, col))\r\n\r\n else :\r\n #Changing background colour of button\r\n gui_elements[\"maze_grid\"][row][col].config(bg=\"white\")\r\n\r\n #Updating the list of obstructions\r\n obstacle_coords.remove((row, col))", "title": "" }, { "docid": "009fb730578d942234ab8a8919effcfd", "score": "0.60224444", "text": "def update(self):\n p.setGravity(0, 0, -10.)\n if self.state == 0:\n for envO in self.env_objects:\n c_points = p.getClosestPoints(self.bot.p_id, envO.p_id, 0.001)\n if c_points is not None and len(c_points) > 0:\n self.state = 2\n print(\"CRASH\")\n if self.goal_pos.dist_2d(self.bot.pos) < self.config_.ROB_RADIUS * 1.5:\n print(\"GOAL\")\n self.state = 1\n self.bot.update()\n for o in self.env_objects:\n o.update()", "title": "" }, { "docid": "4826132262c399b4321b97fce27217c0", "score": "0.59522325", "text": "def Update_World(mech, world):\n for func in Update_World.updaters:\n func(mech, world)\n\n update_func_world(mech, world)\n Update_RATE(mech, world)", "title": "" }, { "docid": "f4f61fab96c39e37c01fda6b509e4a2f", "score": "0.5947868", "text": "def update_grid(self, grid):\n if torch.is_tensor(grid):\n grid = convert_legacy_grid(grid)\n\n if len(grid) != self.num_dims:\n raise RuntimeError(\"New grid should have the same number of dimensions as before.\")\n\n for i in range(self.num_dims):\n setattr(self, f\"grid_{i}\", grid[i])\n\n if not self.interpolation_mode:\n self.full_grid = create_data_from_grid(self.grid)\n\n self._clear_cache()\n return self", "title": "" }, { "docid": "20856802fff123ea0dbe44d7f29254f4", "score": "0.59443927", "text": "def update(self):\n for mc in self.monsters:\n mc.update(self)", "title": "" }, { "docid": "3d4ed00f8918f1ca806b65d160ee092e", "score": "0.5938704", "text": "def update_func_world(mech, world):\n globals().update(world)", "title": "" }, { "docid": 
"9076acb5b9f1ea0370fa81f1ee58cf74", "score": "0.59250045", "text": "def test_update_world(self):\n world = World()\n self.assertEqual(world.time, 0)\n\n old_state = []", "title": "" }, { "docid": "95d2971320d743db3ddea25f3cba6342", "score": "0.5903083", "text": "def update_map(self):\n self.generate_walls_from_markers()\n for (id,cube) in self.robot.world.light_cubes.items():\n self.update_cube(cube)\n for face in self.robot.world._faces.values():\n self.update_face(face)", "title": "" }, { "docid": "d2e7d6494257b3418ec5ecbd454a245c", "score": "0.5900022", "text": "def update(self, world: World, events):\n\n is_world_updated = self.previous_world_t != world.t\n if is_world_updated:\n self.previous_world_t = world.t\n\n self._handle_events(events)\n\n self.entities = self._get_entities(world)\n\n self.selected_entity_index %= len(self.entities)\n selected_entity = self.entities[self.selected_entity_index]\n reference_entity = self.entities[self.reference_entity_index]\n self.viewer.draw(world, reference_entity, selected_entity)\n\n if is_world_updated: self.plotter.update(self.entities, reference_entity)\n self.plotter.draw(selected_entity, self.entities)\n\n pygame.display.flip()", "title": "" }, { "docid": "9239c99658343d804abbdc9ef0570628", "score": "0.588836", "text": "def update(self):\n self.dirty_rects, old = [], self.dirty_rects\n self.world.tick()\n self.draw_world()\n\n self.clock.tick(FRAMERATE)\n pygame.display.update(self.dirty_rects + old)\n # print(self.world)\n print(self.world.ticks)", "title": "" }, { "docid": "e8d1fc7dbc63dbd6bb5049043c2d121b", "score": "0.58847094", "text": "def updateGlobalCoords(self):\r\n self.globalCoords = [ self.localToGlobalCoords(T) for T in self.localCoords ]\r\n #print 'cx,cy,thd are', self.cx, self.cy, math.degrees(self.thr)\r\n #print 'globalCoords are', self.globalCoords\r", "title": "" }, { "docid": "d676fd3eea594b39042ed82764c5522b", "score": "0.58839756", "text": "def draw_world(self):\n for row in self.world.cells:\n for cell in row:\n if not cell.static:\n self.draw_cell(cell)", "title": "" }, { "docid": "7a3110e49ab2d94bbf724b47267bf193", "score": "0.5855152", "text": "def update(self):\r\n self.cell.update()\r\n self.receptor_system.update()\r\n self.glucose_system.update()\r\n self.sensor_system.update()\r\n self.protein_system.update()\r\n self.glut_system.update()", "title": "" }, { "docid": "16a9df5df41d9c59d804de27e6a360d1", "score": "0.58381873", "text": "def update_objects(self):\n # Draw obstacles on the screen\n for obstacle in self._obstacles:\n obstacle.draw(self.screen)\n\n # Draw player on the screen\n self.player.draw(self.screen)\n # Draw score widget on the screen\n self.score_widget.draw(self.screen)", "title": "" }, { "docid": "7ba79b058d64e40a68348deb6c2f8925", "score": "0.5811544", "text": "def update(self, couple):\n bbox = couple[1]\n if bbox.x is None or bbox.y is None:\n return\n\n new_pos = set()\n x1 = int(bbox.x // self.grid_size)\n y1 = int(bbox.y // self.grid_size)\n x2 = int((bbox.x + bbox.w) // self.grid_size)\n y2 = int((bbox.y + bbox.h) // self.grid_size)\n\n # New population set\n positions = (x1, y1), (x1, y2), (x1, y2), (x2, y2)\n for pos in positions:\n if (0 <= pos[0] <= self.num_columns\n and 0 <= pos[1] <= self.num_rows):\n new_pos.add(pos)\n\n # Remove from previous locations\n for pos in self._population.get(couple, set()) - new_pos:\n self._grid[pos[0]][pos[1]].discard(couple)\n # Add to new locations\n for pos in new_pos - self._population.get(couple, set()):\n 
self._grid[pos[0]][pos[1]].add(couple)\n\n # Update population\n self._population[couple] = new_pos", "title": "" }, { "docid": "ba6dc86cad96fa6a10cd88c66211ebb6", "score": "0.5792867", "text": "def Update(*args):\r\n return _pynewton.World_Update(*args)", "title": "" }, { "docid": "84aced866f490d2416d8ee5c333b0b6d", "score": "0.577716", "text": "def _update(self):\n self._game.update()", "title": "" }, { "docid": "8c81d718a4c72893a0b317b5b9ce67a4", "score": "0.5762038", "text": "def update_obstacles(self, env_object_info):\n gui_obstacle_name_id = \\\n self.gui_obstacles_idx_to_name.index(env_object_info['name'])\n gui_obstacle_id = self.gui_obstacles_circle_idx[gui_obstacle_name_id]\n new_gui_obstacle_position = self.scale(env_object_info['position'])\n new_gui_obstacle_radius = env_object_info['radius'] * self.draw_scale\n new_gui_obstacle_points = util.get_circle_points(new_gui_obstacle_position,\n new_gui_obstacle_radius)\n self.canvas.coords(gui_obstacle_id, new_gui_obstacle_points)\n\n gui_obstacle_text_id = self.gui_obstacles_texts_idx[gui_obstacle_name_id]\n self.canvas.coords(gui_obstacle_text_id,\n new_gui_obstacle_position[0],\n new_gui_obstacle_position[1])", "title": "" }, { "docid": "ef0934c068db0c31c84e2d3181354575", "score": "0.57311904", "text": "def update(self):\r\n if len(self.collbox_dict) != 0:\r\n for coll_box in self.collbox_dict.values():\r\n coll_box.x = self.x + (coll_box.width // 2) + coll_box.x_offset\r\n coll_box.y = self.y + (coll_box.height // 2) + coll_box.y_offset\r\n coll_box.left_edge = coll_box.x - (coll_box.width // 2)\r\n coll_box.right_edge = coll_box.x + (coll_box.width // 2)\r\n coll_box.top_edge = coll_box.y - (coll_box.height // 2)\r\n coll_box.bottom_edge = coll_box.y + (coll_box.height // 2)\r\n\r\n if self.sprite_dict != None:\r\n for sprite in self.sprite_dict.values():\r\n sprite.x = self.x\r\n sprite.y = self.y", "title": "" }, { "docid": "8833f9340a225f4bf3820c523e3221ad", "score": "0.57147515", "text": "def update(self):\n self._coarse_grid = self._config_dict['coarse_grid']\n self._solution = self._config_dict['solution']\n BR_dict = dict()\n RTf_dict = dict()\n self._interface2dof_map = dict()\n self._global_dof_dimension = 0\n\n for problem_id, local_problem in self._local_problems_dict.items():\n BR_int_dict = dict()\n if local_problem.kernel.size == 0:\n for interface, B in local_problem.B.items():\n BR_int_dict[interface] = np.array([])\n else:\n for interface, B in local_problem.B.items():\n BR_int_dict[interface] = B @ local_problem.kernel\n BR_dict[problem_id] = BR_int_dict\n if local_problem.kernel.size == 0:\n RTf_dict[problem_id] = np.array([])\n else:\n RTf_dict[problem_id] = np.asarray(local_problem.kernel.T @ local_problem.f).flatten()\n for interface, B in local_problem.B.items():\n if interface not in self._interface2dof_map:\n new_global_dof_dimension = self._global_dof_dimension + B.shape[0]\n self._interface2dof_map[interface] = np.arange(self._global_dof_dimension, new_global_dof_dimension)\n self._global_dof_dimension = new_global_dof_dimension\n\n self._coarse_grid.update(BR_dict, RTf_dict, self._interfacedict2vector)", "title": "" }, { "docid": "7737ad53c94eaf262fc7531d4dd0bcab", "score": "0.5705845", "text": "def update_individual_spatial(self, population, row, col):\n\t\tNam = population[1][row][col]\n\t\tNim = population[2][row][col]\n\t\tpopulation[1][row][col], population[2][row][col] = self.mutate_individual(Nam, Nim)\n\t\tpopulation[0][row][col] = self.compute_r(population[1][row][col], 
population[2][row][col])\n\t\treturn(population)", "title": "" }, { "docid": "c5b490707e9d67dad989d77d605cfca8", "score": "0.568819", "text": "def update_square(self, location):\n x, y, z = location\n terrainNode = self.grid.get_node_at(location)\n if terrainNode is not None:\n rect = pygame.Rect(x, y, 1, 1)\n terrainNode.contents.render(rect, self.surface)", "title": "" }, { "docid": "f5e0bcbb560c321a9c2c3c06b97242f7", "score": "0.56843", "text": "def update(self):\n self.OH.update_objects()", "title": "" }, { "docid": "c1b3e677f6173b23b93234a8db829d2d", "score": "0.56799513", "text": "def do_it(self):\r\n \r\n sql = r\"\"\"UPDATE OR IGNORE obs_points SET east=X(Geometry) WHERE obsid IN \"\"\" + self.sqlpart2\r\n utils.sql_alter_db(sql)\r\n sql = r\"\"\"UPDATE OR IGNORE obs_points SET north=Y(Geometry) WHERE obsid IN \"\"\" + self.sqlpart2\r\n utils.sql_alter_db(sql)", "title": "" }, { "docid": "b3b45a222915f6de5d439614b120d82d", "score": "0.5672655", "text": "def update(self, window: 'Window'):\n self._cached_points = self.projected(window)", "title": "" }, { "docid": "b3b45a222915f6de5d439614b120d82d", "score": "0.5672655", "text": "def update(self, window: 'Window'):\n self._cached_points = self.projected(window)", "title": "" }, { "docid": "6e38eed70813e1e19ce2b93dbdfb4816", "score": "0.56643426", "text": "def workspace_update_data(self, event):\n\n self.GetMDIParent().workspace_modified = True\n\n if self.IsMaximized():\n event.Skip()\n return\n\n position = self.GetPosition()\n size = self.GetSize()\n\n self.workspace_data['x'] = position[0]\n self.workspace_data['y'] = position[1]\n self.workspace_data['width'] = size[0]\n self.workspace_data['height'] = size[1]\n\n event.Skip()", "title": "" }, { "docid": "5636a9689d4bb52d534c505dec324299", "score": "0.5657264", "text": "def update_grid(self):\n for m in self.messages:\n message = self.load_message(m)\n self.grid[message['position']]['agent'] = message['name']", "title": "" }, { "docid": "c5aee62bcb611d9817ed5abf5792499d", "score": "0.56422895", "text": "def updateGrid(array, community):\r\n \r\n #shuffle grid along both axes\r\n np.apply_along_axis(np.random.shuffle, 1, array)\r\n np.random.shuffle(array)\r\n \r\n #update locations of individuals\r\n getLoc = lambda x : (x // array.shape[0], x % array.shape[1])\r\n r = array.ravel()\r\n for i in range(array.size):\r\n community.people[r[i]].updateLoc(getLoc(i))\r\n \r\n return array", "title": "" }, { "docid": "cd05cc3bca07284a8666c32569b3ab00", "score": "0.5627794", "text": "def update(self):\n # This could use a refactor, but it's pointless to refactor before we\n # know what all the controls should be.\n\n if pyxel.btnp(pyxel.KEY_Q):\n pyxel.quit()\n elif pyxel.btnp(pyxel.KEY_W):\n self.player.walk(0)\n elif pyxel.btnp(pyxel.KEY_D):\n self.player.walk(2)\n elif pyxel.btnp(pyxel.KEY_S):\n self.player.walk(4)\n elif pyxel.btnp(pyxel.KEY_A):\n self.player.walk(6)\n\n for objekt in self.things:\n objekt.update()", "title": "" }, { "docid": "73c809aae8ca5b70d30c9a31dede18c3", "score": "0.5603339", "text": "def add_obj(self, map_obj):\n for i, cell in enumerate(self.grid):\n if map_obj.shape.intersects(cell):\n self.grid_occupancy[i] = 1", "title": "" }, { "docid": "abf70ac58c2b6fb74ac4492dae3e0220", "score": "0.55884767", "text": "def __init__(self, grid=None, nrow=25, ncol=25, goal_length=1,\n density=0.3, ctrl_error=0.05, episode_length=100, verbose=False):\n super(GridWorldEnv, self).__init__(\n grid, nrow, ncol, goal_length, density, ctrl_error, episode_length, verbose)\n 
self.nb_states = self.nrow * self.ncol\n self.nb_actions = 4", "title": "" }, { "docid": "a3a0515b0db1a8267f9cd8b09a79de08", "score": "0.55829614", "text": "def setup(self):\n self.set_cursor(0, 0)\n\n for obj in self.objects.copy():\n obj.destroy()\n\n for y in xrange(self.height):\n for x in xrange(self.width):\n self.map[x][y] = [floor]\n\n self[self.world.player.location].append(self.world.player)\n\n self.regions = []", "title": "" }, { "docid": "6b2716b1a1d3ec3a56232585aa38edf1", "score": "0.5582553", "text": "def make_world(self, num_obstacles):\r\n for o in range(num_obstacles):\r\n cell = UNDEFINED\r\n while cell != EMPTY:\r\n i = np.random.randint(0, self.grid.shape[0])\r\n j = np.random.randint(0, self.grid.shape[1])\r\n cell = self.grid[i, j]\r\n if cell == EMPTY:\r\n self.grid[i, j] = OBSTACLE", "title": "" }, { "docid": "d179089399b8adf5c25b7e381e08df00", "score": "0.5581471", "text": "def refresh_and_blit_overworld_viewing(self, display_update=True):\n\n self.overworld_viewing.refresh_and_blit_self()\n\n if display_update:\n pygame.display.update()", "title": "" }, { "docid": "1a27278fa0d332693d29f8a7b665219a", "score": "0.5580832", "text": "def update(self):\n # update all sprites\n self.all_sprites.update()", "title": "" }, { "docid": "3b0d5bcdfa009169d5cdb1595d82beb2", "score": "0.55695754", "text": "def update(self):\n for ai in self.ai_entities:\n ai.update()\n for animation in self.display_entities:\n animation.update()\n for physics in self.physics_entities:\n physics.update()", "title": "" }, { "docid": "07e970d206b1518e4d307cc25073789f", "score": "0.55520135", "text": "def update():", "title": "" }, { "docid": "f669914f20ed7cd28e0fcac380f00f5e", "score": "0.55500704", "text": "def __update_modelview(self):\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n # We move the camera by moving the world in the opposite direction\n # We deduct how to move the world by the camera position\n camera_pose = self.scene.cam.pose\n world_pose = copy.deepcopy(camera_pose)\n # world_pose.yaw = np.pi/2 - camera_pose.yaw\n # world_pose.pitch = np.pi/2 - camera_pose.pitch\n # world_pose.roll = np.pi/2 - camera_pose.roll\n\n M = world_pose.transfmat\n M[:3, :] = -M[:3, :]\n\n # fit to open gl coordinate system\n M[0, :] = -M[0, :]\n M[1, :] = -M[1, :]\n # transpose and flatten to get column order\n M = M.T\n m = M.flatten()\n # replace model view with the new matrix\n glLoadMatrixf(m)", "title": "" }, { "docid": "efb0c6762609000b1050cb7f0fd48e8c", "score": "0.5536209", "text": "def update(self):\n self.update_agents()\n self.update_workplaces()\n self.update_specialties()\n self.update_adjacency()\n self.update_influence()\n self.update_state()\n #self.build_network_graph()\n #self.build_transition_matrix()\n #self.build_policy()\n self.reset_simulation()", "title": "" }, { "docid": "4bb550f46a2dbe813c81755d9c7b6132", "score": "0.5530107", "text": "def update(self):\n # Visualize the detected object\n is_world_changed = False\n self._lock.acquire()\n if self.has_objects():\n to_remove = None\n for i in range(len(self._objects)):\n if self._objects[i].is_removed:\n to_remove = i\n if to_remove is not None:\n self._remove_object(to_remove)\n is_world_changed = True\n\n self._lock.release()\n return is_world_changed", "title": "" }, { "docid": "515b6d7cc1b137f279527ceff4eac88f", "score": "0.5528818", "text": "def update_agent(self, env_object_info):\n gui_agent_name_id = \\\n self.gui_agents_idx_to_name.index(env_object_info['name'])\n gui_agent_id = 
self.gui_agents_circle_idx[gui_agent_name_id]\n new_gui_agent_position = self.scale(env_object_info['position'])\n new_gui_agent_radius = env_object_info['radius'] * self.draw_scale\n new_gui_agent_points = util.get_circle_points(new_gui_agent_position,\n new_gui_agent_radius)\n self.canvas.coords(gui_agent_id, new_gui_agent_points)\n\n gui_agent_text_id = self.gui_agents_texts_idx[gui_agent_name_id]\n self.canvas.coords(gui_agent_text_id,\n new_gui_agent_position[0],\n new_gui_agent_position[1])", "title": "" }, { "docid": "28ac099bdb85bed66d67520d2217bf71", "score": "0.5525303", "text": "def update_grid(self):\n old_grid = self._grid.copy()\n N = self._N\n for i in range(N):\n for j in range(N):\n total = (old_grid[i, (j-1) % N] # up\n + old_grid[i, (j+1) % N] # down\n + old_grid[(i-1) % N, j] # left\n + old_grid[(i+1) % N, j] # right\n + old_grid[(i-1) % N, (j-1) % N] # top-left\n + old_grid[(i+1) % N, (j-1) % N] # top-right\n + old_grid[(i-1) % N, (j+1) % N] # bottom-left\n + old_grid[(i+1) % N, (j+1) % N]) # bottom-right\n\n # Apply the rule\n if old_grid[i, j] == type(self).ON:\n if (total < 2) or (total > 3):\n self._grid[i, j] = type(self).OFF\n else:\n if total == 3:\n self._grid[i, j] = type(self).ON", "title": "" }, { "docid": "01e02286ce64e19077c073363d90c0dd", "score": "0.55229336", "text": "def update(self):\n #Skip this if there is no tileset or map data\n if self.tileset is None:\n Logger.warning(\"TileMap: There is no assigned tileset.\")\n return\n\n if self.map_data is None:\n Logger.warning(\"TileMap: There is no map data.\")\n return\n\n #Update Fbo size\n w, h = self.size\n w = w * 32\n h = h * 32\n self._fbo.size = (w, h)\n self._rect.size = (w, h)\n self._rect.texture = self._fbo.texture\n\n #Draw the new tiles\n self._fbo.clear()\n\n with self._fbo:\n ClearColor(0, 0, 0, 0)\n ClearBuffers()\n\n #Draw each tile\n x = 0\n y = 0\n\n for row in self.map_data:\n for tile in row:\n Rectangle(pos = (x, y), size = (32, 32), \n source = self.tileset[tile])\n x += 32\n\n y += 32\n x = 0\n\n #Force a redraw of the Fbo\n self._fbo.draw()", "title": "" }, { "docid": "f6709b0464ac5533935d999911fcb0cd", "score": "0.5517183", "text": "def update(self):\n self.game_manager.update()", "title": "" }, { "docid": "26dd1eca1986788cb58cdf69574460fe", "score": "0.5516408", "text": "def _draw_grid(self, state):\n for pos in state.grid.keys():\n agent_square = False\n if pos == state.player_pos:\n agent_square = True\n canvas_pos = self._square_coord_to_canvas_coord(*pos)\n self._draw_square(state.grid[pos], canvas_pos, agent_square)", "title": "" }, { "docid": "8cfaf14668ae0c3e8a17c226cba4a187", "score": "0.5492774", "text": "def update(self):\n units = list(self.units)\n shuffle(units)\n for unit in units:\n unit.action()\n self.auto_update_occupied_cells()\n self.kill_units()\n self.auto_update_occupied_cells()", "title": "" }, { "docid": "35bb606c2d05652b4c14087111f45ce7", "score": "0.54828745", "text": "def _updateGeometry(self):\n pass", "title": "" }, { "docid": "635fb17a8eb974081f6ffef689c0a79c", "score": "0.54768187", "text": "def _grid_changed(self):\n\n if not self.grid:\n return\n dimensions = self.getDimensions()\n if not self.isMRG():\n # MD layout (1 or 2 dimensions)\n if self.grid[0] != self.index0:\n self.index0 = self.grid[0]\n if self.grid[1] != self.index1:\n self.index1 = self.grid[1]\n else:\n # MRG layout (3 dimensions)\n if self.grid[0] != self.index1:\n self.index1 = self.grid[0]\n if self.grid[1] != self.index2:\n self.index2 = self.grid[1]\n if self.sensor:\n 
self.drawLocationImages()", "title": "" }, { "docid": "5c8ace147c2fd716eabb25237931d85e", "score": "0.5471644", "text": "def update(self):\n self.platform_list.update()\n self.enemy_list.update()\n self.zone_list.update()\n self.potion_list.update()", "title": "" }, { "docid": "4cc1b8dc6388e01f9433adf62a8b5fad", "score": "0.54702455", "text": "def update_model(self):\n self.request = self.model.get_request(self.events)\n self.model.user_request(self.request)\n self.model.call_menu()\n self.model.towers_attack()\n self.model.enemies_advance()\n\n for tw in self.model.towers:\n tw.update()\n for en in self.model.enemies.get():\n en.update()", "title": "" }, { "docid": "9c2f8754627565f6a005fb2efacd7b34", "score": "0.54696226", "text": "def update_fov(self) -> None:\n self.game_map.visible_tiles[:] = tcod.map.compute_fov(\n self.game_map.tiles[\"transparent\"],\n (self._player.x, self._player.y),\n radius=self._fov_radius,\n )\n # If a tile is 'visible' it should be set to 'explored'.\n self.game_map.explored_tiles |= self.game_map.visible_tiles", "title": "" }, { "docid": "bc303e1cc5b12ba538a724afd52d5d58", "score": "0.54618496", "text": "def update(self, entities):\n\n # The set of entities that need a corresponding simulation\n # object. We remove entities where we see them, and create\n # simulation objects where necessary.\n to_remove = set(self.__mapping.keys())\n for e in entities:\n if e in to_remove:\n to_remove.remove(e)\n else:\n body = e.get_component(Body)\n assert body\n pymunk_body = Physics.PymunkBody(body)\n self.__mapping[e] = pymunk_body\n self.__space.add(pymunk_body.body, pymunk_body.shape)\n\n # Now, the set contains all of the entities that had simulation\n # bodies but shouldn't any more.\n for e in to_remove:\n pymunk_body = self.__mapping[e]\n self.__space.remove(pymunk_body.body, pymunk_body.shape)\n del self.__mapping[e]", "title": "" }, { "docid": "d9caabb854d8e0c8fa0f512bf7c29f0b", "score": "0.5458752", "text": "def update_programs_world_matrix(self):\n # upload the model matrix to the gpu\n for p in self.programs.values():\n p.set_world_matrix(self.camera.world_matrix)", "title": "" }, { "docid": "c6c552facb2059bf452552ae488ea90c", "score": "0.5453995", "text": "def update_objects(self):\n ObjectsDialog.update_objects(self)\n self.__update_datasets()", "title": "" }, { "docid": "31e90d87aba297f6dbd72da2ac316c99", "score": "0.54509586", "text": "def createhillMovesGrid(currentObject, numpyGrid, dimensions, houseCounter):\n\n YPosition = 300\n waterWidth = int(23040/296)\n\n # Specify drawNumbers\n drawNumber = currentObject.uniqueID\n fADrawNumber = 1\n\n # Define the maximum number of houses that can be placed on one row\n maxHousesOnRow = houseCounter[3]\n\n if currentObject.type == \"eengezinswoning\":\n maxHousesOnNextRows = houseCounter[4]\n\n # Put eengezinswoningen on the first row\n if houseCounter[0] < maxHousesOnRow:\n coordinates = (YPosition, currentObject.freeArea\n + dimensions * houseCounter[0])\n\n # Put eengezinswoningen on the second row\n elif houseCounter[1] < maxHousesOnNextRows:\n coordinates = (YPosition - dimensions, waterWidth +\n currentObject.freeArea + dimensions * houseCounter[1])\n\n # Put eengezinswoningen on the third row\n elif houseCounter[2] < maxHousesOnNextRows:\n coordinates = (YPosition - dimensions * 2, waterWidth +\n currentObject.freeArea + dimensions * houseCounter[2])\n\n else:\n\n # Put bungalows on the first row\n if houseCounter[0] < maxHousesOnRow:\n coordinates = (currentObject.freeArea,\n waterWidth + 
currentObject.freeArea + dimensions * \\\n houseCounter[0])\n\n # Put bungalows on the second row\n elif houseCounter[1] < maxHousesOnRow:\n coordinates = (currentObject.freeArea + dimensions,\n waterWidth + currentObject.freeArea + dimensions * \\\n houseCounter[1])\n\n # Put bungalows on the third row\n elif houseCounter[2] < maxHousesOnRow:\n coordinates = (currentObject.freeArea + dimensions * 2,\n waterWidth + currentObject.freeArea + dimensions * \\\n houseCounter[2])\n\n # Update coordinats in self\n changeCoordinates(currentObject, coordinates)\n\n # Get coordinate variables\n coord = coordinateVariables(currentObject)\n\n # The area is clear, so we can immediately draw the free area\n visualizeOnGrid(coord[5], coord[6], coord[7], coord[8],\n numpyGrid, fADrawNumber)\n\n # Visualize house on top of free area\n visualizeOnGrid(coord[0], coord[1], coord[2], coord[3],\n numpyGrid, drawNumber)", "title": "" }, { "docid": "c8bfe92e9541b604a9fbbe005d935ecc", "score": "0.5445584", "text": "def update(self):\n tmpGrid = self.grid.copy()\n for i in range(self.N):\n for j in range(self.N):\n neighbours = self.grid[max(0, i-1):min(i+2,self.N),max(0, j-1):min(j+2,self.N)].sum()\n neighbours -= self.grid[i, j]\n if self.grid[i, j] == 1:\n if neighbours > 3 or neighbours < 2:\n tmpGrid[i, j] = 0\n elif neighbours == 3:\n tmpGrid[i, j] = 1\n self.grid = tmpGrid", "title": "" }, { "docid": "a296e69589282cdcac7d48eaff3fc9b8", "score": "0.5431386", "text": "def grid(self, grid):\n self._grid = grid", "title": "" }, { "docid": "5c66034202e7afe84c2224949bd089c1", "score": "0.5429856", "text": "def update_map(self,players,enemies,attacks,items):\n ##Reset map\n objects = players.player_array + enemies.enemy_array + self.tiles \n players = players.player_array\n attacks = attacks.attack_array\n items = items.item_array\n\n for key in self.spatial_map.keys():\n self.spatial_map[key][\"objects\"] = []\n self.spatial_map[key][\"attacks\"] = []\n self.spatial_map[key][\"players\"] = []\n self.spatial_map[key][\"items\"] = []\n\n #For each object place a reference to them inside each tile they overlap\n for obj in objects:\n self.update_single_obj(obj)\n \n for player in players:\n self.update_single_player(player)\n\n for attack in attacks:\n self.update_single_att(attack)\n\n for item in items:\n self.update_single_item(item)", "title": "" }, { "docid": "2b63135918dde00f4bf19df5a21c549b", "score": "0.5428709", "text": "def update( ):", "title": "" }, { "docid": "ab42acbae32976a01f94a19c2e3dc505", "score": "0.54275686", "text": "def update_track_data(self):\n if self.interactive_cell is not None:\n print(\"updating the data structure\")\n print(self.trackdata.get_cell_params(self.current_image, self.current_cell_id))\n properties = self.interactive_cell.get_position_props()\n print(properties)\n self.trackdata.set_cell_properties(self.current_image, self.current_cell_id, properties)\n print(self.trackdata.get_cell_params(self.current_image, self.current_cell_id))", "title": "" }, { "docid": "a38e56cca0ea7476dd3d740729a1cb73", "score": "0.54199016", "text": "def update(self):\n\n if self.states[PROP_VIEW] == STATE_OVERVIEW: # the worldmap\n if self.states[PROP_UPDATE_MAIN]: # if the main view is being updated\n self.main_view.set_img(self.game.overview_map) # set the view to the worldmap\n self.gm_view.set_img(self.game.overview_map_dm) # set the view to the worldmap\n elif self.states[PROP_VIEW] == STATE_MAPVIEW: # view a map\n # calculate zoom and translation parameters for the main_view\n params = 
{\"fow\": \"tv\"} # the parameters that will be asked for in the image. fow will be a nice rendering\n if self.states[PROP_ZOOM] != 1:\n params[\"zoom\"] = self.states[PROP_ZOOM]\n if self.states[PROP_TRANS_X]:\n params[\"trans_x\"] = self.states[PROP_TRANS_X]\n if self.states[PROP_TRANS_Y]:\n params[\"trans_y\"] = self.states[PROP_TRANS_Y]\n\n if self.states[PROP_SHOW_TOKEN]:\n params[\"tokens\"] = True\n\n # only update main if allowed\n if self.states[PROP_UPDATE_MAIN]:\n self.main_view.set_img(self.game.curr_map().get_img(**params))\n\n # now that the main view is updated start changing the parameters to suit the gm view\n if self.states[PROP_UPDATE_MAIN]:\n params[\"border\"] = True\n if self.states[PROP_GM_VIEW] == STATE_PREVIEW: # preview is almost the image the main map shows + border\n self.gm_view.set_img(self.game.curr_map().get_img(**params))\n elif self.states[PROP_GM_VIEW] == STATE_NORMAL: # normal gm view\n # add fow param for the gm's view\n if self.states[PROP_GRIDLINES]:\n params[\"gridlines\"] = True\n params[\"fow\"] = \"gm\"\n params[\"dm\"] = True\n params[\"tokens\"] = True\n self.gm_view.set_img(self.game.curr_map().get_img(**params)) # get image and set view\n\n # actually show the images created\n self.gm_view.show(\"gm\")\n self.main_view.show(\"main\")", "title": "" }, { "docid": "9878dbf2b0d57bd216906e2ddd16cb82", "score": "0.5416527", "text": "def update_geometry(self):\r\n \r\n self.__e2p_horz_marg, self.__e2p_vert_marg,\\\r\n self.__p2l_horz_marg, self.__p2p_gap, self.__p2n_gap,\\\r\n self.__port_size, self.__l2n_gap = self.__geo_prop\r\n\r\n self.update_block()\r\n pass", "title": "" }, { "docid": "4e266d338fe2cfb9910cdae67992f45c", "score": "0.5414993", "text": "def updateGrid(self, shape):\r\n testing = find_max_score_location(self, shape)\r\n location = testing[2]\r\n fits = testing[0]\r\n numRotations = testing[1]\r\n while numRotations:\r\n shape.rotate90()\r\n numRotations -= 1\r\n for row in range(len(shape.squares)):\r\n for col in range(len(shape.squares[0])):\r\n if shape.squares[row][col] == True:\r\n self.squares[location[0] + row][location[1] + col] = True\r\n self.print()\r\n return fits", "title": "" }, { "docid": "72820f98bcdbd7300373cfc703cf6249", "score": "0.54123235", "text": "def refresh_2D(self, width, height):\n glViewport(0, 0, width, height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(0.0, width, height, 0.0, -1.0, 1.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()", "title": "" }, { "docid": "292b387c73ce85b26f2b482af1dff071", "score": "0.54103035", "text": "def run_world_simulation(self, world, generation, one_loc=None):\n draw_path = '{}gen{}/world'.format(self.path, generation)\n if one_loc:\n one_agent = self.world.agents[one_loc]\n draw_path = '{}gen{}/agent-i{}/world'.format(self.path, generation, one_agent.private_id)\n draw_ext = '.png'\n if self.draw:\n self.view.draw(world)\n self.view.save(draw_path+'0'+draw_ext)\n # print('max_time: {}'.format(self.max_time))\n for t in range(1, self.max_time):\n self.update(world)\n if self.draw:\n self.view.save(draw_path+str(t)+draw_ext)\n with open((draw_path+str(t)+'-comm.csv'), 'w') as f:\n f.write('\\n'.join([str(x) for x in world.comms]))\n self.num_comms += len(world.comms)\n world.comms = []", "title": "" }, { "docid": "c5ea5f507d0b9be50788171e887c8995", "score": "0.54042923", "text": "def update_house(self, house, points):\n pass", "title": "" }, { "docid": "aa51267cc8dcfe6b3eb332e78ec729c0", "score": "0.54038113", "text": "def __init__(self, lifespan=None): 
\n Grid_2D_World.__init__(self, lifespan)\n self.name = 'grid_2D_dc'\n self.name_long = 'decoupled two dimensional grid world'\n print \"--decoupled\"\n self.num_sensors = self.world_size * 2\n self.VISUALIZE_PERIOD = 10 ** 3\n self.display_state = False", "title": "" }, { "docid": "7e104119c0f5c4b592a26de9a22b112c", "score": "0.54023683", "text": "def update_scene(self):\n\n self.update_programs_projection_matrix()\n self.update_programs_view_matrix()\n self.update_programs_world_matrix()\n if self.ctrl:\n self.update_cursor_data()\n self.glDraw()", "title": "" }, { "docid": "aa8b482d13bca78d63790cc20dc94fb9", "score": "0.5402059", "text": "def tick(self):\n for world_object in self.objects:\n world_object.tick(self.tick_length)\n self.time += self.tick_length", "title": "" }, { "docid": "66403dc28195279d2d3088308df283e7", "score": "0.54001397", "text": "def update(self, object_):\n raise NotImplementedError()", "title": "" }, { "docid": "1232dc9540edd7787a03a32b67628ce8", "score": "0.5397368", "text": "def update_window(self):\n # Update robot position\n self.r1.update_window()\n self.r2.update_window()\n self.r3.update_window()\n\n # now update the OpenGL graphics in window\n self.view3D.update()", "title": "" }, { "docid": "df6f23fddcb85ba97dcaeb9ed9a7489d", "score": "0.5393906", "text": "def update(self):\n self.robot.update()", "title": "" }, { "docid": "0d5cd273b650ad21a37108b8eda17554", "score": "0.53879035", "text": "def regenerate_surface(self):\n for x in xrange(0, self.gridWidth):\n for y in xrange(0, self.gridHeight):\n self.update_square((x, y, self.view.z))", "title": "" }, { "docid": "eca5925f4dfe86b42b1ab32e44737d6f", "score": "0.53785145", "text": "def update():\n\n\tglobal START, END\n\n\tfor i in range(ROWS):\n\t\tfor j in range(COLS):\n\n\t\t\tif GRID[i][j].rect.collidepoint(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]):\n\t\t\t\tkeys = pygame.key.get_pressed()\n\n\t\t\t\tif keys[pygame.K_s] and not GRID[i][j].isEnd:\n\t\t\t\t\tsetStart(i, j)\n\n\t\t\t\telif keys[pygame.K_e] and not GRID[i][j].isStart:\n\t\t\t\t\tsetEnd(i, j)\n\n\t\t\tGRID[i][j].update()", "title": "" }, { "docid": "42e1433ce749544a15aa95c21da63a5f", "score": "0.5366284", "text": "def test_update(self):\n square = Square(5)\n square.update()\n square.update(1, 2, y=3)\n square.update(1, 12, id=4)\n square.update(size=2, y=5)", "title": "" }, { "docid": "e29da65f264ecaa6bf4014a7e51d8354", "score": "0.53635085", "text": "def _update_state(self, obs_dict):\n self.x = obs_dict['poses_x'][obs_dict['ego_idx']]\n self.y = obs_dict['poses_y'][obs_dict['ego_idx']]\n\n self.theta = obs_dict['poses_theta'][obs_dict['ego_idx']]\n self.in_collision = obs_dict['collisions'][obs_dict['ego_idx']]\n self.collision_angle = obs_dict['collision_angles'][obs_dict['ego_idx']]", "title": "" }, { "docid": "1ece8315b242490ae55390b9e2366a49", "score": "0.5356332", "text": "def updatePositions(self):\r\n for body in self.smallBodies+self.largeBodies:\r\n body.pos += body.velocity*self.dt", "title": "" }, { "docid": "b6994c9fd53b5c09e7e66dde3832e274", "score": "0.53509086", "text": "def move_obj(self, move_x, move_y):\r\n self.wall_list.update(move_x, move_y)\r\n self.door_list.update(move_x, move_y)\r\n self.coin_list.update(move_x, move_y)", "title": "" }, { "docid": "cba6d4f75eac09baf930bc0a050738d5", "score": "0.53463733", "text": "def update(self):\n\t\tself.active_sprite_list.update()\n\t\tfireball.Fireball.all_fireballs.update()\n\t\tself.score_board.update()", "title": "" }, { "docid": 
"8f0cea03788b385805f747817ce4cd5a", "score": "0.53455746", "text": "def _update_geos(self):\n # globe\n self._fig.update_geos(\n row=2,\n col=1,\n showland=False,\n showcountries=False,\n showocean=False,\n showcoastlines=False,\n showframe=False,\n showrivers=False,\n showlakes=False,\n showsubunits=False,\n bgcolor=\"rgba(0,0,0,0)\",\n projection=dict(type=self.projection, rotation=dict(lon=0, lat=0, roll=0)),\n lonaxis=dict(showgrid=True, gridcolor=\"rgb(102, 102, 102)\", gridwidth=1),\n lataxis=dict(showgrid=True, gridcolor=\"rgb(102, 102, 102)\", gridwidth=1),\n )", "title": "" }, { "docid": "c0cabb3c24dc1a92eeb199d14a0eaa45", "score": "0.53441733", "text": "def do_it(self):\r\n # First find EPSG-ID for the CRS\r\n sql = r\"\"\"SELECT srid FROM geometry_columns where f_table_name = 'obs_points'\"\"\"\r\n ConnectionOK, result = utils.sql_load_fr_db(sql)\r\n EPSGID= str(result[0][0])\r\n #Then do the operation\r\n sql = r\"\"\"Update or ignore 'obs_points' SET Geometry=MakePoint(east, north, \"\"\"\r\n sql += EPSGID\r\n sql += \"\"\") WHERE obsid IN \"\"\" + self.sqlpart2\r\n utils.sql_alter_db(sql)", "title": "" }, { "docid": "ab8104a66e11aa40aa274007683334ef", "score": "0.5338825", "text": "def get_grid_obs(self):\n self.get_map()\n if not self.finalized:\n self.add_objs_to_map()\n self.grid = [''.join(x) for x in self.map]\n\n return self.grid", "title": "" }, { "docid": "494638ef20dbefa8eadf604666927a82", "score": "0.5332123", "text": "def _write_grid(self, rr_graph):\n\n grid_locs = rr_graph.grid.init('gridLocs', len(self.graph.grid))\n for out_grid_loc, grid_loc in zip(grid_locs, self.graph.grid):\n out_grid_loc.x = grid_loc.x\n out_grid_loc.y = grid_loc.y\n out_grid_loc.blockTypeId = grid_loc.block_type_id\n out_grid_loc.widthOffset = grid_loc.width_offset\n out_grid_loc.heightOffset = grid_loc.height_offset", "title": "" }, { "docid": "f737f468202e354adc13cb16f314c686", "score": "0.5331146", "text": "def updateWorld(msg):\n global X, V, orientation\n X = np.array([float(msg.pose[1].position.x), float(msg.pose[1].position.y)])\n V = np.array([float(msg.twist[1].linear.x), float(msg.twist[1].linear.y)])\n orientation = np.arctan2(2 * float(msg.pose[1].orientation.w) * float(msg.pose[1].orientation.z), \\\n 1 - 2 * float(msg.pose[1].orientation.z)**2)", "title": "" }, { "docid": "41200d1aea434dd8554b8745d4687109", "score": "0.532805", "text": "def update(self):\n old_coords = self.coords\n new_coords = (old_coords[0] + self.direction['H'] * self.speed[0],\n old_coords[1] + self.direction['V'] * self.speed[1])\n self.coords = new_coords", "title": "" }, { "docid": "3a8f4c6a40855cdbd0576aafa89ee391", "score": "0.53174084", "text": "def __init__(self, env, width, height, min_reward, max_reward): \n self.env = env\n self.width = width\n self.height = height\n self.min_reward = min_reward\n self.max_reward = max_reward\n \n self.root_window = Tkinter.Tk()\n self.root_window.title(\"Gridworld Visualisation\")\n self.root_window.geometry(str(width)+\"x\"+str(height)+\"+200+200\")\n self.root_window.resizable(0, 0)\n \n self.canvas = Tkinter.Canvas(self.root_window, width=width, height=height)\n self.canvas.pack(fill=Tkinter.BOTH, expand=1)\n \n self.grid_x_inc = float(self.width) / self.env.width\n self.half_grid_x_inc = self.grid_x_inc / 2.0\n self.grid_y_inc = float(self.height) / self.env.height\n self.half_grid_y_inc = self.grid_y_inc / 2.0\n \n self.h_lines = []\n for l in xrange(self.env.height):\n pos = l * self.grid_y_inc\n self.h_lines.append(self.canvas.create_line(0, pos, 
self.width, pos,\n fill=LINE_COLOR))\n \n self.v_lines = []\n for l in xrange(self.env.width):\n pos = l * self.grid_x_inc\n self.v_lines.append(self.canvas.create_line(pos, 0, pos, self.height,\n fill=LINE_COLOR))\n \n self.Q_values = {}\n for state in xrange(self.env.num_states):\n x, y = self.state_to_pos(state)\n x_pos = x * self.grid_x_inc\n y_pos = y * self.grid_y_inc\n #up\n self.Q_values[(state, \"up\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+Q_TRIANGLE_BORDER, \n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+Q_TRIANGLE_BORDER,\n x_pos+self.half_grid_x_inc,\n y_pos+Q_TRIANGLE_PERCENT*self.half_grid_y_inc,\n fill=\"blue\")\n #down\n self.Q_values[(state, \"down\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1, \n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1,\n x_pos+self.half_grid_x_inc,\n y_pos+self.grid_y_inc-Q_TRIANGLE_PERCENT*self.half_grid_y_inc,\n fill=\"blue\")\n #left\n self.Q_values[(state, \"left\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+Q_TRIANGLE_BORDER,\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+self.half_grid_y_inc,\n fill=\"blue\")\n #right\n self.Q_values[(state, \"right\")] = self.canvas.create_polygon(\\\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+Q_TRIANGLE_BORDER,\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1,\n x_pos+self.grid_x_inc-Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+self.half_grid_y_inc,\n fill=\"blue\")\n \n self.greedy_policy = {}\n for state in xrange(self.env.num_states):\n x, y = self.state_to_pos(state)\n x_pos = x * self.grid_x_inc\n y_pos = y * self.grid_y_inc\n #up\n self.greedy_policy[(state, \"up\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*G_OUTER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER, \n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*G_OUTER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER,\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*G_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*G_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n fill=G_COLOR, state=Tkinter.HIDDEN)\n #down\n self.greedy_policy[(state, \"down\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*G_OUTER_PERCENT,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1, \n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*G_OUTER_PERCENT,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1,\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*G_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*G_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n fill=G_COLOR, state=Tkinter.HIDDEN)\n #left\n self.greedy_policy[(state, \"left\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*G_OUTER_PERCENT,\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*G_OUTER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*G_INNER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*G_INNER_PERCENT,\n fill=G_COLOR, state=Tkinter.HIDDEN)\n 
#right\n self.greedy_policy[(state, \"right\")] = self.canvas.create_polygon(\\\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*G_OUTER_PERCENT,\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*G_OUTER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*G_INNER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*G_INNER_PERCENT,\n fill=G_COLOR, state=Tkinter.HIDDEN)\n \n self.actions_taken = {}\n for state in xrange(self.env.num_states):\n x, y = self.state_to_pos(state)\n x_pos = x * self.grid_x_inc\n y_pos = y * self.grid_y_inc\n #up\n self.actions_taken[(state, \"up\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*T_OUTER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER, \n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*T_OUTER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER,\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*T_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*T_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n fill=T_TRIANGLE_COLOR, state=Tkinter.HIDDEN)\n #down\n self.actions_taken[(state, \"down\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*T_OUTER_PERCENT,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1, \n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*T_OUTER_PERCENT,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1,\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1-self.half_grid_x_inc*T_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n x_pos+Q_TRIANGLE_BORDER+self.half_grid_x_inc*T_INNER_PERCENT,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc,\n fill=T_TRIANGLE_COLOR, state=Tkinter.HIDDEN)\n #left\n self.actions_taken[(state, \"left\")] = self.canvas.create_polygon(\\\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*T_OUTER_PERCENT,\n x_pos+Q_TRIANGLE_BORDER,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*T_OUTER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*T_INNER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*T_INNER_PERCENT,\n fill=T_TRIANGLE_COLOR, state=Tkinter.HIDDEN)\n #right\n self.actions_taken[(state, \"right\")] = self.canvas.create_polygon(\\\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*T_OUTER_PERCENT,\n x_pos+self.grid_x_inc-Q_TRIANGLE_BORDER+1,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*T_OUTER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+self.grid_y_inc-Q_TRIANGLE_BORDER+1-self.half_grid_y_inc*T_INNER_PERCENT,\n x_pos+Q_TRIANGLE_PERCENT*self.half_grid_x_inc,\n y_pos+Q_TRIANGLE_BORDER+self.half_grid_y_inc*T_INNER_PERCENT,\n fill=T_TRIANGLE_COLOR, state=Tkinter.HIDDEN)", "title": "" }, { "docid": "81199972fa288a30be57e9729209e64d", "score": "0.531061", "text": "def update_plants(self,surface):\n for plant in self.plants:\n plant.update(surface,self.current_time)", "title": "" }, { "docid": "fee6404df206f5541c3e32c0ad1f55d2", "score": "0.5310382", "text": "def update_map(self, grid_map, pose, scan):\n\n # Current yaw of the robot\n robot_yaw = self.get_yaw(pose.pose.orientation)\n # The origin of 
the map [m, m, rad]. This is the real-world pose of the\n # cell (0,0) in the map.\n origin = grid_map.get_origin()\n # The map resolution [m/cell]\n resolution = grid_map.get_resolution()\n \n \n \"\"\"\n print(\"das sind die Pose- Werte: \" +str(pose))\n \n a = True\n if a == True:\n print(\"das sind die grid- Werte: \" +str(grid_map)) \n a = False\n \"\"\"\n x_list = []\n y_list = []\n #x_y_list = []\n occupied_cells = []\n \n \n for i in range(len(scan.ranges)):\n \n if scan.ranges[i] > scan.range_min and scan.ranges[i] < scan.range_max:\n \n x = (scan.ranges[i] * cos(robot_yaw + (scan.angle_min + scan.angle_increment * i))) + pose.pose.position.x - origin.position.x\n \n y = (scan.ranges[i] * sin(robot_yaw + (scan.angle_min + scan.angle_increment * i))) + pose.pose.position.y - origin.position.y\n \n #x and y end index\n \n x = int(x / resolution) \n y = int(y / resolution)\n \n occupied_cells.append([x,y])\n \n \n #mark as free\n \n starting_x = int((pose.pose.position.x - origin.position.x) / resolution)\n \n starting_y = int((pose.pose.position.y - origin.position.y) / resolution) \n \n start = [starting_x, starting_y]\n \n end = [x,y] \n \n traversed = self.raytrace(start, end)\n \n \n for i in traversed:\n \n self.add_to_map(grid_map, i [0], i [1],self.free_space)\n \n if self.is_in_bounds(grid_map, i[0], i[1]):\n \n x_list.append(i[0])\n \n y_list.append(i[1])\n \n #x_y_list.append([i[0],i[1],self.free_space]) \n \n \n #x and y end index add to list\n #x_list.append(x)\n \n \n #mark as occupied\n \n for i in range(len(occupied_cells)):\n \n self.add_to_map(grid_map,occupied_cells [i][0], occupied_cells [i] [1],self.occupied_space) \n \n #if self.is_in_bounds(grid_map,occupied_cells [i][0], occupied_cells [i][1]):\n \n #x_list.append(occupied_cells [i][0])\n \n #y_list.append(occupied_cells [i][1])\n \n #x_y_list.append([occupied_cells [i][0],occupied_cells [i][1],self.occupied_space])\n \n \n #x_y_list.sort()\n \n data = []\n \n # Only get the part that has been updated\n update = OccupancyGridUpdate()\n \n # The minimum x index in 'grid_map' that has been updated\n update.x = min(x_list)\n \n # The minimum y index in 'grid_map' that has been updated\n update.y = min(y_list)\n \n # Maximum x index - minimum x index + 1\n update.width = max(x_list) - min(x_list) + 1\n #print(\"Breite des Rechtecks: \" +str(update.width))\n \n # Maximum y index _map- minimum y index + 1\n update.height = max(y_list) - min(y_list) + 1\n \n #print(\"Hoehe des Rechtecks: \" +str(update.height))\n # The map data inside the rectangle, in row-major order.\n \n for h in range(update.height):\n \n for i in range(update.width):\n \n data.append(grid_map[update.x + i, update.y + h])\n \n update.data = data\n #print(\"das sind die Daten: \" +str(data))\n \n return grid_map, update", "title": "" }, { "docid": "a03074f42a860d470e4816b8ac49a385", "score": "0.53102237", "text": "def updateVS(db):\n# graph = get_graph(db)\n mesh = get_mesh(db)\n pos = db.get_property('position')\n wall = db.get_property('wall')\n scale = db.get_property('scale_px_to_micron')\n\n if mesh.degree()==3:\n\t # Area of each cell\n\t V = refresh_property(db, 'V')\n\t for cid in mesh.wisps(3):\n\t\tV[cid]=scale[0]*scale[0]*scale[0]*cell_volume(mesh, pos, cid)\n \n\t # Lengths of each wall\n\t S = refresh_property(db, 'S')\n\t for wid in mesh.wisps(2):\n\t\tS[wid] = scale[0]*scale[0]*face_surface_3D(mesh, pos, wid)\n\n if mesh.degree()==2:\n\t # Area of each cell\n\t V = refresh_property(db, 'V')\n\t for cid in 
mesh.wisps(2):\n\t\tV[cid]=scale[0]*scale[0]*face_surface_2D(mesh, pos, cid)\n\n\t \n\t # Lengths of each wall\n\t S = refresh_property(db, 'S')\n\t for wid in mesh.wisps(1):\n\t\tS[wid] = scale[0]*edge_length(mesh, pos, wid)", "title": "" }, { "docid": "13bc3a224e18c196b6ad30e173f22528", "score": "0.53061366", "text": "def updatePixelCoords( self ):\r\n print 'in Movable\\'s updatePixelCoords'", "title": "" } ]
fe15a1a597b13a43456fd8bdf2a252fd
rejects a nonnumeric input of "foo"
[ { "docid": "1232bc5326ddca1da44076edffbc92a9", "score": "0.0", "text": "def test_reject_foo():\n check50.run(\"python3 cash.py\").stdin(\"foo\").reject()", "title": "" } ]
[ { "docid": "eb8de4b095347f3422d0d8ab42c05c67", "score": "0.6866605", "text": "def check(param):\n try:\n param = float(param)\n\n except ValueError:\n print(\"Incorrect input \\\"%s\\\". Must be number.\" % param)", "title": "" }, { "docid": "a6ba2276e2f4248e8994b8cae14a2c63", "score": "0.6722084", "text": "def is_numeric(form, field):\n data = str(field.data)\n if data.isalpha():\n raise ValidationError(message_is_numeric)", "title": "" }, { "docid": "c8fc4d6d231bb1139f1bf85d09b3d578", "score": "0.667467", "text": "def check_numeric(arg):\n while True:\n try:\n entry = float(input(arg))\n if entry < 0:\n raise ValueError\n else:\n return entry\n\n except ValueError:\n print(\"Entries must be positive and numeric\")", "title": "" }, { "docid": "79c55c128ca5194b60f902f99d0295eb", "score": "0.66200054", "text": "def number_or_exit(s, msg):\n isNumber = r'^[1-9][0-9]*\\.?[0-9]*([Ee][+-]?[0-9]+)?$';\n\n if not re.search(isNumber, s):\n print(\"Error: the {} argument must be a number\".format(msg))\n exit_with_usage()", "title": "" }, { "docid": "a980ed468489ba0d4901ce4ee2656340", "score": "0.6596289", "text": "def _validate(number):\n # Validate Input\n if type(number) == str: # If it is a string, make sure it is digits \n if isdigit(number): number\n else:\n print(\"Verify that your imput consists of the digits 0 thru 9. Your input: \", number)\n return -1 #Bad Input\n elif type(number) == int: # It is an integer value, convert to string\n number = str(number)\n return number", "title": "" }, { "docid": "c99873a99c3fd87e380a2a66255007a7", "score": "0.6582099", "text": "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data[1:].isdigit()\n return input_data.isdigit()", "title": "" }, { "docid": "8af4ea160899d993701c988a55ee70a5", "score": "0.6551377", "text": "def numeric_validation(entry):\n while True:\n try:\n entry = int(entry)\n break\n except ValueError:\n entry = input(\"Please enter a valid integer: \")\n return entry", "title": "" }, { "docid": "66f277376594f176feb9d7b76047ed4d", "score": "0.64648896", "text": "def validate_amount(amount):\n\n try:\n amount = float(amount)\n\n if amount < 0:\n raise argparse.ArgumentTypeError(\"Amount value should be a non-negative value.\")\n\n if math.isnan(amount) or math.isinf(amount):\n raise argparse.ArgumentTypeError(\"Amount value is not a number.\")\n\n except ValueError:\n raise\n\n return amount", "title": "" }, { "docid": "517ed8a6bd6069985c38912f743da2a0", "score": "0.6449597", "text": "def validate_float(user_input): \r\n try:\r\n return float(user_input)\r\n except:\r\n print(\"\\nPlease enter a real number.\")\r\n return \"invalid_input\"", "title": "" }, { "docid": "f972af5a797c3e414fadedf5ad628bf3", "score": "0.64454305", "text": "def test_wrong_type_integer_input(self):\n\n number = get_integer_input(\"Integer Number\")\n self.assertNotEqual(type(number), str)", "title": "" }, { "docid": "f731f9c460e778127dca9278eb407034", "score": "0.644076", "text": "def test_convert_nonnumeric_value(self):\n with pytest.raises(TypeError):\n pressure_util.convert('a', PRESSURE_HPA, PRESSURE_INHG)", "title": "" }, { "docid": "08415286648c0f8b4fd7378180d1022c", "score": "0.6374627", "text": "def isnumeric(input):\n # 2016-01-20 11:48 IJMC: Created\n try:\n junk = float(input) + 0.\n ret = True\n except:\n ret = False\n return ret", "title": "" }, { "docid": "f4d68c992af47b38e44ca5f87e11dd4c", "score": "0.63367385", "text": "def raise_not_number(x):\n try:\n float(x)\n except ValueError:\n raise SizeError('Must pass a 
number, received {}'.format(x))", "title": "" }, { "docid": "c9a902cb7cb025889c5c1206d35dfcec", "score": "0.63094205", "text": "def validate_num(\r\n num: Union[int, float, Int, Number]) -> None:\r\n from apysc.type.number_value_interface import NumberValueInterface\r\n if isinstance(\r\n num,\r\n (int, float, NumberValueInterface)):\r\n return\r\n raise ValueError(\r\n f'Specified value is not iteger or float type: {num}'\r\n f'({type(num)})')", "title": "" }, { "docid": "207bcec40f3cdc3d1569bac10afc1e9c", "score": "0.6236718", "text": "def opcion_valida(opcion: str) -> int:\n while not opcion.isnumeric() or int(opcion) < 1 or int(opcion) > 8:\n opcion = input(\"Ingrese una opcion correcta: \")\n opcion = int(opcion)\n return opcion", "title": "" }, { "docid": "fcd0eb6c674273e8e9284f93193ea558", "score": "0.6201344", "text": "def is_numeric(input_str):\n\n try:\n float(input_str)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "06a32c66bfcb03036a23a34f571bf94e", "score": "0.61735827", "text": "def ask(question, valueType):\n while True:\n try:\n answer = valueType(raw_input(question))\n if answer < 0:\n raise ValueError\n return answer\n except ValueError:\n if valueType == int:\n print \" ^ Enter a positive whole number...\"\n else:\n print \" ^ Enter a positive number...\"", "title": "" }, { "docid": "0d594095e6f520a42448be4069e66e01", "score": "0.6124916", "text": "def test_if_it_accepts_float_numbers_are_accepted(self):\n with self.assertRaises(TypeError):\n primenum(56.58)", "title": "" }, { "docid": "f789770989c157819bc10916e71cd027", "score": "0.61225647", "text": "def check_input(valid_number): # function which checks all user input, whether it is valid (whole positive numbers) or invalid\n value = None\n while not value: # creates a loop so it will keep going till user input is valid \n value = input(valid_number)\n if not value.isnumeric(): # if input is not a number\n print(\"Please enter a valid number\")\n print(\"\\n\")\n value = None\n else:\n return int(value) # once a correct number is entered the number is returned and program contiues", "title": "" }, { "docid": "0b28f9f30b6419dace166792ce48baad", "score": "0.6111065", "text": "def test_phone_parser_wrong_input(self):\r\n invalid = ('66867867', '', 668678678)\r\n for num in invalid:\r\n self.assertRaises(WrongInput, phone_parser, num)", "title": "" }, { "docid": "7ee45eeda94b9d8da93bfdc5770097a7", "score": "0.6106006", "text": "def validate_numeric_scalar(var: Any) -> bool:\n assert isinstance(var, (int, float)), \"Argument must be single numeric value\"\n return var", "title": "" }, { "docid": "952e4bda8e3e4f0002b89bf61c64789d", "score": "0.6091906", "text": "def is_number(num):\n try:\n int(num)\n except:\n print('Only enter numbers')\n return False\n return True", "title": "" }, { "docid": "6146a4fc89cbad85b94fc25599723c26", "score": "0.60882956", "text": "def test_input_with_other_char_than_numbers(self):\n input_with_letter = \"12A456789\"\n input_with_special_char = \"1234567+9\"\n self.assertEqual(input_is_valid(input_with_letter), False)\n self.assertEqual(input_is_valid(input_with_special_char), False)", "title": "" }, { "docid": "56ac19df2aa65ded6074ee395cad9ca0", "score": "0.6079081", "text": "def validate_input(input, opts):\n if not input.isnumeric():\n return False\n n = int(input)\n # Valid indices start at 1.\n if 0 >= n or len(opts) < n:\n return False\n return True", "title": "" }, { "docid": "5d8d4673526ee08d47a7128dde99336f", "score": "0.6073587", "text": "def 
test_bad_numeric(self):\n with pytest.raises(UnwrapFailedError):\n assert self._get(123).unwrap() # noqa: Z432", "title": "" }, { "docid": "5d8d4673526ee08d47a7128dde99336f", "score": "0.6073587", "text": "def test_bad_numeric(self):\n with pytest.raises(UnwrapFailedError):\n assert self._get(123).unwrap() # noqa: Z432", "title": "" }, { "docid": "007c7331fbe2f3cd681e1816b05474ec", "score": "0.60536397", "text": "def valid_number(data_in):\n\n try:\n data_out = float(data_in)\n except ValueError:\n data_out = np.nan\n return data_out", "title": "" }, { "docid": "c98c9aa97a845f00394863b636045a4d", "score": "0.6045029", "text": "def clean_price(price_str):\n try:\n try:\n price_float = float(price_str)\n except ValueError:\n price_float = float(price_str[1:])\n\n except ValueError:\n input('''\n \\n***** PRICE ERROR *****\n \\rThe Price should be a number without a currency label.\n \\rEx: 5.99\n \\rPress enter to try again.\n \\r**********************''')\n return\n else:\n return int(price_float * 100)", "title": "" }, { "docid": "14fdc92dd81e78350604bad01db769a1", "score": "0.6037946", "text": "def number_check(num_, len_, name):\r\n valid = True\r\n error_message = \"\"\r\n if not isinstance(num_, str):\r\n valid = False\r\n error_message = f\"Input for {name} should be string\"\r\n elif len(num_) > len_:\r\n error_message = f\"Max len of {name} is 5 char\"\r\n valid = False\r\n elif num_ == \"\":\r\n valid = True\r\n error_message = \"\"\r\n elif not re.search(r\"[\\d]+[-\\/.]*[a-zA-Z]*\", num_):\r\n error_message = f\"{name} should be: [0-9][ - . /][ a-zA-Z]\"\r\n valid = False\r\n return (valid, error_message, num_)", "title": "" }, { "docid": "c50f225ce7a6c2c0bbb43d961d11a695", "score": "0.6022796", "text": "def safeIntegerInput(prompt):\n inputString = input(prompt)\n try:\n number = int(inputString)\n return number\n except ValueError:\n print(\"Error in number format:\", inputString)\n return safeIntegerInput(prompt)", "title": "" }, { "docid": "a74a3a2ef82c494c9641d148a4982235", "score": "0.60155165", "text": "def num_in(prompt):\n while True:\n try:\n return float(input(prompt))\n except ValueError:\n print(\"Please enter a number!\")", "title": "" }, { "docid": "4fa113da113c983fc76d9e223a5e8bbc", "score": "0.5977137", "text": "def is_numeric(val):\n return isinstance(val, (int, float))", "title": "" }, { "docid": "d6ed4019bbc35fd0532d27a0bb85c331", "score": "0.59757483", "text": "def _control_input_type(token: Any) -> None:\n\n if not isinstance(token, str):\n raise TypeError(f\"Wrong input type, expected string, got {type(token)}\")\n if token == \"\":\n raise ValueError(\"Wrong input type: empty string\")", "title": "" }, { "docid": "b4b1c36b7ab629c340833b71e4c2a429", "score": "0.59514385", "text": "def valid_input(data_input):\n\n is_valid = False\n try:\n data_input = int(data_input)\n except ValueError:\n print(\"\"\"Vous n'avez pas saisi un nombre. 
Veuillez recommencer...\"\"\")\n else:\n is_valid = True\n\n return is_valid", "title": "" }, { "docid": "244a938499385171cf96d9abd510b47b", "score": "0.5948644", "text": "def is_valid(candidate):\n if candidate.isalnum():\n if candidate.isalpha():\n retval = True, 'letter'\n else:\n retval = True, 'number'\n else:\n try:\n val = float(candidate)\n retval = True, 'number'\n except ValueError:\n retval = False, 'neither'\n return retval", "title": "" }, { "docid": "7d7fd11c60fe458920c91fc3acfbceb6", "score": "0.5946673", "text": "def validate_input(input_val):\n try:\n float_input = float(input_val)\n return float_input\n except ValueError:\n return None", "title": "" }, { "docid": "086eb11cb087707c1fe32284d9f874ee", "score": "0.5944595", "text": "def isNum(x):\n\treturn isfloat(x) or isint(x)", "title": "" }, { "docid": "6c38583bc6449528fdbc08c7b8274a05", "score": "0.59426284", "text": "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "title": "" }, { "docid": "6c38583bc6449528fdbc08c7b8274a05", "score": "0.59426284", "text": "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "title": "" }, { "docid": "32f4f06874d843f6cf43c14f9d947e84", "score": "0.59352654", "text": "def test_with_non_numeric_nomor(self):\n form = self.form_data(\"ini non numeric\", 'p')\n self.assertFalse(form.is_valid())\n #get first form, and get nomor field.\n #then get it data with .as_data() method\n error = form[0]['nomor'].errors.as_data()[0]\n self.assertEqual(error.code, 'invalid input')", "title": "" }, { "docid": "3d228631096be14c2a4b580eb8423272", "score": "0.5913183", "text": "def check_int_type(rep):\n try:\n rep = int(rep)\n except ValueError:\n print(\"Be carefull, a number is expected!\")\n sys.exit(2)\n if rep < 0:\n print(\"The number must be positive!\")\n sys.exit(2)\n return rep", "title": "" }, { "docid": "7c068c3d073b28b614ee6b698c586cd4", "score": "0.5906656", "text": "def bad_input():\r\n print(\"Bad input, please try again\")", "title": "" }, { "docid": "82a6dae82c98970ee01eeac8f5de7cbf", "score": "0.5901525", "text": "def validate_int_or_basestring(option, value):\r\n if isinstance(value, (int, long)):\r\n return value\r\n elif isinstance(value, basestring):\r\n if value.isdigit():\r\n return int(value)\r\n return value\r\n raise TypeError(\"Wrong type for %s, value must be an \"\r\n \"integer or a string\" % (option,))", "title": "" }, { "docid": "1e69bfa53544896a15e4433b45768c98", "score": "0.5899116", "text": "def test_strip_nonnumeric(self):\n Valid = self.create_ValidIDs_class()\n Valid.strip_nonnumeric(keycol)\n assert False not in [\n isinstance(val.item(), int) for val\n in Valid.df[keycol].dropna().astype(int).values\n ]", "title": "" }, { "docid": "b49e2a308c7622e07d6f1c1014cee935", "score": "0.58989674", "text": "def is_number_tryexcept(s):\n try:\n if s == 'nan':\n return False\n float(s)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "b2b6a2303c6a7d7ab7fb619588d1e580", "score": "0.58912516", "text": "def valid(f):\n try: \n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False", "title": "" }, { "docid": "8862188df7f1d1ddce612604ae1542e3", "score": "0.5882695", "text": "def get_valid_number(question):\n value = input(question)\n valid = False\n while valid == False:\n try:\n while int(value) <= 0: # Reject values less than 0\n print('Number must be > 0')\n value = input(question)\n valid = True\n except ValueError: # Reject invalid 
values\n print('Invalid input; enter a valid number')\n value = input(question)\n return value", "title": "" }, { "docid": "905eec86a710c1304b90a2cc01c7d6e7", "score": "0.58655685", "text": "def is_numeric(self, val):\n val = str(val)\n if val.isnumeric():\n print(\"Numeric\")\n return True\n else:\n print(\"Not numeric\")\n return False", "title": "" }, { "docid": "ea11ed966a078ecd3aa1ed335d3c0c67", "score": "0.5863097", "text": "def test_validate_amount(self):\n\n self.assertEqual(util.validate_amount('1'), '1.00')\n self.assertEqual(util.validate_amount('10'), '10.00')\n self.assertEqual(util.validate_amount('10.'), '10.00')\n self.assertEqual(util.validate_amount('10.0'), '10.00')\n self.assertEqual(util.validate_amount('10.00'), '10.00')\n\n with self.assertRaises(ValueError):\n util.validate_amount('-')\n util.validate_amount('f')\n util.validate_amount('1f')\n util.validate_amount('10.f')\n util.validate_amount('10.0f')", "title": "" }, { "docid": "cf3328ad448ad953a3e4b1d3ceaed1d7", "score": "0.5861212", "text": "def get_integer_input(message):\n\n value_as_string = input(message)\n while not value_as_string.isdecimal() and value_as_string.isnumeric():\n print('The Input must be an integer')\n value_as_string = input(message)\n\n return float(value_as_string)", "title": "" }, { "docid": "4a781ea0eab1ed8d15d371a815601cc9", "score": "0.5852245", "text": "def enforce_some_alnum(value):\n \n if value:\n for c in value:\n x = ord(c)\n if lower_start <= x <= lower_end:\n return value\n elif upper_start <= x <= upper_end:\n return value\n elif digit_start <= x <= digit_end:\n return value\n \n msg = _('There must be at least one letter or digit.')\n raise forms.ValidationError(msg)", "title": "" }, { "docid": "254de535a7d13248483ac8003bd1c840", "score": "0.58470005", "text": "def _isint(word):\n return match('^[-+]?[0-9]+$',word)", "title": "" }, { "docid": "e2faf187d42b968c46984d1b9bcd654a", "score": "0.58445674", "text": "def _validate(value):\n\n try:\n return float(value)\n except ValueError as e:\n e.args = (e.args[0],'Value entered can not be convert to float')\n raise", "title": "" }, { "docid": "7118996b6c317ffcb8f19950f81f5315", "score": "0.58423126", "text": "def is_numeric(str, func=float):\n try:\n i = func(str)\n except:\n return False\n return True", "title": "" }, { "docid": "9b7cf775ad70be2dc5a918d7b559f974", "score": "0.5841039", "text": "def _replace_invalid(value):\n if isinstance(value, str):\n if 's' or '*' in value:\n value = np.nan\n return float(value)", "title": "" }, { "docid": "aa05f720b7ccdd6827c3aa8c1ffd92ac", "score": "0.58398867", "text": "def valid(f):\r\n try: \r\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\r\n except ArithmeticError:\r\n return False", "title": "" }, { "docid": "0741b12a6b2626793ab95e7d88625a84", "score": "0.5838307", "text": "def validate_integer(user_input): \r\n try:\r\n return int(user_input)\r\n except ValueError:\r\n print(\"\\nInvalid input, please enter an integer.\")\r\n return \"invalid_input\"", "title": "" }, { "docid": "7a1eb50a845310f17a5b4b39db6aa2c5", "score": "0.5822806", "text": "def ensure_int(param):\n try:\n param = re.sub(\"\\W\", \"\", param)\n return int(param)\n except ValueError:\n return None", "title": "" }, { "docid": "62bfe77285aa05c0cab1dab4a50aea20", "score": "0.582152", "text": "def _force_number(func):\n def validation_decorator(*args, **kwargs):\n params = list(args)\n for i, n in enumerate(params):\n\n if type(n) == list or type(n) == tuple:\n if len(n) == 0:\n params[i] = n = 0\n elif 
len(n) == 1:\n params[i] = n = n[0]\n\n if type(n) == str:\n if n == \"\":\n params[i] = n = 0\n continue\n\n if n is None:\n params[i] = 0\n continue\n\n if type(n) != float and type(n) != int:\n try:\n if '.' in n:\n params[i] = float(n)\n else:\n params[i] = int(n)\n except Exception:\n # raise ValueError(\"\")\n # js returns None instead\n pass\n\n args = tuple(params)\n try:\n return func(*args)\n except Exception:\n return None\n\n return validation_decorator", "title": "" }, { "docid": "8a5c7c81bbf800b02b81fc24144ffcf3", "score": "0.5821398", "text": "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "ba71e4dc2c51cd441a00e9b06cef9b71", "score": "0.57937133", "text": "def is_num(text):\n try:\n float(text)\n except:\n return False\n else:\n return True", "title": "" }, { "docid": "4d78e8957212dfcd9afdb9630e4907b1", "score": "0.57752436", "text": "def tests_api_returns_error_when_user_posts_non_numeric_data(self):\n url = reverse('fibonacci_api')\n data = {\"number\": \"not a numeric value\"}\n response = self.client.post(url, data, format='json')\n self.assertIn(403, response.data)", "title": "" }, { "docid": "9427c05561710ea2755a1eadaea25df9", "score": "0.5769555", "text": "def valid_rating(number_string):\n\n try:\n if int(number_string) >= 0:\n return True\n else:\n return False\n\n except ValueError:\n print(\"\\n It has to be a 0 or positive number!\")\n return False", "title": "" }, { "docid": "846615941ff04b31e13a84aa37ae18eb", "score": "0.5766522", "text": "def test_parsing_fails_alpha_utilization(self):\n with self.assertRaises(ValueError):\n self.alert_service._parse(\"(1234,a,1,1)\")", "title": "" }, { "docid": "ba091bbff0755c94c2887f75f58e9f60", "score": "0.57642835", "text": "def get_user_number_input() -> int:\n try:\n user_number = int(input())\n except ValueError as e:\n print(e.args)\n throw_error(\"Invalid input - must be natural number\")\n return user_number", "title": "" }, { "docid": "eb72567cd8a32810cc6bd8ca090593dc", "score": "0.5763005", "text": "def valid(f):\n try:\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False", "title": "" }, { "docid": "eb72567cd8a32810cc6bd8ca090593dc", "score": "0.5763005", "text": "def valid(f):\n try:\n return not re.search(r'\\b0[0-9]', f) and eval(f) is True\n except ArithmeticError:\n return False", "title": "" }, { "docid": "1b3d5640969014f9f2b4d1e2422c0ab9", "score": "0.57540035", "text": "def _check_angka(cls, indo):\n if not (isinstance(indo, int) or indo.isdigit()):\n raise ValueError((\"Angka harus berupa int, \"\n \"atau string berisi digit\"))\n return indo", "title": "" }, { "docid": "7aafc5a33b6985ba123c35b5e4eb43d6", "score": "0.5748548", "text": "def validate_int(question):\n while True:\n try:\n ans = int(input(question))\n return ans\n except ValueError: print(\"You must input an integer!\")", "title": "" }, { "docid": "2951267509945c6ebbd967629b88000c", "score": "0.57440245", "text": "def input_number():\n while True:\n number = input('Enter number:\\n')\n if number.isdigit():\n return number\n elif number.lower() == 'quit':\n return 0\n else:\n print (\"Invalid input, not number!\")", "title": "" }, { "docid": "93a137b204dca68cb85c813b4c08f5ba", "score": "0.57411116", "text": "def check_input(input):\n check = True\n \n if len(input) == 0:\n check = False\n else:\n for i in range(len(input)):\n if (input[i] < '0') or (input[i] > '9'):\n check = False\n\n return check", "title": "" }, { 
"docid": "fd5199c8319f17c89f427694273d1ca9", "score": "0.5732932", "text": "def isnumerical(x):\n try:\n xx = float(x)\n except TypeError:\n return False\n except ValueError:\n return False\n except Exception:\n return False\n else:\n return True", "title": "" }, { "docid": "6bc5fa366eefd68e6804331576d431fd", "score": "0.5728857", "text": "def test__validate_inputs_scalar_unknown(self):\n # Run / Assert\n with pytest.raises(ValueError):\n GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)", "title": "" }, { "docid": "5bb2787f0186a3ea32c739287c521b60", "score": "0.57250357", "text": "def get_positive_float(question):\n while True:\n try:\n answer = float(input(question))\n if not answer or answer < 0:\n message(\"Please use positive decimal values for the price\")\n answer = float(input(question))\n else:\n return answer\n except ValueError:\n message(\"Please enter a number\")", "title": "" }, { "docid": "e2f6309c2b141473df41489928cb7117", "score": "0.5724969", "text": "def test_input_not_valid(set_cli_sys_argv, capsys):\n sys.argv.append(\"101\")\n with pytest.raises(\n ValueError, match=\"favorite_number_less_than_100 has to be smaller than 100!\"\n ):\n main()", "title": "" }, { "docid": "bdb92c0ead8dc24ec1d1686ce70d504a", "score": "0.57186574", "text": "def validate_positive_integer(user_input):\r\n try:\r\n user_input = int(user_input)\r\n if user_input < 0:\r\n print(\"\\nPlease enter a non-negative integer.\")\r\n return \"invalid_input\"\r\n else:\r\n return user_input\r\n except ValueError:\r\n print(\"\\nInvalid input, please enter an integer.\")\r\n return \"invalid_input\"", "title": "" }, { "docid": "e33fe76ccc9500ad5b04bdce7a4676aa", "score": "0.5708761", "text": "def is_numeric(value, value_label):\n if not isinstance(value, numbers.Number):\n raise ValueError(\"`\" + value_label + \"` must be a number\")", "title": "" }, { "docid": "272c7bb13ae63bad0bfa8825783aecac", "score": "0.57053864", "text": "def _is_number(s):\n if len(s) > 0 and s[0] in ('-', '+'):\n return s[1:].isdigit()\n return s.isdigit()", "title": "" }, { "docid": "6d4ed990ee4d5984842c4705c9eed2cb", "score": "0.56966096", "text": "def validateFloat(number):\n try:\n float(number)\n except ValueError:\n print(\"You must input a number.\")\n return False\n finally: pass", "title": "" }, { "docid": "0c29741ef5d9b87282a32f0c2149b6c0", "score": "0.5689624", "text": "def _isNumeric(n):\n try:\n b = float(n)\n return True\n except:\n pass\n return False", "title": "" }, { "docid": "d5f478200fa7214b8af4ecde08d2dfb6", "score": "0.5689393", "text": "def chkNumeric(elm):\n \n try:\n test = float(elm)\n except:\n return False\n else:\n return True", "title": "" }, { "docid": "6584aed1609cb72fd4e120c3841fad40", "score": "0.5688048", "text": "def invalid_query(query):\n if isinstance(query, str):\n if re.search(r\"[0-9]\", query, re.UNICODE):\n return {'error': 'City name must not contain numbers!'}\n else:\n return False\n else:\n return {'error': 'City name must be a string!'}", "title": "" }, { "docid": "bfca7f290757bda402af33d9ca3f5839", "score": "0.5680651", "text": "def _ValidateASNum(self, asnum):\n try:\n asnumber = int(asnum)\n if asnumber <= 0:\n raise errors.ApilityioValueError(\n 'Not a valid ASNUM. Negative number.')\n return True\n except Exception:\n raise errors.ApilityioValueError(\n 'Not a valid ASNUM. 
It is a string.')", "title": "" }, { "docid": "ec5e117048f8c5379ca1e6f0f8b6a075", "score": "0.5671852", "text": "def numeric(input):\n try:\n junk = input + 0\n ret = True\n except:\n ret = False\n return ret", "title": "" }, { "docid": "6f0b56b6ea870e9ecc8af86a8b5188e8", "score": "0.56701636", "text": "def isNumPosStrValue(value):\r\n\r\n return (value and isinstance(value, basestring) and value.isdigit() and int(value) > 0) or (isinstance(value, int) and value > 0)", "title": "" }, { "docid": "6cf56f4950984eb46ebf58a55b392019", "score": "0.56622547", "text": "def ttest_typeerror2():\n with pytest.raises(TypeError, match=\"only numbers are allowed\"):\n add('a', 'b')", "title": "" }, { "docid": "c7585b7abc935a169e4241658298dfb3", "score": "0.5659002", "text": "def get_good_input():\r\n got_good_input = False\r\n while got_good_input == False:\r\n try:\r\n (input(\"Please enter a whole number: \"))\r\n got_good_input = True\r\n except ValueError:\r\n print(\"That was not a a whole number. Please try again.\")\r\n good = \"Valid\"\r\n return good", "title": "" }, { "docid": "0c4277b7fcabe93a2a5a0ff9f3c57b87", "score": "0.5651511", "text": "def invalid_number_error_message(context):\n assert_in(\"next-action: error: argument -n/--number: invalid number:\", context.next_action())", "title": "" }, { "docid": "0d7335d00c7a1ab679edb05aa9c27db3", "score": "0.5648884", "text": "def is_number(s): \n try:\n float(s)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "0d7335d00c7a1ab679edb05aa9c27db3", "score": "0.5648884", "text": "def is_number(s): \n try:\n float(s)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "ade3ddacfcc42a7e0fa04c2523dbb92f", "score": "0.56452686", "text": "def validateNumberGreaterThanZero(message):\n\n badInput = True\n while badInput:\n try:\n response = int(input(message))\n\n while response <= 0:\n response = int(input(\"Only positive amounts greater than 0 allowed. 
Try again: \"))\n badInput = False\n except ValueError:\n print(\"Only numbers allowed\")\n return response", "title": "" }, { "docid": "7d74fb7aa5c76cf04b3f127cde7b7f12", "score": "0.56437224", "text": "def parse_input(input_string):\n if input_string.strip() in {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"}:\n return int(input_string)\n else:\n print(\"Please enter a number from 1 to 6.\")\n raise SystemExit(1)", "title": "" }, { "docid": "dc038012329800ee77b7cb3b34695cdd", "score": "0.5638011", "text": "def isvalid(number):\n if (number is None) or isinstance(number, str) or not np.isfinite(number):\n return False\n else:\n return True", "title": "" }, { "docid": "25d94a0b284fb14650ca36d8676087bc", "score": "0.56313074", "text": "def inputValid(userInput):\r\n regex = \"\\d+[d,D]+\\d+[+,-]\\d+\"\r\n if re.search(regex, userInput):\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "7a781cd4137842820a863e1637af7ca4", "score": "0.5630836", "text": "def alpha_numeric(alpha_numeric_string: str) -> str:\n if alpha_numeric_string and re.match(\"^[a-zA-Z0-9\\-_]+$\",\n alpha_numeric_string):\n return alpha_numeric_string\n else:\n raise argparse.ArgumentTypeError(\"invalid alphaNumeric string\")", "title": "" }, { "docid": "e29670158462d4efee0d470ea51fe480", "score": "0.56270325", "text": "def test_passing_and_invalid_input_type(self, mock_input):\n invalid_input_type = 'not_int_nor_float'\n with self.assertRaises(InvalidInputType) as e:\n self.inputy.format_input(type=invalid_input_type)\n self.assertEqual(e.exception.input_type, invalid_input_type)", "title": "" }, { "docid": "c4a300d92395302fd694f58140d5c6b1", "score": "0.5624863", "text": "def type_checker(val):\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n return bool_filter(val)", "title": "" }, { "docid": "f898146e15270bef31adecc3af80f434", "score": "0.56119406", "text": "def is_number(word):\n try:\n n = float(word)\n return True\n except:\n return False", "title": "" }, { "docid": "6b2b43484161931967aa63168a505b93", "score": "0.56117135", "text": "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "de46893c8497dc7ef7383b84cd7ab349", "score": "0.56103134", "text": "def isnumeric(x):\n return isinstance(x, NUMERIC_TYPES)", "title": "" }, { "docid": "57c7dde702f7a5d6e65039a594537f38", "score": "0.5609447", "text": "def _numeric_type(param):\n return isinstance(param, (float, int)) or param is None", "title": "" } ]
684d4bb24cc2591d34e2cb4b07a3c8d5
where should we go next tick?
[ { "docid": "50f191c660eb379fbac6e791db39402f", "score": "0.0", "text": "def nextStep(self):\n options = self.surroundingValues()\n\n a = [self.expandTile(tile, tile.explored) for tile in options]\n\n m = min(a)\n maxIndexes = [i for i, j in enumerate(a) if j == m]\n\n\tprint a\t\n\tprint maxIndexes\n\tprint \n\n direction = random.choice(maxIndexes)\n\n # go from index in the array to an actual direction\n if direction == 0:\n return [0,1]\n elif direction == 1:\n return [1,0]\n elif direction == 2:\n return [0,-1]\n elif direction == 3:\n return [-1,0]\n\n raise ValueError('wat')\n return", "title": "" } ]
[ { "docid": "47e4d0cbcb6e027f154a33d964fd0780", "score": "0.76231015", "text": "def tick(self):", "title": "" }, { "docid": "47e4d0cbcb6e027f154a33d964fd0780", "score": "0.76231015", "text": "def tick(self):", "title": "" }, { "docid": "c6895c04d064960b1ae0bf4a4da83d20", "score": "0.75546896", "text": "def tick(self):\n pass", "title": "" }, { "docid": "c6895c04d064960b1ae0bf4a4da83d20", "score": "0.75546896", "text": "def tick(self):\n pass", "title": "" }, { "docid": "fa5b9e7cb1da1b9aaba893abc7f61a3e", "score": "0.71828836", "text": "def tick(self):\n\t\tprint('Must implement tick method')", "title": "" }, { "docid": "21052662d26063a1de0e4df86f5cc5b8", "score": "0.70880234", "text": "def tick_handler(self):", "title": "" }, { "docid": "cedc0f1436ad90cd3e7334501714b7db", "score": "0.6972772", "text": "def play_tick(self):\n pass", "title": "" }, { "docid": "9db75ec327a0b6e4adee278c6bea2aad", "score": "0.69448376", "text": "def Tick1S():\n#---------------------------------------------------------------\n pass", "title": "" }, { "docid": "f6b05c9c18c04aba7eb58245b11add60", "score": "0.6774041", "text": "def on_event_new_tick(self):", "title": "" }, { "docid": "1f9bb8d4246d809321fa4bf354427e6c", "score": "0.66825753", "text": "def tick(self, gamestate):", "title": "" }, { "docid": "6d7ea7315bd8ac95ba29105ee17c9b12", "score": "0.66772", "text": "def Tick100MS():\n#---------------------------------------------------------------\n pass", "title": "" }, { "docid": "7182d0ac848260612b814a633a7d7be2", "score": "0.66086775", "text": "def tick(self, sim):\n pass", "title": "" }, { "docid": "56326e14c9c27b9d96388c6abd44917c", "score": "0.65953845", "text": "def tick(self):\n raise NotImplementedError('You must implement this.')", "title": "" }, { "docid": "c06fea3f96b163280f0a338958457004", "score": "0.655352", "text": "def new_tick(self, msg):\n pass", "title": "" }, { "docid": "2c5f4fa64dc17db4472fbce28743b03f", "score": "0.65278935", "text": "def ticking(self):\n self.ready()", "title": "" }, { "docid": "b5c2edbd87a66dadfd9eebf02006e48a", "score": "0.6331", "text": "def on_tick() -> None:\n raise NotImplementedError", "title": "" }, { "docid": "95f2cdfeb1fe90c4ab87164d1e5ecccc", "score": "0.6325778", "text": "def advance_time():", "title": "" }, { "docid": "7e279f82fd8010fb5a69d595e2367ba6", "score": "0.6290593", "text": "def _tick(self):\n self._update_agent_stats()\n\n self.handle_call_events()\n\n if not self._shift_over:\n self.calculate()\n\n if self._current_time % self.REPORTING_INTERVAL == 0:\n self.print_report()\n\n self._create_checkpoint()", "title": "" }, { "docid": "7c8a8117e379e5f45abdbc2451cc30c0", "score": "0.6217834", "text": "def next_event(self):\n pass", "title": "" }, { "docid": "3808952aa046cafd9993d295316ead3a", "score": "0.6190031", "text": "def tick(self):\r\n self.times.append(timeit.default_timer())", "title": "" }, { "docid": "59b0fa88992b8fb1612d1356149a7082", "score": "0.6173794", "text": "def tick(self) -> bool:\n return False", "title": "" }, { "docid": "7fa46544c2dfee7177f828ed7d3ab244", "score": "0.6170556", "text": "def _ticktock2(self):\n ui = self._ui\n \n now = dt.utcnow()\n self._ui['tick_ends'].appendleft(now)\n \n counter = ui['counter']\n ui['counter'] = counter +1\n \n if ui['sync'] is not None:\n return\n \n \n self._ui['entries'].appendleft(now)\n \n if not counter:\n ui['epoch'] = now\n ui['first'] = now", "title": "" }, { "docid": "0438e92ffc4a6d87d73cfa42d3aa0055", "score": "0.614751", "text": "def tick():\r\n global tenths\r\n tenths = tenths + 
1\r\n format(tenths)", "title": "" }, { "docid": "4d8b0c207c4f4419126c23b03b12f48f", "score": "0.60896516", "text": "def tickle_me():\n\tload_til(500.0)\n\treturn \"Tickling you. (half-second)\"", "title": "" }, { "docid": "b4519cdf915e87d087aa394273ccc251", "score": "0.60748243", "text": "def tick(self, amount):\n pass", "title": "" }, { "docid": "0df62d77e1b3e3def45dcdc4eadd07d9", "score": "0.6068059", "text": "def performGameCycle(self):", "title": "" }, { "docid": "ce4ea50f299f888de2b575b8fcff76dd", "score": "0.6025078", "text": "def update(self) -> None:\n time_elapsed = min(self.time_to_tick, self.time_to_weave)\n self.time += time_elapsed\n\n # Handle DoTs\n if self.song_began and self.time % 3000 == self.tick_offset:\n for _ in range(DOTS_UP):\n self.on_tick()\n\n # Natural BL regeneration\n if self.bl_stacks < MAX_BL_STACKS:\n if self.bl_cd - time_elapsed <= 0:\n self.bl_stacks += 1\n\n # If we're still not at max stacks, lower the CD\n if self.bl_stacks < MAX_BL_STACKS:\n self.bl_cd = (self.bl_cd - time_elapsed) % 15000\n else:\n self.bl_cd = 15000 \n else:\n self.bl_cd -= time_elapsed\n\n # EA regeneration\n if self.ea_cd > 0:\n self.ea_cd = max(0, self.ea_cd - time_elapsed)", "title": "" }, { "docid": "4c1a43e5f913bd740df6d94ab87221b9", "score": "0.60147256", "text": "def called_every_second(self):", "title": "" }, { "docid": "3de663b1de6663b09b0ea6b0d7e17866", "score": "0.6002003", "text": "def tick(self):\n\n previous_hour = self.__hours\n Clock.tick(self)\n if (self.__hours < previous_hour):\n self.advance()", "title": "" }, { "docid": "03c71c964933df3c6b5d4c6bc2433e13", "score": "0.59970266", "text": "def periodic():", "title": "" }, { "docid": "94799c5ab462aca47cf0ef4b05f3014e", "score": "0.5991244", "text": "def start_tick(self):\n if self.iactive is not None and self.scenes:\n self.scenes[self.iactive].start_tick()", "title": "" }, { "docid": "2319c8a53be6def32ed9589ca70780fe", "score": "0.5975583", "text": "def step(self):\n self.incendio()", "title": "" }, { "docid": "e036c2d95a166ac7d43296b88b699573", "score": "0.5962916", "text": "def pause_tick(self):\n pass", "title": "" }, { "docid": "76e811b4cb6512f0e6d2d6f9233d648e", "score": "0.5962218", "text": "def start(self):\n self.stop()\n self.ticking = 1\n self.step()\n self.tickId = self.parent.after(self.tickDelay, self.start)", "title": "" }, { "docid": "24deef73ebf7990d05e975d50d7a5918", "score": "0.59597754", "text": "def step(self):\n self.schedule.step()", "title": "" }, { "docid": "24deef73ebf7990d05e975d50d7a5918", "score": "0.59597754", "text": "def step(self):\n self.schedule.step()", "title": "" }, { "docid": "54ae33ddbb2ab5a1b55c19d855444a19", "score": "0.5947314", "text": "def __calculate_next_tick(self, ref_time_stamp):\n # The remaining time is calculated as follows\n # The ref time marks the start of job execution\n # time consumed by job is current time minus the ref time\n # Thus the remaining time is the difference between the consumed time and the frequency time\n # If the job consumes more time than the frequency allows, the calculated time will be negative\n # In that case, the next_tick shall be set to zero forcing zero sleep and immediate execution of next iteration\n next_tick = self.__clock_tick_duration_seconds - (time.time() - ref_time_stamp)\n self.__next_tick = 0 if next_tick < 0 else next_tick", "title": "" }, { "docid": "bd98cdd2868727f877e1e9019a9d31a9", "score": "0.59169257", "text": "def tick():\n global t\n t = t + 1\n #print \"test\"+ str(t)\n format(t)", "title": "" }, { "docid": 
"8b92c543267613037f0e9b3fb23d1f70", "score": "0.59071636", "text": "def next_track(self):\r\n if self.is_active:\r\n self.sp.next_track()", "title": "" }, { "docid": "27118093759e678b4d56e476e035fa64", "score": "0.58784366", "text": "def tick(self, now=None):\r\n if now is None:\r\n # now won't be None in unit tests\r\n now = time.time()\r\n for event in events.TICK_EVENTS:\r\n period = event.period\r\n last_tick = self.ticks.get(period)\r\n if last_tick is None:\r\n # we just started up\r\n last_tick = self.ticks[period] = timeslice(period, now)\r\n this_tick = timeslice(period, now)\r\n if this_tick != last_tick:\r\n self.ticks[period] = this_tick\r\n events.notify(event(this_tick, self))", "title": "" }, { "docid": "c2dbd678720b9ab300afd2d7550eed53", "score": "0.58740884", "text": "def tick():\n global countdown\n countdown -= 1", "title": "" }, { "docid": "a03905a89dd14f70ca777ad674589f7a", "score": "0.58666366", "text": "def tick(self, ticktime_ms):\n\n self.next_locations = self.request_next_locations(ticktime_ms)\n\n return", "title": "" }, { "docid": "e98f5fbb08a37fc090fd1042582ecb75", "score": "0.5860322", "text": "def step(self):", "title": "" }, { "docid": "e98f5fbb08a37fc090fd1042582ecb75", "score": "0.5860322", "text": "def step(self):", "title": "" }, { "docid": "e98f5fbb08a37fc090fd1042582ecb75", "score": "0.5860322", "text": "def step(self):", "title": "" }, { "docid": "e98f5fbb08a37fc090fd1042582ecb75", "score": "0.5860322", "text": "def step(self):", "title": "" }, { "docid": "d0067d3090d07677b31084c970512d39", "score": "0.58435094", "text": "def next(self):\n for d in self.getdatanames(): # Looping through all symbols\n pos = self.getpositionbyname(d).size or 0\n if pos == 0: # Are we out of the market?\n # Consider the possibility of entrance\n # Notice the indexing; [0] always means the present bar, and [-1] the bar immediately preceding\n # Thus, the condition below translates to: \"If today the regime is bullish (greater than\n # 0) and yesterday the regime was not bullish\"\n if self.regime[d][0] > 0 and self.regime[d][-1] <= 0: # A buy signal\n self.buy(data=self.getdatabyname(d))\n\n else: # We have an open position\n if self.regime[d][0] <= 0 and self.regime[d][-1] > 0: # A sell signal\n self.sell(data=self.getdatabyname(d))", "title": "" }, { "docid": "80563f63bb74366790fe901deb7809c9", "score": "0.58254075", "text": "def lasttick(self):\n if self.__lasttick:\n return self.__lasttick\n else:\n return 0", "title": "" }, { "docid": "f88dc84e558db338cab6d32a89b41386", "score": "0.58142823", "text": "def nxt_followed(self, handle, nxt_handle, x_axis, y_axis):\n print \"moving nxt to next point of the path\"\n return {'ack': 'got followed'}", "title": "" }, { "docid": "70d8a735611647b0debb6be4f385b9a0", "score": "0.58093745", "text": "def on_tick(self, tick: TickData):\n if not tick.last_price:\n return\n\n # update tick\n self.last_tick = tick\n\n # update vline for different pairs\n #self.vg[tick.vt_symbol].update_tick(tick=tick)\n #print(tick)\n #print(self.vg[tick.vt_symbol].vline)\n\n #self.last_vline = self.vg[self.vt_symbol].vline\n\n #for v in self.vg.vline_buf:\n # if self.vg.vline_buf[v].volume < 0.9*v:\n # return\n\n \"\"\"\"\"\"\n #self.last_tick = tick\n #self.write_log(f\"Tick: {tick.last_price} TS: {tick.datetime}\")", "title": "" }, { "docid": "177287ab7295d082e24b131a86bcfeae", "score": "0.58038265", "text": "def tick(self):\n if self.stopped:\n return\n \n last_time = self.current_time\n last_clock = self.last_clock_time\n \n # first, determine new 
time\n clock_time = time()\n if last_clock is None:\n last_clock = clock_time\n diff = clock_time - last_clock\n new_time = (self.direction * self.rate * diff) + last_time\n \n # update the time\n self.last_clock_time = clock_time\n self.current_time = new_time\n \n # now we make sure we're in bounds (we don't do this before, since it\n # can cause us to skip events that are at boundaries.\n self.current_time = max(self.current_time, 0)\n self.current_time = min(self.current_time, self.length())", "title": "" }, { "docid": "836e368944edb41f943f5ffde76ee6a9", "score": "0.57948995", "text": "def tick(self):\n self.time += 1\n self.calculate_weights()\n if not self.time % self.rotation_time:\n self.rotate()", "title": "" }, { "docid": "6123eadcf90a2a9bf107ccb4c90c8243", "score": "0.5779648", "text": "def increment_time(self):", "title": "" }, { "docid": "306a240143120063ccbcf19a543936df", "score": "0.577839", "text": "def on_tick(self, tick: TickData):\n if (\n self.last_tick_time\n and self.last_tick_time.minute != tick.datetime.minute\n ):\n bars = {}\n for vt_symbol, bg in self.bgs.items():\n bars[vt_symbol] = bg.generate()\n self.on_bars(bars)\n\n bg: BarGenerator = self.bgs[tick.vt_symbol]\n bg.update_tick(tick)\n\n self.last_tick_time = tick.datetime", "title": "" }, { "docid": "bef3c65d982c6ee104b14f79694ed1ff", "score": "0.5773426", "text": "def tick(self):\n self._wrap_lock(self._tick)", "title": "" }, { "docid": "c457d216db7375602c3d350accf3248f", "score": "0.5770214", "text": "def tick(self):\n t = self.time()\n self._dt = t - self._t\n self._t = t\n self._frame_times.append(self._dt)", "title": "" }, { "docid": "befbaa99edbe8f05a04b4a98d7bdb66e", "score": "0.5769967", "text": "def run_now(self):\n self.last_check = 0", "title": "" }, { "docid": "abdf57dc2a7f86e7343cf784cb263754", "score": "0.5767029", "text": "def next(self):\n if not self.position:\n if self.close[0] > self.ketler.upper[0]:\n self.order = self.order_target_percent(target=0.95)\n else:\n if self.close[0] < self.ketler.expo[0]:\n self.order = self.sell()", "title": "" }, { "docid": "3b73c601130a0080df06a82800d61ec2", "score": "0.5761602", "text": "def next(self):\n for d in self.getdatanames(): # Looping through all symbols\n pos = self.getpositionbyname(d).size or 0\n if pos == 0: # Are we out of the market?\n # Consider the possibility of entrance\n # Notice the indexing; [0] always means the present bar, and [-1] the bar immediately preceding\n # Thus, the condition below translates to: \"If today the regime is bullish (greater than\n # 0) and yesterday the regime was not bullish\"\n if self.sma[d][0] * self.params.var1 > self.getdatabyname(d).high[0]: # A buy signal\n self.order_target_percent(data=self.getdatabyname(d), target=0.98)\n\n else: # We have an open position\n if self.getdatabyname(d).close[-1] * self.params.var2 <= self.getdatabyname(d).high[0]: # A sell signal\n self.order_target_percent(data=self.getdatabyname(d), target=0)", "title": "" }, { "docid": "bfedda1685cc4fe553c253b697ca3794", "score": "0.57515013", "text": "def do_timestep(self, ticks) -> None:\n if ticks == 0:\n return\n\n timeplus = float(ticks) / float(self.ticks_per_beat)\n if not self._pos_feed:\n self._acumulated_time += ticks\n if self._acumulated_time >= self._min_time_division and not self._pos_feed:\n self.position += 1\n self._acumulated_time = 0\n self._pos_feed = True\n\n if self._last_note_on:\n self.distance = timeplus\n else:\n self.distance += timeplus\n self._last_note_on = False\n\n for note in self.active_notes:\n 
note.length += timeplus", "title": "" }, { "docid": "f26b04ba0299607b110f6af8bf9f2017", "score": "0.57501334", "text": "def next(self):\n\n # Determine which set of moving averages to use\n curdate = self.datetime.date(0)\n dtidx = None # Will be index\n # Determine which period (if any) we are in\n for sd, ed in self.date_combos:\n # Debug output\n # print('{}: {} < {}: {}, {} < {}: {}'.format(\n # len(self), sd, curdate, (sd <= curdate), curdate, ed, (curdate <= ed)))\n if sd <= curdate and curdate <= ed:\n dtidx = (sd, ed)\n # Debug output\n # print('{}: the dtixdx is {}, and curdate is {};'.format(len(self), dtidx, curdate))\n for d in self.getdatanames(): # Looping through all symbols\n pos = self.getpositionbyname(d).size or 0\n if dtidx is None: # Not in any window\n break # Don't engage in trades\n if pos == 0: # Are we out of the market?\n # Consider the possibility of entrance\n # Notice the indexing; [0] always mens the present bar, and [-1] the bar immediately preceding\n # Thus, the condition below translates to: \"If today the regime is bullish (greater than\n # 0) and yesterday the regime was not bullish\"\n '''if self.slowma[d][dtidx][0] > self.getdatabyname(d).close[0]: # A buy signal\n self.sell(data=self.getdatabyname(d), size=1000)\n\n else: # We have an open position\n if self.fastma[d][dtidx][0] < self.getdatabyname(d).close[0]: # A sell signal\n self.close(data=self.getdatabyname(d), size=1000)\n '''\n if self.sma[d][dtidx][0] * self.var1[d][dtidx] > self.getdatabyname(d).high[0]: # A buy signal\n self.order_target_percent(data=self.getdatabyname(d), target=0.98)\n\n else: # We have an open position\n if self.getdatabyname(d).close[-1] * self.var2[d][dtidx] <= self.getdatabyname(d).high[0]: # A sell signal\n self.order_target_percent(data=self.getdatabyname(d), target=0)", "title": "" }, { "docid": "65b0875fe77e17a3b8d75adf58219632", "score": "0.57475924", "text": "def tick(self):\n\n # next historical order to be sent\n\n oborder = self.hist_orders[self.ob_idx]\n\n # if I have queued orders\n if self.my_queue:\n # if my order reaches the orderbook before the next historical order\n if self.my_queue[0].timestamp < oborder[self.col_idx['timestamp']]:\n my_order = self.my_queue.popleft()\n self._send_to_orderbook(my_order, is_mine=True)\n self.remove_vol_in_queue(my_order[self.col_idx['uid']])\n return\n\n # otherwise sent next historical order\n self._send_historical_order(oborder)", "title": "" }, { "docid": "f6d140c7056e7f3f75b4a69e04f6b25d", "score": "0.5744944", "text": "def _on_tick(self, args: Tuple, kwargs: Dict) -> None:\n pass", "title": "" }, { "docid": "6db101e376fc8c48eab7fbaace6103a5", "score": "0.5743131", "text": "def tick(self):\n while self.active:\n for coil in self.ball_search_coils:\n self.pop_coil(coil)\n yield Timing.secs(self.machine.config['ball_search']\\\n ['secs between ball search coils'])\n yield Timing.secs(self.machine.config['ball_search']\\\n ['secs between ball search rounds'])\n # todo do we have to deal with switches that might be hit due to these\n # coils firing?\n # todo should the above code also look for self.active?", "title": "" }, { "docid": "9350ee10aabc339a2aa5e3462385fcb8", "score": "0.5741916", "text": "def _ticktock(self):\n \n ui = self._ui\n now = dt.utcnow() \n ui['ticks'].appendleft(now)\n \n if ui['sync'] is None:\n return\n \n \n counter = ui['counter']\n if not counter:\n self._ticktock_1st(now)\n \n \n entry = self._synced(now)\n ui['entries'].appendleft(entry)\n \n if not counter:\n ui['first'] = entry", 
"title": "" }, { "docid": "78689bed249ecaf6cf004b3f48055669", "score": "0.5733522", "text": "def on_tick(self, tick: TickData):\n self.bg5.update_tick(tick)", "title": "" }, { "docid": "d994dae4eef2b1336265ae6a2f02b241", "score": "0.5730125", "text": "def step(self):\r\n ...", "title": "" }, { "docid": "c0cd4dca1c2f1b00a602b29bb1107f20", "score": "0.5726646", "text": "def do_tick(self):\n for member in self.members:\n member.tick(self.tick_count * swimprotocol.SWIM.T)\n self.tick_count = self.tick_count + 1", "title": "" }, { "docid": "17715d50d4aba1afe1b7be37b19195d5", "score": "0.57241493", "text": "def running():", "title": "" }, { "docid": "67d1fd75b3152adee56252abf0c94f95", "score": "0.57208514", "text": "def __tick(self):\n def set_current_time():\n now = datetime.datetime.now()\n self.__hour = now.hour\n self.__minute = now.minute\n self.__second = now.second\n self.notifyAll()\n\n self.__interval = Interval(set_current_time, 3)", "title": "" }, { "docid": "1dc3ca2a84edb0bea255cf1365c4eea3", "score": "0.57200813", "text": "def tick(cls):\n cls.instance_.finishActive()", "title": "" }, { "docid": "9bd74a9679855c9b70878c353cce289a", "score": "0.5699552", "text": "def timer():", "title": "" }, { "docid": "16fee983405830fbcdc81e6215b804d8", "score": "0.56920654", "text": "def run_logic(self):\n\n self._clock.tick(60)", "title": "" }, { "docid": "2b5edd0393607d05f14af1131611718e", "score": "0.5676555", "text": "def _schedule_next_flush(self):\n #time until the hour rolls over:\n vals = time.localtime()\n timeLeft = 60*(60-vals[4]) - vals[5]\n timeLeft += 30\n ##DEBUG:\n #timeLeft = 30\n self._next_flush_event = reactor.callLater(timeLeft, self._flush_event_logs)", "title": "" }, { "docid": "8ed86ba25084a59523d90cb5630967a9", "score": "0.56755614", "text": "def advance(self):\n None", "title": "" }, { "docid": "38c78989fdeeec4265bac4b2258dbb5c", "score": "0.5666694", "text": "def tick(self):\n if self.currentTask is not None:\n self.timeRemaining -= 1\n if(self.timeRemaining <= 0):\n self.currentTask = None\n self.timeRemaining = 0 # just in case it goes negative", "title": "" }, { "docid": "0e43b0f7b0d518551bfcc7bfbfe27a7c", "score": "0.56557053", "text": "def tick(self):\r\n if self.seconds == 59:\r\n self.seconds = 0\r\n if (self.minutes == 59):\r\n self.minutes = 0\r\n self.hours = 0 if self.hours==23 else self.hours + 1\r\n else:\r\n self.minutes += 1;\r\n else:\r\n self.seconds += 1;", "title": "" }, { "docid": "681682eaa53da71329813e7f88ce4fd5", "score": "0.56551063", "text": "def loop():", "title": "" }, { "docid": "3e5df73a786e2c9fc2c6adebc1e083bd", "score": "0.5647457", "text": "def on_epoch_end(self):\n # TODO: Some sort of shuffling?\n return", "title": "" }, { "docid": "7b7afbfb0e0595e84e127c27b9706c8f", "score": "0.56371874", "text": "def start_epoch(self):\r\n pass # for this type of con. 
method we don't do anything\r", "title": "" }, { "docid": "ef8176ebb5b0e3b557bf09a6d76dc29f", "score": "0.5635328", "text": "def timer_tick(self, *args):\n # Generate a new number and increment the tick count\n new_val = self._generator()\n self.num_ticks += len(new_val)\n\n # grab the existing data, truncate it, and append the new point.\n # This isn't the most efficient thing in the world but it works.\n cur_data = self.viewer.data\n # new_data = np.hstack((cur_data[-self.max_num_points+1:], [new_val]))\n new_data = np.r_[cur_data[-self.max_num_points+1:], new_val]\n new_index = np.arange(self.num_ticks - len(new_data) + 1,\n self.num_ticks + 0.01)\n\n self.viewer.index = new_index\n self.viewer.data = new_data\n return", "title": "" }, { "docid": "ccc7a219d5e41f50ea4f1837f24a6667", "score": "0.5634773", "text": "def step(self):\n pass", "title": "" }, { "docid": "ccc7a219d5e41f50ea4f1837f24a6667", "score": "0.5634773", "text": "def step(self):\n pass", "title": "" }, { "docid": "79cfbcb7e32fbf9fa1d3aa530ba59a00", "score": "0.5633921", "text": "def step(self):\n info = self.r.tolist()+self.v.tolist()\n self.v += (self.brain.feedForward(info)[-1]-np.array([.5,.5]))*dt\n self.r += self.v*dt", "title": "" }, { "docid": "03be9d8492481dd1a8d37c35b4fd9a5b", "score": "0.5627474", "text": "def Schedule_Next(self,Now):\n tf = max([self.orig.travel_factor, self.dest.travel_factor])\n #delta_t = int(abs(round(RNG.Normal(self.T*tf, self.T_std), 0)))\n delta_t = np.random.poisson(self.T*tf)\n Flight_Generator.Schedule_Flight(Now+delta_t, self)", "title": "" }, { "docid": "f5419b268d48246c4068563aed7dc572", "score": "0.56225395", "text": "def tick_tock():\n text = \"Tock\"\n if time.localtime()[5] % 2 == 0:\n text = \"Tick\"\n print(text)", "title": "" }, { "docid": "aa19df868a4272c7dc8bd6f7423164f0", "score": "0.56224823", "text": "def step(self, dt):\n pass", "title": "" }, { "docid": "96f9e3e7219c2872b4f978aaf1814982", "score": "0.5615514", "text": "def timer_tick(self, *args):\r\n # Generate a new number and increment the tick count\r\n new_val = self._generator(self.mean, self.stddev)\r\n self.num_ticks += 1\r\n\r\n # grab the existing data, truncate it, and append the new point.\r\n # This isn't the most efficient thing in the world but it works.\r\n cur_data = self.viewer.data\r\n new_data = np.hstack((cur_data[-self.max_num_points+1:], [new_val]))\r\n new_index = np.arange(self.num_ticks - len(new_data) + 1,\r\n self.num_ticks + 0.01)\r\n\r\n self.viewer.index = new_index\r\n self.viewer.data = new_data\r\n return", "title": "" }, { "docid": "bd906130705ddfdbea59e2491536b644", "score": "0.56114537", "text": "def redoNext():", "title": "" }, { "docid": "d418af1fd1523b5fe28559bce750a109", "score": "0.56098574", "text": "def get_on_next(self):", "title": "" }, { "docid": "18df9770ccd94aae29d3caf35c637a1a", "score": "0.5608524", "text": "def running(self):", "title": "" }, { "docid": "9d03589020a362406d0968edcc4f34ed", "score": "0.5605621", "text": "def tick(self):\n if self._current_run is None:\n self._current_run = asyncio.ensure_future(self.run_once())\n asyncio.get_event_loop().run_until_complete(self._current_run)", "title": "" }, { "docid": "999ba46dcb5f3c5346796cba77c004ca", "score": "0.5600143", "text": "def _next_iteration(self):\n pass", "title": "" }, { "docid": "e315f73351409b3a6e3f9ecbfd239c1c", "score": "0.5595848", "text": "def test_stream_all_ticks(self):\n # Stream to Tick #1 (GOOG)\n self.price_handler.stream_next_tick()\n self.assertEqual(\n 
self.price_handler.tickers[\"GOOG\"][\"timestamp\"].strftime(\n \"%d-%m-%Y %H:%M:%S.%f\"\n ), \n \"01-02-2016 00:00:01.358000\"\n )\n self.assertEqual(\n self.price_handler.tickers[\"GOOG\"][\"bid\"], \n Decimal(\"683.56000\")\n )\n self.assertEqual(\n self.price_handler.tickers[\"GOOG\"][\"ask\"], \n Decimal(\"683.58000\")\n )\n\n # Stream to Tick #2 (AMZN)\n self.price_handler.stream_next_tick()\n self.assertEqual(\n self.price_handler.tickers[\"AMZN\"][\"timestamp\"].strftime(\n \"%d-%m-%Y %H:%M:%S.%f\"\n ), \n \"01-02-2016 00:00:01.562000\"\n )\n self.assertEqual(\n self.price_handler.tickers[\"AMZN\"][\"bid\"], \n Decimal(\"502.10001\")\n )\n self.assertEqual(\n self.price_handler.tickers[\"AMZN\"][\"ask\"], \n Decimal(\"502.11999\")\n )\n\n # Stream to Tick #3 (MSFT)\n self.price_handler.stream_next_tick()\n self.assertEqual(\n self.price_handler.tickers[\"MSFT\"][\"timestamp\"].strftime(\n \"%d-%m-%Y %H:%M:%S.%f\"\n ), \n \"01-02-2016 00:00:01.578000\"\n )\n self.assertEqual(\n self.price_handler.tickers[\"MSFT\"][\"bid\"], \n Decimal(\"50.14999\")\n )\n self.assertEqual(\n self.price_handler.tickers[\"MSFT\"][\"ask\"], \n Decimal(\"50.17001\")\n )\n\n # Stream to Tick #10 (GOOG)\n for i in range(4, 11):\n self.price_handler.stream_next_tick()\n self.assertEqual(\n self.price_handler.tickers[\"GOOG\"][\"timestamp\"].strftime(\n \"%d-%m-%Y %H:%M:%S.%f\"\n ), \n \"01-02-2016 00:00:05.215000\"\n )\n self.assertEqual(\n self.price_handler.tickers[\"GOOG\"][\"bid\"], \n Decimal(\"683.56001\")\n )\n self.assertEqual(\n self.price_handler.tickers[\"GOOG\"][\"ask\"], \n Decimal(\"683.57999\")\n )\n\n # Stream to Tick #20 (GOOG)\n for i in range(11, 21):\n self.price_handler.stream_next_tick()\n self.assertEqual(\n self.price_handler.tickers[\"MSFT\"][\"timestamp\"].strftime(\n \"%d-%m-%Y %H:%M:%S.%f\"\n ), \n \"01-02-2016 00:00:09.904000\"\n )\n self.assertEqual(\n self.price_handler.tickers[\"MSFT\"][\"bid\"], \n Decimal(\"50.15000\")\n )\n self.assertEqual(\n self.price_handler.tickers[\"MSFT\"][\"ask\"], \n Decimal(\"50.17000\")\n )\n\n # Stream to Tick #30 (final tick, AMZN)\n for i in range(21, 31):\n self.price_handler.stream_next_tick()\n self.assertEqual(\n self.price_handler.tickers[\"AMZN\"][\"timestamp\"].strftime(\n \"%d-%m-%Y %H:%M:%S.%f\"\n ), \n \"01-02-2016 00:00:14.616000\"\n )\n self.assertEqual(\n self.price_handler.tickers[\"AMZN\"][\"bid\"], \n Decimal(\"502.10015\")\n )\n self.assertEqual(\n self.price_handler.tickers[\"AMZN\"][\"ask\"], \n Decimal(\"502.11985\")\n )", "title": "" }, { "docid": "7ce1beb83e693ec8647857b8f4847843", "score": "0.5594893", "text": "def tick(self, now, elapsed):\n raise NotImplementedError", "title": "" }, { "docid": "bb3ac77e3758dfd85ca82d901cf43327", "score": "0.55913913", "text": "def timestep(self, simsystem, osc, obs):\n pass", "title": "" }, { "docid": "436b9f1e19b0555a53a94e5f1958a500", "score": "0.5589302", "text": "def on_tick(self, tick: TrainingTickPacket) -> Optional[Grade]:\n pass # Continue by default", "title": "" }, { "docid": "c5313eced743ca08b3b75da340cf157e", "score": "0.5583983", "text": "def tick(self):\n self.push_orders_from_queue_to_env()\n self.remove_orders_from_env()", "title": "" }, { "docid": "6b6dad85ce05249c116881412581c003", "score": "0.5583846", "text": "def position(self, time):", "title": "" }, { "docid": "fb60a891cf2bb559f91eb210c9d05282", "score": "0.55787194", "text": "def test_Tickable_tickAccumGetsProperValueAfter1Second( self ):\n\t\timport time\n\t\tself.t.tick()\n\t\tself.assertEqual( 
self.t.tickAccum.seconds, 0, \"Should be 0\" )\n\t\ttime.sleep(1)\n\t\tself.t.tick()\n\t\tself.assertEqual( self.t.tickAccum.seconds, 1, \"Should be about 1\" )", "title": "" }, { "docid": "c41626095a41ac5684951f26cffbd8d5", "score": "0.5573586", "text": "def end_tick(self):\n if self.iactive is not None and self.scenes:\n self.scenes[self.iactive].end_tick()", "title": "" } ]
ba9bf18738bbfb8d5804a416357f11d0
Read in a schema definition for our part of the config and hook it
[ { "docid": "4a9c2e5bea03e0bb0804ad72647ec2bf", "score": "0.0", "text": "def extend_schema(self, parent_schema):\n schema_path = os.path.join(self.dogen.pwd, \"schema\", \"cct_schema.yaml\")\n schema = {}\n with open(schema_path, 'r') as fh:\n schema = yaml.safe_load(fh)\n\n parent_schema['map']['cct'] = schema", "title": "" } ]
[ { "docid": "20a2e7a8484156b260fdb229de1e1660", "score": "0.7257588", "text": "def load_schema(self):\n raise NotImplementedError", "title": "" }, { "docid": "a12bfee9e3094a2afc066b5de193e7d9", "score": "0.72208834", "text": "def input_config_schema(self) -> IDefinitionConfigSchema:", "title": "" }, { "docid": "1aaa12eeb8fcd2e69a5b255fd82fc935", "score": "0.6990748", "text": "def getSchema():", "title": "" }, { "docid": "86c202456a8db932152b710cb124848e", "score": "0.69262326", "text": "def define_schema(self) -> None:\n pass", "title": "" }, { "docid": "bdbb3cb8831476d260a1fca630f0fdef", "score": "0.64331645", "text": "def load_schema(self, path):\n raise NotImplementedError", "title": "" }, { "docid": "1a97a353a946c17db1836d1bb14601e4", "score": "0.642445", "text": "def load_schema(self):\n query = self.config.get_schemas\n self.cursor.execute(query)\n return self._get_result()", "title": "" }, { "docid": "fdf8cfc9fcfff47bebae3f926f2c607d", "score": "0.6417863", "text": "def test_read_schema(self):\n assert set(self.s.fields.keys()) \\\n == set(['boolean_field',\n 'int_field',\n 'text_field',\n 'location_field'])\n assert self.s.default_field_name == 'text_field'\n assert self.s.unique_key == 'int_field'", "title": "" }, { "docid": "455d15d2395d68c8573c2b9f5e132008", "score": "0.6404794", "text": "def _load_schema(schema_name: str) -> dict:\n with open(os.path.join(os.path.dirname(__file__), 'schemas', schema_name)) as f:\n return json.load(f)", "title": "" }, { "docid": "f690c0537ef0a9442abab440818ed322", "score": "0.6356305", "text": "def get_schema(self, walk, schema, **kwargs):\n raise NotImplemented", "title": "" }, { "docid": "d20eb5b3c38602a6e5ca0948f66be72d", "score": "0.63054293", "text": "def config_schema(self) -> Optional[JsonObjectSchema]:", "title": "" }, { "docid": "607309fde1470a9863f3b74f96f77421", "score": "0.6304943", "text": "def get_schema():\n global FULL_SCHEMA\n if FULL_SCHEMA:\n return FULL_SCHEMA\n full_schema = {\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'id': 'cloud-config-schema', 'allOf': []}\n\n configs_dir = os.path.dirname(os.path.abspath(__file__))\n potential_handlers = find_modules(configs_dir)\n for (_fname, mod_name) in potential_handlers.items():\n mod_locs, _looked_locs = importer.find_module(\n mod_name, ['cloudinit.config'], ['schema'])\n if mod_locs:\n mod = importer.import_module(mod_locs[0])\n full_schema['allOf'].append(mod.schema)\n FULL_SCHEMA = full_schema\n return full_schema", "title": "" }, { "docid": "204df02430bd730be21dcc3c16ad8018", "score": "0.6289603", "text": "def my_schema_callback(schema):\n for k in [k for k in schema['properties'] if k.startswith('_')]:\n del schema['properties'][k]\n for k in [k for k in schema['properties'] if '/' in k]:\n schema['properties'][k.split('/')[1]] = schema['properties'][k]\n del schema['properties'][k]\n for k in [k for k in schema['properties'] if k.endswith('_id')]:\n if k in schema['required'] or k in schema['systemProperties']:\n continue\n schema['properties'][k.replace('_id', '')] = {'type': ['string', 'null']} # schema['properties'][k]\n del schema['properties'][k]\n # adds extra properties not found in\n schema['category'] = 'bcc extention'\n if 'sizeaxis1' in schema['properties']:\n schema['properties']['lesion'] = {'$ref': '_definitions.yaml#/to_one'}\n return schema", "title": "" }, { "docid": "c49bc1f8afff893a0c9c2bf4eea9fe2d", "score": "0.6245679", "text": "def loadDBSchema(self):\n\n\t\tprint 'loading database schema: ', self.schemaFile\n\t\twith 
open(self.schemaFile) as json_file:\n\t\t\tself.schema = json.load(json_file)\n\t\tjson_file.close()\n\t\tassert(self.schema['nTables'] == len(self.schema['tablenames']))\n\t\t# print(self.schema)", "title": "" }, { "docid": "ec5600af127a9a762748b0562e7bfb6e", "score": "0.6215938", "text": "def _setSchema(self):\n try:\n data = self.request(\"admin/luke\", parameters={'show': \"schema\"})\n\n self._setTypes(data['schema']['types'])\n self.fields = {}\n self._setFields(data['schema']['fields'], self.fields)\n self.dynamicFields = {}\n self._setFields(data['schema']['dynamicFields'], self.dynamicFields)\n\n #Set copySources\n for (fieldname, fieldspecs) in data['schema']['fields'].iteritems():\n for copyfieldname in fieldspecs['copySources']:\n self.fields[fieldname].copySources.append(self.fields[copyfieldname])\n\n #Could we have copyfield for dynamic fields? #FIXME\n\n self.id_field = data['schema']['uniqueKeyField']\n except KeyError, e:\n raise SOLRResponseFormatError, \"Wrong response format: %s %s - %s\" % (KeyError, e, data)", "title": "" }, { "docid": "bafb90925181f38ae9747a47b64098c0", "score": "0.62053746", "text": "def __init__(self, schema: str):\n if os.path.isfile(schema):\n self._schema = open(schema, 'r').read()\n else:\n self._schema = schema", "title": "" }, { "docid": "440e074d4611c4d29ae4aa82d79a102e", "score": "0.6199084", "text": "def _schema(self, path, obj, app):\n if path.startswith('#/definitions'):\n last_token = jp_split(path)[-1]\n if app.version == '1.2':\n obj.update_field('name', scope_split(last_token)[-1])\n else:\n obj.update_field('name', last_token)", "title": "" }, { "docid": "87b220f693360af13cb2b927af1ddf4d", "score": "0.61490417", "text": "def visit_schema(self, schema):\n pass", "title": "" }, { "docid": "b641de252de8d1fdd5216b3e56923568", "score": "0.6137045", "text": "def load_schema(self, schema):\n self.schema = load_json(schema)\n validate_schema(self.schema)\n self.schema_nx = load_schema_into_networkx(self.schema)", "title": "" }, { "docid": "e88acdb65caf3733d682ebe59491eb94", "score": "0.6128808", "text": "def _get_schema(self, data):\n return self.schema()", "title": "" }, { "docid": "c0903ca2e5df6a32c23a6cf545428dca", "score": "0.60882545", "text": "def read_schema_section(self):\n\n # The next tag must be 'SCHEMA'\n tag_line = self.read_line()\n if tag_line is None:\n raise Exception(\"Unexpected EOF, should be SCHEMA tag\")\n (tag, _) = self.parse_tag_line(tag_line)\n if tag != 'SCHEMA':\n raise Exception(\n f\"Unrecoginized tag line_number '{tag_line}', should be SCHEMA\"\n )\n\n # Read the SCHEMA records until the next TAG_TOKEN\n schema_lines = []\n while True:\n line = self.read_line()\n if line is None:\n break\n (tag, _) = self.parse_tag_line(line)\n if tag in self.TAG_TOKENS:\n if tag in ('DATA', 'ERRORS', 'EXISTING_SCHEMA', 'SCHEMA'):\n raise Exception(f\"Unexpected {tag} tag\")\n self.push_back(line)\n break\n schema_lines.append(line)\n\n return ''.join(schema_lines)", "title": "" }, { "docid": "200ae8776af4253ec8ecb5171e6d7ea2", "score": "0.6078499", "text": "def _read_schema(self):\n cache_filename = os.path.join(\n CACHE_ROOT, \"%s.smt\" % self._representation)\n log.info(\"Attempting to read local schema at %s\" % cache_filename)\n try:\n if time.time() - os.stat(cache_filename).st_mtime > CACHE_EXPIRY:\n log.warning(\"Cache expired, re-pulling\")\n self._pull_schema_definition(cache_filename)\n except OSError:\n log.warning(\"Local schema not found. 
Pulling from web.\")\n self._pull_schema_definition(cache_filename)\n else:\n log.info(\"Success\")\n\n return cache_filename", "title": "" }, { "docid": "975bc86a280809493d86c0329bf34dbd", "score": "0.60747033", "text": "def schema(self):\n return _parse_schema_resource(self._properties.get('schema', {}))", "title": "" }, { "docid": "cce4eb26e8258b71145b12a933cb50d2", "score": "0.6001636", "text": "def SCHEMA(self):\n pass", "title": "" }, { "docid": "cce4eb26e8258b71145b12a933cb50d2", "score": "0.6001636", "text": "def SCHEMA(self):\n pass", "title": "" }, { "docid": "cce4eb26e8258b71145b12a933cb50d2", "score": "0.6001636", "text": "def SCHEMA(self):\n pass", "title": "" }, { "docid": "60a65ef4574d42fd7dc9f0c36bb9286b", "score": "0.59796387", "text": "def __init__(self, definition=None):\n super(Schema, self).__init__()\n self._map = {}\n self._bq_schema = definition\n self._populate_fields(definition)", "title": "" }, { "docid": "75d02d1dab28c87ca8b43ee8a8328657", "score": "0.5960625", "text": "def main():\n parser = get_parser()\n handle_schema_args('cloudconfig-schema', parser.parse_args())\n return 0", "title": "" }, { "docid": "8b821d6db3a46d7cf7d02d19fb61bd8c", "score": "0.5945147", "text": "def new_schema():\n generate_new_schema()", "title": "" }, { "docid": "083a001fc0b2c3122ccdc15cfee33ed3", "score": "0.59255975", "text": "def get_schema(self) -> dict:\n if not os.path.isfile(self.path):\n logger.error('Path `%s` does not resolve as a valid file.', self.path)\n raise ImproperlyConfigured(\n f'The path `{self.path}` does not point to a valid file. Make sure to point to the specification file.'\n )\n try:\n logger.debug('Fetching static schema from %s', self.path)\n with open(self.path, 'r') as f:\n content = f.read()\n except Exception as e:\n logger.exception('Exception raised when fetching OpenAPI schema from %s. Error: %s', self.path, e)\n raise ImproperlyConfigured(\n f'Unable to read the schema file. 
Please make sure the path setting is correct.\\n\\nError: {e}'\n )\n if '.json' in self.path:\n return json.loads(content)\n elif '.yaml' in self.path or '.yml' in self.path:\n return yaml.load(content, Loader=yaml.FullLoader)\n else:\n raise ImproperlyConfigured('The specified file path does not seem to point to a JSON or YAML file.')", "title": "" }, { "docid": "f7693fe798fd0f30e90a47f25a1c1fbb", "score": "0.5916935", "text": "def set_schema(obj, eng):\n if '$schema' not in obj.data:\n obj.data['$schema'] = \"{data_type}.json\".format(\n data_type=obj.data_type or eng.workflow_definition.data_type\n )\n\n if not obj.data['$schema'].startswith('http'):\n obj.data['$schema'] = url_for(\n 'invenio_jsonschemas.get_schema',\n schema_path=\"records/{0}\".format(obj.data['$schema'])\n )", "title": "" }, { "docid": "485ee6f1c6dbec358f4df75ecb17b89b", "score": "0.5913141", "text": "def metadatablock_json_schema():\n\n return open(\"./tests/fixtures/dataversebase/toydataset.schema.json\").read()", "title": "" }, { "docid": "a15a98e8ba3f81903ffbe3a9c145f618", "score": "0.5878806", "text": "def validate(self):\n logger.info('Reading plugin schema file %s', self.__schema_file)\n\n if self.__plugin_schemas is None:\n self.__plugin_schemas = self.__read_schema_file()\n\n logger.debug('Validating plugin schema file content : %s',\n self.__plugin_schemas)\n self.__validate_schemas()", "title": "" }, { "docid": "e60d2153351382599d56a15394fbc213", "score": "0.58765936", "text": "def jsonschema_definition(self) -> Dict[str, Any]:\n pass", "title": "" }, { "docid": "a291c5e4428e987bf6cd472cd3cd6124", "score": "0.5869546", "text": "def cache_schema():\n # Calling load_schema() will generate the schema as a side effect\n load_schema()", "title": "" }, { "docid": "b2327968a1768478ec2a286d7c03dabb", "score": "0.5856529", "text": "def __register_schema(self,\n config,\n schema):\n\n if not os.path.exists(schema):\n raise IOError(u'Schema does not exists: {s}'.format(s=schema))\n\n # Place a copy of the schema in self.schema_path\n ensure_path_exists(self.schema_path)\n schema_copy_fn = u'{cfg}.schema.{ext}'.format(cfg=config,\n ext=CONST.json)\n schema_copy = os.path.join(self.schema_path, schema_copy_fn)\n shutil.copyfile(schema, schema_copy)\n\n # Register the schema\n self.__registered_cfg[config][CONST.cfg_schema] = schema_copy_fn", "title": "" }, { "docid": "63b407b09cff9d5c2e654bb3204b8067", "score": "0.5853047", "text": "def get_schema(self):\n return self.schema", "title": "" }, { "docid": "2de2f068964dc7e016d9d22829df3ce9", "score": "0.58503586", "text": "def handle_schema_args(name, args):\n exclusive_args = [args.config_file, args.docs]\n if not any(exclusive_args) or all(exclusive_args):\n error('Expected either --config-file argument or --docs')\n full_schema = get_schema()\n if args.config_file:\n try:\n validate_cloudconfig_file(\n args.config_file, full_schema, args.annotate)\n except SchemaValidationError as e:\n if not args.annotate:\n error(str(e))\n except RuntimeError as e:\n error(str(e))\n else:\n print(\"Valid cloud-config file {0}\".format(args.config_file))\n elif args.docs:\n schema_ids = [subschema['id'] for subschema in full_schema['allOf']]\n schema_ids += ['all']\n invalid_docs = set(args.docs).difference(set(schema_ids))\n if invalid_docs:\n error('Invalid --docs value {0}. 
Must be one of: {1}'.format(\n list(invalid_docs), ', '.join(schema_ids)))\n for subschema in full_schema['allOf']:\n if 'all' in args.docs or subschema['id'] in args.docs:\n print(get_schema_doc(subschema))", "title": "" }, { "docid": "af03ba52442219570687a1a4187ab5b6", "score": "0.5816239", "text": "def load(self, definition, schema_file, context):\n try:\n validate(definition, self._get_schema_from_file(schema_file))\n except ValidationError:\n raise ConfigException(\n 'Failed to validate interface with schema: {}'.format(\n traceback.format_exc()))\n\n try:\n global_settings = self._load_global_setting(\n definition.get('global_settings'), context\n )\n\n requests = [self._load_request(item) for item in definition['requests']]\n\n return munchify({\n 'meta': munchify(definition['meta']),\n 'tokens': definition['tokens'],\n 'global_settings': global_settings,\n 'requests': requests,\n })\n except Exception as ex:\n error = 'Unable to load configuration: %s' % str(ex)\n _logger.exception(error)\n raise ConfigException(error)", "title": "" }, { "docid": "2ec120889092ad5b8ec8eb2159b54978", "score": "0.58096194", "text": "def schema(self) -> \"Schema\":\n raise NotImplementedError", "title": "" }, { "docid": "b59d3e184492921c228ee929b2b5bb61", "score": "0.5780726", "text": "def read_schema(self):\n if self.db.has_collection(self.coll_name) and self.state_coll.has('schema'):\n state = self.state_coll.get('schema')\n else:\n state = {'schema': {}}\n\n return state.get('schema')", "title": "" }, { "docid": "79b9a811772cac241c0f0443de1dfd96", "score": "0.57753044", "text": "def setupSchema(app,\r\n conn,\r\n grantedusers = ['public'],\r\n publicsyn = True):\r\n r = dbschema.getSchemaVersion(conn, oraschema.LATESTVERSIONS)\r\n\r\n if r['schema-version'] is not None:\r\n raise Exception(\"crossbar.io Repository already installed\")\r\n\r\n return _setupSchema(conn, grantedusers, publicsyn)", "title": "" }, { "docid": "c7417c2d0df3bfa9d3573002a79ea6cc", "score": "0.5771426", "text": "def db_init( self, schema ):\n #TODO: initialize the database schema\n try: \n pass\n except:\n raise Exceptions.ConfigFault(\"Couldn't initialize database\")", "title": "" }, { "docid": "abee92d108e29d2c2aa7588c3117b4db", "score": "0.5769642", "text": "def schema(self):\n return self.reader.schema()", "title": "" }, { "docid": "e3551ef3e2b9db4d643d8cb0ad216244", "score": "0.576468", "text": "def schemas_config(self, configs):\n self.__schemas_config = configs", "title": "" }, { "docid": "fc8ed43719decf60a577ce127c8396ba", "score": "0.57510227", "text": "def load_schema(self, path):\n f = open(path, \"r\")\n return f.read()", "title": "" }, { "docid": "bae3f86858276df1f2463c3e8b62d516", "score": "0.5748631", "text": "def schema(self, schema):\n \n self._schema = schema", "title": "" }, { "docid": "30468bd01592082a26d906fb98d18ebb", "score": "0.5743769", "text": "def schema(self):\n try:\n return self._get_schema()\n except SchemaNotFoundError as error:\n logging.getLogger(__name__).warning(str(error))", "title": "" }, { "docid": "30611bbcdd235a8473f17f25934164e5", "score": "0.57144326", "text": "def start_schema_creation(self, apiId: str, definition: bytes) -> Dict:\n pass", "title": "" }, { "docid": "be8fba408bea9959911084e8b240b666", "score": "0.57089764", "text": "def schema(self):\n return self.__schema", "title": "" }, { "docid": "be8fba408bea9959911084e8b240b666", "score": "0.57089764", "text": "def schema(self):\n return self.__schema", "title": "" }, { "docid": "88053be6879d2b3b66af29c6c8eeece9", "score": 
"0.5685297", "text": "def config_schema(self) -> vol.Schema:\n return vol.Schema(\n {\n vol.Required(CONF_API_KEY): str,\n vol.Inclusive(\n CONF_LATITUDE, \"coords\", default=self.hass.config.latitude\n ): cv.latitude,\n vol.Inclusive(\n CONF_LONGITUDE, \"coords\", default=self.hass.config.longitude\n ): cv.longitude,\n vol.Optional(\n CONF_ELEVATION, default=self.hass.config.elevation\n ): vol.Coerce(float),\n }\n )", "title": "" }, { "docid": "217e9646acdc42513a0a14e7fd517f16", "score": "0.5679437", "text": "def get_gedl_schema(schema_location):\n basepath = \"\"\n #get the module paths to help find the schema in a relitive to ourself\n if(len(sys.path) > 1):\n basepath = sys.path[0]\n path = os.path.join(basepath,schema_location)\n \n if(not os.path.exists(path)):\n #schema not found relitive to the python enviroment, check a local path\n path = schema_location\n if(not os.path.exists(path)):\n #Unable to get python schema\n raise(IOError(\"Unable to fild cle schema (expected at): \" + path))\n \n #we found the schema load it into ram\n print(\"Using GEDL schema: \" + path)\n with open(path,\"r\",encoding=\"UTF-8\") as schemafile:\n return(json.loads(schemafile.read()))", "title": "" }, { "docid": "6836ae553860c0c9ee69e1f31556c20d", "score": "0.5676", "text": "def retrieveSchemas(self)->None:\n aepp.importConfigFile(\n self.CONFIG['original']['config_file_location'])\n schema_instance = aep_schema.Schema()\n tenant_id = schema_instance.getTenantId()\n schemas = schema_instance.getSchemas()\n obj_schema = {schema['title']: schema['meta:altId']\n for schema in schemas if schema['title'].startswith(\"Adhoc\") == False}\n for schema in obj_schema.values():\n schema = schema_instance.getSchema(schema, full=False, save=True)\n allOf = schema['allOf']\n allOf = [element for element in allOf if '$ref' in element.keys()]\n for element in allOf:\n if 'classes' in element['$ref']:\n myClass = schema_instance.getClass(\n element['$ref'], save=True)\n if 'mixins' in element['$ref']:\n myMixin = schema_instance.getMixin(\n element['$ref'], save=True)", "title": "" }, { "docid": "f37e1b5085f50e6f43ea445681f4915d", "score": "0.56728673", "text": "def __validate_config(self,\n schema_path,\n obj):\n\n # Load schema\n schema_obj = self.__get_schema_obj(schema_path)\n\n if u'properties' in schema_obj:\n # Make sure schema has reference to TEMPLATE_DEFAULTS_ITEM!\n if TEMPLATE_DEFAULTS_ITEM not in schema_obj[u'properties']:\n schema_obj[u'properties'][TEMPLATE_DEFAULTS_ITEM] = {u'type': u'object'}\n\n # Ensure loaded config is still valid (i.e any external modification hasn't corrupted it)\n validator = Draft4Validator(schema_obj.cfg)\n validator.validate(obj.cfg)", "title": "" }, { "docid": "c1d03056954aec670a4f285b7383b8c3", "score": "0.5661043", "text": "def test_get_schema_from_api():\n connection = DatabaseSchemaConnection(POSTGREST_URL)\n resp = connection.get_by_id(\"marple-dataset.json\")\n assert resp['$schema'] == 'http://json-schema.org/draft-04/schema#'\n assert isinstance(resp,dict)", "title": "" }, { "docid": "2e81759942d12b9229484596160a4eff", "score": "0.56599015", "text": "def validate_config(self):\n # with self.spinner.reenter('Validating Config') as grandchild:\n super(Instattack, self).validate_config()\n data = self.config.get_dict()\n\n # This is where we would configure the system settings with the user\n # defined settings.\n # config(data)\n\n # grandchild.warning('Not Currently Validating Schema', fatal=False, options={\n # 'label': True,\n # })", "title": "" }, { "docid": 
"10ed53159fe945a9a12f51f6fdc28aba", "score": "0.5644752", "text": "def __init__(self):\n if self.schema is None:\n _cls_name = f\"{self.model.__name__}Schema\"\n self.schema = globals()[_cls_name]", "title": "" }, { "docid": "fd30c838971eabbca690bde0abc9055c", "score": "0.56210756", "text": "def read_json_schema(schemafile):\r\n with open(schemafile, 'r') as jsonschemafile:\r\n data=jsonschemafile.read() \r\n \r\n json_dict = json.loads(data)\r\n colnames = json_dict['ColumnNames']\r\n offsets = json_dict['Offsets']\r\n includeheader = json_dict['IncludeHeader']\r\n fixedwidthencoding = json_dict['FixedWidthEncoding']\r\n delimitedencoding = json_dict['DelimitedEncoding']\r\n return colnames,offsets,includeheader", "title": "" }, { "docid": "b9628d41b08359b69aabf2fcd64f92b9", "score": "0.5620795", "text": "def create_schema(self, config_path: str, arg_replacements: Dict) -> Schema:\n return self._create_aepobject(Schema, config_path, arg_replacements)", "title": "" }, { "docid": "5d10b2bc62522850570cfce78e7ba920", "score": "0.5619358", "text": "def read_existing_schema_section(self):\n\n # The next tag must be 'EXISTING_SCHEMA'\n tag_line = self.read_line()\n if tag_line is None:\n raise Exception(\"Unexpected EOF, should be EXISTING_SCHEMA tag\")\n (tag, _) = self.parse_tag_line(tag_line)\n if tag == 'EXISTING_SCHEMA':\n # Read the EXISTING_SCHEMA records until the next TAG_TOKEN\n schema_lines = []\n while True:\n line = self.read_line()\n if line is None:\n break\n (tag, _) = self.parse_tag_line(line)\n if tag in self.TAG_TOKENS:\n if tag in ('DATA', 'EXISTING_SCHEMA'):\n raise Exception(f\"Unexpected {tag} tag\")\n self.push_back(line)\n break\n schema_lines.append(line)\n return ''.join(schema_lines)\n else:\n self.push_back(tag_line)\n return []", "title": "" }, { "docid": "2e95e4b18a86e7a79b02a4b890a65d5a", "score": "0.56126887", "text": "def _get_schema(self):\n if not self._schema:\n self._schema = XSDSchema(self.filetype, self.version)\n return self._schema", "title": "" }, { "docid": "cd6970bc9033144b09eac1029c53c4ff", "score": "0.5612554", "text": "def test_schema(self):\n fti = queryUtility(IDexterityFTI, name=CT)\n schema = fti.lookupSchema()\n self.assertEqual(IFeaturedListings, schema)", "title": "" }, { "docid": "793da77f74b8666b14ab78d098726ff1", "score": "0.5611442", "text": "def Read(self, *args):\n return _Storage.Storage_Schema_Read(self, *args)", "title": "" }, { "docid": "2dce402a8c5394192518f0d55246b0cc", "score": "0.5606407", "text": "def _get_config_schema(input_dict: dict[str, Any] | None = None) -> vol.Schema:\n if input_dict is None:\n input_dict = {}\n\n return vol.Schema(\n {\n vol.Required(\n CONF_NAME, default=input_dict.get(CONF_NAME, DEFAULT_NAME)\n ): str,\n vol.Required(CONF_HOST, default=input_dict.get(CONF_HOST)): str,\n vol.Required(\n CONF_DEVICE_CLASS,\n default=input_dict.get(CONF_DEVICE_CLASS, DEFAULT_DEVICE_CLASS),\n ): vol.All(\n str,\n vol.Lower,\n vol.In([MediaPlayerDeviceClass.TV, MediaPlayerDeviceClass.SPEAKER]),\n ),\n vol.Optional(\n CONF_ACCESS_TOKEN, default=input_dict.get(CONF_ACCESS_TOKEN, \"\")\n ): str,\n },\n extra=vol.REMOVE_EXTRA,\n )", "title": "" }, { "docid": "74e145096f96f1d3874ecf7cef0539a7", "score": "0.56050754", "text": "def _get_schema(self, user_input):\n schema = vol.Schema(\n {vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, \"\")): cv.string}\n )\n return schema", "title": "" }, { "docid": "b945c2c96c29ece6dfe827a69eb8b60d", "score": "0.559489", "text": "def schema(self) -> Optional[pulumi.Input[str]]:\n 
return pulumi.get(self, \"schema\")", "title": "" }, { "docid": "b945c2c96c29ece6dfe827a69eb8b60d", "score": "0.559489", "text": "def schema(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema\")", "title": "" }, { "docid": "b945c2c96c29ece6dfe827a69eb8b60d", "score": "0.559489", "text": "def schema(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema\")", "title": "" }, { "docid": "b945c2c96c29ece6dfe827a69eb8b60d", "score": "0.559489", "text": "def schema(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema\")", "title": "" }, { "docid": "b945c2c96c29ece6dfe827a69eb8b60d", "score": "0.559489", "text": "def schema(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema\")", "title": "" }, { "docid": "5786739e86ce1be5c17164295e4e5fd3", "score": "0.55935436", "text": "def refresh_definition(self):\n encoded_id = urllib.parse.quote_plus(self.id)\n result = self._aep.get(path='schemaregistry.schema',\n body={},\n params={},\n url_suffix='/'+encoded_id)\n self.definition = result", "title": "" }, { "docid": "40bf1c0880ce78c3712f19efbc2583a8", "score": "0.558595", "text": "def __init__(self, config_path: Optional[str]):\n self.config_path = config_path\n if self.config_path is None:\n if not self.strict:\n self.config = {}\n self.schema = {}\n return\n raise FileNotFoundError(\"Config file was not found\")\n if not os.path.exists(self.config_path):\n raise FileNotFoundError(f\"Configuration file not found in {self.config_path}\")\n main_dirname = os.path.abspath(os.path.dirname(__file__))\n schema_path = os.path.join(main_dirname, \"config.schema.json\")\n with open(self.config_path, \"r\", encoding=\"utf-8\") as fid:\n self.config: dict = json.load(fid)\n if not os.path.exists(schema_path):\n logger.warning(\"Configuration schema was not found in %s. 
Continuing without schema.\", schema_path)\n self.schema = {}\n return\n with open(schema_path, \"r\", encoding=\"utf-8\") as fid:\n self.schema = json.load(fid)\n jsonschema.validate(self.config, self.schema)", "title": "" }, { "docid": "d69bef2e0c94c2580ba8972b64a5ed60", "score": "0.55855185", "text": "def schema(self):\n return self._schema", "title": "" }, { "docid": "5aa9a3c4694118b21358e06c875c3fe2", "score": "0.557486", "text": "def read(self):\n self.schema_class = 'service_instances_schema.ServiceInstancesSchema'\n schema_object = super(ServiceInstance, self).read()\n self.schema_class = 'service_instance_schema.ServiceInstanceSchema'\n return schema_object", "title": "" }, { "docid": "874292ddbb59dfb3062fa8a0ec8b6629", "score": "0.557452", "text": "def fetch_schema(data_args, schema_inferred):\n if not data_args.schema_file:\n if not data_args.input_bq_table:\n # Both schema and input_bq_table are unset.\n # Use gcs schema file because safer than assuming this user has\n # created the lineorders table.\n data_args.schema_file = \\\n 'gs://python-dataflow-example/schemas/lineorder-schema.json'\n else:\n # Need to fetch schema from existing BQ table.\n bq_cli = bq.Client()\n dataset_name, table_name = data_args.input_bq_table.split('.', 1)\n bq_dataset = bq_cli.dataset(dataset_name)\n # This forms a TableReference object.\n bq_table_ref = bq_dataset.table(table_name)\n # Errors out if table doesn't exist.\n bq_table = bq_cli.get_table(bq_table_ref)\n\n # Quickly parse TableSchema object to list of dictionaries.\n data_args.schema = {\n 'fields': [{\n 'name': field.name,\n 'type': field.field_type,\n 'mode': field.mode,\n 'fields': field.fields\n } for field in bq_table.schema]\n }\n if data_args.output_bq_table:\n # We need to check if this output table already exists.\n dataset_name, table_name = data_args.output_bq_table.split(\n '.', 1)\n bq_dataset = bq_cli.dataset(dataset_name)\n # This forms a TableReference object.\n bq_table_ref = bq_dataset.table(table_name)\n try:\n bq_cli.get_table(bq_table_ref)\n schema_inferred = True\n except NotFound:\n schema_inferred = False\n\n if data_args.schema_file and data_args.input_bq_table:\n logging.error('Error: pipeline was passed both schema_file and '\n 'input_bq_table. '\n 'Please enter only one of these arguments')\n raise ValueError('Error: pipeline was passed both schema_file and '\n 'input_bq_table. 
'\n 'Please enter only one of these arguments')\n\n return data_args, schema_inferred", "title": "" }, { "docid": "7ba60ebe94793b17791b80605b18118c", "score": "0.5570819", "text": "def jsonschema_ref_schema(self) -> Dict[str, Any]:\n if self.schema_ref_name:\n return {\"$ref\": f\"#/definitions/{self.schema_ref_name}\"}\n else:\n return self.jsonschema_definition", "title": "" }, { "docid": "5c8ce52652f4f3114e4a85b1e3e92d20", "score": "0.55592275", "text": "def init(\n new_schema: SortedDict,\n schema_filepath: str,\n overwrite: bool = False,\n verbose: bool = False,\n):\n from syphon import schema\n\n schema.save(new_schema, schema_filepath, overwrite)\n\n if verbose:\n print(f\"Init: wrote {schema_filepath}\")", "title": "" }, { "docid": "0f9f0b9d264149f33dcc4a617b0f31cb", "score": "0.55577475", "text": "def find_ref_data(self, ref):\n\n if '#' not in ref:\n return None\n schema_ref, path = self.get_schema_ref_and_path(ref)\n if self.ref_to_own_schema(ref):\n schema = self.schemas.get(schema_ref, None)\n if not schema:\n return None\n\n else:\n schema = self.get_remote_schema(ref)\n if not schema:\n return None\n\n elements = [x for x in path.split('/') if x]\n element = ''\n for element in elements:\n if element in schema:\n schema = schema[element]\n else:\n return None\n\n schema = dict(schema)\n schema['_from_schema_ref'] = schema_ref\n if '_schema_name' not in schema:\n schema['_schema_name'] = self.get_schema_name(schema_ref)\n schema['_prop_name'] = element\n schema['_ref_uri'] = ref\n return schema", "title": "" }, { "docid": "c4417464d0ec2ceb4cd5d0d981b11049", "score": "0.55544174", "text": "def schemas_config(self):\n return self.__schemas_config", "title": "" }, { "docid": "c822837f28a369dce83495dbc291ad28", "score": "0.55474746", "text": "def schema_definition(self) -> pulumi.Input['TableSchemaDefinitionArgs']:\n return pulumi.get(self, \"schema_definition\")", "title": "" }, { "docid": "cfc89350ddce6cc3489e4b953fbef05d", "score": "0.55418557", "text": "def _get_create_schema(self, data):\n if getattr(self, 'create_schema', None):\n return self.create_schema()\n else:\n return self._get_schema(data)", "title": "" }, { "docid": "517f5aa53421921a0617b3b17aa8aed0", "score": "0.5536574", "text": "def __validate_registered_config(self,\n config,\n obj):\n\n cfg = self.__registered_cfg.get(config)\n schema = cfg.get(CONST.cfg_schema)\n\n if schema is not None:\n # Load schema\n schema_path = os.path.join(self.schema_path, schema)\n self.__validate_config(schema_path, obj)", "title": "" }, { "docid": "06c605488c395fcca05bf4743cb2df10", "score": "0.5529778", "text": "def read_previous_schema(self) -> List[dict]:\n\n self.check_for_existing_schema()\n\n with open(self.schema_dir) as schema:\n data = json.load(schema)\n\n return data", "title": "" }, { "docid": "f72f363b71c88692c90897d38e58fb8f", "score": "0.5527214", "text": "def read_schema_from_file() -> dict:\n logger.info(f'Reading schema from: {SCHEMA_FILE}')\n try:\n with open(SCHEMA_FILE) as input_file:\n return json.load(input_file)\n except FileNotFoundError:\n raise ZenodoMetadataException(f'Schema file: {SCHEMA_FILE} not found.')", "title": "" }, { "docid": "57cdf079f69d20f92d720723c7110103", "score": "0.55207723", "text": "def fake_data_schema():\n return FakeDataSchema()", "title": "" }, { "docid": "8dd08e8786a8ba6b7d4d4ae3d8df15ec", "score": "0.5520027", "text": "def __init__(self):\n self.data_schema = vol.Schema(\n {\n vol.Required(CONF_USERNAME): str,\n vol.Required(CONF_PASSWORD): str,\n vol.Optional(CONF_CODE): 
str,\n }\n )", "title": "" }, { "docid": "00cb27c88dcbcfd1e1ffbf7fdd4dfdee", "score": "0.5514165", "text": "def _schema_def(self, resource: Type[ResourceBase]) -> Dict[str, str]:\n meta = getmeta(resource)\n ref = meta.resource_name\n if ref not in self.defs:\n self.defs[ref] = None # Placeholder to prevent recursion\n self.defs[ref] = self._resource_to_schema(meta)\n return {\"$ref\": f\"#/$defs/{ref}\"}", "title": "" }, { "docid": "9ae7bad8c28802a5ec685660f0c8ad3e", "score": "0.5506005", "text": "def __init__(self, name, schema):\n self.name = name\n self.schema = schema", "title": "" }, { "docid": "478ed98ec167e968643f21f4c1c29c55", "score": "0.5498467", "text": "def schema(self, schema):\n\n self._schema = schema", "title": "" }, { "docid": "ee2d716cf61a72e64dcd03a60345a487", "score": "0.5497979", "text": "def get_schema(cls) -> Dict[str, Any]:\n schema = super().get_schema()\n schema[\"uihints\"].update({\"canRefresh\": True})\n return schema", "title": "" }, { "docid": "f286ea442c15596513ad704ee381587e", "score": "0.5493095", "text": "def _get_schema_from_file(schema_file):\n try:\n return load_json_file(schema_file)\n except:\n raise ConfigException(\n 'Cannot load schema from file {}: {}'.format(\n schema_file, traceback.format_exc())\n )", "title": "" }, { "docid": "70978278f69bcd0b1d2f6eb4a47c8612", "score": "0.54918206", "text": "def get_standard_json_schema():\n return standard_json_schema", "title": "" }, { "docid": "d912056dc4c4aaf55431cfdf00cce0d9", "score": "0.54900414", "text": "def getschema(self, schemaname):\n if not self.registry.has_key(schemaname):\n self.convert_schema(schemaname=schemaname)\n outschema = self.registry.get(schemaname)\n return outschema", "title": "" }, { "docid": "8fec341de54cccbdf06e4278b224c7dd", "score": "0.54724944", "text": "def getSchema(schema):\n \n rip = _schema[schema]\n \n s = Schema.Schema(open(rip.path))\n \n assert s.id == schema, _('schema %s has not the same id %s as in the RIP files') % (\n rip.path, schema)\n return s", "title": "" }, { "docid": "60ba4845e64be76aeba0dbed09feca26", "score": "0.5470302", "text": "def get_introspection_schema(self, apiId: str, format: str) -> Dict:\n pass", "title": "" }, { "docid": "508b4e1b2739b7428e0976a821059255", "score": "0.5469957", "text": "def schema(self):\n schema = super().schema\n schema['properties'].update(\n {\n 'artist': {'type': 'string'},\n 'album': {'type': 'string'},\n 'year': {'type': ['string', 'integer']},\n 'tags': one_or_more({'type': 'string'}),\n 'tag_type': {'type': 'string', 'enum': list(self._opts('tag_type').keys())},\n 'encoding': {'type': 'string', 'enum': self._opts('encoding')},\n 'format': {'type': 'string', 'enum': self._opts('format')},\n 'media': {'type': 'string', 'enum': self._opts('media')},\n 'release_type': {\n 'type': 'string',\n 'enum': list(self._opts('release_type').keys()),\n },\n 'log': {\n 'oneOf': [\n {'type': 'string', 'enum': list(self._opts('log').keys())},\n {'type': 'boolean'},\n ]\n },\n 'leech_type': {'type': 'string', 'enum': list(self._opts('leech_type').keys())},\n 'hascue': {'type': 'boolean'},\n 'scene': {'type': 'boolean'},\n 'vanityhouse': {'type': 'boolean'},\n }\n )\n return schema", "title": "" }, { "docid": "b0c25fdf6f6f2d1de92730eeaa36d0a2", "score": "0.54668695", "text": "def schema_database_read():\n return {0: (\"titleb\", str),\n 1: (\"type\", str),\n 2: (\"xmlUrl\", str),\n 3: (\"htmlUrl\", str),\n 4: (\"keywordsb\", str),\n 5: (\"id\", int, \"PRIMARYKEY\", \"AUTOINCREMENT\")}", "title": "" }, { "docid": 
"30b70c118a4730f09794c7615ca5424b", "score": "0.5466642", "text": "def need_schema_upgrade(self):\n raise NotImplementedError", "title": "" }, { "docid": "f0227b515313d3f4a501081a62ebaeaf", "score": "0.54564106", "text": "def add_schema(schema, connection):\n create_schemas_meta_table(connection)\n\n file_pattern = schema['meta']['files']\n time = schema['meta']['version']\n description = schema['meta']['description']\n processing_schema = json.dumps(schema['processing'])\n\n cur = connection.cursor()\n cur.execute(\n f\"INSERT INTO {xml_schemas_table} \"\n f\"({file_pattern_field}, {schema_time_field}, {schema_desc_field}, {proc_schema_field}) \"\n \n \"VALUES \"\n f\"('{file_pattern}', '{time}', '{description}', '{processing_schema}'::json) \"\n \n f\"ON CONFLICT DO NOTHING;\")\n connection.commit()", "title": "" }, { "docid": "15d517c411da1cce436813b4bbdb5ab6", "score": "0.5456108", "text": "def __fetch__definition(self):\r\n dom = xml.dom.minidom.parseString(self.input_xml)\r\n self.logger.debug('Loading parameters: %s' % (dom.toprettyxml()))\r\n setattr(self, 'thoughtspot_host_name', Et.fromstring(self.input_xml).find('DestinationServer').text\r\n if 'DestinationServer' in self.input_xml else None)\r\n if self.thoughtspot_host_name is None:\r\n self.error = \"Please Enter a ThoughtSpot Server\"\r\n return\r\n\r\n setattr(self,'thoughtspot_database_name', Et.fromstring(self.input_xml).find('TargetDatabase').text\r\n if 'TargetDatabase' in self.input_xml else None)\r\n if self.thoughtspot_database_name is None:\r\n self.error = \"Please Enter a Database Name\"\r\n return\r\n\r\n setattr(self, 'thoughtspot_schema', Et.fromstring(self.input_xml).find('TargetSchema').text\r\n if 'TargetSchema' in self.input_xml else 'falcon_default_schema')\r\n\r\n setattr(self, 'thoughtspot_user_name', Et.fromstring(self.input_xml).find('UserName').text\r\n if 'UserName' in self.input_xml else None)\r\n if self.thoughtspot_user_name is None:\r\n self.error = \"Please Enter a ThoughtSpot User Name\"\r\n return\r\n\r\n password = Et.fromstring(self.input_xml).find('Password').text if 'Password' in self.input_xml else None\r\n setattr(self, 'thoughtspot_rsa_file_path', Et.fromstring(self.input_xml).find('rsaFilePath').text\r\n if 'rsaFilePath' in self.input_xml else None)\r\n if self.thoughtspot_rsa_file_path is not None:\r\n setattr(self, 'use_key_file', True)\r\n setattr(self, 'thoughtspot_password', None)\r\n else:\r\n if password is None:\r\n self.error = \"A Password or Key file Must be Entered\"\r\n return\r\n else:\r\n # Can be used for unencrypted passwords. 
remove # on line 95 and place a # on lines 97 and 98\r\n # setattr(self, 'thoughtspot_password', password)\r\n setattr(self, 'use_key_file', False)\r\n setattr(self, 'thoughtspot_password', self.alteryx_engine.decrypt_password(\r\n Et.fromstring(self.input_xml).find('Password').text, 0))\r\n\r\n setattr(self, 'thoughtspot_table_name', Et.fromstring(self.input_xml).find('TableName').text\r\n if 'TableName' in self.input_xml else None)\r\n if self.thoughtspot_table_name is None:\r\n self.error = \"Please Enter a ThoughtSpot Table Name\"\r\n return\r\n\r\n port_test = Et.fromstring(self.input_xml).find('DestinationPort').text\r\n self.logger.info(port_test)\r\n if port_test is None:\r\n setattr(self, 'thoughtspot_port', 22)\r\n else:\r\n setattr(self, 'thoughtspot_port', int(port_test))\r\n\r\n setattr(self, 'buffer_size', int(Et.fromstring(self.input_xml).find('BufferSize').text)\r\n if 'BufferSize' in self.input_xml else None)\r\n if self.buffer_size is None:\r\n self.error = \"Please Enter the row buffer size (Default: 1000)\"\r\n return\r\n\r\n setattr(self, 'verbosity', Et.fromstring(self.input_xml).find('Verbosity').text\r\n if 'Verbosity' in self.input_xml else 0)\r\n setattr(self, 'maxingoredrows', Et.fromstring(self.input_xml).find('MaxIgnoredRows').text\r\n if 'MaxIgnoredRows' in self.input_xml else 0)\r\n setattr(self, 'truncate', Et.fromstring(self.input_xml).find('Truncate').text\r\n if 'Truncate' in self.input_xml else False)\r\n setattr(self, 'ts_create_database', Et.fromstring(self.input_xml).find('CreateDatabase').text\r\n if 'CreateDatabase' in self.input_xml else False)\r\n setattr(self, 'booleanstring', Et.fromstring(self.input_xml).find('BooleanString').text\r\n if 'BooleanString' in self.input_xml else 'T_F')\r\n setattr(self, 'ts_create_table', Et.fromstring(self.input_xml).find('CreateTable').text\r\n if 'CreateTable' in self.input_xml else False)\r\n\r\n primary_keys = Et.fromstring(self.input_xml).find('PrimaryKey').text \\\r\n if 'PrimaryKey' in self.input_xml else None\r\n if primary_keys is not None:\r\n setattr(self, 'primary_keys', '\",\"'.join(primary_keys.split(\",\")))\r\n else:\r\n setattr(self, 'primary_keys', None)\r\n\r\n hash_test = Et.fromstring(self.input_xml).find('HashValue').text\r\n self.logger.info(hash_test)\r\n if hash_test is None:\r\n setattr(self, 'hash_number', 0)\r\n setattr(self, 'partition_keys', None)\r\n else:\r\n setattr(self, 'hash_number', int(hash_test))\r\n partition_keys = Et.fromstring(self.input_xml).find('PartitionKey').text \\\r\n if 'PartitionKey' in self.input_xml else None\r\n if partition_keys is not None:\r\n setattr(self, 'partition_keys', '\",\"'.join(partition_keys.split(\",\")))\r\n else:\r\n setattr(self, 'partition_keys', None)", "title": "" }, { "docid": "6f1ce7d7d3f58fb96f924949d0c220e8", "score": "0.5454911", "text": "def __init__(self):\n\n self.database = \"shsmd.db\"\n self.debug = False\n self.schema = \"../schema.sql\"", "title": "" } ]
ba2f90b28966006b3764a6a8cadb2c84
Returns the importance of term in document id. If the term isn't in the document, then return 0.
[ { "docid": "40550b3d44025bcbb8fdf4c3d7ea859c", "score": "0.718705", "text": "def imp(term,id):\r\n if id in postings[term]:\r\n return postings[term][id]*inverse_document_frequency(term)\r\n else:\r\n return 0.0", "title": "" } ]
[ { "docid": "1ab6cc16ab1cfd758823ca6ce6adb0c8", "score": "0.73194516", "text": "def get_tfidf_scores(self, term, document):\n if term in self.tf[document]:\n return self.tf[document][term] * self.idf[term]\n else:\n return 0", "title": "" }, { "docid": "a5322e56d406d8222b4591b5dc78afe3", "score": "0.7267702", "text": "def imp(self, term, id):\n if id in self.frequency[term]:\n return self.frequency[term][id] * self.inverse_document_frequency(term)\n else:\n return 0.0", "title": "" }, { "docid": "4c2be31df7ae39a53804ef7839af5709", "score": "0.7203408", "text": "def compute_term_frequency_in_collection(self, term, id_document):\n try:\n tf = self.inverted_index[term][id_document]\n return tf\n except KeyError:\n return 0", "title": "" }, { "docid": "b76922a897681b8201269db22bb15b46", "score": "0.6974998", "text": "def get_idf(self, term):\n if term in self.stopwords:\n return 0\n\n if not term in self.term_num_docs:\n return self.idf_default\n\n return math.log(float(1 + self.get_num_docs()) / \n (1 + self.term_num_docs[term]))", "title": "" }, { "docid": "b7e2d4947a28623fa72af23a6fe53cb3", "score": "0.6677582", "text": "def idf(self, word, corpus):\r\n\r\n idf = self.doc_freq(word, corpus)\r\n if idf == 0: \r\n return 0\r\n return np.log10(len(corpus)/(idf))", "title": "" }, { "docid": "b9723043fe5e9c564378843a21b06a95", "score": "0.66243446", "text": "def compute_idf(self, term):\n try:\n # thanks to the inverted index, we have access to all the documents containing the term\n docs = self.inverted_index[term].keys()\n df = len(docs)\n except KeyError:\n return 0\n return log(self.number_of_docs / df)", "title": "" }, { "docid": "9f263d50d42b747d120513716838d8f5", "score": "0.6587332", "text": "def term_frequency(term: str, document: dict):\n if term not in document:\n return 0\n\n number_of_occurences = document[term]\n total_number_of_occurences = sum(document.values())\n\n return number_of_occurences / total_number_of_occurences", "title": "" }, { "docid": "de89ea5fb8b03ae122b942e3b87a2ce8", "score": "0.6554476", "text": "def term_freq(self, word, document):\r\n\r\n if word in document.word_freq:\r\n return (1+np.log10(document.word_freq[word]))\r\n else:\r\n return 0", "title": "" }, { "docid": "6f78c0da6db0011ccd8bc003de211461", "score": "0.65498453", "text": "def get_words_importance(self, doc_word_dict : dict, id : int) -> list:\n word_list = doc_word_dict[id]\n word_importance = {}\n for word in word_list:\n word_importance[word] = log(len(doc_word_dict) / self.get_docs_for_word(word, doc_word_dict))\n return word_importance", "title": "" }, { "docid": "2592e943c06bd2a5be5a849cd218affd", "score": "0.6462634", "text": "def __term_frequency(term, document, query=False):\n\tif query:\n\t\treturn query_vector[term]\n\tif document not in inverted_index[term].keys():\n\t\treturn 0\n\treturn inverted_index[term][document]", "title": "" }, { "docid": "5624abaeb1c7b984a84d204a831f4bf7", "score": "0.6405168", "text": "def __document_frequency(term):\n\treturn len(inverted_index[term])", "title": "" }, { "docid": "1af3094b55566ff878786ae2ba3cfef4", "score": "0.6375153", "text": "def term_frequency(self, term):\n term = re.sub(r'\\W+', '', term).lower()\n if term not in self._word_to_count.keys():\n return 0\n else:\n return self._word_to_count[term] / self._word_count", "title": "" }, { "docid": "586f3fd4626f8420c9d0804104d99fe8", "score": "0.6220984", "text": "def __get_idf(self, token):\n df = self.dictionary.dfs.get(token, 0)\n if df == 0:\n return 0\n else:\n return 
math.log(len(self.dictionary.doc_ids)/df, 10)", "title": "" }, { "docid": "9d194912cafdb6ae92cd5cba74d04f45", "score": "0.6220814", "text": "def idf(self, searcher, fieldname, text):\r\n\r\n parent = searcher.get_parent()\r\n n = parent.doc_frequency(fieldname, text)\r\n dc = parent.doc_count_all()\r\n return log(dc / (n + 1)) + 1", "title": "" }, { "docid": "1b0032c32dcc640f5a00002fa4d914d5", "score": "0.62125367", "text": "def __log_term_frequency(term, document, query=False):\n\tif __term_frequency(term, document, query) > 0:\n\t\treturn 1 + math.log(__term_frequency(term, document, query))\n\treturn 0", "title": "" }, { "docid": "2a85052c0c58a437b40768cbb5100e80", "score": "0.6199197", "text": "def idf(self, term):\n return self.get_cache('idf', term,\n lambda: math.log(len(self.DOCVECS)/self.df(term)))", "title": "" }, { "docid": "0c492b24c60f3b8398f35b0d81affcd1", "score": "0.61888534", "text": "def compute_tf(document, term, type='raw'):\n assert (type in ['raw', 'augmented', 'boolean']), \"The parameter 'type' is not recognized. Please enter 'raw', 'boolean' or 'augmented' as it's value.\"\n tf = 0.0\n if type == 'raw':\n tf = float(document.count(term))\n if type == 'boolean':\n tf = float(term in document)\n if type == 'augmented':\n tf = 0.5 + ((0.5 * float(document.count(term))) / float(max([document.count(x) for x in document])))\n return tf", "title": "" }, { "docid": "b97c9c31f33cf2a704478804d66a14f4", "score": "0.6161808", "text": "def compute_tf_idf(self, term, id_document):\n tf_idf = self.log_normalization(term, id_document) * self.compute_idf(term)\n return tf_idf", "title": "" }, { "docid": "d4c6248e2039be9f502ed22fd972928f", "score": "0.6156177", "text": "def term_frequency(word, document):\n return document.count(word) / len(document.split(\" \"))", "title": "" }, { "docid": "c59e803d24c0ef0882023c997952f41c", "score": "0.61420256", "text": "def word_frequency_of_word_in_document(self, word, document_id):\n if word in self.documents[document_id]:\n return self.documents[document_id][word]\n else:\n return 0", "title": "" }, { "docid": "1a2e701a6e91e0f7f264f8bcdec5f1d3", "score": "0.61271435", "text": "def inverse_document_frequency(term):\r\n if term in dictionary:\r\n return math.log(N/document_frequency[term],2)\r\n else:\r\n return 0.0", "title": "" }, { "docid": "caad6749524bf8c38cfc72d0be5a5d29", "score": "0.6126351", "text": "def get_score(self, query_term, doc):", "title": "" }, { "docid": "ee0fd5091e801e91faf085ff3631d573", "score": "0.60995793", "text": "def tf_idf(self, word, document, corpus):\r\n\r\n return self.term_freq(word, document)*self.idf(word, corpus)", "title": "" }, { "docid": "b30f2746bf96644b686cd8a0c474e32c", "score": "0.60398054", "text": "def _idf(self, word, counter):\n try:\n return self._log_freqlist_total - math.log(self._freqlist[word])\n except ValueError:\n return 0", "title": "" }, { "docid": "f34b4ae22c6e331f006af3bf546b094f", "score": "0.60377926", "text": "def log_term_frequency(term: str, document: dict):\n return 1 + log(term_frequency(term, document))", "title": "" }, { "docid": "3b0f3e4494c5f04a6345e54d9f80d450", "score": "0.6011292", "text": "def tfidfScore(tf, numdocs, numtermincorp):\n return tf * log(numdocs / (1 + numtermincorp))", "title": "" }, { "docid": "fe9e99d67c716bf19c7ea992f94627d4", "score": "0.60076827", "text": "def _get_term_freq(self, doc):\n tf_dict = {}\n for token in doc:\n if token in tf_dict:\n tf_dict[token] += 1\n else:\n tf_dict[token] = 1\n for token in tf_dict:\n tf_dict[token] = tf_dict[token] / 
len(doc)\n return tf_dict", "title": "" }, { "docid": "e3ad043b895baa11dfdb2ca1c73d7b55", "score": "0.5957657", "text": "def getTF(self,word):\r\n return float(self.getCountOf(word))/float(self.getWordCount())", "title": "" }, { "docid": "f2a62be0f3d9539a0a1a0f2b0076ce78", "score": "0.59250844", "text": "def inverse_document_frequency(self, term):\n\n if term in self.dictionary:\n return math.log(self.N / len(self.index.get_postings(term)), 2)\n return 0.0", "title": "" }, { "docid": "91d88c4128cdaa7dd51d20b795f8deda", "score": "0.5862421", "text": "def get_tfidf_score(self, input_term, all_terms, unique_terms):\n if unique_terms.size==0: \n log_print(\"No terms found for the input term {}\".format(input_term))\n return None\n\n total = 1855658.0\n tf = Counter(item for sublist in all_terms for item in sublist)\n\n path_to_nyt_idf = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(path_to_nyt_idf, 'nyt_idf_nonstem.json'), 'r') as fp:\n idf = json.load(fp)\n score = np.array([tf[term]*np.log(total/idf[term]) for term in unique_terms])\n tf_idf = dict(zip(unique_terms, score/max(score)))\n return tf_idf", "title": "" }, { "docid": "6149e125de392f1ba023640ab7d3d0ef", "score": "0.5833293", "text": "def __boolean_term_frequency(term, document, query=False):\n\tif __term_frequency(term, document, query) > 0:\n\t\treturn 1\n\treturn 0", "title": "" }, { "docid": "1770af3b106b3a4008e60f156566787d", "score": "0.5805489", "text": "def idf(word, stats):\n return math.log(stats.N() / stats[word]) if word in stats else 0", "title": "" }, { "docid": "348fade58f793fcb812eda46cec34be5", "score": "0.5763374", "text": "def term_frequency(word, word_frequency):\n\n return 0 if word_frequency[word] == 0 else 1 + log(word_frequency[word])", "title": "" }, { "docid": "4e09b728ce54fcee100f2eb375d705b2", "score": "0.57567257", "text": "def document_frequency(term: str, documents: list):\n documents_with_term = [doc for doc in documents if term_frequency(term, doc) > 0];\n\n return len(documents_with_term)", "title": "" }, { "docid": "eaab51e438bd8e4fbbe7b389c6d5f2a2", "score": "0.5746729", "text": "def get_score(self, query_term, doc):\n return self.__getattribute__(self.retrieval_model)(query_term, doc)", "title": "" }, { "docid": "3669da697e4141eaa0ca4cfc0d1da637", "score": "0.5736835", "text": "def __inverted_documnent_frequency(term):\n\treturn math.log(movies['num_of_movies'] / __document_frequency(term))", "title": "" }, { "docid": "8488e0b73a112efdc34bb77803e4cd73", "score": "0.57291937", "text": "def raw_counts(self, query_term, doc):\n return doc.get_dtf() * self.query_terms.count(query_term)", "title": "" }, { "docid": "35dd44506bbd01bdfd5144dcfc628d6f", "score": "0.5728637", "text": "def tfidf(term: str, document: dict, documents: list):\n tf = term_frequency(term, document)\n idf = inverse_document_frequency(term, documents)\n\n return tf*idf", "title": "" }, { "docid": "bf3ef435509aecb9fd5e9d4ae192029e", "score": "0.5718501", "text": "def __score(term, document, scoring_scheme, query):\n\tterm_frequency_type = scoring_scheme\n\tif term_frequency_type == \"n\":\n\t\tterm_frequency = __term_frequency(term, document, query)\n\telif term_frequency_type == \"l\":\n\t\tterm_frequency = __log_term_frequency(term, document, query)\n\telif term_frequency_type == \"a\":\n\t\tterm_frequency = __augmented_term_frequency(term, document, query)\n\telif term_frequency_type == \"b\":\n\t\tterm_frequency = __boolean_term_frequency(term, document, query)\n\n\treturn term_frequency * 
__inverted_documnent_frequency(term)", "title": "" }, { "docid": "a17c89b352252ff33a9369b37f677cbf", "score": "0.5707244", "text": "def score_one(self, sd):\r\n return (self.param + sd.doc_term_count) / (self.param * sd.doc_unique_terms + sd.doc_size)", "title": "" }, { "docid": "c7592a95c1fb70a21065a5a2edc740ae", "score": "0.567741", "text": "def compute_idfs(self, word_dict):\r\n\t\tprint('computing IDF scores for words within corpus')\r\n\t\t# obtain document frequencies\r\n\t\tdfreqs = dict(word_dict.dfs)\r\n\t\t# return IDF scores\r\n\t\treturn {word_dict[idx]: np.log(word_dict.num_docs / (1 + float(dfreq))) for idx, dfreq in dfreqs.items()}", "title": "" }, { "docid": "7719a1d51402e4e84c1237245f16469b", "score": "0.5644053", "text": "def inverse_document_frequency(self, query_word):\n no_qi = self.no_of_documents_containing_a_word(query_word)\n return float(math.log(self.no_of_documents / (no_qi + 1.0)))", "title": "" }, { "docid": "b19b542d025bfce2d3cad706005e27cd", "score": "0.5636606", "text": "def idfTerm(term, searcher):\n\n return idf(searcher.docFreq(term), searcher.maxDoc())", "title": "" }, { "docid": "f7c433f0945151873995a277b2b1067d", "score": "0.5621689", "text": "def getFreq(self,text, word_type=''):\n\t\tidList=[];\n\t\t#if araby.isHaraka(text[-1:]): text=text[:-1];\n\t\tidList=self.lookup(text,word_type);\n\t\t# if there are many take the first\n\t\tif idList:\n\t\t\treturn self.getAttribById(idList[0], u'freq')\n\t\telse: \n\t\t\treturn 0;", "title": "" }, { "docid": "8d3029bbba9b9d700bd4a2f62d1ff631", "score": "0.56136155", "text": "def idf(word, word_list):\n N = len(word_list)\n num_documents_containing_word = len([True for document in word_list if word in document])\n return math.log(N / num_documents_containing_word + 1) # + 1 accounts for if the word doesn't occur in any of the documents", "title": "" }, { "docid": "fc1ea5739249359c1f3a68edeb9bdc16", "score": "0.5594896", "text": "def importance(self) -> int:\n return self._importance", "title": "" }, { "docid": "45e9e1b76288883f663c61924147090f", "score": "0.55904835", "text": "def __augmented_term_frequency(term, document, query=False):\n\tmax_term_frequency = -1\n\tfor t in inverted_index.keys():\n\t\tmax_term_frequency = max(max_term_frequency, __term_frequency(t, document, query))\n\treturn 0.5 + 0.5 * ( __term_frequency(term, document, query) / max_term_frequency )", "title": "" }, { "docid": "3999b27040b7591d54a2714c1c96589d", "score": "0.55884415", "text": "def calc_tfidf_similarity(doc1: Counter, doc2: Counter, idf:\n Dict[Text, float]):\n tf1_max = max(doc1.values())\n tf2_max = max(doc2.values())\n total_sum = 0.0\n for word, tf1 in doc1.items():\n tf2 = doc2[word]\n if tf2 == 0 or word not in idf:\n continue\n tf1_norm = (0.5+0.5*float(tf1)/tf1_max)\n tf2_norm = (0.5+0.5*float(tf2)/tf2_max)\n idf_val = idf[word]\n total_sum += tf1_norm * tf2_norm * (idf_val*idf_val)\n return math.sqrt(total_sum)", "title": "" }, { "docid": "6f5bc01a23ac9affce6c84799cb26171", "score": "0.55526114", "text": "def get_idf(tf_matrix, term_index):\n\n # Document Frequency (DF)\n df = 0\n for entry in tf_matrix[:, term_index]:\n if entry > 0:\n df += 1\n\n # print \"df = \", df\n # Inverse Document Frequency (IDF)\n if df:\n idf = 1.0 + math.log10(float(TOTAL_DOCS) / df)\n else:\n idf = 0.0\n\n return idf", "title": "" }, { "docid": "59016a1f4e819b2d9a971198ba18fc81", "score": "0.554357", "text": "def similarity(self, query, id):\n similarity = 0.0\n for term in query.split(' '):\n if term in self.dictionary:\n similarity 
+= self.inverse_document_frequency(term) * self.imp(term, id)\n\n if self.length[id] > 0:\n similarity = similarity / self.length[id]\n return similarity", "title": "" }, { "docid": "95bda274f1cacdd9e0684d4866897bf0", "score": "0.5534798", "text": "def calculate_document_tf_idf(sentence, paper_bag_of_words):\n # Sanity check\n assert(len(paper_bag_of_words) > 0)\n\n sentence_bag_of_words = defaultdict(float)\n for word in sentence:\n\n if word in STOPWORDS:\n continue\n else:\n sentence_bag_of_words[word] += 1.0\n\n sent_tf_idf = 0\n length = 0\n\n for word in sentence:\n\n if word in STOPWORDS:\n continue\n\n tf = sentence_bag_of_words[word]\n\n # Add 1 to the denominator to prevent division by 0\n idf = np.log(len(paper_bag_of_words) / (paper_bag_of_words[word] + 1))\n\n tf_idf = tf * idf\n\n sent_tf_idf += tf_idf\n\n length += 1\n\n if length == 0:\n return 0\n else:\n return sent_tf_idf / length", "title": "" }, { "docid": "574154d391ef7cea72fcd34ca925cc22", "score": "0.5512458", "text": "def calculate_tf_idf(sentence, global_count_of_papers_words_occur_in, paper_bag_of_words):\n bag_of_words = paper_bag_of_words\n\n sentence_tf_idf = 0\n\n length = 0\n\n for word in sentence:\n\n if word in STOPWORDS:\n continue\n\n # Get the number of documents containing this word - the idf denominator (1 is added to prevent division by 0)\n docs_containing_word = global_count_of_papers_words_occur_in[word] + 1\n\n # Count of word in this paper - the tf score\n count_word = bag_of_words[word]\n\n idf = np.log(NUMBER_OF_PAPERS / docs_containing_word)\n\n #word_tf_idf = (1 + np.log(count_word)) * idf\n\n word_tf_idf = count_word * idf\n\n sentence_tf_idf += word_tf_idf\n\n length += 1\n\n if length == 0:\n return 0\n else:\n sentence_tf_idf = sentence_tf_idf / length\n return sentence_tf_idf", "title": "" }, { "docid": "3462723797b792010e0af288e4741b46", "score": "0.5498431", "text": "def dirichlet(self, query_term, doc):\n # Frequency of term in the document (document term frequency - dtf)\n fqiD = doc.get_dtf()\n # Length of the document\n dl = self.inverted_index.get_doc_length(doc.get_doc_id())\n # Frequency of term in the collection (collection term frequency - ctf)\n cqi = self.inverted_index.get_ctf(query_term)\n # Total length of all documents in the collection\n cl = self.inverted_index.get_collection_length()\n \n score = math.log((fqiD + (self.mu * (cqi / cl))) / (dl + self.mu))\n return score * self.query_terms.count(query_term)", "title": "" }, { "docid": "53784acbc9786bc475f618db0be4be48", "score": "0.54652566", "text": "def _tf(tokenized_doc):\n term_tf = {}\n for term in tokenized_doc:\n if term not in term_tf:\n term_tf[term]=1.0\n else:\n term_tf[term]+=1.0\n\n # pprint(term_tf)\n return term_tf", "title": "" }, { "docid": "3b293b37d790c1975b74936de9cf4842", "score": "0.546485", "text": "def calculate_similarity(corpus, term_corpus):\n\n N = corpus.shape[0] # total number of reports\n term_counter = Counter()\n\n for index, value in corpus.items():\n total_features = value\n\n for term in term_corpus:\n if term in total_features:\n term_counter[term] += 1\n\n similarity = 0\n # idf = log(num_documents / num_documents_contain_term + 1)\n for term, term_frequency in term_counter.items():\n term_IDF = math.log(float(N) / (term_frequency + 1))\n similarity += term_IDF\n return similarity", "title": "" }, { "docid": "001734219fdd4482655a970acca37de9", "score": "0.5433262", "text": "def __get_weight_query_term(self, term, term_tf, n):\n\n # calculate tf in query\n # log_tf = 1 + 
math.log10(term_count)\n\n if term in self.dictionary.keys():\n (freq, postings_line) = self.dictionary[term]\n # calculate idf\n idf = math.log10(float(n)/float(freq))\n return term_tf * idf\n else:\n return 0", "title": "" }, { "docid": "65aac01b64ac0f8b5a171dd749d75a0d", "score": "0.5414591", "text": "def tf(bag_words, doc_dict):\n tf = {}\n len_doc = len(doc_dict)\n\n for token, count in bag_words.items():\n if count > 0:\n tf[token] = count / float(len_doc)\n else:\n tf[token] = 0\n return tf", "title": "" }, { "docid": "13bf35a9296fb0ecdbb8d9561964c3da", "score": "0.5411817", "text": "def doc_frequency(self, fieldid, text):\r\n raise NotImplementedError", "title": "" }, { "docid": "e3740a833027b125cf246a422ad7acb1", "score": "0.5398581", "text": "def compute_tfdf_score(cur, user_id, tf_type):\n score = 0.0\n # Get timeline_document from user\n cur.execute(\"SELECT timeline_document FROM users WHERE user_id = {};\".format(user_id))\n tl_doc = cur.fetchone()\n # tl_doc = cur.fetchone()[0]\n if (tl_doc is None) or (tl_doc[0] is None) or (len(tl_doc[0]) < 1):\n return score\n # Iterate over all topics to compute the final score\n cur.execute(\"SELECT topic, document_frequency FROM topics;\")\n for t in cur:\n # Compute TF\n tf = compute_tf(document=tl_doc[0], term=t[0], type=tf_type)\n # Compute TF-IDF\n score += tf * t[1]\n return score", "title": "" }, { "docid": "a86dc21dd09b9feebb35225e625b55f0", "score": "0.53949726", "text": "def _get_unigram_count(self, word):\n c = self.db_conn.cursor()\n c.execute(\"SELECT frequency FROM unigram_count WHERE word = ?\",\n (word,))\n result = c.fetchone()\n return float(result[0]) if result else 0", "title": "" }, { "docid": "a6cfcf155e9aaaf81074af691498c62e", "score": "0.5393289", "text": "def doc_freq(self, word, corpus):\r\n\r\n # finding doc_freq using len(posting list) from inverted index(if present)\r\n if len(self.inv_index)>0:\r\n return len(self.inv_index[word])\r\n\r\n count = 0\r\n for doc in corpus:\r\n if word in doc.word_freq:\r\n count += 1\r\n return count", "title": "" }, { "docid": "0217b748cfb19226bb42bf3b93a85b88", "score": "0.5378008", "text": "def get_docs_for_word(self, word : str, doc_word_dict : dict) -> float:\n n_docs = len(doc_word_dict)\n n_docs_containing_word = 0\n for doc in doc_word_dict:\n if word in doc_word_dict[doc]:\n n_docs_containing_word = n_docs_containing_word + 1\n if n_docs_containing_word == 0:\n print(\"Safety check. 
Ideally it shouldn't enter this if condition\")\n return 1\n return n_docs_containing_word", "title": "" }, { "docid": "4c1c7fd7fee301ff306fc66b83ebeba3", "score": "0.5369192", "text": "def get_score(self, word):\n retval = 0.0\n # calculate score if dictionaries are present\n if self.wdict != None and self.tdict != None:\n w = self.wdict.count(word)\n if w != 0:\n retval = self.tdict.count(word) * 1.0 / w\n return retval", "title": "" }, { "docid": "c6a7c90b47326363418dda94fa0ff68c", "score": "0.5367672", "text": "def raw_counts(self, query_term, doc):", "title": "" }, { "docid": "c1077a1ca0ed251d0f9937fbbd034a32", "score": "0.5360859", "text": "def scalar_inc_dec(word, valence, words_and_emoticons):\n previous_word = ' '\n next_word = ' '\n scalar = 0.0\n\n if word in BOOSTER_DICT:\n\n scalar = BOOSTER_DICT[word]\n if valence < 0:\n scalar *= -1\n\n return scalar", "title": "" }, { "docid": "983413b0753de4f66dc6030c1158be4e", "score": "0.5358795", "text": "def count_bag(self, tweet, use_first_tf_doc):\n\n count = 0.0\n sanitised_tweet_text = tweet['text'] # OSX\n #sanitised_tweet_text = tweet.text # Windows\n hashtag = False\n term_frequency_document = self.termfreq_doc if use_first_tf_doc else self.second_net_termfreq_doc\n count += self.get_author_sentiment(tweet)\n\n\n # The following loop first checks if the word in the tweet\n # is a hastag, setting the hashtag boolean to true if it is.\n # Then, a check is used to see if the term is in the term\n # frequency document. If it is, then add the value of that\n # term (plus 1) to the count, multiplying it by the hash_tag_multiplier \n # variable if the term is a hashtag. Then subtract the number returned\n # by the get_tweet_age_score function for that tweet, and finally\n # return the resulting count. 
If the count is below zero, return it\n # to zero.\n\n\n for word in sanitised_tweet_text.split():\n if word[0] == \"#\":\n text_word = word.lower().replace(\"#\", \"\")\n hashtag = True\n else:\n text_word = word.lower()\n if term_frequency_document.has_key(text_word):\n #if text_word in term_frequency_document.keys():\n count += 1\n if hashtag == True:\n if self.termfreq_doc.get(text_word) is not None:\n count += (self.termfreq_doc.get(text_word) * self.hash_tag_multiplier)\n hashtag = False\n else:\n if self.termfreq_doc.get(text_word) is not None:\n count += self.termfreq_doc.get(text_word)\n \n count -= self.get_tweet_age_score(tweet)\n if count <= 0.0:\n count = 0.0 \n print(count)\n return count", "title": "" }, { "docid": "ded23487a7ce3d39c1e6f9d0e94b2091", "score": "0.53567445", "text": "def rarity(self, word):\r\n if not isinstance(word, str):\r\n raise TypeError(\"Word given must be a string\")\r\n\r\n try:\r\n occurrence = self.word_frequency[word]\r\n except KeyError: # misspelling or missing word\r\n return 3\r\n\r\n if self.max/100 <= occurrence: # common\r\n return 0\r\n elif self.max/1000 <= occurrence < self.max/100: # uncommon\r\n return 1\r\n elif 0 < occurrence < self.max/1000: # rare\r\n return 2", "title": "" }, { "docid": "048e6c7db7c4d184c0e8378dbaccd291", "score": "0.5353218", "text": "def vector_space(self, query_term, doc):\n # Frequency of term in the document (document term frequency - dtf)\n fik = doc.get_dtf()\n # Number of documents in the collection\n N = self.inverted_index.get_total_docs()\n # Number of documents containing the term (document frequency - df)\n nk = self.inverted_index.get_df(query_term)\n \n score = 0\n if fik:\n score = (math.log(fik) + 1) * math.log(N / nk)\n return score", "title": "" }, { "docid": "3e070c1ff4e7e37c6f5ae785447991c2", "score": "0.535267", "text": "def doc2model_smooth(doc, smooth_term, vocab):\n counts = Counter(doc)\n for term in vocab:\n counts[term] = (counts[term] + smooth_term) / (1. * len(doc) + smooth_term * len(vocab))\n return counts", "title": "" }, { "docid": "69e41f15588c3dceb4502fca53f5500b", "score": "0.53482276", "text": "def counter_word_in_me(self,word):\n\t\treturn self.words[word].docnum_in_cate if word in self.words else 0", "title": "" }, { "docid": "a371a8dcba9da1b11f25725cff0eb0be", "score": "0.5338591", "text": "def tfidf_log(term: str, document: dict, documents: list):\n tf = log_term_frequency(term, document)\n idf = inverse_document_frequency(term, documents)\n\n return tf*idf", "title": "" }, { "docid": "2cf82a638cdd1f64e903613e3db423ef", "score": "0.5337314", "text": "def doc2model(doc):\n counts = Counter(doc)\n for term in counts:\n counts[term] /= 1. 
* len(doc)\n return counts", "title": "" }, { "docid": "820934a2cf823a1271098eaf9e30e34c", "score": "0.53371984", "text": "def __calculate_rf(self, idsoal, tf:int, term:str, skor_huruf:str):\n pos = self.__docnum_repository.\\\n get_doc_num_pos_class(idsoal, term, skor_huruf)\n neg = self.__docnum_repository.\\\n get_doc_num_neg_class(idsoal, term, skor_huruf)\n \n rf = log(2 + (pos / max(1, neg)), 10)\n\n return rf", "title": "" }, { "docid": "cac6104b53477adb1a1fcc35ab444886", "score": "0.5333681", "text": "def idf(corpus):\n n_docs = len(corpus)\n idf_dict = dict.fromkeys(corpus[0].keys(), 0)\n\n for bag_word in corpus:\n for word, count in bag_word.items():\n if count > 0:\n idf_dict[word] += 1\n\n for word, count in idf_dict.items():\n if count > 0:\n idf_dict[word] = math.log(n_docs / float(count))\n else:\n idf_dict[word] = 0\n\n return idf_dict", "title": "" }, { "docid": "d81f03eac0b1b599273080979e0cf423", "score": "0.53275126", "text": "def score_one(self, sd):\r\n # Q.count(t) * tfn / (tfn + c) * log_2((N+1)/(C.count(t)+0.5) where tfn = D.count(t)*log_2(1 + avgdl/len(D))\r\n # print(\"avg_dl: {}\".format(sd.avg_dl))\r\n # print(\"num_docs: {}\".format(sd.num_docs))\r\n # print(\"total_terms: {}\".format(sd.total_terms))\r\n # print(\"query_length: {}\".format(sd.query_length))\r\n # print(\"t_id: {}\".format(sd.t_id))\r\n # print(\"query_term_weight: {}\".format(sd.query_term_weight))\r\n # print(\"doc_count: {}\".format(sd.doc_count))\r\n # print(\"corpus_term_count: {}\".format(sd.corpus_term_count))\r\n # print(\"d_id: {}\".format(sd.d_id))\r\n # print(\"doc_term_count: {}\".format(sd.doc_term_count))\r\n # print(\"doc_size: {}\".format(sd.doc_size))\r\n # print(\"doc_unique_terms: {}\".format(sd.doc_unique_terms))\r\n # print(\"================================================================================\")\r\n #return (self.param + sd.doc_term_count) / (self.param * sd.doc_unique_terms + sd.doc_size)\r\n\r\n c = self.param\r\n N = sd.num_docs\r\n tfn = sd.doc_term_count*math.log2(1 + sd.avg_dl/sd.doc_size)\r\n return sd.query_term_weight * tfn / (tfn + c) * math.log2((N+1)/(sd.corpus_term_count+0.5))", "title": "" }, { "docid": "d7d59a4ed6f580e24b52ff90a9b44597", "score": "0.53198946", "text": "def count_tfidf(self):\n\t\tif 'all_docs_num' not in dir(self):\n\t\t\tprint 'just set all_docs_num to class attr firstly'\n\t\t\treturn\n\t\tdocsvector={}\n\t\tsave_idf={}\n\t\tfor doc_id in self.docs:\n\t\t\tdocsvector[doc_id]=[]\n\t\t\tfor word in sorted(self.feature):\n\t\t\t\ttf=0 \n\t\t\t\tif (doc_id in self.words[word].rf) and (self.words_count[doc_id]): \n\t\t\t\t\ttf=self.words[word].rf[doc_id]\n\t\t\t\t\ttf/=self.words_count[doc_id]\n\t\t\t\tdf=self.words[word].docnum_in_cate+self.words[word].docnum_in_others\n\t\t\t\tidf=math.log(self.all_docs_num/df,10.0)\n\t\t\t\tsave_idf[word]=idf\n\t\t\t\ttfidf=tf*idf\n\t\t\t\tdocsvector[doc_id].append(tfidf)\n\t\treturn docsvector,save_idf", "title": "" }, { "docid": "1eed69c4d18982f8247bcdec249b4e8c", "score": "0.5296557", "text": "def get_term_dist(docs, word2id, lowercase=True):\n term_dist = np.zeros(len(word2id))\n for doc in docs:\n for word in doc:\n if lowercase:\n word = word.lower()\n if word in word2id:\n term_dist[word2id[word]] += 1\n\n # normalize absolute freqs to obtain a relative frequency term distribution\n term_dist /= np.sum(term_dist)\n if np.isnan(np.sum(term_dist)):\n # the sum is nan if docs only contains one document and that document\n # has no words in the vocabulary\n term_dist = 
np.zeros(len(word2id))\n return term_dist", "title": "" }, { "docid": "b8a569f30568615c5fa8011ae0688fce", "score": "0.5287967", "text": "def compute_idfs(documents):\n total_docs = len(documents.keys())\n # getting all the unique words in documents\n words = set([word for docs in documents.values() for word in docs])\n # getting idf for words using formula\n idf = {}\n for word in words:\n doc_count = 0\n for docs in documents:\n if word in documents[docs]:\n doc_count += 1\n idf[word] = math.log(total_docs / doc_count)\n return idf", "title": "" }, { "docid": "ebe8071083d9135be0914d01378b3339", "score": "0.52826416", "text": "def compute_normalized_idf(corpus):\n d = {}\n for document in tqdm(corpus):\n words_document = []\n try:\n for p in sent_tokenize(document):\n words_document.append(tokenize(p, [lower]))\n except TypeError:\n continue\n words_document = list(itertools.chain(*words_document))\n for w in words_document:\n d[w] = d[w]+1 if w in d else 1\n N = len(d.keys())\n return {w: math.log(N/d[w])/math.log(N) for w in d.keys()}", "title": "" }, { "docid": "f3759567f7256a52e33b99527ca8d298", "score": "0.52685946", "text": "def __calculate_ntf(self, idsoal, tf:int, term:str):\n max_tf = self.__ntfrf_repository.get_max_tf(idsoal, term)\n \n #if max_tf == 0: Tidak Mungkin MAX_TF 0 Jika ada TF\n # max_tf = 1\n\n ntf = tf / max(1, max_tf)\n return ntf", "title": "" }, { "docid": "19b3056c8b9250060a8d80506a6c52ea", "score": "0.52613556", "text": "def key_func(G):\n n, d = G\n return d[\"computed importance factor\"]", "title": "" }, { "docid": "2daecb6f39e5c4768954801a50777d94", "score": "0.52583647", "text": "def inverse_document_frequency(term: str, documents: list):\n number_of_documents = len(documents)\n df = document_frequency(term, documents)\n\n return log(number_of_documents) - log(df)", "title": "" }, { "docid": "c373ee7fee612107a1904343b223938b", "score": "0.52519584", "text": "def compute_idfs(documents):\n words = []\n for filename in documents:\n words = words + documents[filename]\n wordList = set(words)\n\n wordIDF = dict()\n nDoc = len(documents)\n for word in wordList:\n nAppear = 0\n for filename in documents:\n if word in documents[filename]:\n nAppear += 1\n wordIDF[word] = math.log(nDoc/nAppear)\n\n return wordIDF\n\n\n\n\n #raise NotImplementedError", "title": "" }, { "docid": "2773022acfe1beb00e4e81e0a0645d19", "score": "0.5249381", "text": "def get_tfidf_scores(self, query):\n length = {}\n scores = {}\n for id in self.ids:\n length[id] = 0\n scores[id] = 0\n\n for term in re.findall(r\"[\\w']+|[.,!?;]\", query.strip()):\n term = term.lower()\n if not term in self.document_frequencies:\n continue\n df = self.document_frequencies[term]\n wq = np.log(self.num_documents/df)\n for id in self.ids:\n document_dict = self.term_frequencies[id]\n if not term in document_dict:\n scores[id] += 0\n continue\n tf = document_dict[term]\n length[id] += tf**2\n wd = 1 + np.log(tf)\n scores[id] += wq * wd\n for id in self.ids:\n if length[id] == 0:\n continue\n scores[id] /= np.sqrt(length[id])\n\n return scores", "title": "" }, { "docid": "a94c659133dd8c21e8fcc8548626d3d1", "score": "0.5240084", "text": "def jelinek_mercer(self, query_term, doc):\n # Frequency of term in the document (document term frequency - dtf)\n fqiD = doc.get_dtf()\n # Length of the document\n dl = self.inverted_index.get_doc_length(doc.get_doc_id())\n # Frequency of term in the collection (collection term frequency - ctf)\n cqi = self.inverted_index.get_ctf(query_term)\n # Total length of all documents in 
the collection\n cl = self.inverted_index.get_collection_length()\n \n score = math.log(((1 - self.alphaD) * (fqiD / dl)) + (self.alphaD * (cqi / cl)))\n return score * self.query_terms.count(query_term)", "title": "" }, { "docid": "d636d28f74a111a6cb997e74b2e845f6", "score": "0.5229651", "text": "def no_of_documents_containing_a_word(self, query_word):\n if PROXIMITY.useCache:\n if query_word in BM25.no_of_docs_dict:\n return float(BM25.no_of_docs_dict[query_word])\n else:\n return 0\n else:\n if query_word in self.cache:\n return float(self.cache[query_word])\n else:\n no_of_documents_having_the_word = 0\n for para_id, ranked_word_dict in self.documents.items():\n if query_word in ranked_word_dict:\n no_of_documents_having_the_word += 1\n self.cache[query_word] = no_of_documents_having_the_word\n return float(no_of_documents_having_the_word)", "title": "" }, { "docid": "7168c69387fc787c36783669e23c67e5", "score": "0.52270603", "text": "def score(self, word, context):\n context = self.check_context(context)\n context_freqdist = self.ngrams[context]\n word_count = context_freqdist[word]\n context_count = context_freqdist.N()\n return (word_count + self.k) / \\\n (context_count + self.k_norm)", "title": "" }, { "docid": "21c5f8d2b68defa13eee488472bae88e", "score": "0.52200663", "text": "def compute_sentiment_for_tweet(tweet):\n text = polyglot.text.Text(tweet.lower(), hint_language_code='it')\n scores = [word.polarity for word in text.words if word.polarity != 0]\n return np.mean(scores) if scores else 0.0", "title": "" }, { "docid": "9d3401958a69e735567a05ff09171f57", "score": "0.5212027", "text": "def tf_idf(self, corpus):\n top200list = [(word, count) for count, word in corpus.top200]\n\n \n if corpus.type == \"subject\":\n word_count = self.subject_word_count\n else: \n word_count = self.body_word_count\n \n self.tf_idf_scorelist = []\n # print word_count\n for word, document_frequency in top200list:\n if word not in word_count:\n # If word does not appear in the message, tf-idf == 0\n self.tf_idf_scorelist.append([word, 0])\n else:\n # calculate the tf-idf score for the word, appending the pair (word, score) to the list\n tf_idf_score = word_count[word] * math.log10(corpus.length / float(document_frequency)) + 1.0/200\n self.tf_idf_scorelist.append([word, tf_idf_score])\n \n \n return self.tf_idf_scorelist", "title": "" }, { "docid": "23a54153fbba0f89e126596ded64aa86", "score": "0.52037126", "text": "def number_doc(self, document: list):\n #doc = [0] * self.item_number()\n for i in range(len(document)):\n if document[i] > 0:\n self.term_indoc[i] += 1\n #print(i, document[i])\n # return doc", "title": "" }, { "docid": "9527f60633735d3c6f56447d6af3074f", "score": "0.5198056", "text": "def compute_idfs(documents):\n idfs = dict()\n\n for document in documents:\n already_added = set()\n for word in documents[document]:\n if word not in idfs:\n idfs[word] = 1\n already_added.add(word)\n elif word not in already_added:\n idfs[word] += 1\n already_added.add(word)\n\n for word in idfs:\n idfs[word] = np.log( len(documents) / idfs[word] )\n\n return idfs", "title": "" }, { "docid": "a5bc4a76ecf34dc9cbcdb3d03acb7767", "score": "0.51957184", "text": "def _getCountForUnigram(self,word1):\n count=self.unigrams[(word1)]\n if count==0:\n count=0.001\n return count", "title": "" }, { "docid": "c15339eabb1d35644cd35efdac9d13e4", "score": "0.51779836", "text": "def findfreq(word):\n\n\tif word in mostcommon:\n\t\treturn mostcommon[word]\n\telse:\n\t\treturn 0", "title": "" }, { "docid": 
"e69067a5caacec71a03278ae8b987ae2", "score": "0.5170481", "text": "def idf(inverted_index: List, token: str, collection_size: int) -> float:\n term_frequence = len(inverted_index.get(token, []))\n if term_frequence is 0:\n return 0\n return math.log((collection_size+1) / term_frequence)", "title": "" }, { "docid": "e4aef0991b20ecb512b79c568277d6d9", "score": "0.51683074", "text": "def calcidf(df):\n docamount = df[1] - 1 # subtract\n docfreq = df[0]\n idf = dict() # create a new dictionary\n for term in docfreq: # for every entry in the df, calculate the idf\n frequency = docfreq.get(term)\n multiplication = docamount / frequency\n answer = mth.log2(multiplication)\n idf[term] = answer # save the idf in the dictionary for the current term\n return idf", "title": "" }, { "docid": "85b13c002cf9b0a943e30e0b203bbe07", "score": "0.5163134", "text": "def get_emmision(self, word, tag):\n if tag in ['*', 'STOP']:\n return 0.0\n new_word = self.replace_word(word)\n return self.word.get((tag, new_word), 0.0) / self.ngrams[1][tag]", "title": "" }, { "docid": "1f4716e3a6d2b4253747ce5bde849769", "score": "0.5157731", "text": "def tfidf_search(query, index_set):\n index = get_index(index_set) # [token] -> [(doc_id, token_count)]\n df = get_df(index_set) # {token: doc_freq}\n processed_query = preprocess_query(query, index_set)\n # YOUR CODE HERE\n\n # Normalize index?\n relevant_documents = []\n total_documents = 3204 # Dynamicly?\n \n for word in processed_query:\n for term_frequencies in index[word]:\n doc_id = term_frequencies[1]\n tf = term_frequencies[1]\n idf = math.log(total_documents/df[word]) # IDF = log(total_documents/df)\n tf_idf = tf*idf\n if not any([e[0] == doc_id for e in relevant_documents]) : # Document not encountered before\n relevant_documents.append([doc_id, tf_idf])\n else: # Document encountered. Add tf-idf score to previous score\n for document in relevant_documents:\n if document[0] == doc_id:\n document[1] += tf_idf\n break\n # Sort in descending order according to score\n relevant_documents = sorted(relevant_documents,key=lambda x: (x[1]))\n return relevant_documents", "title": "" }, { "docid": "62ef62e74acd25ee51d17ae5271bb00d", "score": "0.5125886", "text": "def idf(self, book_num, token):\n return np.log((self.sentence_id + 1) / (self.df(book_num, token) + 1))", "title": "" }, { "docid": "8310f28fea6a5c72c7ee004854250751", "score": "0.51238996", "text": "def tf_idf_score(tokens):\n for token in tokens.keys():\n df = len(tokens[token])\n idf = math.log(TOTAL_UNIQUE_DOC/df)\n for posting in tokens[token]:\n tf = posting[1]\n tf_idf = (1 + math.log(tf)) * idf\n posting[1] = tf_idf\n \n return tokens", "title": "" }, { "docid": "183d68bf522d93cf0b91a779d2ef0570", "score": "0.51199824", "text": "def compute_idfs(documents):\n idfs = {} # Initialise a dictionary of idfs\n for doc in documents:\n for word in documents[doc]:\n if word in idfs: # Word already calculated\n continue\n word_occurences = 1\n for doc2 in documents:\n if doc2 == doc: # Skip word iteration we are looking at\n continue\n if word in documents[doc2]:\n word_occurences += 1\n idfs[word] = math.log(len(documents) / word_occurences)\n return idfs", "title": "" } ]
ae240ac6cc2863ef66089a55dc3eb4ed
This method is the queue scheduler and executor \n
[ { "docid": "b9240612058aa55653c74834c388d516", "score": "0.0", "text": "def lcfs_alg(self, process_waiting):\n cpu_clock = 0\n local_finished_queue = []\n while len(process_waiting) is not 0:\n proc = process_waiting[-1]\n cpu_clock += 1\n\n if proc.exec_start is None:\n proc.exec_start = cpu_clock\n\n proc.remain -= 1\n\n if proc.remain is 0:\n proc.exec_stop = cpu_clock\n local_finished_queue.append(proc)\n del process_waiting[-1]\n self.finished_queue.append(local_finished_queue)", "title": "" } ]
[ { "docid": "ee659d2f4dda0e351f0bd89aaa2c9951", "score": "0.6921316", "text": "def processQueue(self):", "title": "" }, { "docid": "09982e5cbe4c7df30fe4ae28f60d7e1f", "score": "0.6735559", "text": "def scheduler(self):\n j = self.updater.job_queue\n print('getting daily queue')\n for hour in range(24):\n j.run_daily(\n self.reminder, \n days=(0, 1, 2, 3, 4, 5, 6), \n time=datetime.time(hour=hour, minute=0, second=0))", "title": "" }, { "docid": "bf97de4d4a319c49c9bf54fa22625dc4", "score": "0.6597712", "text": "def submit_task(d, queued_engine):", "title": "" }, { "docid": "23e1185fb8d5270a667d4e2d3257f59e", "score": "0.65356296", "text": "def schedule(self) -> TaskSchedulerResult:", "title": "" }, { "docid": "39d6c7f5c1bc044182218688a4d86895", "score": "0.64829427", "text": "async def _scheduler(self) -> None:\n while True:\n results = await self.broker.process_schedule()\n logger.info(\"poll-schedule\", executor=self.executor_id, results=results)\n await anyio.sleep(self.settings.schedule_interval)", "title": "" }, { "docid": "9f1871234406722881c04af1ec3fb7aa", "score": "0.64672405", "text": "def QueueAction(self):\r\n pass", "title": "" }, { "docid": "248113746a5f9e7b8974a9405b25fadd", "score": "0.64446646", "text": "def submit_to_queue(self, script_file):", "title": "" }, { "docid": "8614344a5e5d515b7cc39eff0c3a0201", "score": "0.63791", "text": "def postQueueSetup(self):", "title": "" }, { "docid": "001f189f2d2be78455aa44477059109e", "score": "0.63419634", "text": "def schedule():", "title": "" }, { "docid": "001f189f2d2be78455aa44477059109e", "score": "0.63419634", "text": "def schedule():", "title": "" }, { "docid": "fb9ce3e8b6a59bb826188d077a4f8427", "score": "0.6271516", "text": "def scheduling_step(self):\n self.qNetwork.scheduler.step()", "title": "" }, { "docid": "72fbb8bbe0b7777160730aa42e485ff0", "score": "0.6236307", "text": "def pre_task(d, queued_engine):", "title": "" }, { "docid": "d177ee87680f989087422d21a082a222", "score": "0.6169855", "text": "def q_handler(self, queue):\n pass", "title": "" }, { "docid": "5721e445d2d0d490b274d7a059fba8fa", "score": "0.61419314", "text": "def execute(obj, queue, restaurant):\n mean_a = 1000\n std_a = 200\n queue._queue.append(obj)\n if obj.q_type == 2:\n restaurant.groups.append(obj)\n restaurant.next_appearance_time = restaurant.simulation_time + abs(int(np.random.normal(mean_a, std_a)))\n obj.appearance_time = restaurant.simulation_time", "title": "" }, { "docid": "0155284ce73c99d5d0f469d3b8e58334", "score": "0.61335665", "text": "def execute(self):\n self.queue_command('execute')", "title": "" }, { "docid": "bbb39cde6a4e2601ecbeebc511beffc6", "score": "0.61177796", "text": "def q_handler(queue):\n pass", "title": "" }, { "docid": "064201ebbc9fd7b319e41af363cb1595", "score": "0.60882354", "text": "def runTask(self):", "title": "" }, { "docid": "8cbdbd93f2c1fdddea5994cba197e3d4", "score": "0.6071057", "text": "def publish_schedule(self):\n while not rospy.is_shutdown():\n # all encompassing try/catch to make sure this loop does not go down\n try:\n\n # copy all relevant entries under lock \n # we're taking a deepcopy as we might mess around with the times a bit\n with self.state_lock:\n expected_completion_time = deepcopy(self.expected_completion_time)\n active_batch = deepcopy(self.active_batch)\n normal_tasks = deepcopy(self.normal_tasks)\n time_critical_tasks = deepcopy(self.time_critical_tasks)\n\n now = rospy.get_rostime()\n # todo: fill this value better\n expected_end_of_batch = rospy.get_rostime() + rospy.Duration(120)\n\n # 
start from the time_cr\n schedule = ExecutionStatus(currently_executing = len(active_batch) > 0)\n all_tasks = ExecutionStatus(currently_executing = len(active_batch) > 0)\n\n schedule.header.stamp = now\n all_tasks.header.stamp = now\n\n for m in active_batch:\n m.task.execution_time = now\n schedule.execution_queue.append(m.task)\n all_tasks.execution_queue.append(m.task)\n\n\n # schedule.execution_queue += [m.task for m in time_critical_tasks]\n all_tasks.execution_queue += [m.task for m in time_critical_tasks]\n\n all_tasks.execution_queue += [m.task for m in normal_tasks] \n all_tasks.execution_queue = sorted(all_tasks.execution_queue, key=lambda x: x.start_after) \n all_tasks.execution_queue = sorted(all_tasks.execution_queue, key=lambda x: x.priority) \n\n\n self.schedule_publisher.publish(schedule) \n self.all_tasks_schedule_publisher.publish(all_tasks)\n\n self.update_schedule_condition.acquire()\n self.update_schedule_condition.wait()\n self.update_schedule_condition.release()\n except Exception, e:\n rospy.logwarn('Caught exception in publish_schedule loop: %s' % e)\n rospy.sleep(1)", "title": "" }, { "docid": "38fc6dd831a44eb5074059472f95b7c7", "score": "0.6069418", "text": "def post_task(d, queued_engine):", "title": "" }, { "docid": "875d8cfcb4347b491c2a6203b931d6f8", "score": "0.60506755", "text": "def boss():\n\n for i in xrange(1,10):\n task_queue.put(i)\n print('Assigned all work in iteration 1')\n\n for i in xrange(10,20):\n task_queue.put(i)\n print('Assigned all work in iteration 2')", "title": "" }, { "docid": "5b34c60faf976bca2db206ef0b3b9e4e", "score": "0.603343", "text": "def work_queue(q):\n global agent_data\n global listing_data\n global office_data\n if(agent_data):\n q.put(agent_data)\n time.sleep(0.1)\n if(listing_data):\n q.put(listing_data)\n time.sleep(0.1)\n if(office_data):\n q.put(office_data)\n time.sleep(0.1)\n q.join()", "title": "" }, { "docid": "6b118992b9444c8e05303ad1bf98525d", "score": "0.6030745", "text": "def worker(name, queue, results):\n pass", "title": "" }, { "docid": "0c7fc06973f069f1ba4b0b84046c4e92", "score": "0.602613", "text": "def __init__(self):\r\n self.queue = []", "title": "" }, { "docid": "e089a5135da4ce04f50dcc5a56ba7f97", "score": "0.6015598", "text": "def _queueDispatchNormal(self, nextEvent, queue=True, countdown=0, retryOptions=None, queueName=None):\r\n assert nextEvent is not None\r\n assert queueName\r\n \r\n url = self.buildUrl(self.currentState, nextEvent)\r\n params = self.buildParams(self.currentState, nextEvent)\r\n taskName = self.getTaskName(nextEvent)\r\n \r\n task = Task(name=taskName, method=self.method, url=url, params=params, countdown=countdown,\r\n retry_options=retryOptions, headers=self.headers)\r\n if queue:\r\n self.Queue(name=queueName).add(task)\r\n \r\n return task", "title": "" }, { "docid": "e089a5135da4ce04f50dcc5a56ba7f97", "score": "0.6015598", "text": "def _queueDispatchNormal(self, nextEvent, queue=True, countdown=0, retryOptions=None, queueName=None):\r\n assert nextEvent is not None\r\n assert queueName\r\n \r\n url = self.buildUrl(self.currentState, nextEvent)\r\n params = self.buildParams(self.currentState, nextEvent)\r\n taskName = self.getTaskName(nextEvent)\r\n \r\n task = Task(name=taskName, method=self.method, url=url, params=params, countdown=countdown,\r\n retry_options=retryOptions, headers=self.headers)\r\n if queue:\r\n self.Queue(name=queueName).add(task)\r\n \r\n return task", "title": "" }, { "docid": "9165fb63c6af7adf8ccaa633cfde4dcc", "score": "0.60065794", "text": "def 
create_executor():", "title": "" }, { "docid": "931a6cef09618f687c54fd50f221853d", "score": "0.6002376", "text": "def runMovementExecutor(ctrlQueue):\n ctrlQueue.processMovementAsync()", "title": "" }, { "docid": "fc5981bd695bd8d18f8d894cd71e5336", "score": "0.59982544", "text": "def __init__(self):\n self.queue = []", "title": "" }, { "docid": "fc5981bd695bd8d18f8d894cd71e5336", "score": "0.59982544", "text": "def __init__(self):\n self.queue = []", "title": "" }, { "docid": "fc5981bd695bd8d18f8d894cd71e5336", "score": "0.59982544", "text": "def __init__(self):\n self.queue = []", "title": "" }, { "docid": "dde6721867ea81c6b064153479d33d58", "score": "0.59972245", "text": "def submit(self, func=_ping, *args):\n self.queue.put((func, args))", "title": "" }, { "docid": "87ec4ff646818ad7d4040082c3d00252", "score": "0.5976356", "text": "def __init__(self):\n\n self.queue = []", "title": "" }, { "docid": "65a827e4934eb428c0fde4b7914992d7", "score": "0.5974972", "text": "def start_tasks(automatic_tasks):\n scheduler = AsyncIOScheduler()\n print('Started Chron Monitors')\n # scheduler.add_job(automatic_tasks.block_monitor,CronTrigger(hour='02',second='02'), misfire_grace_time=2,\n # max_instances=20)\n #\n # scheduler.add_job(automatic_tasks.send_dpops_stats,\n # CronTrigger(minute='00'), misfire_grace_time=2, max_instances=20)\n #\n # scheduler.add_job(automatic_tasks.delegate_ranks, CronTrigger(hour='02', second='02'), misfire_grace_time=2,\n # max_instances=20)\n scheduler.add_job(automatic_tasks.delegate_last_block_check,\n CronTrigger(minute='02,04,06,08,10,12,14,16,18,20,22,24,26,'\n '28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58'), misfire_grace_time=2,\n max_instances=20)\n\n\n\n scheduler.start()\n print('Started Chron Monitors : DONE')\n return scheduler", "title": "" }, { "docid": "aa413d93a525947e668840285f42e92d", "score": "0.59666026", "text": "def run(self):\n while self.frame_number <= self.max_frame_number or not self.run_queue.empty():\n \n # get a frame when frame period arrives\n if self.time % self.frame_period == 0:\n self.frame_arrival(self.frame_number)\n self.frame_number = self.frame_number + 1\n\n # if there are tasks in the run queue\n if not self.run_queue.empty():\n top_task = self.run_queue.queue[0]\n top_task.remain_time = top_task.remain_time - 1\n\n # if the task has finished\n if top_task.remain_time == 0:\n task = self.run_queue.get()\n self.task_finish_count = self.task_finish_count + 1\n task.order = self.task_finish_count\n # add 1 because self.time start from 0\n task.response_time = self.time - task.enqueue_time + 1\n if task.response_time > task.deadline:\n task.missed = 1\n\n self.history.append(task)\n # you can comment out the following line to skip yolo for shorter program runtime\n #self.run_yolo(task)\n \n self.time = self.time + 1\n \n # save scheduling history to file\n self.save_history()\n print(\"Scheduling history saved.\")", "title": "" }, { "docid": "c7efaa6ddd630ba65c6ead98eef8f7ed", "score": "0.5962126", "text": "def run(self):\n try:\n quit_request_detected = False\n while True:\n aFunction, arguments = self.manager.task_queue.get()\n if aFunction is None:\n break\n if quit_request_detected:\n continue\n try:\n try:\n args, kwargs = arguments\n except ValueError:\n args = arguments\n kwargs = {}\n aFunction(*args, **kwargs) # execute the task\n except Exception:\n self.config.logger.error(\"Error in processing a job\",\n exc_info=True)\n except KeyboardInterrupt:\n self.config.logger.info('quit request detected')\n 
quit_request_detected = True\n #thread.interrupt_main() # only needed if signal handler\n # not registerd\n except Exception:\n self.config.logger.critical(\"Failure in task_queue\", exc_info=True)", "title": "" }, { "docid": "22d1414bf9d5982b58b1f244b0c65774", "score": "0.5945777", "text": "def put( self ):\r\n BUY_QUEUE_DB.createQueue( self )", "title": "" }, { "docid": "6c41ee7cfb6588e82aa8a85961bb28b4", "score": "0.59050715", "text": "def execute(self, evq):\n\n\t\t# can't send if it's busy sending - and doesn't have a unit available for all transmissions\n\t\tbw = int(self.node.available_bandwidth()/self.concurr)\n\t\tif bw < cfg.NODE_BANDWIDTH_UNIT:\n\t\t\treturn None\n\t\t# then pop the last task we got\n\t\tt = self.node.pop_task_to_send(self.k, self.time)\n\t\t# if it's an invalid choice return without sending out the task\n\t\tif t == None:\n\t\t\treturn None\n\t\t# else plan the landing\n\t\tarrival_time = self.time+ task_communication_time(t.packet_size, point_to_point_transmission_rate(self.node._distances[self.destination.index], bw))\n\t\tevq.add_event(Task_arrival(arrival_time, self.destination, self.k, t))\n\t\tevq.add_event(Start_transmitting(self.time, self.node, arrival_time, bw))\n\t\treturn None", "title": "" }, { "docid": "5dc3a7255e77ee0821f078f00fcd92d7", "score": "0.5901182", "text": "def callback_task_scheduler_creator(queue_task_scheduler, task_supervisor):\n global entity_task_supervisor_info\n global active_task_schedulers\n logger.info(\"SchedulerDealer started\")\n\n while True:\n task_scheduler = None\n # wait for a new materialflow\n materialflow_specification = queue_task_scheduler.get()\n\n # create new task shedular\n task_scheduler = Scheduler(materialflow_specification, task_supervisor)\n\n active_task_schedulers.append(task_scheduler)\n task_scheduler.start()\n\n entity_task_supervisor_info.appendMaterialflow(\n materialflow_specification.id)\n\n task_supervisor.orion_connector.update_entity(\n entity_task_supervisor_info)\n logger.info(\"New TaskScheduler added\")\n\n logger.info(\"SchedulerDealer ended\")", "title": "" }, { "docid": "32245beee93a6556c958388fefddeb79", "score": "0.5898572", "text": "def worker_thread( self ):\n\n # rand = random.Random()\n \n #while self.running:\n # # To simulate asynchronous I/O, we create a random number at\n # # random intervals. 
Replace the following 2 lines with the real\n # # thing.\n # time.sleep( rand.random() * 1 )\n # msg = rand.random()\n # self.queue.put(msg)", "title": "" }, { "docid": "cbcfc85165804db4bd663142c9f82f60", "score": "0.5887806", "text": "def create_queue():\n return True", "title": "" }, { "docid": "c71da4fb04df7990abf1afd84f95e621", "score": "0.5882573", "text": "def handle_queue(self):\n if not self.queue:\n return\n calculator = self.get_free_calculator()\n if calculator is not None:\n task = self.queue.pop(0) # tuple: (data, client_addr)\n print 'Task from queue'\n self.handle_client_request(*task)", "title": "" }, { "docid": "4a62e16f3cb28ae4221cf1ca49886ef8", "score": "0.58702046", "text": "def __call__(self, *args, **kwargs):\n self.schedule(*args, **kwargs)", "title": "" }, { "docid": "26860fb43d63571fc92461e544732e1a", "score": "0.58692646", "text": "def handle_output(self, queue):", "title": "" }, { "docid": "03a30ef670c6ed8fc43d7558bd9bf55b", "score": "0.5823347", "text": "def run(self):\n\n # Startup\n # ------------------------------------------------------------------\n if(self.first_execution):\n\n # log\n self.logger.debug('Thread first execution')\n\n # setup_timer\n self.setup_timer.emit()\n\n # first_execution\n self.first_execution = False\n\n # Code\n # ------------------------------------------------------------------\n else:\n\n try:\n obj = self.queue.get(block=False)\n except Queue.Empty:\n self.logger.debug('Queue empty')\n return\n\n try:\n obj()\n except Exception, e:\n self.logger.debug('{0}'.format(e))\n finally:\n # notify queue\n self.queue.task_done()", "title": "" }, { "docid": "2b14f6ebce1518b0993d07e19d4c2b25", "score": "0.5823279", "text": "def process(self):\n self.out.idle=True\n if self.task.finished:\n self.sim.schedule(TaskFinishedEvent(self.sim, self.task, self.time))\n else:\n self.sim.add_task_to_rr_queue(self.task)\n \n \n task=self.sim.get_task_from_rr_queue() \n task.wait_out=task.wait_out+(self.time-task.outtime)\n \n# self.sim.schedule(OutInEvent(self.sim, task, self.time, self.out))\n self.sim.schedule(TaskOutArrivalEvent(self.sim, task, self.time))", "title": "" }, { "docid": "3392e738f705ae414a03887f3d19d98a", "score": "0.58100414", "text": "def execute(self):\n logging.info(\"Executing Task with ID: \" +str(self.get_id() ) + \" and Name: \"+ self.get_name() )", "title": "" }, { "docid": "b0306046c69aaf5e470b4c9533005e2d", "score": "0.5802936", "text": "def schedule_run(self, schedule):\n\n if len(schedule.jobs): # no need to start thread if no jobs in queue\n t = threading.Thread(target=self.scheduler_thread)\n # print('Starting scheduler thread')\n t.daemon = True # allows this thread to be auto-killed on program exit\n t.name = 'Scheduler Thread' # naming the thread helps with debugging\n t.start()", "title": "" }, { "docid": "90db4594bf0930bb251744ec1e4107cb", "score": "0.5789738", "text": "def sort_queue(self, queue, chart):\n ...", "title": "" }, { "docid": "90db4594bf0930bb251744ec1e4107cb", "score": "0.5789738", "text": "def sort_queue(self, queue, chart):\n ...", "title": "" }, { "docid": "289784cde6c55a3047bfe2c5249ec8fe", "score": "0.5789452", "text": "def process(self):\n self.out.work(self.task, self.time)", "title": "" }, { "docid": "fc487244608e8a94ba3fd3938c4f272e", "score": "0.5786862", "text": "def task(self):\n self._command()", "title": "" }, { "docid": "c78660fbebac34809345bc05ab6f3507", "score": "0.57828987", "text": "def __init__(self):\n\n self.queue = None", "title": "" }, { "docid": 
"8132b470834cdd118bd19824c5f071e4", "score": "0.5778886", "text": "def start_worker(self):", "title": "" }, { "docid": "9bc9072fca2436f3a6180285bc49096b", "score": "0.57753813", "text": "def test_enqueue_scheduled_jobs(self):\n queue = Queue(connection=self.testconn)\n registry = ScheduledJobRegistry(queue=queue)\n job = Job.create('myfunc', connection=self.testconn)\n job.save()\n registry.schedule(job, datetime(2019, 1, 1, tzinfo=timezone.utc))\n scheduler = RQScheduler([queue], connection=self.testconn)\n scheduler.acquire_locks()\n scheduler.enqueue_scheduled_jobs()\n self.assertEqual(len(queue), 1)\n\n # After job is scheduled, registry should be empty\n self.assertEqual(len(registry), 0)\n\n # Jobs scheduled in the far future should not be affected\n registry.schedule(job, datetime(2100, 1, 1, tzinfo=timezone.utc))\n scheduler.enqueue_scheduled_jobs()\n self.assertEqual(len(queue), 1)", "title": "" }, { "docid": "2aa461c2a59099b28f22645538a51645", "score": "0.5753001", "text": "def wthdMain(self):\r\n while self.running:\r\n time.sleep(1.0)\r\n self.queue.put(\"main\")", "title": "" }, { "docid": "ef597efc0c18387906e9a5714d8a73c7", "score": "0.57470465", "text": "def queue(self):\n self._queue = True\n return self", "title": "" }, { "docid": "33cdde67ec2238bee286e957570d738d", "score": "0.57407373", "text": "def put_job(self,name,func):\n self._qlock.acquire()\n self._q.append((name,func))\n self._qlock.release()", "title": "" }, { "docid": "59881637e3876c247e50528a084c1dc4", "score": "0.5737807", "text": "def schedule(self):\n if not self.schedule_expression:\n return\n self.__create_cw_log_groups()\n self.register_task_definition()\n self.scheduler.schedule()\n click.secho(f\"Scheduled task {self.taskName}\", fg=\"cyan\")", "title": "" }, { "docid": "cf358ce4420190e26bca11676b52cf16", "score": "0.5737732", "text": "def _apply_queue(self, args, thisTask, cmd_args, payload, setup):\n if not (thisTask.queue is None or thisTask.queue == \"\"):\n cmd_args.append(\"-p\") # partition\n cmd_args.append(thisTask.queue)\n return True", "title": "" }, { "docid": "6f534421e7cf609a3b2e736fd7228fe9", "score": "0.57371837", "text": "def runTest(self):\n sys_logging(\"start test\")\n switch_init(self.client)\n\n port = port_list[1]\n level = [0,1,2]\n max_childs = [4, 64, 8]\n sched_group_id_group_node = [None]*8\n sched_group_service_id = 1\n service_queueId = [None]*8\n\n\n sched_type1 = SAI_SCHEDULING_TYPE_STRICT\n sched_type2 = SAI_SCHEDULING_TYPE_DWRR\n sched_weight = 10\n cir = 2000000\n cbs = 256000\n pir = 1000000\n pbs = 64000\n queueId_list = []\n\n sched_oid1 = sai_thrift_qos_create_scheduler_profile(self.client, sched_type1, 0, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid1)\n\n sched_oid2 = sai_thrift_qos_create_scheduler_profile(self.client, sched_type2, sched_weight, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid2)\n sched_oid_list = [sched_oid1, sched_oid2]\n\n \n sched_group_id_root = sai_thrift_qos_create_scheduler_group(self.client, port, level[0], max_childs[0], port, 0)\n \n sched_group_id_chan_node = sai_thrift_qos_create_scheduler_group(self.client, port, level[1], max_childs[1], sched_group_id_root, 0)\n \n sched_group_id_group_node[0] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id) \n sched_group_id_group_node[1] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, 
service_id=sched_group_service_id)\n sched_group_id_group_node[2] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[3] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[4] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id) \n sched_group_id_group_node[5] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[6] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[7] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n\n \n queue_type = SAI_QUEUE_TYPE_SERVICE\n queue_index = [0,1,2,3,4,5,6,7]\n service_queueId[0] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[0], parent_id=sched_group_id_group_node[0], service_id=sched_group_service_id, sche_id=sched_oid1) \n service_queueId[1] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[1], parent_id=sched_group_id_group_node[1], service_id=sched_group_service_id, sche_id=sched_oid2)\n service_queueId[2] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[2], parent_id=sched_group_id_group_node[1], service_id=sched_group_service_id, sche_id=sched_oid2)\n service_queueId[3] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[3], parent_id=sched_group_id_group_node[3], service_id=sched_group_service_id, sche_id=sched_oid1)\n service_queueId[4] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[4], parent_id=sched_group_id_group_node[4], service_id=sched_group_service_id)\n service_queueId[5] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[5], parent_id=sched_group_id_group_node[4], service_id=sched_group_service_id)\n service_queueId[6] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[6], parent_id=sched_group_id_group_node[4], service_id=sched_group_service_id)\n service_queueId[7] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[7], parent_id=sched_group_id_group_node[7], service_id=sched_group_service_id)\n #pdb.set_trace()\n\n warmboot(self.client)\n try:\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n 
sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_oid2)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[4], attr)\n \n self.client.sai_thrift_set_queue_attribute(service_queueId[5], attr)\n self.client.sai_thrift_set_queue_attribute(service_queueId[6], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_oid1)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[7], attr)\n \n \n\n\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n 
sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError()\n \n\n finally:\n #pdb.set_trace()\n for ii in range(8):\n attr_value = sai_thrift_attribute_value_t(oid=0)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[ii], attr)\n\n #pdb.set_trace()\n for i in range(8):\n self.client.sai_thrift_remove_queue(service_queueId[i])\n\n for ii in range(8):\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_group_node[ii])\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_chan_node)\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_root)\n\n self.client.sai_thrift_remove_scheduler_profile(sched_oid1)\n self.client.sai_thrift_remove_scheduler_profile(sched_oid2)", "title": "" }, { "docid": "55c8d13354431b48da081edcc48c77c9", "score": "0.5726604", "text": "def __init__(self):\n self.__queue = Queue.Queue()", "title": "" }, { "docid": "e4340d2e81a85e699eaef96f0da65ac6", "score": "0.5724557", "text": "def run_dispatcher(sender, instance, created, **kwargs):\n if created or instance.status==\"waiting\": \n #se il dispatcher e' appena stato creato o ri-settato su waiting accodalo\n if not instance.send_date: instance.send_date = datetime.now()\n timestamp = datetimeToUnixSec(instance.send_date)\n spooling_dispatcher.spool(disp=\"%s\" % instance.pk,at=timestamp)", "title": "" }, { "docid": "96666b0b65b7f4194ff1f36151cdf17f", "score": "0.5721709", "text": "def do_GET(self):\n # f = self.send_head()\n # if f:\n # self.copyfile(f, self.wfile)\n # f.close()\n print self.path \n \n if not self.server.sheduler:\n f.write(\"Sheduler not running\")\n return\n args =self.path\n print \"args\", args\n print 'sheduler', self.server.sheduler\n # Ceci est juste un test que ajoute un job sur le sheduler(\n # un job en duren -a_job_for_test, mais on aurait pu imaginer*\n # un truc plus generic , ou la method en question a executee\n # est passe par la requete Http.\n now = datetime.now() + timedelta(seconds=10)\n\n # comme un 'cron' , voici une facon de scecifier au sheduler\n # un job a effctue tous le trois secondes:) .Note que dans\n # l'exemple la 
fonction 'a_job_for_test' est juste\n # un exemple , mais nous aurions pu aussi bien imaginer que\n # le nom de la fonction est passe au serveur via Http\n # un truc du genre self.path.split('')\n self.server.sheduler.scheduler.add_interval_job(\n a_job_for_test,\n seconds=3,\n name='a_job_for_test',\n jobstore='MongoDB',\n args=[now])\n\n # comme un 'cron' , voici une facon de scecifier au sheduler\n # un job a effctued dans 10 secondes un seule fois.Note que dans\n # l'exemple la fonction 'a_job_for_test' est juste\n # un exemple , mais nous aurions pu aussi bien imaginer que\n # le nom de la fonction est passe au serveur via Http\n # un truc du genre self.path.split('')\n \n # self.server.sheduler.scheduler.add_date_job(a_job_for_test,\n # now,\n # name='a_job_for_test',\n # jobstore='MongoDB',\n # args=[now])\n thread.start_new_thread(\n self.server.sheduler.scheduler.start, ())", "title": "" }, { "docid": "03e660d8b4b5d1a049987a4269af748a", "score": "0.57131505", "text": "def start(self):\n self.scheduler.run()", "title": "" }, { "docid": "c44f64483249506b3cf24840449c6458", "score": "0.5688256", "text": "def _thread_proc(self):", "title": "" }, { "docid": "f4cfd32a0bc0ab24d6d8739d4bfe14ce", "score": "0.5688189", "text": "def __init__(self):\n self.queue = multiprocessing.Queue()", "title": "" }, { "docid": "30a775ed4183d5e40a776f446586eebd", "score": "0.56776375", "text": "def process(self): \n self.cpu.idle=True\n if not self.sim.sjf_queue.empty():\n task=self.sim.get_task_from_sjf_queue()\n self.sim.schedule(CpuInEvent(self.sim, task, self.time, self.cpu))\n else:\n pass\n \n self.task.outtime=self.time\n event=TaskOutArrivalEvent(self.sim, self.task, self.time)\n self.sim.schedule(event)", "title": "" }, { "docid": "8f178b55208475f485be62e6c6bffd09", "score": "0.5674458", "text": "def handle_queue(self):\n cmd, payload = self.queue.get()\n if cmd == 'quit':\n self.running = False\n\n elif cmd == 'insert':\n for file in payload['files']:\n self.send({\n 'do': 'index_add',\n 'filename': file\n })\n\n elif cmd == 'delete':\n for file in payload['files']:\n self.send({\n 'do': 'index_remove',\n 'filename': file\n })\n\n elif cmd == 'search':\n file = payload['file']\n self.send({\n 'do': 'index_search',\n 'filename': file\n })\n\n elif cmd == 'download':\n file = payload['file']\n addr = self.results.get(file)\n if addr is not None:\n print('Using cached address (%s:%s) for %s' % (*addr, file))\n download(self.naxos_path, file, addr)\n else:\n print('You have to search for the file first.') # TODO: Auto-search\n self.input_thread.unblock()\n\n elif cmd == 'discover_overlay':\n self.overlay_edges = set()\n self.send({\n 'do': 'discover_overlay'\n })\n self.periodic_runner.register(watch_edges, {\n 'client': self,\n 'old': set(),\n 'queue': self.queue,\n }, 0.5)\n \n elif cmd == 'print_overlay':\n self.periodic_runner.unregister(watch_edges)\n layout = 'strict graph {\\n\\t'+ '\\n\\t'.join(('%s -- %s' % e for e in self.overlay_edges)) +'\\n}\\n'\n print('Visit the following webpage for a visualization of the overlay:')\n print(tiny_url('https://dreampuf.github.io/GraphvizOnline/#' + urllib.parse.quote(layout)))\n print('Shortened since the link can get really big.')\n self.input_thread.unblock()\n else:\n raise ValueError('Unexpected command %s' % cmd)", "title": "" }, { "docid": "b0e8a2935bdc1060f33beeb24a84a53e", "score": "0.56606203", "text": "def __init__(self):\n self.queues = [], []", "title": "" }, { "docid": "e14a74e5ad4c335259a0bf3975baa8b0", "score": "0.56601393", "text": "def 
execCmd(self , queue , rien):\n while True:\n msg = queue.get()\n if msg == \"Connect\":\n self.tGrbl.connectToGrbl()\n self.tGuillotine.updateBtnState()\n elif msg == \"Disconnect\": \n self.tGrbl.disconnectToGrbl()\n self.tGuillotine.updateBtnState()", "title": "" }, { "docid": "e2a359560e8797247bc3fe9bfbec9b8a", "score": "0.5657827", "text": "def runTest(self):\n sys_logging(\"start test\")\n switch_init(self.client)\n port = port_list[1]\n sched_type1 = SAI_SCHEDULING_TYPE_STRICT\n sched_type2 = SAI_SCHEDULING_TYPE_DWRR\n sched_weight = 10\n cir = 2000000\n cbs = 256000\n pir = 1000000\n pbs = 64000\n queueId_list = []\n\n sched_oid1 = sai_thrift_qos_create_scheduler_profile(self.client, sched_type1, 0, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid1)\n\n sched_oid2 = sai_thrift_qos_create_scheduler_profile(self.client, sched_type2, sched_weight, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid2)\n sched_oid_list = [sched_oid1, sched_oid2]\n attrs = self.client.sai_thrift_get_port_attribute(port)\n for a in attrs.attr_list: \n if a.id == SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES:\n sys_logging(\"queue number:%d\"%a.value.u32)\n queue_num = a.value.u32\n if a.id == SAI_PORT_ATTR_QOS_QUEUE_LIST:\n for i in range(a.value.objlist.count):\n queueId_list.append(a.value.objlist.object_id_list[i])\n sys_logging(\"queue_oid[%d]:0x%X\"%(i, a.value.objlist.object_id_list[i]))\n attr_value = sai_thrift_attribute_value_t(oid=sched_oid_list[i%2])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[i], attr)\n #pdb.set_trace()\n warmboot(self.client)\n try:\n for ii in range(queue_num):\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[ii])\n for a in attrs.attr_list: \n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"Get queue[0x%x] scheduler Profile oid: 0x%x\"%(queueId_list[ii], a.value.oid))\n if a.value.oid != sched_oid_list[ii%2]:\n raise NotImplementedError() \n for i in range(8):\n attr_value = sai_thrift_attribute_value_t(oid=sched_oid_list[(i+1)%2])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[i], attr)\n #pdb.set_trace()\n for ii in range(queue_num):\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[ii])\n for a in attrs.attr_list: \n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"Get queue[0x%x] scheduler Profile oid: 0x%x\"%(queueId_list[ii], a.value.oid))\n if a.value.oid != sched_oid_list[(ii+1)%2]:\n raise NotImplementedError() \n finally:\n for i in range(queue_num):\n self.client.sai_thrift_remove_queue(queueId_list[i])\n self.client.sai_thrift_remove_scheduler_profile(sched_oid1)\n self.client.sai_thrift_remove_scheduler_profile(sched_oid2)", "title": "" }, { "docid": "ebf01b91402a0f6f1ca89b4cb8261dcd", "score": "0.56389487", "text": "def runTest(self):\n sys_logging(\"start test\")\n switch_init(self.client)\n\n port = port_list[1]\n level = [0,1,2]\n max_childs = [4, 64, 8]\n sched_group_id_group_node = [None]*8\n\n\n sched_type1 = SAI_SCHEDULING_TYPE_STRICT\n sched_type2 = SAI_SCHEDULING_TYPE_DWRR\n sched_weight = 10\n cir = 2000000\n cbs = 256000\n pir = 1000000\n pbs = 64000\n queueId_list = []\n\n sched_oid1 = sai_thrift_qos_create_scheduler_profile(self.client, sched_type1, 0, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid1)\n\n sched_oid2 = 
sai_thrift_qos_create_scheduler_profile(self.client, sched_type2, sched_weight, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid2)\n sched_oid_list = [sched_oid1, sched_oid2]\n\n \n sched_group_id_root = sai_thrift_qos_create_scheduler_group(self.client, port, level[0], max_childs[0], port, 0)\n \n sched_group_id_chan_node = sai_thrift_qos_create_scheduler_group(self.client, port, level[1], max_childs[1], sched_group_id_root, 0)\n \n sched_group_id_group_node[0] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0) \n sched_group_id_group_node[1] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0)\n sched_group_id_group_node[2] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0)\n sched_group_id_group_node[3] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0)\n sched_group_id_group_node[4] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0) \n sched_group_id_group_node[5] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0)\n sched_group_id_group_node[6] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0)\n sched_group_id_group_node[7] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0)\n\n \n attrs = self.client.sai_thrift_get_port_attribute(port)\n for a in attrs.attr_list: \n if a.id == SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES:\n sys_logging(\"queue number:%d\"%a.value.u32)\n queue_num = a.value.u32\n if a.id == SAI_PORT_ATTR_QOS_QUEUE_LIST:\n for i in range(a.value.objlist.count):\n queueId_list.append(a.value.objlist.object_id_list[i])\n sys_logging(\"queue_oid[%d]:0x%X\"%(i, a.value.objlist.object_id_list[i]))\n\n sys_logging(\"=======set queue parent node profile=======\")\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[0])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[0], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[1])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[1], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[2], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[3])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[3], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[4])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[4], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[5], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[6], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[7])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[7], attr)\n #pdb.set_trace()\n\n warmboot(self.client)\n try:\n 
attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[0]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[3]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[7]:\n raise NotImplementedError()\n\n sys_logging(\"=======set queue scheduler profile=======\")\n attr_value = sai_thrift_attribute_value_t(oid=sched_oid2)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[1], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[2], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[4], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[5], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[6], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_oid1)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[0], attr)\n 
self.client.sai_thrift_set_queue_attribute(queueId_list[3], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[7], attr)\n\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid2:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:\n sys_logging(\"get queue scheduler profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_oid1:\n raise NotImplementedError()\n\n\n sys_logging(\"=======remove queue parent node profile=======\")\n #pdb.set_trace()\n for ii in range(8):\n attr_value = sai_thrift_attribute_value_t(oid=0)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[ii], attr)\n\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n 
sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n\n\n #pdb.set_trace()\n\n sys_logging(\"=======set queue parent node profile again=======\")\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[0])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[0], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[1])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[1], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[2], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[3])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[3], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[4])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(queueId_list[4], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[5], attr)\n self.client.sai_thrift_set_queue_attribute(queueId_list[6], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[7])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n 
self.client.sai_thrift_set_queue_attribute(queueId_list[7], attr)\n\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[0]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[3]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(queueId_list[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[7]:\n raise NotImplementedError()\n\n finally:\n\n #pdb.set_trace()\n for i in range(8):\n self.client.sai_thrift_remove_queue(queueId_list[i])\n\n for ii in range(8):\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_group_node[ii])\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_chan_node)\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_root)\n\n self.client.sai_thrift_remove_scheduler_profile(sched_oid1)\n self.client.sai_thrift_remove_scheduler_profile(sched_oid2)", "title": "" }, { "docid": "e19b9979195fbdb29e7088afa802a59b", "score": "0.5636245", "text": "def send_task(self):\n packed = zlib.compress(cPickle.dumps(self.task_dict))\n encoded = base64.b64encode(packed)\n params_data = {\"task_data\":encoded}\n retry_options = 
taskqueue.TaskRetryOptions(\n task_retry_limit = self.task_retry_limit)\n the_task = taskqueue.Task(url=\"/task\", params=params_data,\n countdown = self.start_delay, retry_options = retry_options)\n the_task.add() \n #taskqueue.add(url=\"/task\", params=params_data) ", "title": "" }, { "docid": "c9121915943e133cb0ecd8d178b3438e", "score": "0.56360734", "text": "def next_in_schedule(self):\n if len(self.execution_queue) > 0:\n now = rospy.get_rostime()\n next_task = self.execution_queue[0]\n # get travel time to next task. \n travel_time_to_next_task = self._travel_duration_fn(next_task.start_node_id)\n\n rospy.loginfo('start window: %s' % rostime_to_python(next_task.start_after).time())\n rospy.loginfo(' now: %s' % rostime_to_python(now).time())\n rospy.loginfo(' travel time: %s' % rosduration_to_python(travel_time_to_next_task))\n \n # the task can be executed if we can travel to the task in time for the\n # the start window to open\n if now >= (next_task.start_after - travel_time_to_next_task):\n rospy.loginfo('start window is open')\n self.execute_next_task()\n else: \n # delay is the difference between now and the time to start navigating\n exe_delay = (next_task.start_after - travel_time_to_next_task) - now\n rospy.loginfo('need to delay %s.%s for execution' % (exe_delay.secs, exe_delay.nsecs))\n self.current_task = None\n self.execution_delay_timer = rospy.Timer(exe_delay, self.execution_delay_cb, oneshot=True)\n else:\n self.current_task = None", "title": "" }, { "docid": "85872894ea6da5ec2ca6a8f71360f951", "score": "0.56329817", "text": "def dequeue(self):\n pass", "title": "" }, { "docid": "738215a11a3ceae0faf5085512e352ce", "score": "0.56298023", "text": "def commandPeriodic(self):\n self.scheduler.run()", "title": "" }, { "docid": "89d792502ffdd9b809c144ae6a650680", "score": "0.56293863", "text": "def handle_queue(self):\n cmd, payload = self.queue.get()\n if cmd == 'connect':\n self.network.connect_to_node(payload['addr'], 'paxos_join_request')\n # TODO: add to self.paxos.peer_addresses\n elif cmd == 'first_connection':\n # self.network.connect_to_node(payload['addr'])\n self.network.broadcast({\n 'do': 'ping',\n })\n self.periodic_runner.register(watch_address_pool, {\n 'queue': self.queue,\n 'network': self.network,\n 'old': set(),\n }, 0.5)\n elif cmd == 'connect_to_sampled':\n picked = payload['picked']\n log.debug(\"picked: %s\", picked)\n for addr in picked:\n self.network.connect_to_node(addr)\n self.periodic_runner.unregister(watch_address_pool)\n elif cmd == 'start_paxos':\n if self.paxos is not None:\n self.paxos.start_paxos_round(payload['value'])\n else:\n raise ValueError('Unknown command: %s' % cmd)", "title": "" }, { "docid": "04ac545de20b6de2f9134bf4e231bf5a", "score": "0.56275237", "text": "def __init__(self):\n\t\tself.queue = []\n\t\tself.poper = []", "title": "" }, { "docid": "1674ebfbc119d61410be73cb43f6f664", "score": "0.56234163", "text": "def main():\n\n # Build data\n jobs = squeue(user='lmh1', # user id - change to your own\n name=['iprPy_1', 'iprPy_4']) # job names - change to your own\n logs = parse_runner_logs()\n rundirs, runners = check_run_directories()\n\n # Merge data\n logjobs = jobs.merge(logs, how='outer', on='jobid')\n logjobs.loc[(logjobs.status=='active') & (pd.isna(logjobs.user)), 'status'] = 'crashed'\n runlogjobs = logjobs.merge(runners, how='outer', on='pid')\n\n # Loop over all run directories\n keys = ['jobid', 'pid', 'status', 'time', 'calcid', 'tmpdir']\n for i in rundirs.index:\n rundir = rundirs.loc[i]\n\n # List number of 
prepared calculations and number of runners\n print(rundir.run_directory, rundir.numcalcs, rundir.numrunners)\n dirjobs = runlogjobs[runlogjobs.run_directory == rundir.run_directory]\n\n # Print data for runners\n if len(dirjobs) > 0:\n print(dirjobs[keys])\n print()\n\n # List jobs with no associated bid files (usually finished)\n print('Unknown/finished jobs')\n nodir = runlogjobs[pd.isna(runlogjobs.run_directory)][keys]\n with pd.option_context('display.max_rows', None,):\n print(nodir)\n\n # Delete run logs for successfully finished jobs\n for jobid in nodir[nodir.status=='finished'].jobid.values:\n Path(f'runner_{jobid}.txt').unlink()", "title": "" }, { "docid": "5cd22b40a658528fd6f5e5c0ad09acae", "score": "0.56176794", "text": "def test_enqueue_at(self):\n queue = Queue(connection=self.testconn)\n registry = ScheduledJobRegistry(queue=queue)\n scheduler = RQScheduler([queue], connection=self.testconn)\n scheduler.acquire_locks()\n # Jobs created using enqueue_at is put in the ScheduledJobRegistry\n job = queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello)\n self.assertEqual(len(queue), 0)\n self.assertEqual(len(registry), 1)\n\n # enqueue_at set job status to \"scheduled\"\n self.assertTrue(job.get_status() == 'scheduled')\n\n # After enqueue_scheduled_jobs() is called, the registry is empty\n # and job is enqueued\n scheduler.enqueue_scheduled_jobs()\n self.assertEqual(len(queue), 1)\n self.assertEqual(len(registry), 0)", "title": "" }, { "docid": "e4b90ce4ac63d1fc3b70e270b44d071e", "score": "0.56115574", "text": "def collect(self):\n while True:\n time.sleep(2)\n logging.info(f'Maintain Queue Empty: {self._maintain_queue.empty()}')\n logging.info(f'Queue Empty: {self._queue.empty()}')\n if not self._queue.empty():\n message = self._queue.get()\n logging.info(f\"Popped off message from Queue: {message.entry_info}\")\n self.working_on = \"UPDATE\"\n elif not self._maintain_queue.empty():\n message = self._maintain_queue.get()\n logging.info(f\"Popped off message from Maintain Queue: {message.entry_info}\")\n self.working_on = \"MAINTAIN\"\n else:\n break\n\n if message.type == 'EXIT':\n break\n\n if message.type != 'TASK':\n raise ValueError(f'{message.type} is not a recognized task type')\n\n if message.type == 'TASK':\n try:\n github_url = message.entry_info['task']['given']['github_url']\n repo_id = message.entry_info['repo_id']\n self.query_pr({'github_url': github_url, 'repo_id': repo_id})\n except Exception:\n # logging.error(\"Worker ran into an error for task: {}\\n\".format(message.entry_info['task']))\n # logging.error(\"Error encountered: \" + str(e) + \"\\n\")\n # # traceback.format_exc()\n # logging.info(\"Notifying broker and logging task failure in database...\\n\")\n\n logging.exception(f'Worker ran into an error for task {message.entry_info}')\n self.register_task_failure(message.entry_info['repo_id'],\n message.entry_info['task']['given']['github_url'])\n\n # Add to history table\n task_history = {\n \"repo_id\": message.entry_info['repo_id'],\n \"worker\": self.config['id'],\n \"job_model\": message.entry_info['task']['models'][0],\n \"oauth_id\": self.config['zombie_id'],\n \"timestamp\": datetime.datetime.now(),\n \"status\": \"Error\",\n \"total_results\": self.results_counter\n }\n\n if self.history_id:\n self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history))\n else:\n r = self.helper_db.execute(self.history_table.insert().values(task_history))\n self.history_id = 
r.inserted_primary_key[0]\n\n logging.info(f\"Recorded job error for: {message.entry_info['task']}\")\n\n # Update job process table\n updated_job = {\n \"since_id_str\": message.entry_info['repo_id'],\n \"last_count\": self.results_counter,\n \"last_run\": datetime.datetime.now(),\n \"analysis_state\": 0\n }\n self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job))\n logging.info(\"Updated job process for model: \" + message.entry_info['task']['models'][0] + \"\\n\")\n\n # Reset results counter for next task\n self.results_counter = 0\n pass", "title": "" }, { "docid": "94336d720287cddcd9005360a1490fd6", "score": "0.5610968", "text": "def run_queue(self):\n\n #FIXME: need a finally here to cleanup exceptions states\n lqueue = self.chunk_queue(self.command_queue)\n self.run_command_queue(lqueue)\n\n del self.command_queue[:]\n self.memo_map.clear()", "title": "" }, { "docid": "c51fe701898c1a3e0c07a6917fd0341a", "score": "0.56016207", "text": "def monitor_task_queue(self):\n\n self.handle_new_tasks()", "title": "" }, { "docid": "d546bf86e6a677575961e6b11e020bf9", "score": "0.559495", "text": "def agentbehavior1(queue):\n pass", "title": "" }, { "docid": "66edc80b7c52d3f24c17d2c5c9e12602", "score": "0.5594344", "text": "def run(self, *args, **kwargs):\n self.config_json = args[1]\n self.task_id = self.request.id\n self.temp_iviu = {\n \"database\":self.config_json['mysql']['db'],\n \"table\":\"p00390\",\n \"type\":\"insert\",\n \"ts\":1527621106,\n \"xid\":1784748,\n \"commit\":True,\n \"data\": \"\"\n }\n self.redis_conn = None\n self.redis_connections = {}\n self.net = CheckNet()\n self.retry_count = 0\n self.err_flag = False\n self.email_flag =True\n self.start()\n # print(\"tt in run method\",args[2])\n if len(args) == 3:\n self.thread_each_table(args[0],args[2])\n else:\n self.thread_each_table(args[0], \"\")", "title": "" }, { "docid": "e6d17fda6177fe2ce267ae69d19db552", "score": "0.5593024", "text": "def test_start_requeue(self):\n pass", "title": "" }, { "docid": "c17feb99867e2b14d923b20e64b9c1c0", "score": "0.55925614", "text": "def __init__( self ):\n\n # Create the queue\n self.queue = Queue.Queue()\n\n # Set up the GUI part\n self.gui_manager = tbh_gui_manager( self.queue, self.endApplication )\n self.gui_manager.show()\n \n self.data_viz_queue = Queue.Queue()\n \n self.pomdp_data_viz = tbh_pomdp_data_viz.tbh_pomdp_data_viz_t( \\\n self.queue, self.endApplication )\n self.pomdp_data_viz.show()\n #self.gui.console.show()\n\n # A timer to periodically call periodicCall :-)\n self.queue_timer = PyQt4.QtCore.QTimer()\n\n # Start the timer -- this replaces the initial call \n # to periodicCall\n self.queue_timer.start(100)\n PyQt4.QtCore.QObject.connect(self.queue_timer, PyQt4.QtCore.SIGNAL(\"timeout()\"), \\\n self.periodicCall)\n\n\n\n # Set up the thread to do asynchronous I/O\n # More can be made if necessary\n self.running = 1\n \n \tself.thread = threading.Thread( target=self.worker_thread )\n self.thread.start()\n\n # Start the periodic call in the GUI to check if the queue contains\n # anything\n self.periodicCall()", "title": "" }, { "docid": "d2c4295c672872fd4813356a760b166d", "score": "0.5592507", "text": "def add_job(self, func, *args):\n self.queue.append((func, args))", "title": "" }, { "docid": "32ac073776ac841780639545fd8c3812", "score": "0.5590981", "text": "def runTest(self):\n sys_logging(\"start test\")\n switch_init(self.client)\n\n port = port_list[1]\n level = [0,1,2]\n 
max_childs = [4, 64, 8]\n sched_group_id_group_node = [None]*8\n sched_group_service_id = 1\n service_queueId = [None]*8\n\n\n sched_type1 = SAI_SCHEDULING_TYPE_STRICT\n sched_type2 = SAI_SCHEDULING_TYPE_DWRR\n sched_weight = 10\n cir = 2000000\n cbs = 256000\n pir = 1000000\n pbs = 64000\n\n sched_oid1 = sai_thrift_qos_create_scheduler_profile(self.client, sched_type1, 0, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid1)\n\n sched_oid2 = sai_thrift_qos_create_scheduler_profile(self.client, sched_type2, sched_weight, cir, cbs, pir, pbs)\n sys_logging(\"sched_oid_1 0x%x\"%sched_oid2)\n sched_oid_list = [sched_oid1, sched_oid2]\n\n \n sched_group_id_root = sai_thrift_qos_create_scheduler_group(self.client, port, level[0], max_childs[0], port, 0)\n \n sched_group_id_chan_node = sai_thrift_qos_create_scheduler_group(self.client, port, level[1], max_childs[1], sched_group_id_root, 0)\n \n sched_group_id_group_node[0] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id) \n sched_group_id_group_node[1] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[2] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[3] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[4] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id) \n sched_group_id_group_node[5] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[6] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n sched_group_id_group_node[7] = sai_thrift_qos_create_scheduler_group(self.client, port, level[2], max_childs[2], sched_group_id_chan_node, 0, service_id=sched_group_service_id)\n\n \n queue_type = SAI_QUEUE_TYPE_SERVICE\n queue_index = [0,1,2,3,4,5,6,7]\n service_queueId[0] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[0], parent_id=sched_group_id_group_node[0], service_id=sched_group_service_id, sche_id=sched_oid1) \n service_queueId[1] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[1], parent_id=sched_group_id_group_node[1], service_id=sched_group_service_id, sche_id=sched_oid2)\n service_queueId[2] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[2], parent_id=sched_group_id_group_node[1], service_id=sched_group_service_id, sche_id=sched_oid2)\n service_queueId[3] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[3], parent_id=sched_group_id_group_node[3], service_id=sched_group_service_id, sche_id=sched_oid1)\n service_queueId[4] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[4], service_id=sched_group_service_id, sche_id=sched_oid2)\n service_queueId[5] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[5], service_id=sched_group_service_id, sche_id=sched_oid2)\n service_queueId[6] = 
sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[6], service_id=sched_group_service_id, sche_id=sched_oid2)\n service_queueId[7] = sai_thrift_create_queue_id(self.client, queue_type, port, queue_index[7], service_id=sched_group_service_id, sche_id=sched_oid1)\n #pdb.set_trace()\n\n warmboot(self.client)\n try:\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[0]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[3]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != SAI_NULL_OBJECT_ID:\n raise NotImplementedError()\n\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[4])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[4], attr)\n self.client.sai_thrift_set_queue_attribute(service_queueId[5], attr)\n self.client.sai_thrift_set_queue_attribute(service_queueId[6], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[7])\n attr = 
sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[7], attr)\n \n \n\n\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[0]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[3]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[7]:\n raise NotImplementedError()\n\n\n for ii in range(8):\n attr_value = sai_thrift_attribute_value_t(oid=0)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[ii], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[0])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[0], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[1])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, 
value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[1], attr)\n self.client.sai_thrift_set_queue_attribute(service_queueId[2], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[3])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[3], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[4])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[4], attr)\n self.client.sai_thrift_set_queue_attribute(service_queueId[5], attr)\n self.client.sai_thrift_set_queue_attribute(service_queueId[6], attr)\n\n attr_value = sai_thrift_attribute_value_t(oid=sched_group_id_group_node[7])\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[7], attr)\n\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[0])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[0]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[1])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[2])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[1]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[3])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[3]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[4])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[5])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError()\n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[6])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[4]:\n raise NotImplementedError() \n attrs = self.client.sai_thrift_get_queue_attribute(service_queueId[7])\n assert(SAI_STATUS_SUCCESS == attrs.status)\n 
for a in attrs.attr_list:\n if a.id == SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE:\n sys_logging(\"get queue parent node profile:0x%x\"%a.value.oid)\n if a.value.oid != sched_group_id_group_node[7]:\n raise NotImplementedError()\n\n finally:\n #pdb.set_trace()\n for ii in range(8):\n attr_value = sai_thrift_attribute_value_t(oid=0)\n attr = sai_thrift_attribute_t(id=SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE, value=attr_value)\n self.client.sai_thrift_set_queue_attribute(service_queueId[ii], attr)\n\n #pdb.set_trace()\n for i in range(8):\n self.client.sai_thrift_remove_queue(service_queueId[i])\n\n for ii in range(8):\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_group_node[ii])\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_chan_node)\n self.client.sai_thrift_remove_scheduler_group(sched_group_id_root)\n\n self.client.sai_thrift_remove_scheduler_profile(sched_oid1)\n self.client.sai_thrift_remove_scheduler_profile(sched_oid2)", "title": "" }, { "docid": "3909cc076c9fbb71777323f5ac029dce", "score": "0.5588554", "text": "def requeue(self):\n log.info('Requeueing the task')\n taskmanager.crawlNow(connector_instance_ids =[self.task.connector_instance_id],\n workspace_id=self.task.workspace_id)", "title": "" }, { "docid": "1c7fc3cadc529fbb0aa73b5413c2ba10", "score": "0.55814713", "text": "def dequeue_actions(self):\n dequeued = []\n for task in self.get_todo():\n cmd = self.format_cmd(task[\"action\"], task[\"path\"], task[\"rids\"])\n log_cmd = self.format_log_cmd(task[\"action\"], task[\"path\"], task[\"rids\"])\n self.log.info(\"run '%s' queued %s ago\", \" \".join(log_cmd), print_duration(self.now - task[\"queued\"]))\n self.exec_action(task[\"sigs\"], cmd)\n dequeued += task[\"sigs\"]\n self.delete_queued(dequeued)", "title": "" }, { "docid": "407a566ef3fdc17b0ba8b79245e5d803", "score": "0.5576157", "text": "def setupCmdQueue(self):\n raise NotImplementedError", "title": "" }, { "docid": "bf53116b6fda45c5c50f7183253490f6", "score": "0.55741537", "text": "def _execute(self):\n\t\traise NotImplementedError(\"Task handlers must override _execute\")", "title": "" }, { "docid": "c50f24e8dab399fdce369b39ee4823d0", "score": "0.556992", "text": "def refresh(self):\n self.queue.sort(key=lambda x: x.exe_time)\n self.catch_up()", "title": "" }, { "docid": "9d645f21808d504b03f92a5372717b6d", "score": "0.556369", "text": "def processQueueAsync(self):\n print(\"Plotter process started\")\n self.mcq = MotorCtrlQueue(self.config)\n self.mcq.start()\n\n i = 0\n item = self.workerQueue.get()\n start = time.time()\n while(item is not None):\n\n self.executeCmd(item)\n\n i += 1\n if i % 1000 == 0:\n print(\"Processed %d commands. %f ms per cmd. 
\" %\n (i, (time.time() - start)*1000/i))\n\n item = self.workerQueue.get()\n\n # Wait for stepper queue\n self.mcq.join()\n print(\"Plotter process stopped\")\n exit(0)", "title": "" }, { "docid": "13d9f8d58e85192394c4cd085a2cb146", "score": "0.5563209", "text": "def pre_schedule(worker, job):\n data = pickle.loads(job.data)\n #print data\n s = requests.Session()\n \n task_r = s.get(\"http://localhost:8001\"+data[\"task\"])\n print \"doing:\", data[\"task\"]\n task_js = json.loads(task_r.text)\n \n function = task_js[\"async_function\"]\n \n input_dataset = json.loads(s.get(\"http://localhost:8001\"+data[\"input_dataset\"]).text)\n output_dataset = json.loads(s.get(\"http://localhost:8001\"+data[\"output_dataset\"]).text)\n \n input_dataset_id = input_dataset[\"id\"]\n output_dataset_id = output_dataset[\"id\"]\n \n \n #print input_columns\n #print output_column\n #print function\n #print input_dataset\n #print output_dataset\n \n \n #schedule\n count = input_dataset[\"datapoint_count\"]\n \n #TODO bucket list is \"hardcoded\" here. find some other way to do that\n bucket_list = \",\".join([\"'\"+str(input_dataset_id)+\"-\"+str(c)+\"'\" for c in xrange(1+int(count/10000))])\n \n \n table = \"tsstore\"\n \n lowest_time = input_dataset[\"lowest_ts\"]\n highest_time = input_dataset[\"highest_ts\"]\n \n #partition time buckets\n date_format = \"%Y-%m-%d %H:%M:%S.%f\"\n lt = datetime.datetime.strptime(lowest_time, date_format)-datetime.timedelta(microseconds=1)\n ht = datetime.datetime.strptime(highest_time, date_format)\n diff = ht-lt\n \n #TODO change to some other value during production\n batch_size = 10\n \n try:\n interval = task_js[\"interval_in_seconds\"]\n #is aggregation\n print \"is aggregation\"\n IS_CALCULATION = False\n num_tasks = int(diff.total_seconds()/interval)\n \n cols = []\n dims_with_cols = []\n for dim in input_dataset[\"dimensions\"]:\n col_r = s.get(\"http://localhost:8001\"+dim)\n jreq = json.loads(col_r.text)[\"ts_column\"]\n cols.append(jreq)\n dims_with_cols.append(DimWithCol(dim, jreq))\n \n output_column = cols[0]\n input_columns = cols[1:]\n \n \n except KeyError:\n #is calculation\n print \"is calculation\"\n IS_CALCULATION = True\n cluster = Cluster(data[\"cassandra_nodes\"])\n session = cluster.connect('ws')\n stmt = \"select count(*) from tsstore where bucket in (%s) and dataset=%s;\" % (bucket_list, input_dataset_id)\n row = session.execute(stmt)\n count = row[0]._asdict()[\"count\"]\n \n session.shutdown()\n \n output_dimension = s.get(\"http://localhost:8001\"+task_js[\"output_dimension\"])\n output_column = json.loads(output_dimension.text)[\"ts_column\"]\n \n input_dimensions = task_js[\"input_dimensions\"]\n input_columns = []\n for ic in input_dimensions:\n ic_r = s.get(\"http://localhost:8001\"+ic)\n input_columns.append(json.loads(ic_r.text)[\"ts_column\"])\n interval = (diff.total_seconds()*batch_size)/count\n num_tasks = int(count/batch_size)\n \n #print interval\n #print count/batch_size\n \n #update task_count\n task_update_callback(data[\"task_id\"], int(num_tasks))\n \n temp_ht = lt+datetime.timedelta(seconds=interval)+datetime.timedelta(microseconds=1)\n lt -= datetime.timedelta(microseconds=1)\n lowest_time = datetime.datetime.strftime(lt, date_format)\n highest_time = datetime.datetime.strftime(temp_ht, date_format)\n \n #while True:\n for XX in xrange(num_tasks+2):\n stmt = \"select %s,dataset,bucket from %s where bucket in (%s) and time >= '%s' AND time <= '%s' and dataset=%s order by dataset, time;\" % 
(\",\".join(input_columns+[output_column]+([\"time\"] if IS_CALCULATION else [])), table, bucket_list, lowest_time, highest_time, input_dataset_id)\n print stmt\n print num_tasks+1, XX\n #create job with previous stmt\n dat = {}\n dat[\"stmt\"] = stmt\n dat[\"function\"] = function\n dat[\"output_column\"] = output_column\n dat[\"output_dataset\"] = data[\"output_dataset\"]\n dat[\"task_id\"] = data[\"task_id\"]\n dat[\"cassandra_nodes\"] = data[\"cassandra_nodes\"]\n client = GearmanClient(JOBSERVER_LIST)\n try:\n interval = task_js[\"interval_in_seconds\"]\n #is aggregation\n #dat[\"input_columns\"] = input_columns+[output_column]\n dat[\"input_dimensions\"] = dims_with_cols\n \n client.submit_job(\"row_aggregator\", pickle.dumps(dat),background=True)\n except KeyError:\n #is calculation\n dat[\"output_dimension\"] = task_js[\"output_dimension\"]\n client.submit_job(\"row_calculator\", pickle.dumps(dat),background=True)\n \n #st = statsd.StatsClient(\"192.168.149.161\",8125)\n #st.incr(\"task_created\")\n\n #update timestamps\n lt += datetime.timedelta(seconds=interval)+datetime.timedelta(microseconds=1)\n temp_ht += datetime.timedelta(seconds=interval)+datetime.timedelta(microseconds=1)\n \n lowest_time = datetime.datetime.strftime(lt, date_format)\n highest_time = datetime.datetime.strftime(temp_ht, date_format)\n print lowest_time\n print highest_time\n \n #if lt > ht:\n # break\n \n return \"a\"", "title": "" }, { "docid": "72fe02423100850a617257dca48ea999", "score": "0.5562906", "text": "def test_queue_and_jobs_6(self):\n output = \"\"\"\n [0] queue (sftp://someone:@localhost) -- 252 B/s\n sftp://someone:@localhost/home/someone\n Now executing: [1] mirror -c /tmp/test_controllerbsn4wlu2/remote/ra /tmp/test_controllerbsn4wlu2/local/ -- 249/8.2k (3%) 100 B/s\n -[2] mirror -c /tmp/test_controllerbsn4wlu2/remote/rb /tmp/test_controllerbsn4wlu2/local/ -- 374/9.3k (4%) 153 B/s\n Commands queued:\n 1. 
pget -c \"/tmp/test_controllerbsn4wlu2/remote/rc\" -o \"/tmp/test_controllerbsn4wlu2/local/\" \n [1] mirror -c /tmp/test_controllerbsn4wlu2/remote/ra /tmp/test_controllerbsn4wlu2/local/ -- 249/8.2k (3%) 100 B/s\n \\\\transfer `raa' \n `raa' at 238 (23%) 100b/s eta:8s [Receiving data]\n \\mirror `rab' -- 0/7.2k (0%)\n \\\\transfer `rab/raba' \n `raba' at 0 (0%) [Connecting...]\n \\\\transfer `rab/rabb' \n `rabb' at 0 (0%) [Waiting for response...]\n [2] mirror -c /tmp/test_controllerbsn4wlu2/remote/rb /tmp/test_controllerbsn4wlu2/local/ -- 374/9.3k (4%) 153 B/s\n \\\\transfer `rba' \n `rba' at 159 (3%) 77b/s eta:51s [Receiving data]\n \\\\transfer `rbb' \n `rbb' at 153 (2%) 76b/s eta:66s [Receiving data]\n \"\"\"\n parser = LftpJobStatusParser()\n statuses = parser.parse(output)\n golden_queue = [\n LftpJobStatus(job_id=1,\n job_type=LftpJobStatus.Type.PGET,\n state=LftpJobStatus.State.QUEUED,\n name=\"rc\",\n flags=\"-c\"),\n ]\n golden_job1 = LftpJobStatus(job_id=1,\n job_type=LftpJobStatus.Type.MIRROR,\n state=LftpJobStatus.State.RUNNING,\n name=\"ra\",\n flags=\"-c\")\n golden_job1.total_transfer_state = LftpJobStatus.TransferState(249, 8396, 3, 100, None)\n golden_job1.add_active_file_transfer_state(\"raa\", LftpJobStatus.TransferState(None, None, None, 100, 8))\n golden_job1.add_active_file_transfer_state(\"rab/raba\", LftpJobStatus.TransferState(None, None, None, None, None))\n golden_job1.add_active_file_transfer_state(\"rab/rabb\", LftpJobStatus.TransferState(None, None, None, None, None))\n golden_job2 = LftpJobStatus(job_id=2,\n job_type=LftpJobStatus.Type.MIRROR,\n state=LftpJobStatus.State.RUNNING,\n name=\"rb\",\n flags=\"-c\")\n golden_job2.total_transfer_state = LftpJobStatus.TransferState(374, 9523, 4, 153, None)\n golden_job2.add_active_file_transfer_state(\"rba\", LftpJobStatus.TransferState(None, None, None, 77, 51))\n golden_job2.add_active_file_transfer_state(\"rbb\", LftpJobStatus.TransferState(None, None, None, 76, 66))\n golden_jobs = [golden_job1, golden_job2]\n self.assertEqual(len(golden_queue)+len(golden_jobs), len(statuses))\n statuses_queue = [j for j in statuses if j.state == LftpJobStatus.State.QUEUED]\n self.assertEqual(golden_queue, statuses_queue)\n statuses_jobs = [j for j in statuses if j.state == LftpJobStatus.State.RUNNING]\n self.assertEqual(golden_jobs, statuses_jobs)", "title": "" }, { "docid": "46d7b2de353ab41d78a49c531dc0261c", "score": "0.55613804", "text": "def queue_events(self, timeout):", "title": "" }, { "docid": "2e7a152416156e0115c320ac03961d63", "score": "0.5554936", "text": "def _init_queue(self):\n self.queue.put((do_page_parse, self.root_url))", "title": "" }, { "docid": "d81c7b030e4380ac69de85200441b370", "score": "0.5553766", "text": "def submit(self):\n \n if len(self.queue) == 0:\n raise Exception(\"There are no script to be submitted to the queue\\n did you run create_scripts?\")\n\n for queueFileName in self.queue:\n os.system(\"qsub \" + queueFileName)", "title": "" }, { "docid": "a1e0ae997b610f20c6917c185ee971c9", "score": "0.5550807", "text": "def main_task():\n\n interval = 5\n apis = Scheduler.get_apis()\n for api in apis:\n class_name = api.api_class.split('.')[2]\n sched.add_job(Scheduler.fetch_data, 'interval',\n name='{}'.format(api.name),\n seconds=api.refresh_time,\n start_date=datetime.now()+timedelta(\n minutes=interval),\n end_date=datetime.now()+timedelta(hours=23),\n args=[api.api_class, api.name],\n replace_existing=True, id='{}'.format(class_name),\n jobstore='sqlalchemy', misfire_grace_time=300)\n interval += 
interval", "title": "" } ]
7cf6d7a7b128f652585284b8eb15cfc0
Emits a 'cld' instruction.
[ { "docid": "e034cd2a187354cc37d2628c032dc82a", "score": "0.5156586", "text": "def cld(self) -> None:\n self.buf[self.pos] = 252\n self.pos += 1", "title": "" } ]
[ { "docid": "10f6e1d757e33098a497f06762b836a0", "score": "0.6387597", "text": "def cl(self, c):\n self.force_menu('oper')\n self.send('cl ' + str(c))\n return output(self.proc.before)", "title": "" }, { "docid": "93fe9d0119da5f74106fdbe79ae1cdc7", "score": "0.5471392", "text": "def do_ccmd(self, args):\n print 'args:', args.split()", "title": "" }, { "docid": "753df6ce3ce639b27fc94b20be4374d9", "score": "0.5453434", "text": "def gen_c_instruction(self, dest, comp, jump):\n return '111' + self.comp(comp) + self.dest(dest) + self.jump(jump)", "title": "" }, { "docid": "91b8cb9b58d6fbcde90e6b276a962fce", "score": "0.53294784", "text": "def cmd_makecldf(self, args):", "title": "" }, { "docid": "a004240355f4033a155a694cf0dca221", "score": "0.5321995", "text": "def d017100cc ( state ) :\n byte = state[\"byte\"]\n stream = state[\"stream\"]\n\n cc = cctable[byte&0xFF-0o100]\n\n state[\"size\"] = \"r32\"\n\n rmfield = parse_rm(state)\n regfield = parse_reg(state)\n\n return f'CMOV{cc} {regfield}, {rmfield}'", "title": "" }, { "docid": "a4f57c2d0dd37eb167ac00eb2e22cfff", "score": "0.53189015", "text": "def d160cc ( state ) :\n byte = state[\"byte\"]\n stream = state[\"stream\"]\n\n cc = cctable[byte-0o160]\n\n disp8 = int.from_bytes(stream.read(1), byteorder='little', signed=True)\n state[\"pos\"] += 1\n state[\"hex\"].append(f'{disp8&0xFF:08X}')\n\n return f'J{cc} {disp8} to {state[\"pos\"]+disp8:08X}'", "title": "" }, { "docid": "515af1abe08cbabc3057958996665783", "score": "0.51874214", "text": "def cldf(self) -> pycldf.Dataset:\n if self._cldf is None:\n raise AttributeError('Writer.cldf is only set when Writer is used in with statement!')\n return self._cldf", "title": "" }, { "docid": "b9c6cc1a6e17af463fc83c0f895e6af6", "score": "0.5133083", "text": "def write_cdl(cdl):\n\n values = list(cdl.slope)\n values.extend(cdl.offset)\n values.extend(cdl.power)\n values.append(cdl.sat)\n values = [str(i) for i in values]\n\n ss_cdl = ' '.join(values)\n\n with open(cdl.file_out, 'wb') as cdl_f:\n cdl_f.write(enc(ss_cdl))", "title": "" }, { "docid": "ae55651318111e9c526738cc94a4439b", "score": "0.5131566", "text": "def ccds():\n allowed_str_args = ['hgnc_symbol', 'ccdsid']\n\n allowed_int_args = ['limit', 'hgnc_identifier']\n\n args = get_args(\n request_args=request.args,\n allowed_int_args=allowed_int_args,\n allowed_str_args=allowed_str_args\n )\n\n return jsonify(query.ccds(**args))", "title": "" }, { "docid": "3a79cc8f7e312979d06c7a7d06d2f4e8", "score": "0.5094653", "text": "def cldf_dir(self) -> DataDir:\n return self.dir / 'cldf'", "title": "" }, { "docid": "5a261b26f5a0f85c1b280a15c8f373a7", "score": "0.50675637", "text": "def _parse_c_instruction(self):\n line = '111'\n line += self.code.comp(self.parser.comp)\n line += self.code.dest(self.parser.dest)\n line += self.code.jump(self.parser.jump)\n self._write_line(line)", "title": "" }, { "docid": "b485e57165e7f1e004356df1547a1064", "score": "0.5047674", "text": "def cmd_makecldf(self, args: argparse.Namespace):\n args.log.warning('cmd_{0} not implemented for dataset {1}'.format('makecldf', self.id))\n return NOOP", "title": "" }, { "docid": "5015fb32fe82f726f9ad048426a158e3", "score": "0.5033455", "text": "def request_cdr(self):\n self.model.request_computed_value_output(\"cdr\")", "title": "" }, { "docid": "249e103a985c24429e822934d79ff480", "score": "0.49677145", "text": "def set_dc(self):\n self.instr.write(self.channel + ':COUPLING DC')\n return None", "title": "" }, { "docid": "01c26aa345440ac251f5bdbab47fd57e", "score": "0.49471664", 
"text": "def ldc(concentration, discharge,\n conversion_factor,\n target_concentration=None,\n y_label='Load',\n yscale='log',\n ylim=None,\n ax=None,\n label_conditions=False):\n rank = rankdata(discharge)\n # calculate probability of each rank\n prob = (1-(rank/(len(rank)+1)))*100\n load = concentration * discharge * conversion_factor\n\n if ax is None:\n fig, ax = plt.subplots(1)\n\n # plot target curve\n if target_concentration is None:\n target_concentration = load_mean_concentration(concentration, discharge)\n\n hb_extent = None\n if ylim is not None:\n plot_ylim = ylim\n hb_extent=(1,100, ylim[0], ylim[1])\n\n if yscale is 'log':\n plot_ylim = (10**ylim[0], 10**ylim[1])\n\n\n\n # plot data via matplotlib\n hb = ax.hexbin(prob, load, gridsize=50,\n yscale=yscale,\n cmap='pink_r',\n mincnt=1,\n extent=hb_extent,\n linewidths=0.2)\n\n target_ldc(target_concentration, discharge, conversion_factor, ax=ax)\n\n cb = plt.colorbar(hb, ax=ax, orientation='horizontal')\n cb.set_label('Count')\n #ax.set_yscale('log')\n ax.set_ylabel(y_label)\n ax.set_xlabel('Flow Duration Interval (%)')\n ax.grid(alpha=0.2, which='both', linestyle='--', linewidth=0.5)\n #ax.grid(alpha=0.2, which='major', linestyle='--', linewidth=0.5)\n ax.set_xlim(0,100)\n\n if ylim is not None:\n ax.set_ylim(plot_ylim)\n\n if label_conditions==True:\n label_dc_flow_conditions(ax)", "title": "" }, { "docid": "46e7009cc6e33ee12cffd3b98259dc60", "score": "0.49342784", "text": "def do_c(self, line):\n return self.do_connect(line)", "title": "" }, { "docid": "51bf1c0e1eda52758309b4cc82022db0", "score": "0.48940757", "text": "def d2cd(d, e):\n cd = e.css('cd', 0)\n # Test for division by 0\n if cd + e.gd:\n return (d + e.gd) / (cd + e.gd)\n # Undefined, no info about column depth and gutter or zero division.\n return 0", "title": "" }, { "docid": "e928e0c6aba962fd3537c711206634b8", "score": "0.4871802", "text": "def write_dcd (self, filename='tfd', dirname='.') :\n Conformers.write_dcd(self,filename,dirname)", "title": "" }, { "docid": "4efe20ed0dc8e705c5cea87f96f14e98", "score": "0.48573828", "text": "def d017200cc ( state ) :\n byte = state[\"byte\"]\n stream = state[\"stream\"]\n\n cc = cctable[byte&0xFF-0o200]\n\n disp32 = int.from_bytes(stream.read(4), byteorder='little', signed=True)\n state[\"pos\"] += 4\n state[\"hex\"].append(f'{disp32&0xFFFFFFFF:08X}')\n\n return f'J{cc} {disp32} to {state[\"pos\"]+disp32:08X}'", "title": "" }, { "docid": "2c6138323f516b24cbff4600d2fb59f2", "score": "0.4829934", "text": "def toggle_dcn(self, dcn_id: str):\n if self.dcn:\n self.dcn = None\n self.setText(12, '')\n self.cooling.setDisabled(False)\n else:\n self.dcn = dcn_id\n self.setText(12, dcn_id)\n self.cooling.setDisabled(True)", "title": "" }, { "docid": "b54c4b7d2d179e178c94032bf72acea6", "score": "0.48284918", "text": "def cpd():\n return pkp.cpd.CPD(ultimate_analysis=ua, proximate_analysis=pa,\n pressure=101325, name='CPD coal')", "title": "" }, { "docid": "a1d9ba9d6d04360d7bfe3d0a90554e9b", "score": "0.4827443", "text": "def cdf(self, d):\n y = norm.ppf(d, 0, 1)\n\n return multivariate_normal.cdf(y, mean=None, cov=self.corrMatrix)", "title": "" }, { "docid": "0bcae870cb45e81d55601e0b9b170f6f", "score": "0.481945", "text": "def d2y_dtdC(self, y, dydC):\n\n C = self._cdata['C']\n nC = len(C)\n i1, i2, i3, i4 = self._cdata['i'].T\n\n yext = np.concatenate([y, [1]])\n dydCext = dydC.reshape(-1, self._N)\n if dydCext.shape[0] != nC:\n raise ValueError('Invalid size gradient passed to d2y_dtdC')\n dydCext = np.concatenate(\n [dydCext, 
np.zeros((nC, 1))], axis=1)\n d2ydtdC = dydCext*0.\n\n dcy1 = yext[i1]*yext[i2]\n\n np.add.at(d2ydtdC, (range(nC), i3), -dcy1)\n np.add.at(d2ydtdC, (range(nC), i4), dcy1)\n\n iC = np.repeat(np.arange(nC), nC)\n i1t = np.tile(i1, nC)\n i2t = np.tile(i2, nC)\n i3t = np.tile(i3, nC)\n i4t = np.tile(i4, nC)\n\n dcy2 = np.tile(C, nC)*(dydCext[iC, i1t]*yext[i2t] +\n dydCext[iC, i2t]*yext[i1t])\n\n np.add.at(d2ydtdC, (iC, i3t), -dcy2)\n np.add.at(d2ydtdC, (iC, i4t), dcy2)\n\n d2ydtdC = d2ydtdC[:, :-1].reshape(-1)\n\n return d2ydtdC", "title": "" }, { "docid": "658024c6332736f7e1456cc4e97587bc", "score": "0.48092347", "text": "def evalc(d, expression, arg = 0, include = '', address = 0x1fffda0, show_disassembly = False):\n compile(d, address, expression, include, show_disassembly)\n return d.blx(address + 1, arg)[0]", "title": "" }, { "docid": "d11e89e8931eb85199ed3c025b0aae6e", "score": "0.48031557", "text": "def D_C(self, z):\n def integrand(z, om, ol):\n return 1. / sqrt(om * (1. + z)**3\n + (1. - om - ol) * (1. + z)**2 + ol)\n om, ol, h = self.om, self.ol, self.h\n res = integrate.quad(lambda x: integrand(x, om, ol), 0., z)\n return self.D_H * res[0]", "title": "" }, { "docid": "36fba0d5ee7e3f52c3fa1c6088f1df36", "score": "0.4795712", "text": "def do_dxcl(self, params):\n try:\n args = self.dxcl_parser.parse_args(params.split())\n calls_args = argparse.Namespace()\n\n if args.class_name:\n if args.direction == 'to':\n calls_args.to_class = args.class_name\n elif args.direction == 'from':\n calls_args.from_class = args.class_name\n\n if args.method_name:\n if args.direction == 'to':\n calls_args.to_method = args.method_name\n elif args.direction == 'from':\n calls_args.from_method = args.method_name\n\n log.info(calls_args)\n # Get calls\n results = self.get_calls(calls_args)\n\n # Get cross-references\n xresults = self.analysis.xref_call(results, args.direction, args.xref_depth)\n\n # Create new graph\n calls_graph = CallGraph()\n\n for r in xresults:\n calls_graph.add_call(r)\n\n # Finalize graph\n calls_graph.finalize()\n\n # Write output\n if args.output:\n calls_graph.write(args.output_format, args.output, args.output_prog, args.output_args)\n log.info(\"Wrote results to %s\" % args.output)\n\n except SystemExit:\n pass", "title": "" }, { "docid": "0c8629f97093fa64b4369cfcd602e20a", "score": "0.47871363", "text": "def calcdCdT(thermconsts):\n if thermconsts['LKFluid'] == 'ref':\n c1 = thermconsts['LKc1ref']\n c2 = thermconsts['LKc2ref']\n c3 = thermconsts['LKc3ref'] \n elif thermconsts['LKFluid'] == 'simple':\n c1 = thermconsts['LKc1simple']\n c2 = thermconsts['LKc2simple']\n c3 = thermconsts['LKc3simple']\n else:\n print ('LK error in calcLKC')\n\n T = thermconsts['Temperature']\n Tc = thermconsts['TCrit']\n t2 = T*T;\n t4 = t2*t2;\n tc3 = Tc*Tc*Tc;\n \n return (c2*Tc/t2) - (c3*tc3/t4)", "title": "" }, { "docid": "68905102798ce5f854b823357fd40c25", "score": "0.47779915", "text": "def ccd(self):\n return CCDData.read(self.path)", "title": "" }, { "docid": "53413c91c782d92eba920d33df543215", "score": "0.4772914", "text": "def det_coor(Gt, costth, wavelength, distance, y_size, z_size, \n dety_center, detz_center, R_tilt, tx, ty, tz):\n\n # Unit directional vector for reflection\n v = n.array([costth, \n wavelength/(2*n.pi)*Gt[1],\n wavelength/(2*n.pi)*Gt[2]])\n t = (R_tilt[0, 0]*distance - \\\n n.sum(R_tilt[:, 0]*n.array([tx, ty, tz])))/n.sum(R_tilt[:, 0]*v)\n Ltv = n.array([tx-distance, ty, tz])+ t*v\n dety = n.sum(R_tilt[:, 1]*Ltv)/y_size + dety_center\n detz = n.sum(R_tilt[:, 
2]*Ltv)/z_size + detz_center\n return [dety, detz]", "title": "" }, { "docid": "93f7c6fc3ba99233cf668906c5b1f49f", "score": "0.4756373", "text": "def get_CD(self, CL, delta_e):\n return self.get_quantity('CDtot', CL, delta_e)", "title": "" }, { "docid": "bede35b9ab467b2a19ac860a6f13592f", "score": "0.47293612", "text": "def cdf(self, dist, v):", "title": "" }, { "docid": "30572bc32c737cfc9bbfc86223fa124c", "score": "0.4718782", "text": "def do_dcl(self, params):\n try:\n args = self.dcl_parser.parse_args(params.split())\n results = self.get_calls(args)\n\n # Create new graph\n calls_graph = CallGraph()\n for r in results:\n calls_graph.add_call(r)\n\n # Finalize graph\n calls_graph.finalize()\n\n # Write output\n if args.output:\n if results:\n calls_graph.write(\n args.output_format, args.output,\n args.output_prog, args.output_args)\n log.info(\"Wrote results to %s\" % args.output)\n else:\n log.info(\"No results :(\")\n\n except SystemExit:\n pass", "title": "" }, { "docid": "736cc0f7b23a7ffbf2d83d62dfa4084a", "score": "0.4680829", "text": "def set_Ccv_from_voltage_distance(\n self,\n voltage_node_idx: int,\n dV: float,\n charge_node_idx: int,\n ) -> None:\n capa_val = -elem_charge * dV\n self.set_capacitance(\n \"cv\",\n [charge_node_idx, voltage_node_idx],\n capa_val,\n )", "title": "" }, { "docid": "b293554ea3cf66f9e58642ec2c6834ce", "score": "0.46645945", "text": "def MatlabDC(self,event):\n\t\tself.dyncore=1", "title": "" }, { "docid": "67a26634c9cc9b498111881bdd404990", "score": "0.4623921", "text": "def CD_0(self):\r\n return self.CD_min + self.K_apo2 * self.CL_min ** 2", "title": "" }, { "docid": "6e90fe87f5d1a2237e8519442c4c3613", "score": "0.46181998", "text": "def genDCD(residues,name,prot,path,run_type,n_chains):\n top = md.Topology()\n for _ in range(n_chains):\n chain = top.add_chain()\n for resname in prot.fasta:\n residue = top.add_residue(residues.loc[resname,'three'], chain)\n top.add_atom(residues.loc[resname,'three'], element=md.element.carbon, residue=residue)\n traj = md.load_dcd(path+\"/{:s}.dcd\".format(run_type), top)\n traj.center_coordinates()\n traj.xyz *= 10\n traj.unitcell_lengths *= 10\n traj.xyz += traj.unitcell_lengths[0,0]/2\n traj[:].save_dcd(path+\"/{:s}.dcd\".format(name))\n traj[0].save_pdb(path+\"/{:s}.pdb\".format(name))", "title": "" }, { "docid": "b532cd39894710ea81daaeb6e066aa87", "score": "0.45979238", "text": "def ccmd(*targets, **kwargs):\n import subprocess\n\n def run_ccmd_checks(\n target, ret, checks, interval, source, timestamp, group, command, description, alert, **kwargs\n ):\n command = command.format(target=target)\n check_result = {\n \"success\": 0,\n \"checks\": checks,\n \"comment\": \"\",\n \"target\": target,\n \"check_type\": \"CCMD\",\n \"rtt\": 0.0,\n \"source\": source,\n \"@timestamp\": timestamp,\n \"group\": group,\n \"description\": description,\n \"command\": command,\n }\n command = [i.strip() for i in command.split(\" \") if i.strip()]\n # do CCMD checks\n for i in range(checks):\n try:\n result = subprocess.check_output(command, stderr=subprocess.STDOUT)\n result = result.decode(encoding=\"utf-8\")\n if (\n \"Destination net unreachable\" in result\n or \"TTL expired in transit\" in result\n or \"Destination host unreachable\" in result\n ):\n continue\n else:\n check_result[\"success\"] += 1\n except:\n check_result[\n \"comment\"\n ] += \"CCMD Check {}, ERROR in subprocess, command '{}' failed or returned non zero status code\\n\".format(\n i, str(command)\n )\n time.sleep(interval)\n # calculate success and loss 
percentages\n check_result[\"success\"] = (check_result[\"success\"] / checks) * 100\n check_result[\"loss\"] = 100 - check_result[\"success\"]\n # sent alert if requested to do so\n if check_result[\"success\"] == 0 and alert is True:\n _send_alert(check_type=\"CCMD\", target=target, data=check_result)\n # save thread run results\n ret[\"out\"].append(check_result)\n\n # initialize variables\n check_kwargs = _get_kwargs(**kwargs)\n # create threads objects\n job_threads = []\n for target_dict in targets:\n if not target_dict.get(\"command\") or not target_dict.get(\"target\"):\n log.error(\n \"dnuts.ccmd: no 'command' or 'target' argument found for target - {}\".format(target)\n )\n continue\n job_threads.append(\n Thread(\n target=run_ccmd_checks,\n kwargs=dict(\n target=target_dict[\"target\"].strip(),\n command=target_dict[\"command\"].strip(),\n description=target_dict.get(\"description\", \"\").strip(),\n **check_kwargs\n ),\n )\n )\n # run threads\n log.info(\n \"Running {len} CCMD connection checks in {threads} threads, {checks} check(s) per target, with {interval}s interval and {timeout}s timeout\".format(\n len=len(job_threads), **check_kwargs\n )\n )\n _run_threads(\n job_threads,\n max=check_kwargs[\"threads\"],\n timeout=check_kwargs[\"timeout\"] * 2 * check_kwargs[\"checks\"],\n )\n log.info(\"CCMD Checks completed\")\n\n return check_kwargs[\"ret\"]", "title": "" }, { "docid": "9478451c3fa2b27a8ed7e0dbd44d7b08", "score": "0.45846218", "text": "def crc(self):\n return self.comp_command('DSCRC').lstrip('DSCRC ').rstrip()", "title": "" }, { "docid": "ab2ea1f4c5e0d2418f73ac9044a6ec74", "score": "0.45644945", "text": "def _genCCCDiff(self,\n regName,\n name,\n title,\n caption,\n outputDir):\n\n import LATCRootData\n regInfo = LATCRootData.PRECINCT_INFO[self._precinctName][regName]\n \n regData = self._cfgPrecinctData.getRegisterData(regName)\n baselineData = self._baselinePrecinctData.getRegisterData(regName)\n diffData = [new - old for (new,old) in zip(regData,baselineData)]\n # extract GCCC index from global GCFE indeces\n import calConstant\n cccData = [calConstant.getCFEId(idx)[1] for idx in range(calConstant.GLOBAL_N_GCFE)]\n \n import ROOTPlotUtils\n return ROOTPlotUtils.make2DHist(cccData,\n diffData,\n name,\n title,\n caption,\n \"GCCC\",\n \"%s diff (cfg - baseline)\"%regName,\n outputDir,\n calConstant.NUM_GCCC,\n 2*(regInfo.maxVal+1),\n calConstant.NUM_GCCC,\n regInfo.maxVal+1,\n 0,\n -1*(regInfo.maxVal+1))", "title": "" }, { "docid": "f5f00f4dca7539272090535912b5a235", "score": "0.45629635", "text": "def pgcdl(l):\n p=l[0]\n for i in range(1,len(l)):\n p=pgcd(p,l[i])\n return p", "title": "" }, { "docid": "ffd453111561dafc3987bc185ecb1004", "score": "0.45616314", "text": "def write_cc(cdl):\n with open(cdl.file_out, 'wb') as cdl_f:\n cdl_f.write(cdl.xml_root)", "title": "" }, { "docid": "5a1be46bc78fd58aa761924da4c55802", "score": "0.455433", "text": "def _ccmd(self, args, rv=0):\n assert type(args) == str\n # Ensure 'coverage' is turned off-- it won't work.\n self.cmdline_run(\"{0}\".format(args), exit=rv, coverage=False)", "title": "" }, { "docid": "f55ee40d4f7dd059decd1e6cf99229f2", "score": "0.4548994", "text": "def Dc(self, z):\n if z==0:\n return 0\n else:\n f = lambda z: 1.0/self.__E(z)\n I = integrate.quad(f, 0, z)\n return self.Dh * I[0]", "title": "" }, { "docid": "5ec8891e11763bbf07bc9da6ed97a767", "score": "0.4543415", "text": "def get_cl(field, bp, hd, mask, map, coupled_cell, tmp_path=None):\n\n # Initialize Cls\n n_bins = map.shape[1]\n n_ells = 
len(bp)\n cl = np.zeros((2, 2, n_ells, n_bins, n_bins))\n\n # Dimensions\n Nx = hd['NAXIS1']\n Ny = hd['NAXIS2']\n Lx = Nx*abs(hd['CDELT1'])*np.pi/180 # Mask dimension in radians\n Ly = Ny*abs(hd['CDELT2'])*np.pi/180 # Mask dimension in radians\n\n # Fields definition\n fd = np.array([nmt.NmtFieldFlat(\n Lx, Ly, mask[x], [map[0, x], -map[1, x]]) for x in range(n_bins)])\n # Bins for flat sky fields\n b = nmt.NmtBinFlat(bp[:, 0], bp[:, 1])\n # Effective ells\n ell = b.get_effective_ells()\n\n # Iterate over redshift bins to compute Cl's\n mcm_paths = []\n for nb1 in range(n_bins):\n for nb2 in range(nb1, n_bins):\n # Temporary path for mode coupling matrix\n if tmp_path is None:\n mcm_p = os.path.expanduser('~')\n else:\n mcm_p = tmp_path\n mcm_p = mcm_p+'/mcm_{}_Z{}{}.dat'.format(field, nb1+1, nb2+1)\n mcm_paths.append(mcm_p)\n # Define workspace for mode coupling matrix\n wf = nmt.NmtWorkspaceFlat()\n try:\n wf.read_from(mcm_p)\n except RuntimeError:\n wf.compute_coupling_matrix(fd[nb1], fd[nb2], b)\n wf.write_to(mcm_p)\n print('Calculated mode coupling matrix for bins {}{}'\n ''.format(nb1+1, nb2+1))\n sys.stdout.flush()\n # Calculate Cl's\n cl_c = nmt.compute_coupled_cell_flat(fd[nb1], fd[nb2], b)\n if coupled_cell is True:\n cl_d = cl_c\n else:\n cl_d = wf.decouple_cell(cl_c)\n cl_d = np.reshape(cl_d, (2, 2, n_ells))\n cl[:, :, :, nb1, nb2] = cl_d\n cl[:, :, :, nb2, nb1] = cl_d\n\n return ell, cl, mcm_paths", "title": "" }, { "docid": "f03076ba667b5b3f85826b1dcf5ce982", "score": "0.45395768", "text": "def _ldc(self):\n value = self.pop(CONTROL)\n self._push_val(value) # adds value to memory and pushes it's address to STACK", "title": "" }, { "docid": "6e71f803d765eb328a43f8019e074f76", "score": "0.45364988", "text": "def draw_clonal_marker(self):\n\n # add background noise\n self.fill(1, self.bg_level, self.bg_noise)\n\n # draw nuclei\n means = self.data['clonal_marker'].values\n stds = self.data['clonal_marker_std'].values\n self.draw_nuclei(channel=1, means=means, stds=stds)", "title": "" }, { "docid": "2d5a35afa18a02a7c7a85b213b55688a", "score": "0.45291862", "text": "def _test_model_cld():\r\n print(\"Loading\")\r\n import coffeehouse_languagedetection\r\n print(\"Ready\\n\")\r\n\r\n while True:\r\n input_text = input(\"> \")\r\n results = coffeehouse_languagedetection.predict(input_text, dltc=False, cld=True, ld=False)\r\n print(results['cld'])\r\n print(results['cld'][0])", "title": "" }, { "docid": "c57a1b04c7b961a41ad089ccb912ee14", "score": "0.45217818", "text": "def execute_gen_dc(self):\n # write the shading header\n self.write_static_shading()\n\n command1 = 'gen_dc \"{hea_path}\" -dir'.format(hea_path=self.hea_path)\n command2 = 'gen_dc \"{hea_path}\" -dif'.format(hea_path=self.hea_path)\n command3 = 'gen_dc \"{hea_path}\" -paste'.format(hea_path=self.hea_path)\n\n CEADaySim.run_cmd(command1)\n CEADaySim.run_cmd(command2)\n CEADaySim.run_cmd(command3)", "title": "" }, { "docid": "055ed6c173c938da2fb017d7e9cee309", "score": "0.4516811", "text": "def calcdIdV(self):\n I = self.getI()\n self.dIdV = np.gradient(I,self.V_range)", "title": "" }, { "docid": "42362d5e5b19638a5bc85fd009ddc25e", "score": "0.44972062", "text": "def clifford_conductor(self):\n D = self.disc()\n return prod([x[0] for x in factor(2 * self.level()) if self.clifford_invariant(x[0]) == -1])", "title": "" }, { "docid": "b1ee72efbba9eae06c1783ac6f2ebfe8", "score": "0.4482374", "text": "def fetch_cdum() -> None:", "title": "" }, { "docid": "eaa3872a87b6051eddb7b65625102710", "score": "0.44770986", "text": 
"def to_cshl_short(self) -> str:\n if self.variant_type & core.Allele.Type.substitution:\n return f\"sub({self.ref}->{self.alt})\"\n if self.variant_type & core.Allele.Type.small_insertion:\n return f\"ins({self.alt})\"\n if self.variant_type & core.Allele.Type.small_deletion:\n return f\"del({self.length})\"\n if self.variant_type & core.Allele.Type.complex:\n return f\"comp({self.ref}->{self.alt})\"\n if self.variant_type & core.Allele.Type.large_duplication:\n return \"CNV+\"\n if self.variant_type & core.Allele.Type.large_deletion:\n return \"CNV-\"\n raise ValueError(f\"unexpected variant type: {self.variant_type}\")", "title": "" }, { "docid": "f32924eb57923c0873366b053a01239c", "score": "0.44690257", "text": "def cce_diag(config_map=None):\n # for pylint, otherwise \"Dangerous default value {} as argument\"\n if config_map is None:\n config_map = {}\n\n def _build(sch, tensor_list, kernel_name='cce_diag_op'):\n target = 'llvm'\n mod = tvm.build(sch, tensor_list, target, name=kernel_name)\n return mod\n\n def _lower(sch, tensor_list):\n print(tvm.lower(sch, tensor_list, simple_mode=True))\n\n def _check(mod, input_tensors, output_tensors, ref_input_func, ref_output_func, rtol):\n ctx = tvm.context('llvm', 0)\n if not ctx.exist:\n raise RuntimeError('Only support diagnose on CPU now')\n np_inputs = []\n tvm_inputs = []\n tvm_outputs = []\n if ref_input_func is None:\n for tensor in input_tensors:\n np_inputs.append(\n np.random.uniform(size=get_const_tuple(tensor.shape)).astype(tensor.dtype))\n else:\n np_inputs = ref_input_func(*input_tensors)\n for np_input in np_inputs:\n tvm_inputs.append(tvm.nd.array(np_input, ctx))\n\n for tensor in output_tensors:\n tvm_outputs.append(\n tvm.nd.array(np.zeros(get_const_tuple(tensor.shape), dtype=tensor.dtype), ctx))\n\n mod(*(tvm_inputs + tvm_outputs))\n if ref_output_func is None:\n print([output.asnumpy() for output in tvm_outputs])\n else:\n np_output = ref_output_func(*np_inputs)\n if rtol is None:\n np.testing.assert_allclose(tvm_outputs[0].asnumpy(), np_output, rtol=1e-3)\n else:\n np.testing.assert_allclose(tvm_outputs[0].asnumpy(), np_output, rtol=rtol)\n print('CHECK PASS')\n\n input_tensors = config_map[\"input_tensors\"]\n output_tensors = config_map[\"output_tensors\"]\n ref_input_func = config_map[\"ref_input_func\"]\n ref_output_func = config_map[\"ref_output_func\"]\n rtol = config_map[\"rtol\"]\n\n sch = tvm.create_schedule([x.op for x in output_tensors])\n if config_map[\"print_ir\"]:\n _lower(sch, input_tensors + output_tensors)\n\n mod = _build(sch, input_tensors + output_tensors, config_map[\"name\"])\n\n _check(mod, input_tensors, output_tensors, ref_input_func, ref_output_func, rtol)", "title": "" }, { "docid": "3aa04ec97657f1aced10bc834b7accea", "score": "0.44670403", "text": "def calcdDdT(thermconsts):\n if thermconsts['LKFluid'] == 'ref':\n d2 = thermconsts['LKd2ref']\n elif thermconsts['LKFluid'] == 'simple':\n d2 = thermconsts['LKd2simple']\n else:\n print ('LK error in calcLKD')\n\n T = thermconsts['Temperature']\n Tc = thermconsts['TCrit']\n t2 = T*T\n return -(d2*Tc/t2)", "title": "" }, { "docid": "8bd4484e6a5ab3f898361db36c5b89e3", "score": "0.44381437", "text": "def get_gate_invocation_c(self):\r\n # No function block is generated if one of the inputs is a wire with constant value.\r\n # I.e. 
either the constant or the second input wire is propagated to the output for the corresponding logic gate's logic function.\r\n if self.disable_generation:\r\n return \"\"\r\n else:\r\n return f\" {self.out.name} = {self.gate_type}({self.a.get_wire_value_c_hier()}, {self.b.get_wire_value_c_hier()});\\n\"", "title": "" }, { "docid": "dd1cfb74c94fc94a13784b612b4589fd", "score": "0.44379905", "text": "def lll_cost(d):\n return d ** 3", "title": "" }, { "docid": "9e94357064c0f2afb388440179f93aee", "score": "0.44379246", "text": "def cdy(f):\n n = f.shape[1]\n south = [0] + list(range(n-1))\n north = list(range(1,n)) + [n-1]\n return 0.5*(f[:,north] - f[:,south])", "title": "" }, { "docid": "5da04385d387b99ae27b9060104b393d", "score": "0.4416052", "text": "def make_dbc(build: BuildData, labs_dir: str, dbc_path: str) -> NoReturn:\n try:\n gendbc(\n source_dir=labs_dir,\n encoding=\"utf-8\",\n dbc_path=dbc_path,\n dbc_folder=build.top_dbc_folder_name,\n flatten=False,\n verbose=verbosity_is_enabled(),\n debugging=False,\n )\n finally:\n pass", "title": "" }, { "docid": "32c2ad273204b9006c5e7d36e8597b56", "score": "0.44115666", "text": "def toCylindrical(self,dir=[0,1,2]):\n f = zeros_like(self)\n x,y,z = [ self[...,i] for i in dir ]\n f[...,0] = sqrt(x*x+y*y)\n f[...,1] = arctan2(y,x) / rad\n f[...,2] = z\n return f", "title": "" }, { "docid": "3b6503cb6681437f9d4c9f71e1596946", "score": "0.44020143", "text": "def C(source):\n return _embed_or_inline_c(source, True)", "title": "" }, { "docid": "ce624478a3a2f1f5ba48ddb7dbbb3cfe", "score": "0.43961677", "text": "def hvs_crd(c1, c2, rev=False):\n if rev:\n endcord = (c2['elat'], c2['elon'])\n else:\n endcord = (c2['slat'], c2['slon'])\n return haversine((c1['elat'], c1['elon']), endcord)", "title": "" }, { "docid": "b192a8f581539dfd9f40ebb5f7382744", "score": "0.43924603", "text": "def cdx(f):\n m = f.shape[0]\n west = [0] + list(range(m-1))\n east = list(range(1,m)) + [m-1]\n return 0.5*(f[east,:] - f[west,:])", "title": "" }, { "docid": "ee6e872be2491df2b4169a8d904a057f", "score": "0.4390537", "text": "def LC2CL(tensor):\n # type: (Tensor) -> Tensor\n return tensor.transpose(0, 1).contiguous()", "title": "" }, { "docid": "e3b288a3db20dce938a1c63f026cf47d", "score": "0.4383879", "text": "def genomic_to_cdna_coord(transcript, genomic_coord):\n exons = [exon.get_as_interval()\n for exon in get_exons(transcript)]\n\n if len(exons) == 0:\n return None\n\n strand = transcript.strand\n\n if strand == \"+\":\n exons.sort(key=lambda exon: exon.chrom_start)\n else:\n exons.sort(key=lambda exon: -exon.chrom_end)\n\n distances = [exon.distance(genomic_coord)\n for exon in exons]\n min_distance_to_exon = min(map(abs, distances))\n\n coding_offset = 0\n for exon in exons:\n exon_length = exon.chrom_end - exon.chrom_start\n distance = exon.distance(genomic_coord)\n if abs(distance) == min_distance_to_exon:\n if strand == \"+\":\n exon_start_cds_offset = coding_offset + 1\n exon_end_cds_offset = coding_offset + exon_length\n else:\n exon_start_cds_offset = coding_offset + exon_length\n exon_end_cds_offset = coding_offset + 1\n # This is the exon we want to annotate against.\n if distance == 0:\n # Inside the exon.\n if strand == \"+\":\n coord = (exon_start_cds_offset +\n (genomic_coord -\n (exon.chrom_start + 1)))\n else:\n coord = (exon_end_cds_offset +\n (exon.chrom_end -\n genomic_coord))\n cdna_coord = CDNACoord(coord, 0)\n else:\n # Outside the exon.\n if distance > 0:\n nearest_exonic = exon_start_cds_offset\n else:\n nearest_exonic = exon_end_cds_offset\n 
if strand == \"+\":\n distance *= -1\n\n # If outside transcript, don't use offset.\n if (genomic_coord < transcript.tx_position.chrom_start + 1 or\n genomic_coord > transcript.tx_position.chrom_stop):\n nearest_exonic += distance\n distance = 0\n cdna_coord = CDNACoord(nearest_exonic, distance)\n break\n coding_offset += exon_length\n\n # Adjust coordinates for coding transcript.\n if transcript.is_coding:\n # Detect if position before start codon.\n utr5p = get_utr5p_size(transcript) if transcript.is_coding else 0\n cdna_coord.coord -= utr5p\n if cdna_coord.coord <= 0:\n cdna_coord.coord -= 1\n else:\n # Detect if position is after stop_codon.\n exons = get_exons(transcript)\n stop_codon = find_stop_codon(exons, transcript.cds_position)\n stop_codon -= utr5p\n if (cdna_coord.coord > stop_codon or\n cdna_coord.coord == stop_codon and cdna_coord.offset > 0):\n cdna_coord.coord -= stop_codon\n cdna_coord.landmark = CDNA_STOP_CODON\n\n return cdna_coord", "title": "" }, { "docid": "ca8eef915055a80110e94a478c94d646", "score": "0.43692192", "text": "def write_cl(filename, cl, dtype=np.float64):\n # check the dtype and convert it\n fitsformat = getformat(dtype)\n column_names = ['TEMPERATURE','GRADIENT','CURL','G-T','C-T','C-G']\n if isinstance(cl, list):\n cols = [pf.Column(name=column_name,\n format='%s'%fitsformat,\n array=column_cl) for column_name, column_cl in zip(column_names[:len(cl)], cl)]\n else: # we write only one TT\n cols = [pf.Column(name='TEMPERATURE',\n format='%s'%fitsformat,\n array=cl)]\n\n tbhdu = pf.BinTableHDU.from_columns(cols)\n # add needed keywords\n tbhdu.header['CREATOR'] = 'healpy'\n writeto(tbhdu, filename)", "title": "" }, { "docid": "43329a638c037c76595e84ebf59a64ed", "score": "0.43676528", "text": "def __ccmd(self, args, rv=0):\n assert type(args) == str\n # Ensure 'coverage' is turned off-- it won't work.\n self.cmdline_run(\"{0}\".format(args), exit=rv, coverage=False)", "title": "" }, { "docid": "2037383eadb3c2f16ee9bdab685fca19", "score": "0.4366915", "text": "def cllr(Hp_LRs, Hd_LRs):\n Np = len(Hp_LRs)\n Nd = len(Hd_LRs)\n return 1.0 / 2.0 * (\n (1.0 / Np * sum([np.log2(1 + 1.0 / LR) for LR in Hp_LRs])) + 1.0 / Nd * sum([np.log2(1 + LR) for LR in Hd_LRs]))", "title": "" }, { "docid": "7e3fdc4477bebddc08bd7f6b17a25465", "score": "0.43542147", "text": "def clc(self) -> None:\n self.buf[self.pos] = 248\n self.pos += 1", "title": "" }, { "docid": "3b97b58db7291c8d471c63b486ba8cc0", "score": "0.4349628", "text": "def test_cdm(baseline_file='cl_baseline_cdm.pkl',atol=1e-20,rtol=1e-3):\n print('\\nTesting Cls for cdm...\\n')\n check_cl_match(baseline_file,'cdm',atol=atol,rtol=rtol)", "title": "" }, { "docid": "1f76fcc9b2f0b4e8ba968c3814ea5de4", "score": "0.4337672", "text": "def cci(self, n, array=False):\n result = talib.CCI(self.high, self.low, self.close, n)\n if array:\n return result\n return result[-1]", "title": "" }, { "docid": "6a7181e257ef6314c2bc974c0dac6c3c", "score": "0.43283027", "text": "def genDCD(residues,name,prot):\n top = md.Topology()\n chain = top.add_chain()\n for resname in prot.fasta:\n residue = top.add_residue(residues.loc[resname,'three'], chain)\n top.add_atom(residues.loc[resname,'three'], element=md.element.carbon, residue=residue)\n for i in range(len(prot.fasta)-1):\n top.add_bond(top.atom(i),top.atom(i+1))\n traj = md.load(prot.path+\"/{:s}_0.gsd\".format(name), top)[1000:]\n traj.top = top\n for replica in range(1,replicas(prot)):\n t = md.load(prot.path+\"/{:s}_{:d}.gsd\".format(name,replica), top)[1000:]\n t.top = top\n traj 
= md.join([traj,t])\n del t\n traj = traj.image_molecules(inplace=False, anchor_molecules=[set(traj.top.chain(0).atoms)], make_whole=True)\n traj.center_coordinates()\n traj.xyz += traj.unitcell_lengths[0,0]/2\n traj.save_dcd(prot.path+\"/{:s}.dcd\".format(name))\n traj[0].save_pdb(prot.path+\"/{:s}.pdb\".format(name))\n for replica in range(replicas(prot)):\n os.remove(prot.path+\"/{:s}_{:d}.gsd\".format(name,replica))", "title": "" }, { "docid": "59f759cb79ef49dde6095ceb73a4ce42", "score": "0.43271166", "text": "def do_cd(self, line):\n args = self.line_to_args(line)\n if len(args) == 0:\n dirname = '~'\n else:\n if args[0] == '-':\n dirname = self.prev_dir\n else:\n dirname = args[0]\n dirname = resolve_path(self.cur_dir, dirname)\n\n mode = auto(self.devs, get_mode, dirname)\n if mode_isdir(mode):\n self.prev_dir = self.cur_dir\n self.cur_dir = dirname\n auto(self.devs, chdir, dirname)\n else:\n eprint(\"Directory '%s' does not exist\" % dirname)", "title": "" }, { "docid": "f5ac87133ba0723857a4b114a5633826", "score": "0.4322817", "text": "def dot_c(self):\n return ' '*self.indent_level+self._sep.join(self._defs)", "title": "" }, { "docid": "35a0bec5c5fa65e1c7bb723643d74062", "score": "0.4320173", "text": "def cdhit(\n input_fasta: str,\n percent_identity: float,\n length_difference_cutoff: float = None,\n min_alignment_coverage: float = None,\n percent_identity_suffix: bool = False,\n output_dir: str = None\n) -> None:\n\n if not os.path.exists(input_fasta):\n raise FileNotFoundError(f'Fasta file {input_fasta} was not found.')\n\n if output_dir is not None:\n if not os.path.isdir(output_dir):\n raise NotADirectoryError(f'{output_dir} is not a directory.')\n output_prefix = os.path.join(output_dir, os.path.splitext(os.path.basename(input_fasta))[0])\n else:\n output_prefix = os.path.splitext(input_fasta)[0]\n\n if percent_identity_suffix:\n output_prefix = output_prefix + str(percent_identity)\n # from pdb import set_trace; set_trace()\n params = [\n 'cd-hit',\n '-i', input_fasta,\n '-o', output_prefix,\n '-c', str(percent_identity),\n '-n', str(_cdhit_word_size(percent_identity)),\n '-d', '0',\n '-sc', '1',\n ]\n\n if length_difference_cutoff is not None:\n params.extend(['-s', str(length_difference_cutoff)])\n\n if min_alignment_coverage is not None:\n params.extend([\n '-aL', str(min_alignment_coverage),\n '-aS', str(min_alignment_coverage),\n ])\n\n subprocess.check_call(params)", "title": "" }, { "docid": "c2c6958c103993dd1bc1eb0e137796e9", "score": "0.43184483", "text": "def DC(self,V=0, ch=1):\n\t\t\n\t\tself.session.write(\":SOUR%d:APPL:DC DEF,DEF, %.4f\" % (ch,V))", "title": "" }, { "docid": "c249095397e69fd63a33dac96f568537", "score": "0.4313726", "text": "def calcd(self, t):\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0\n return result", "title": "" }, { "docid": "9e65b76d841a1487f31b9746fbdea7b4", "score": "0.43133104", "text": "def dwwc(\n graph,\n metapath,\n damping=0.5,\n dense_threshold=0,\n dtype=numpy.float64,\n dwwc_method=None,\n):\n return dwwc_method(\n graph=graph,\n metapath=metapath,\n damping=damping,\n dense_threshold=dense_threshold,\n dtype=dtype,\n )", "title": "" }, { "docid": "a597b14013e443be5f5c3a8501155223", "score": "0.43100724", "text": "def cxtd_comp_from_undirected(self):\n self.cxtd_comp = list(nx.connected_component_subgraphs(self.disgraph))\n print(\"Num of cxtd comps in disgarph: 
%d\"%(len(self.cxtd_comp)))", "title": "" }, { "docid": "9d4c47402d034aba208f8f79d1e80079", "score": "0.43012372", "text": "def compute_C_matrix(self):\n self.C = np.repeat(np.diag(self.Lp)[:, np.newaxis],\n self.Lp.shape[0], axis=1)\n self.C += np.repeat(np.diag(self.Lp)[np.newaxis, :],\n self.Lp.shape[0], axis=0)\n self.C -= 2*self.Lp\n # the following is much slower\n # self.C = np.zeros(self.Lp.shape)\n # for i in range(self.Lp.shape[0]):\n # for j in range(self.Lp.shape[1]):\n # self.C[i, j] = self.Lp[i, i] + self.Lp[j, j] - 2*self.Lp[i, j]\n volG = np.sum(self.z)\n self.C *= volG\n sett.mt(0, 'computed commute distance matrix')\n self.Dchosen = self.C", "title": "" }, { "docid": "c53598f524c632a699ad3aefd3bf12b1", "score": "0.42999962", "text": "def convert_coulomb(self, data):\r\n\r\n return \"{} C\".format(data)", "title": "" }, { "docid": "2a8c151c0c157aa1896976808d02d0d0", "score": "0.42947423", "text": "def cy(self, ctl, tgt):\n return self.append(CyGate(), [ctl, tgt], [])", "title": "" }, { "docid": "6261b6e63f0b61d17cb423fef8c4f465", "score": "0.42942545", "text": "def cdist(self, x, y):\n\n differences = x.unsqueeze(1) - y.unsqueeze(0)\n distances = torch.sum(differences**2, -1).sqrt()\n return distances", "title": "" }, { "docid": "7803f994ac5b2fa4f389ef4c430673c0", "score": "0.42906135", "text": "def gen_cord(dot_number):\n global lt\n if len(lt[dot_number]) > 0: # if we already have coord-s for current dot in cache\n yield from lt[dot_number]\n return # we don't need to recalculate them again -> return\n i = (matrix[0][dot_number])\n s = set()\n for j in range(i + 1):\n s.update(((j, i - j), (j, -(i - j)), (-j, -(i - j)), (-j, i - j)))\n if len(s) > i * 2: # to speed up the calculation we compute partially\n yield from s # The same as \"for p in s: yield p\"\n lt[dot_number] = lt[dot_number].union(s)\n s.clear()\n if len(s) > 0: # remains\n yield from s\n lt[dot_number] = lt[dot_number].union(s)", "title": "" }, { "docid": "a281fa05065db4cb810bf44ba46ebe5e", "score": "0.42890027", "text": "def closeness_centrality(self, distance=None, write=False,\n write_property=None):\n if distance is not None:\n warnings.warn(\n \"Weighted closeness centrality for Neo4j graphs \"\n \"is not implemented: computing the unweighted version\",\n MetricProcessor.MetricProcessingWarning)\n result = self._run_gdc_query(\n \"gds.alpha.closeness\", \"closeness\", weight=None,\n write=write, write_property=write_property,\n score_name=\"centrality\")\n return result", "title": "" }, { "docid": "8ee95a5141df0890d43382cb3f4d5f7a", "score": "0.42743996", "text": "def _get_c_l(self) -> float:\n return self._c_l", "title": "" }, { "docid": "59174e599277637b60a3ed5f32c9d5b7", "score": "0.42741856", "text": "def convert_candela(self, data):\r\n\r\n return \"{} cd\".format(data)", "title": "" }, { "docid": "f045ac790efc806ebead1f0008344665", "score": "0.42726877", "text": "def C(self, q , dq ): \n \n [c1,s1,c2,s2,c12,s12] = self.trig( q )\n \n h = self.m2 * self.l1 * self.lc2 * s2\n \n C = np.zeros((2,2))\n \n C[0,0] = - h * dq[1]\n C[1,0] = h * dq[0]\n C[0,1] = - h * ( dq[0] + dq[1] )\n C[1,1] = 0\n\n \n return C", "title": "" }, { "docid": "d481ee0f690587c040b20bddd6a643cf", "score": "0.42699832", "text": "def run_command(command):\n p = subprocess.Popen(['cl'], shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n text = p.stdout.read()\n return 0, text", "title": "" }, { "docid": "8665c12e99bbc4bba290ba67e6d85fee", "score": "0.42662162", "text": "def compute_ctc_loss(logits, labels, 
logit_length, label_length):\r\n return tf.nn.ctc_loss(\r\n labels=labels,\r\n logits=logits,\r\n label_length=label_length,\r\n logit_length=logit_length,\r\n logits_time_major=False,\r\n unique=None,\r\n blank_index=-1,\r\n name=None\r\n )", "title": "" }, { "docid": "9aa4d909b59fb3940f2682374f734545", "score": "0.4264086", "text": "def to_cshl_full(self) -> str:\n if self.variant_type & core.Allele.Type.tandem_repeat:\n return f\"TR({self.tr_ref}x{self.tr_unit}->{self.tr_alt})\"\n if self.variant_type & core.Allele.Type.substitution:\n return f\"sub({self.ref}->{self.alt})\"\n if self.variant_type & core.Allele.Type.small_insertion:\n return f\"ins({self.alt})\"\n if self.variant_type & core.Allele.Type.small_deletion:\n return f\"del({self.length})\"\n if self.variant_type & core.Allele.Type.complex:\n return f\"comp({self.ref}->{self.alt})\"\n if self.variant_type & core.Allele.Type.large_duplication:\n return \"CNV+\"\n if self.variant_type & core.Allele.Type.large_deletion:\n return \"CNV-\"\n raise ValueError(f\"unexpected variant type: {self.variant_type}\")", "title": "" }, { "docid": "5bf4a544d11dd84b0c2de9b26ad5c04a", "score": "0.42640367", "text": "def Calcd(self,t):\n \n j=int(math.floor(t))\n if(j<0):\n j=0\n elif(j>=len(self.a)):\n j=len(self.a)-1\n\n dt=t-j\n result=self.b[j]+2.0*self.c[j]*dt+3.0*self.d[j]*dt*dt\n return result", "title": "" }, { "docid": "e04d1e18bddbd4df1af371a80c05a324", "score": "0.42573228", "text": "def gcdex(self, a, b):\n return python_gcdex(a, b)", "title": "" }, { "docid": "c50fa3111f6bc964518c482970d84a7f", "score": "0.4257226", "text": "def _get_C_cc_diagonals(self) -> npt.NDArray[np.float64]:\n C_cv_sums = np.sum(np.absolute(np.array(self._C_cv)), axis=1)\n # from other dots:\n off_diag = self._C_cc - np.diag(np.diag(self._C_cc))\n off_diag_sums = np.sum(np.absolute(off_diag), axis=1)\n\n diag = C_cv_sums + off_diag_sums\n diag += np.absolute(self._c_r) + np.absolute(self._c_r)\n\n return np.diag(diag)", "title": "" }, { "docid": "da64da90405b8d849840b52f6f8c5767", "score": "0.42549163", "text": "def get_gate_invocation_c(self):\r\n # No function block is generated if one of the inputs is a wire with constant value.\r\n # I.e. either the constant or the second input wire is propagated to the output for the corresponding logic gate's logic function.\r\n if self.disable_generation:\r\n return \"\"\r\n else:\r\n return f\" {self.out.name} = {self.gate_type}({self.a.get_wire_value_c_hier()});\\n\"", "title": "" }, { "docid": "8a8b86338f1062b57dbed0901cc42815", "score": "0.42515898", "text": "def getNodeCD(self):\n return self.__nodeCD", "title": "" }, { "docid": "0131b73008109e2eef8aa3843d067f7e", "score": "0.4247958", "text": "def dV_C(self, z):\n om, ol, h = self.om, self.ol, self.h\n ok = 1. - om - ol\n _D_H = self.D_H\n _D_A = self.D_A(z)\n return _D_H * (1. 
+ z)**2 * _D_A**2 / E(z, om, ol)", "title": "" }, { "docid": "5eb5af85f794eeb1dbc71afbf197cbab", "score": "0.42442828", "text": "def cosh(x1):\n\n if (use_origin_backend(x1)):\n return numpy.cosh(x1)\n\n if not isinstance(x1, dparray):\n raise TypeError(f\"DPNP cosh(): Unsupported x1={type(x1)}\")\n\n return dpnp_cosh(x1)", "title": "" }, { "docid": "6d710b10f3c798bbc43a98c10bc118bb", "score": "0.42385876", "text": "def CylindricalGrid2D(dr=None, dz=None, \n nr=None, nz=None, \n Lr=None, Lz=None,\n dx=1., dy=1., \n nx=None, ny=None,\n Lx=None, Ly=None,\n origin=((0,),(0,)),\n overlap=2,\n communicator=parallelComm):\n\n\n\n\n if dr is not None:\n dx = dr\n\n if dz is not None:\n dy = dz\n\n\n nx = nr or nx\n ny = nz or ny\n\n Lx = Lr or Lx\n Ly = Lz or Ly\n \n if numerix.getShape(dx) == () and numerix.getShape(dy) == ():\n\n dx, nx = _dnl(dx, nx, Lx)\n dy, ny = _dnl(dy, ny, Ly)\n from fipy.meshes.cylindricalUniformGrid2D import CylindricalUniformGrid2D\n return CylindricalUniformGrid2D(dx=dx, dy=dy, nx=nx or 1, ny=ny or 1, origin=origin, overlap=overlap, communicator=communicator)\n else:\n from fipy.meshes.cylindricalNonUniformGrid2D import CylindricalNonUniformGrid2D\n return CylindricalNonUniformGrid2D(dx=dx, dy=dy, nx=nx, ny=ny, origin=origin, overlap=overlap, communicator=communicator)", "title": "" }, { "docid": "9efaa01bf65441af1794a17447f4fa75", "score": "0.42385522", "text": "def cmidd(x, y, z, base=2):\n assert len(x) == len(y) == len(z), \"Arrays should have same length\"\n xz = np.c_[x, z]\n yz = np.c_[y, z]\n xyz = np.c_[x, y, z]\n return entropyd(xz, base) + entropyd(yz, base) - entropyd(xyz, base) - entropyd(z, base)", "title": "" }, { "docid": "24003c9b109a4f8fac267e7305ea8665", "score": "0.42337123", "text": "def addDCC(self, lda_address, lda_channel):\n print \"force DCC\"\n theDcc = Dcc()\n theDcc.setString(\"LDA_ADDRESS\", lda_address)\n theDcc.setInt(\"LDA_CHANNEL\", lda_channel)\n self.dccConf.add(theDcc)\n self.dccs.append(theDcc)\n print \"\\t DCC added\", theDcc.getString(\"LDA_ADDRESS\"), theDcc.getInt(\"LDA_CHANNEL\")", "title": "" } ]