Dataset columns:
    query      string    (lengths 9 to 9.05k characters)
    document   string    (lengths 10 to 222k characters)
    negatives  sequence  (19 to 20 items)
    metadata   dict
Mix an image with a constant base color. The base color should be a 1-by-3 array-like object representing an RGB color in [0, 255]^3 space. For example, to mix with orange, the transformation RGBTransform().mix_with((255, 127, 0)) might be used. The factor controls the strength of the color to be added. If the factor is 1.0, all pixels will be exactly the new color; if it is 0.0, the pixels will be unchanged.
def mix_with(self, base_color, factor=1.0):
    base_color = _to_rgb(base_color, "base_color")
    operation = _embed44((1 - factor) * np.eye(3))
    operation[:3, 3] = factor * base_color
    return self._then(operation)
[ "def mix(\n self,\n color: ColorInput,\n percent: float = util.DEF_MIX,\n *,\n in_place: bool = False,\n **interpolate_args: Any\n ) -> 'Color':\n\n # Mix really needs to be between 0 and 1 or steps will break\n domain = interpolate_args.get('domain')\n if domain is not None:\n interpolate_args['domain'] = interpolate.normalize_domain(domain)\n\n if not self._is_color(color) and not isinstance(color, (str, Mapping)):\n raise TypeError(\"Unexpected type '{}'\".format(type(color)))\n mixed = self.interpolate([self, color], **interpolate_args)(percent)\n return self._hotswap(mixed) if in_place else mixed", "def mix(self, other, coef=0.5):\n def m(a, b):\n return a * (1 - coef) + b * coef\n\n return Color(from_rgba=(c(m(self.r, other.r)),\n c(m(self.g, other.g)),\n c(m(self.b, other.b)),\n c(m(self.a, other.a))))", "def blend(image1, image2, factor):\n if factor == 0.0:\n return image1\n if factor == 1.0:\n return image2\n\n image1 = image1.astype(np.float32)\n image2 = image2.astype(np.float32)\n\n difference = image2 - image1\n scaled = factor * difference\n\n # Do addition in float.\n temp = image1 + scaled\n\n # Interpolate\n if factor > 0.0 and factor < 1.0:\n # Interpolation means we always stay within 0 and 255.\n return temp.astype(np.uint8)\n\n # Extrapolate:\n #\n # We need to clip and then cast.\n return np.clip(temp, a_min=0, a_max=255).astype(np.uint8)", "def colour_boost(image, colour, factor):\n result = {'height' : image['height'], 'width' : image['width'], 'pixels' : image['pixels'][:]} \n red_image = get_red_pixels(result)\n green_image = get_green_pixels(result)\n blue_image = get_blue_pixels(result)\n \n for x in range(image['width']):\n for y in range(image['height']):\n red_pixel = get_pixel(red_image, x, y)\n green_pixel = get_pixel(green_image, x, y)\n blue_pixel = get_pixel(blue_image, x, y)\n if colour == 'r':\n set_pixel(result, x, y, (red_pixel,) + (green_pixel//factor,) + (blue_pixel//factor,)) #For whatever the user input, we divide the other coloured pixels by the factor, and recombine to create a colour boosted image\n if colour == 'g':\n set_pixel(result, x, y, (red_pixel//factor,) + (green_pixel,) + (blue_pixel//factor,))\n if colour == 'b':\n set_pixel(result, x, y, (red_pixel//factor,) + (green_pixel//factor,) + (blue_pixel,))\n return result", "def mixfactor(self, segment):\n mixfactor = 0\n a = (89.0/1.5) + self.template['mixpoint']\n b = (188.0/1.5) + self.template['mixpoint']\n loud = self.loudness(self.original.analysis.segments, segment)\n if not loud:\n loud = self.original.analysis.loudness\n if loud != -1 * b:\n mixfactor = float(float(loud + a)/float(loud + b))\n if mixfactor > 0.8:\n mixfactor = 0.8\n elif mixfactor < 0.3:\n mixfactor = 0.3\n return mixfactor", "def multiply_color(clip, factor):\n return clip.image_transform(\n lambda frame: np.minimum(255, (factor * frame)).astype(\"uint8\")\n )", "def _mix_color(c1, c2, mix):\n return round(c2 * mix + c1 * (1 - mix))", "def blend(image1, image2, factor):\n assert 0.0 <= factor <= 1.0\n image1 = tf.convert_to_tensor(image1)\n image2 = tf.convert_to_tensor(image2)\n dtype = image1.dtype\n if factor == 0.0:\n return image1\n if factor == 1.0:\n return image2\n\n image1 = tf.cast(image1, tf.float32)\n image2 = tf.cast(image2, tf.float32)\n assert image1.shape == image2.shape\n difference = image2 - image1\n scaled = factor * difference\n temp = image1 + scaled\n flip = 255 if dtype == tf.uint8 else 1.0\n temp = tf.clip_by_value(temp, 0.0, flip)\n return tf.cast(temp, dtype)", "def 
driftColor(baseColor, factor=110):\n if baseColor.lightness() > 128:\n return baseColor.darker(factor)\n else:\n return baseColor.lighter(factor+10)", "def blend(image1, image2, factor, name=None):\n _check_image_dtype(image1)\n _check_image_dtype(image2)\n assert image1.dtype == image2.dtype, \"image1 type should exactly match type of image2\"\n\n if factor == 0.0:\n return image1\n elif factor == 1.0:\n return image2\n else:\n with tf.name_scope(name or \"blend\"):\n orig_dtype = image2.dtype\n\n image1, image2 = tf.image.convert_image_dtype(image1, tf.float32), tf.image.convert_image_dtype(image2, tf.float32)\n scaled_diff = (image2 - image1) * factor\n\n blended_image = image1 + scaled_diff\n\n blended_image = tf.image.convert_image_dtype(blended_image, orig_dtype, saturate=True)\n return blended_image", "def brightness(self, factor):\n\n channels = [\"r\", \"g\", \"b\"]\n total_lumes = clamp(self.get_luminance() + (255.0 * factor) - 255.0, 0.0, 255.0)\n\n if total_lumes == 255.0:\n # white\n self.r, self.g, self.b = 0xFF, 0xFF, 0xFF\n elif total_lumes == 0.0:\n # black\n self.r, self.g, self.b = 0x00, 0x00, 0x00\n else:\n # Adjust Brightness\n pts = (total_lumes - 0.299 * self.r - 0.587 * self.g - 0.114 * self.b)\n slots = set(channels)\n components = [float(self.r) + pts, float(self.g) + pts, float(self.b) + pts]\n count = 0\n for c in channels:\n overage, components[count] = self._get_overage(components[count])\n if overage:\n slots.remove(c)\n components = list(self._distribute_overage(components, overage, slots))\n count += 1\n\n self.r = clamp(round_int(components[0]), 0, 255) & 0xFF\n self.g = clamp(round_int(components[1]), 0, 255) & 0xFF\n self.b = clamp(round_int(components[2]), 0, 255) & 0xFF", "def normalize_image(self, factor, luminosity=None):\n if not luminosity:\n luminosity = self.average_luminosity()\n\n for i in range(len(self.pixels)):\n self.pixels[i] = self.pixels[i] * (factor / luminosity)", "def mix(src_color, src_f, dst_color, dst_f):\n src_a = src_color[:, 3] / 255\n dst_a = dst_color[:, 3] / 255\n out_a = src_a * src_f + dst_a * dst_f\n outafilter = out_a > 0\n out_rgb = np.zeros((src_color.shape[0], 3), dtype='u1')\n out_rgb[outafilter] = np.clip(np.round((src_color[outafilter, 0:3] * np.tile(src_a[outafilter].reshape(-1, 1), (1, 3)) * np.tile(src_f[outafilter].reshape(-1, 1), (1, 3)) + dst_color[outafilter, 0:3] * np.tile(dst_a[outafilter].reshape(-1, 1), (1, 3)) * np.tile(dst_f[outafilter].reshape(-1, 1), (1, 3))) / np.tile(out_a[outafilter].reshape(-1, 1), (1, 3))), 0, 255)\n return np.concatenate([out_rgb, np.clip(np.round(out_a * 255), 0, 255).reshape(-1, 1)], axis=1).astype('u1').copy()", "def downsample(self, factor):\n self.img = self.img[::factor, ::factor, :] if self.fast else self.img\n self.comb_structure_mask = self.comb_structure_mask[::factor, ::factor]\n self.unknown_mask = self.unknown_mask[::factor, ::factor]", "def mix_colors(color1: Color, color2: Color, mix_amount: float) -> Color:\n return [(1-mix_amount)*v1 + mix_amount*v2 for v1, v2 in zip(color1, color2)]", "def mix(self):\n self.update_sensors()\n\n if self.ls_1 == 0:\n self.err= 1\n else:\n self.m_p+= 10\n\n # Limit mix percentage to 100\n if self.m_p > 100:\n self.m_p= 100", "def adjust_saturation(image, factor):\r\n image[..., 1] = np.clip(image[..., 1] * factor, 0, 255)\r\n return image", "def mul(self, factor):\n return audioop.mul(self._frames, self._sampwidth, factor)", "def blend(c: float, a: float) -> float:\n return 255 + (c - 255) * a" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply this transformation to a copy of the given RGB image. The image should be a PIL image with at least three channels. Specifically, the RGB and RGBA modes are both supported, but L is not. Any channels past the first three will pass through unchanged. The original image will not be modified; a new image of the same mode and dimensions will be returned.
def applied_to(self, image):
    # PIL.Image.convert wants the matrix as a flattened 12-tuple.
    # (The docs claim that they want a 16-tuple, but this is wrong;
    # cf. _imaging.c:767 in the PIL 1.1.7 source.)
    matrix = tuple(self.get_matrix().flatten())
    channel_names = image.getbands()
    channel_count = len(channel_names)
    if channel_count < 3:
        raise ValueError("Image must have at least three channels!")
    elif channel_count == 3:
        return image.convert('RGB', matrix)
    else:
        # Probably an RGBA image.
        # Operate on the first three channels (assuming RGB),
        # and tack any others back on at the end.
        channels = list(image.split())
        rgb = PIL.Image.merge('RGB', channels[:3])
        transformed = rgb.convert('RGB', matrix)
        new_channels = transformed.split()
        channels[:3] = new_channels
        return PIL.Image.merge(''.join(channel_names), channels)
[ "def convert_image_to_rgb(self):\n self.image = self.image.convert('RGB')", "def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img", "def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pygame.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + (0,), None, pygame.BLEND_RGBA_ADD)\n\n return image", "def convert_to_rgb(image):\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n return image", "def apply(self,\n RGB,\n interpolator_1D=LinearInterpolator,\n interpolator_1D_args=None,\n interpolator_3D=table_interpolation_trilinear,\n interpolator_3D_args=None):\n\n for operation in self:\n if isinstance(operation, (LUT1D, LUT3x1D)):\n RGB = operation.apply(RGB, interpolator_1D,\n interpolator_1D_args)\n elif isinstance(operation, LUT3D):\n RGB = operation.apply(RGB, interpolator_3D,\n interpolator_3D_args)\n else:\n RGB = operation.apply(RGB)\n\n return RGB", "def color_convert(self, color_mode=ColorMode.RGB):\n self.image = color_convert(self.image, color_mode=color_mode)\n return self", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def _colorize(self, image, hue):\n img = image.convert(\"RGBA\")\n arr = np.array(img)\n new_img = Image.fromarray(\n self._shift_hue(arr, hue / 360.0).astype(\"uint8\"), \"RGBA\"\n )\n\n return new_img", "def reduce_color(image):\n\n # http://stackoverflow.com/questions/5906693/how-to-reduce-the-number-of-colors-in-an-image-with-opencv-in-python\n w, h, _ = image.shape\n for row in xrange(h-1):\n for col in xrange(w-1):\n #pi = row * w * 3 + col * 3\n pixel = image[col][row]\n pixel[0] = __reduceColorValue(pixel[0])\n pixel[1] = __reduceColorValue(pixel[1])\n pixel[2] = __reduceColorValue(pixel[2])\n return image", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def to_color(self):\n if self.channels == 4:\n color = opencv.cvtColor(self.img, opencv.COLOR_BGRA2BGR)\n return Image(color)\n elif self.channels == 1:\n color = opencv.cvtColor(self.img, opencv.COLOR_GRAY2BGR)\n return Image(color)\n else:\n return Image(self.img)", "def apply(self, image):\n if self.lut is None:\n msg = \"No operator loaded\"\n raise Exception(msg)\n\n if image.mode != \"L\":\n msg = \"Image mode must be L\"\n raise ValueError(msg)\n outimage = Image.new(image.mode, image.size, None)\n count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id)\n return count, outimage", "def grayscale(image: Image) -> Image:\r\n new_image = copy(image)\r\n for x,y,(r,g,b) in image:\r\n pix_bright = (r+g+b)//3\r\n Gray = create_color(pix_bright,pix_bright,pix_bright)\r\n set_color(new_image,x,y,Gray) \r\n return new_image", "def transform_image(self):\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 1, 28, 28)\n im = cv2.bitwise_not(im)\n im = im.reshape(28,28)\n \n with out:\n clear_output()\n \n # resize\n img = np.array(im)\n img = img.reshape(28*28,)\n \n #img = img/255.0\n \n return img", "def apply(self, image):\n pass", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = 
np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def rgb2I3 (img):\n\timg_width, img_height = img.size\n\t#make a copy to return\n\treturnimage = Image.new(\"RGB\", (img_width, img_height))\n\timagearray = img.load()\n\tfor y in range(0, img_height, 1):\t\t\t\t\t\n\t\tfor x in range(0, img_width, 1):\n\t\t\trgb = imagearray[x, y]\n\t\t\ti3 = ((2*rgb[1])-rgb[0]-rgb[2]) / 2\n\t\t\t#print rgb, i3\n\t\t\treturnimage.putpixel((x,y), (0,i3,0))\n\treturn returnimage", "def asRGB(self):\r\n\r\n width, height, pixels, meta = self.asDirect()\r\n if meta['alpha']:\r\n raise Error(\"will not convert image with alpha channel to RGB\")\r\n if not meta['greyscale']:\r\n return width, height, pixels, meta\r\n meta['greyscale'] = False\r\n typecode = 'BH'[meta['bitdepth'] > 8]\r\n\r\n def iterrgb():\r\n for row in pixels:\r\n a = array(typecode, [0]) * 3 * width\r\n for i in range(3):\r\n a[i::3] = row\r\n yield a\r\n return width, height, iterrgb(), meta", "def to_pillow(self) -> PILImage:\n return PILImage.fromarray(self.rgb().to_numpy())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Embed a 4-by-4 or smaller matrix in the upper-left corner of I_4.
def _embed44(matrix):
    result = np.eye(4)
    r, c = matrix.shape
    result[:r, :c] = matrix
    return result
[ "def as4Matrix(self):\n return numpy.array([[ self._w, self._v[0], self._v[1], self._v[2]],\n [-self._v[0], self._w, -self._v[2], self._v[1]],\n [-self._v[1], self._v[2], self._w, -self._v[0]],\n [-self._v[2], -self._v[1], self._v[0], self._w]])", "def coupe_en_4(self):\n [r,c] = self.D\n t = max(next_pow_2(r),next_pow_2(c))\n m = Mat([t,t], lambda i,j: self[i,j] if (i < r and j < c) else 0)\n s = t//2\n A = Mat([s, s], lambda i,j: m[i ,j ])\n B = Mat([s, s], lambda i,j: m[i ,j + s])\n C = Mat([s, s], lambda i,j: m[i + s,j ])\n D = Mat([s, s], lambda i,j: m[i + s,j + s])\n return(A,B,C,D)", "def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])", "def expand_vol_4d(input,dim):\n\n expanded = np.tile(np.expand_dims(np.expand_dims(input, 0), 0), dim)\n return expanded", "def Mat4(A):\n assert A.ndim == 4, \"Only support 4th order tensor\"\n M = np.array(\n [\n [\n A[0, 0, 0, 0],\n A[0, 0, 1, 1],\n A[0, 0, 2, 2],\n 2 * A[0, 0, 0, 1],\n 2 * A[0, 0, 1, 2],\n 2 * A[0, 0, 0, 2],\n ],\n [\n A[1, 1, 0, 0],\n A[1, 1, 1, 1],\n A[1, 1, 2, 2],\n 2 * A[1, 1, 0, 1],\n 2 * A[1, 1, 1, 2],\n 2 * A[1, 1, 0, 2],\n ],\n [\n A[2, 2, 0, 0],\n A[2, 2, 1, 1],\n A[2, 2, 2, 2],\n 2 * A[2, 2, 0, 1],\n 2 * A[2, 2, 1, 2],\n 2 * A[2, 2, 0, 2],\n ],\n [\n A[0, 1, 0, 0],\n A[0, 1, 1, 1],\n A[0, 1, 2, 2],\n 2 * A[0, 1, 0, 1],\n 2 * A[0, 1, 1, 2],\n 2 * A[0, 1, 0, 2],\n ],\n [\n A[1, 2, 0, 0],\n A[1, 2, 1, 1],\n A[1, 2, 2, 2],\n 2 * A[1, 2, 0, 1],\n 2 * A[1, 2, 1, 2],\n 2 * A[1, 2, 0, 2],\n ],\n [\n A[0, 2, 0, 0],\n A[0, 2, 1, 1],\n A[0, 2, 2, 2],\n 2 * A[0, 2, 0, 1],\n 2 * A[0, 2, 1, 2],\n 2 * A[0, 2, 0, 2],\n ],\n ]\n )\n return M", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def matrix1():\n return eye(3)*4 # EXERCISE: eye(3)*4", "def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()", "def mat4_translate( direction ):\n M = np.identity(4)\n M[:3, 3] = direction[:3]\n return M.transpose()", "def identity_matrix():\r\n return numpy.identity(4)", "def mat4_inverse(matrix):\n return np.linalg.inv(matrix)", "def identity_matrix():\n return numpy.identity(4)", "def gen_lower_matrix(size):\n matrix = np.zeros(shape=(size, size), dtype=np.float128)\n for i in range(size):\n for j in range(size):\n if i > j:\n rand_num = uniform(-size, size)\n matrix[i][j] = rand_num\n vector_x = MatrixGenerator.gen_vector(size)\n vector_b = np.dot(matrix, vector_x).reshape(size, 1)\n return matrix, vector_x, vector_b", "def d4out():\n\td4x.moveTo(d4x_out)\n\td4y.moveTo(d4y_out)", "def warp(im, A, output_shape):\n invA = np.linalg.inv(A)\n warped = np.zeros(output_shape)\n for i in range(output_shape[0]):\n for j in range(output_shape[1]):\n ps = np.rint(np.dot(invA, [i, j, 1])).astype(int)\n if ps[0] >= 0 and ps[0] < output_shape[0] and ps[1] >=0 and ps[1] < output_shape[1]:\n warped[i][j] = im[ps[0]][ps[1]]\n return warped", "def AssembleMatrix(self, el, A):\n return _hybridization.Hybridization_AssembleMatrix(self, el, A)", "def expand(self) :\n\n # XXX Obviouse improvement: Check if this matrix is already full \n # (ie not diagonal structure)\n # and if so, return a view.\n \n # Also verifies the validity of the matrix.\n shape = self.mat_shape()\n # Current algorithm assumes specific format.\n self.assert_axes_ordered()\n # Allocate memory.\n out_mat = 
sp.zeros(shape, dtype=self.dtype)\n out_mat = info_array(out_mat)\n out_mat = mat_array(out_mat)\n\n # Figure out how many axes are in both row and col (and therefore block\n # diagonal).\n n_blocks, block_shape = self.get_num_blocks(True, False)\n \n # Loop over the blocks and assign data.\n for ii, mat_block in enumerate(self.iter_blocks()) :\n # Figure out where this block starts.\n row_start = ii*block_shape[0]\n col_start = ii*block_shape[1]\n out_mat[row_start:row_start + block_shape[0], \n col_start:col_start + block_shape[1]] = mat_block\n return out_mat", "def LotkaVolterra_InhibitMatrix(self):\n LV = -2 * (torch.ones(len(self.filler2index),\n len(self.filler2index)) - torch.eye(len(self.filler2index)))\n LV = LV.double()\n return LV" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will first try to load the specified module from the pyrominfo package using the current module search path. If it can't be found, then the parent directory is added to the module search path and the import attempt is repeated.
def loadModule(mod):
    try:
        # from pyrominfo import gameboy, etc
        pyrominfo = __import__("pyrominfo", globals(), locals(), [mod])
    except ImportError:
        import os
        parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        os.sys.path.insert(0, parentdir)
        pyrominfo = __import__("pyrominfo", globals(), locals(), [mod])
    try:
        return getattr(pyrominfo, mod)
    except AttributeError:
        raise ImportError("testutils.loadModule() can't find module %s in pyrominfo package" % mod)
[ "def _find_module(self, name, path, parent=None):\n\n if parent is not None:\n # assert path is not None\n fullname = parent.identifier + '.' + name\n else:\n fullname = name\n\n node = self.findNode(fullname)\n if node is not None:\n self.msg(3, \"find_module: already included?\", node)\n raise ImportError(name)\n\n if path is None:\n if name in sys.builtin_module_names:\n return (None, BUILTIN_MODULE)\n\n path = self.path\n\n return self._find_module_path(fullname, name, path)", "def _import_top_module(self, parent, name):\n i = strop.find(name, '.')\n if i == -1:\n head = name\n tail = \"\"\n else:\n head = name[:i]\n tail = name[i+1:]\n if parent:\n fqname = \"%s.%s\" % (parent.__name__, head)\n else:\n fqname = head\n module = self._import_one(parent, head, fqname)\n if module:\n # the module was relative, or no context existed (the module was\n # simply found on the path).\n return module, tail\n if parent:\n # we tried relative, now try an absolute import (from the path)\n module = self._import_one(None, head, head)\n if module:\n return module, tail\n\n # the module wasn't found\n return None, None", "def load_module(self, name, extra_path=None):\n assert isinstance(name, basestring)\n assert is_iterable_typed(extra_path, basestring) or extra_path is None\n # See if we loaded module of this name already\n existing = self.loaded_tool_modules_.get(name)\n if existing:\n return existing\n\n # check the extra path as well as any paths outside\n # of the b2 package and import the module if it exists\n b2_path = os.path.normpath(b2.__path__[0])\n # normalize the pathing in the BOOST_BUILD_PATH.\n # this allows for using startswith() to determine\n # if a path is a subdirectory of the b2 root_path\n paths = [os.path.normpath(p) for p in self.manager.boost_build_path()]\n # remove all paths that start with b2's root_path\n paths = [p for p in paths if not p.startswith(b2_path)]\n # add any extra paths\n paths.extend(extra_path)\n\n try:\n # find_module is used so that the pyc's can be used.\n # an ImportError is raised if not found\n f, location, description = imp.find_module(name, paths)\n except ImportError:\n # if the module is not found in the b2 package,\n # this error will be handled later\n pass\n else:\n # we've found the module, now let's try loading it.\n # it's possible that the module itself contains an ImportError\n # which is why we're loading it in this else clause so that the\n # proper error message is shown to the end user.\n # TODO: does this module name really need to be mangled like this?\n mname = name + \"__for_jamfile\"\n self.loaded_tool_module_path_[mname] = location\n module = imp.load_module(mname, f, location, description)\n self.loaded_tool_modules_[name] = module\n return module\n\n # the cache is created here due to possibly importing packages\n # that end up calling get_manager() which might fail\n if not self.__python_module_cache:\n self.__build_python_module_cache()\n\n underscore_name = name.replace('-', '_')\n # check to see if the module is within the b2 package\n # and already loaded\n mname = self.__python_module_cache.get(underscore_name)\n if mname in sys.modules:\n return sys.modules[mname]\n # otherwise, if the module name is within the cache,\n # the module exists within the BOOST_BUILD_PATH,\n # load it.\n elif mname:\n # in some cases, self.loaded_tool_module_path_ needs to\n # have the path to the file during the import\n # (project.initialize() for example),\n # so the path needs to be set *before* importing the module.\n path = 
os.path.join(b2.__path__[0], *mname.split('.')[1:])\n self.loaded_tool_module_path_[mname] = path\n # mname is guaranteed to be importable since it was\n # found within the cache\n __import__(mname)\n module = sys.modules[mname]\n self.loaded_tool_modules_[name] = module\n return module\n\n self.manager.errors()(\"Cannot find module '%s'\" % name)", "def find_import(self, module_name: str) -> Tuple[Optional[str], bool]:\n module_name_split = module_name.split(\".\")\n for searchdir in self.options.pythonpath:\n path = path_utils.join(searchdir, *module_name_split)\n # See if this is a directory with a \"__init__.py\" defined.\n # (These also get automatically created in imports_map_loader.py)\n init_path = path_utils.join(path, \"__init__\")\n full_path = self.get_pyi_path(init_path)\n if full_path is not None:\n log.debug(\"Found module %r with path %r\", module_name, init_path)\n return full_path, True\n elif self.options.imports_map is None and path_utils.isdir(path):\n # We allow directories to not have an __init__ file.\n # The module's empty, but you can still load submodules.\n log.debug(\"Created empty module %r with path %r\",\n module_name, init_path)\n full_path = path_utils.join(path, \"__init__.pyi\")\n return full_path, False\n else: # Not a directory\n full_path = self.get_pyi_path(path)\n if full_path is not None:\n log.debug(\"Found module %r in path %r\", module_name, path)\n return full_path, True\n return None, None", "def lookupmodule(self, filename):\n if os.path.isabs(filename) and os.path.exists(filename):\n return filename\n f = os.path.join(sys.path[0], filename)\n if os.path.exists(f) and self.canonic(f) == self.mainpyfile:\n return f\n root, ext = os.path.splitext(filename)\n if ext == '':\n filename = filename + '.py'\n if os.path.isabs(filename):\n return filename\n for dirname in sys.path:\n while os.path.islink(dirname):\n dirname = os.readlink(dirname)\n fullname = os.path.join(dirname, filename)\n if os.path.exists(fullname):\n return fullname\n return None", "def loadModule (\r\n \r\n self,\r\n path = None\r\n ) :\r\n\r\n if not utilities.filePresent( path ) : return None\r\n\r\n directory = utilities.pathDirectory( path )\r\n\r\n package = utilities.pathLastName( directory )\r\n\r\n name = utilities.pathLastNameWithoutExtension( path )\r\n\r\n extension = utilities.pathExtension( path )\r\n\r\n if not extension.startswith( \"py\" ) : return None\r\n\r\n try :\r\n\r\n module = imp.load_source( package + \".\" + name, path )\r\n\r\n except Exception, exception :\r\n\r\n print str( exception )\r\n \r\n return None\r\n\r\n return module", "def _find_single_path(self, name, p, parent=None):\n if parent is not None:\n # assert path is not None\n fullname = parent.identifier + \".\" + name\n else:\n fullname = name\n\n try:\n return modulegraph.ModuleGraph.find_module(self, name, [p], parent)\n except ImportError as err:\n pass\n\n if not os.path.isfile(p):\n raise err\n\n zi = zipimport.zipimporter(p)\n m = zi.find_module(fullname.replace(\".\", \"/\"))\n if m:\n code = zi.get_code(fullname.replace(\".\", \"/\"))\n return zi, p, (\"\", \"\", 314)\n raise err", "def pythonpath_init():\n # Get this file's directory path\n my_dir = os.path.dirname(os.path.abspath(__file__))\n\n # Remove some paths from \"sys.path\" to avoid unexpected import resolution.\n\n # For each path in the list\n for path in ['', '.', my_dir]:\n # If the path is in \"sys.path\"\n if path in sys.path:\n # Remove the path from \"sys.path\"\n sys.path.remove(path)\n\n # Add \"src\" directory 
to \"sys.path\".\n # This is the import resolution we want.\n\n # Get \"src\" directory path\n src_dir = os.path.dirname(my_dir)\n\n # If \"src\" directory path is not in \"sys.path\"\n if src_dir not in sys.path:\n # Add \"src\" directory to \"sys.path\"\n sys.path.insert(0, src_dir)", "def load_module(name_or_path):\n if os.path.exists(name_or_path):\n path = name_or_path.rstrip(\"/\")\n modname = os.path.splitext(os.path.basename(path))[0]\n if os.path.isdir(path):\n path = os.path.join(path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(modname, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n mod = importlib.import_module(name_or_path)\n try:\n path = mod.__path__[0]\n except AttributeError:\n path = mod.__file__\n return mod, path", "def lookup_module(filename):\r\n\r\n # stolen from pdb\r\n import os\r\n import sys\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return filename\r\n f = os.path.join(sys.path[0], filename)\r\n if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:\r\n return f\r\n root, ext = os.path.splitext(filename)\r\n if ext == '':\r\n filename = filename + '.py'\r\n if os.path.isabs(filename):\r\n return filename\r\n for dirname in sys.path:\r\n while os.path.islink(dirname):\r\n dirname = os.readlink(dirname)\r\n fullname = os.path.join(dirname, filename)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return None", "def _safe_import_module(\n self, module_partname, module_name, parent_module):\n self.msgin(3, \"safe_import_module\", module_partname, module_name, parent_module)\n\n # If this module has *NOT* already been imported, do so.\n module = self.findNode(module_name)\n if module is None:\n # List of the absolute paths of all directories to be searched for\n # this module. This effectively defaults to \"sys.path\".\n search_dirs = None\n\n # If this module has a parent package...\n if parent_module is not None:\n # ...with a list of the absolute paths of all directories\n # comprising this package, prefer that to \"sys.path\".\n if parent_module.packagepath is not None:\n search_dirs = parent_module.packagepath\n # Else, something is horribly wrong. 
Return emptiness.\n else:\n self.msgout(3, \"safe_import_module -> None (parent_parent.packagepath is None)\")\n return None\n\n try:\n pathname, loader = self._find_module(\n module_partname, search_dirs, parent_module)\n except ImportError as exc:\n self.msgout(3, \"safe_import_module -> None (%r)\" % exc)\n return None\n\n module = self._load_module(module_name, pathname, loader)\n\n # If this is a submodule rather than top-level module...\n if parent_module is not None:\n self.msg(4, \"safe_import_module create reference\", module, \"->\", parent_module)\n\n # Add an edge from this submodule to its parent module.\n self._updateReference(\n module, parent_module, edge_data=DependencyInfo(\n conditional=False,\n fromlist=False,\n function=False,\n tryexcept=False,\n ))\n\n # Add this submodule to its parent module.\n parent_module.add_submodule(module_partname, module)\n\n # Return this module.\n self.msgout(3, \"safe_import_module ->\", module)\n return module", "def _fix_relative_import():\n parent_path = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))\n sys.path.insert(0, os.path.dirname(parent_path))\n global __package__ #pylint: disable=global-variable-undefined\n __package__ = os.path.basename(parent_path) #pylint: disable=redefined-builtin\n __import__(__package__)\n sys.path.pop(0)", "def load_jamfile(self, dir, jamfile_module):\n assert isinstance(dir, basestring)\n assert isinstance(jamfile_module, basestring)\n\n # See if the Jamfile is where it should be.\n is_jamroot = False\n jamfile_to_load = b2.util.path.glob([dir], self.JAMROOT)\n if jamfile_to_load:\n if len(jamfile_to_load) > 1:\n get_manager().errors()(\n \"Multiple Jamfiles found at '{}'\\n\"\n \"Filenames are: {}\"\n .format(dir, ' '.join(os.path.basename(j) for j in jamfile_to_load))\n )\n is_jamroot = True\n jamfile_to_load = jamfile_to_load[0]\n else:\n jamfile_to_load = self.find_jamfile(dir)\n\n dir = os.path.dirname(jamfile_to_load)\n if not dir:\n dir = \".\"\n\n self.used_projects[jamfile_module] = []\n\n # Now load the Jamfile in it's own context.\n # The call to 'initialize' may load parent Jamfile, which might have\n # 'use-project' statement that causes a second attempt to load the\n # same project we're loading now. Checking inside .jamfile-modules\n # prevents that second attempt from messing up.\n if not jamfile_module in self.jamfile_modules:\n previous_project = self.current_project\n # Initialize the jamfile module before loading.\n self.initialize(jamfile_module, dir, os.path.basename(jamfile_to_load))\n\n if not jamfile_module in self.jamfile_modules:\n saved_project = self.current_project\n self.jamfile_modules[jamfile_module] = True\n\n bjam.call(\"load\", jamfile_module, jamfile_to_load)\n\n if is_jamroot:\n jamfile = self.find_jamfile(dir, no_errors=True)\n if jamfile:\n bjam.call(\"load\", jamfile_module, jamfile)\n\n # Now do some checks\n if self.current_project != saved_project:\n from textwrap import dedent\n self.manager.errors()(dedent(\n \"\"\"\n The value of the .current-project variable has magically changed\n after loading a Jamfile. 
This means some of the targets might be\n defined a the wrong project.\n after loading %s\n expected value %s\n actual value %s\n \"\"\"\n % (jamfile_module, saved_project, self.current_project)\n ))\n\n self.end_load(previous_project)\n\n if self.global_build_dir:\n id = self.attributeDefault(jamfile_module, \"id\", None)\n project_root = self.attribute(jamfile_module, \"project-root\")\n location = self.attribute(jamfile_module, \"location\")\n\n if location and project_root == dir:\n # This is Jamroot\n if not id:\n # FIXME: go via errors module, so that contexts are\n # shown?\n print \"warning: the --build-dir option was specified\"\n print \"warning: but Jamroot at '%s'\" % dir\n print \"warning: specified no project id\"\n print \"warning: the --build-dir option will be ignored\"", "def _load_module_recursive(self, dir) :\t\n\t\tfor filepath in os.listdir(dir) :\n\t\t\tfullpath = os.path.join(dir, filepath)\n\n\t\t\tif os.path.isdir(fullpath) :\n\t\t\t\tself._load_module_recursive(fullpath)\n\n\t\t\telif os.path.splitext(filepath)[1] == '.py' :\n\t\t\t\tutils.load_module(fullpath, self.settings.ROOT_PATH)", "def find_module_file(base_directory, path):\n return os.path.join(base_directory, path)", "def _LoadPackage(self, name, path, parent, deferredImports, namespace):\r\n module = self._AddModule(name)\r\n module.path = [path]\r\n try:\r\n fp, path, info = imp.find_module(\"__init__\", module.path)\r\n self._LoadModule(name, fp, path, info, deferredImports, parent)\r\n except ImportError:\r\n if not namespace:\r\n raise\r\n fileName = os.path.join(path, \"__init__.py\")\r\n module.code = compile(\"\", fileName, \"exec\")\r\n return module", "def import_path(modulename, path='.'):\n\tpath = _osp.abspath(path)\n\tif not _osp.isdir(path):\n\t\traise ImportError(f'no directory: {path}')\n\twith _EnsureSysPath(_osp.abspath(path)):\n\t\treturn _il.import_module(modulename)", "def get_imported_resource(self, context):\r\n if self.level == 0:\r\n return context.pycore.find_module(\r\n self.module_name, folder=context.folder)\r\n else:\r\n return context.pycore.find_relative_module(\r\n self.module_name, context.folder, self.level)", "def NP_LoadModuleFromBootstrap(winghome, modname):\n \n # Limited to simple module loads\n assert '.' not in modname\n \n orig_sys_path = sys.path[:]\n orig_modules = set(sys.modules)\n \n dirname = winghome + '/bootstrap'\n sys.path.insert(0, dirname)\n \n code = 'import %s' % modname\n exec(code)\n \n new_modules = set(sys.modules)\n new_modules.difference_update(orig_modules)\n for mod in new_modules:\n del sys.modules[mod]\n \n sys.path = orig_sys_path\n \n return locals()[modname]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attach disk to VM by reconfiguration.
def attach_disk_to_vm(self, vm_ref, instance_name,
                      adapter_type, disk_type, vmdk_path=None,
                      disk_size=None, linked_clone=False,
                      controller_key=None, unit_number=None,
                      device_name=None):
    client_factory = self._session._get_vim().client.factory
    vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
        client_factory, adapter_type, disk_type, vmdk_path,
        disk_size, linked_clone, controller_key, unit_number,
        device_name)
    LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach "
                "disk %(vmdk_path)s or device %(device_name)s with type "
                "%(disk_type)s") % locals())
    reconfig_task = self._session._call_method(
        self._session._get_vim(),
        "ReconfigVM_Task", vm_ref,
        spec=vmdk_attach_config_spec)
    self._session._wait_for_task(instance_name, reconfig_task)
    LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach "
                "disk %(vmdk_path)s or device %(device_name)s with type "
                "%(disk_type)s") % locals())
[ "def attach_disk_to_vm(self, vm_ref, instance,\n adapter_type, disk_type, vmdk_path=None,\n disk_size=None, linked_clone=False,\n device_name=None, disk_io_limits=None):\n instance_name = instance.name\n client_factory = self._session.vim.client.factory\n devices = vm_util.get_hardware_devices(self._session, vm_ref)\n (controller_key, unit_number,\n controller_spec) = vm_util.allocate_controller_key_and_unit_number(\n client_factory,\n devices,\n adapter_type)\n\n vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(\n client_factory, disk_type, vmdk_path,\n disk_size, linked_clone, controller_key,\n unit_number, device_name, disk_io_limits)\n if controller_spec:\n vmdk_attach_config_spec.deviceChange.append(controller_spec)\n\n LOG.debug(\"Reconfiguring VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\",\n {'instance_name': instance_name, 'vmdk_path': vmdk_path,\n 'device_name': device_name, 'disk_type': disk_type},\n instance=instance)\n vm_util.reconfigure_vm(self._session, vm_ref, vmdk_attach_config_spec)\n LOG.debug(\"Reconfigured VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\",\n {'instance_name': instance_name, 'vmdk_path': vmdk_path,\n 'device_name': device_name, 'disk_type': disk_type},\n instance=instance)", "def AttachDisk(self, disk: 'AZComputeDisk') -> None:\n vm = self.compute_client.virtual_machines.get(\n self.resource_group_name, self.name)\n data_disks = vm.storage_profile.data_disks\n # ID to assign to the data disk to attach\n lun = 0 if len(data_disks) == 0 else len(data_disks) + 1\n\n update_data = {\n 'lun': lun,\n 'name': disk.name,\n 'create_option': models.DiskCreateOption.attach,\n 'managed_disk': {'id': disk.resource_id}\n }\n\n data_disks.append(update_data)\n\n try:\n request = self.compute_client.virtual_machines.begin_update(\n self.resource_group_name, self.name, vm)\n while not request.done():\n sleep(5) # Wait 5 seconds before checking vm status again\n except azure_exceptions.CloudError as exception:\n raise RuntimeError(\n 'Could not attach disk {0:s} to instance {1:s}: {2:s}'.format(\n disk.name, self.name, str(exception))) from exception", "def attach_disk(diskName=None, instanceName=None, diskPath=None):\n pass", "def attach_backup_disk(request, storage):\n self = request.node.cls\n\n if self.attach_backup_disk:\n testflow.setup(\"Attach backup disk from source VM to backup VM\")\n assert ll_vms.attach_backup_disk_to_vm(\n self.src_vm, self.backup_vm, self.first_snapshot_description\n ), (\"Failed to attach backup disk to backup vm %s\" % self.backup_vm\n )", "def attach_fcd_to_vm(vm, vdisk):\n # Finding next available unit number\n unit_number = 0\n controller = None\n for dev in vm.config.hardware.device:\n if hasattr(dev.backing, 'fileName'):\n unit_number = int(dev.unitNumber) + 1\n # unit_number 7 reserved for scsi controller\n if unit_number == 7:\n unit_number += 1\n if unit_number >= 16:\n raise Exception(\"We don't support this many disks.\")\n if isinstance(dev, vim.vm.device.VirtualSCSIController):\n controller = dev\n if controller is None:\n print(\"Disk SCSI controller not found!\")\n return -1\n\n # Setting backings\n spec = vim.vm.ConfigSpec()\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n disk_spec.device = vim.vm.device.VirtualDisk()\n disk_spec.device.backing = 
vim.vm.device.VirtualDisk.FlatVer2BackingInfo()\n disk_spec.device.backing.diskMode = 'persistent'\n disk_spec.device.backing.fileName = vdisk.config.backing.filePath\n disk_spec.device.backing.thinProvisioned = True\n disk_spec.device.unitNumber = unit_number\n disk_spec.device.controllerKey = controller.key\n\n # Creating change list\n dev_changes = [disk_spec]\n spec.deviceChange = dev_changes\n\n # Sending the request\n task = vm.ReconfigVM_Task(spec=spec)\n return task", "def attach_disk(self, instance, disk, zone):\n return self.call_api(\n '/zones/%s/instances/%s/attachDisk' % (zone, instance),\n method='POST',\n payload={\n 'autoDelete': True,\n 'deviceName': disk,\n 'source': 'projects/%s/zones/%s/disks/%s' % (\n self.project_id, zone, disk),\n },\n )", "def test_attach_RO_disk(self):\n self.prepare_disks_for_vm(read_only=True)\n ll_vms.start_vms([self.vm_name], 1, wait_for_ip=True)\n helpers.write_on_vms_ro_disks(self.vm_name)", "def _extend_backing(self, backing, new_size_in_gb, disk_type):\n root_vmdk_path = self.volumeops.get_vmdk_path(backing)\n datacenter = self.volumeops.get_dc(backing)\n eager_zero = disk_type == EAGER_ZEROED_THICK_VMDK_TYPE\n self.volumeops.extend_virtual_disk(new_size_in_gb, root_vmdk_path,\n datacenter, eager_zero)", "def disk(self, disk):\n self._context[\"disk\"] = disk", "def disk_config(self, disk_config):\n\n self._disk_config = disk_config", "def disk(self, disk):\n self._disk = disk", "def _attach_volume_vmdk(self, connection_info, instance,\n adapter_type=None):\n vm_ref = vm_util.get_vm_ref(self._session, instance)\n LOG.debug(\"_attach_volume_vmdk: %s\", connection_info,\n instance=instance)\n data = connection_info['data']\n volume_ref = self._get_volume_ref(data)\n\n # Get details required for adding disk device such as\n # adapter_type, disk_type\n vmdk = vm_util.get_vmdk_info(self._session, volume_ref)\n adapter_type = adapter_type or vmdk.adapter_type\n\n # IDE does not support disk hotplug\n if adapter_type == constants.ADAPTER_TYPE_IDE:\n state = vm_util.get_vm_state(self._session, instance)\n if state != power_state.SHUTDOWN:\n raise exception.Invalid(_('%s does not support disk '\n 'hotplug.') % adapter_type)\n\n # Attach the disk to virtual machine instance\n self.attach_disk_to_vm(vm_ref, instance, adapter_type, vmdk.disk_type,\n vmdk_path=vmdk.path)\n\n # Store the uuid of the volume_device\n self._update_volume_details(vm_ref, data['volume_id'],\n vmdk.device.backing.uuid)\n\n LOG.debug(\"Attached VMDK: %s\", connection_info, instance=instance)", "def create_disk(self, disk):\n spec = {\n 'new_vmdk': {\n # Convert from mebibytes to bytes because VMDK is specified in bytes\n 'capacity': 1024\n * 1024\n * disk.size,\n }\n }\n\n try:\n backend_id = self.client.create_disk(disk.vm.backend_id, spec)\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n disk.backend_id = backend_id\n disk.save(update_fields=['backend_id'])\n signals.vm_updated.send(self.__class__, vm=disk.vm)\n return disk", "def disk_recfg(self):\n devices = []\n edit = True\n host = Query.get_obj(self.virtual_machines.view, self.opts.name)\n disk_cfg_opts = {}\n # KB\n tokbytes = 1024*1024\n label = self.opts.disk_prefix + ' ' + str(self.opts.disk_id)\n try:\n key, controller = Query.get_key(host, label)\n except IOError:\n pass\n if self.opts.disk_id:\n for item in host.config.hardware.device:\n if label == item.deviceInfo.label:\n disk_new_size = self.opts.sizeGB * tokbytes\n current_size = item.capacityInKB\n current_size_gb = int(current_size / 
(1024*1024))\n if disk_new_size == current_size:\n raise ValueError(\n 'New size and existing size are equal'.format()\n )\n if disk_new_size < current_size:\n raise ValueError(\n 'Size {0} does not exceed {1}'.format(\n disk_new_size, current_size\n )\n )\n disk_delta = disk_new_size - current_size\n ds_capacity_kb = item.backing.datastore.summary.capacity / 1024\n ds_free_kb = item.backing.datastore.summary.freeSpace / 1024\n threshold_pct = 0.10\n if (ds_free_kb - disk_delta) / ds_capacity_kb < threshold_pct:\n raise ValueError(\n '{0} {1} disk space low, aborting.'.format(\n host.resourcePool.parent.name,\n item.backing.datastore.name\n )\n )\n\n disk_cfg_opts.update(\n {\n 'size' : disk_new_size,\n 'key' : key,\n 'controller' : controller,\n 'unit' : item.unitNumber,\n 'filename' : item.backing.fileName\n }\n )\n if disk_cfg_opts:\n devices.append(self.disk_config(edit=edit, **disk_cfg_opts))\n self.logger.info(\n '%s label: %s %s current_size: %s new_size: %s', host.name,\n self.opts.disk_prefix, self.opts.disk_id, current_size_gb, self.opts.sizeGB\n )\n self.reconfig(host, **{'deviceChange': devices})", "def set_virtual_disk_storage_profile(vm, hardware_device, profile):\n\n spec = vim.vm.ConfigSpec()\n device_specs = []\n profile_specs = []\n profile_spec = vim.vm.DefinedProfileSpec()\n profile_spec.profileId = profile.profileId.uniqueId\n profile_specs.append(profile_spec)\n\n device_spec = vim.vm.device.VirtualDeviceSpec()\n device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit\n device_spec.device = hardware_device\n device_spec.profile = profile_specs\n device_specs.append(device_spec)\n spec.deviceChange = device_specs\n vm.ReconfigVM_Task(spec)", "def set_disk(self, disk):\n self.disk = disk", "def attach(ctx, drive, dev_channel, device):\n server = ctx.obj.find_server(ctx.obj.server_name)\n attachment=dict(\n drive=ctx.obj.find_drive(drive),\n dev_channel=dev_channel, \n device=device,\n boot_order=len(server['drives'])+1\n )\n server.setdefault('drives', []).append(attachment)\n output(ctx.obj.server.update(server['uuid'], server))", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n config = self.load_config()\n instance_name = self.vm_prefix + instance.uuid\n if instance_name not in self._mounts:\n self._mounts[instance_name] = {}\n self._mounts[instance_name][mountpoint] = connection_info\n\n volume_id = connection_info['data']['volume_id']\n\n volume_self_link = self.get_or_create_volume(\n project=config['project'], zone=config['zone'], volume_name=volume_id)\n\n body = {\n \"kind\": \"compute#attachedDisk\",\n \"source\": volume_self_link,\n \"deviceName\": volume_id,\n \"boot\": False,\n \"autoDelete\": False,\n }\n\n operation = self.driver.instances().attachDisk(\n project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid, body=body).execute()\n\n self.wait_for_operation(operation)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detach disk from VM by reconfiguration.
def detach_disk_from_vm(self, vm_ref, instance_name, device):
    client_factory = self._session._get_vim().client.factory
    vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(
        client_factory, device)
    disk_key = device.key
    LOG.debug(_("Reconfiguring VM instance %(instance_name)s to detach "
                "disk %(disk_key)s") % locals())
    reconfig_task = self._session._call_method(
        self._session._get_vim(),
        "ReconfigVM_Task", vm_ref,
        spec=vmdk_detach_config_spec)
    self._session._wait_for_task(instance_name, reconfig_task)
    LOG.debug(_("Reconfigured VM instance %(instance_name)s to detach "
                "disk %(disk_key)s") % locals())
[ "def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n msg = \"*** Detach failed: disk={0} not found. VM={1}\".format(\n vmdk_path, vm.config.uuid)\n logging.warning(msg)\n return err(msg)\n\n spec = vim.vm.ConfigSpec()\n dev_changes = []\n\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n disk_spec.device = device\n dev_changes.append(disk_spec)\n spec.deviceChange = dev_changes\n\n try:\n wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])\n except vim.fault.GenericVmConfigFault as ex:\n for f in ex.faultMessage:\n logging.warning(f.message)\n return err(\"Failed to detach \" + vmdk_path)\n\n setStatusDetached(vmdk_path)\n logging.info(\"Disk detached %s\", vmdk_path)\n return None", "def detach_disk(diskName=None):\n pass", "def detach_disk_from_vm(self, vm_ref, instance, device,\n destroy_disk=False):\n instance_name = instance.name\n client_factory = self._session.vim.client.factory\n vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(\n client_factory, device, destroy_disk)\n disk_key = device.key\n LOG.debug(\"Reconfiguring VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\",\n {'instance_name': instance_name, 'disk_key': disk_key},\n instance=instance)\n vm_util.reconfigure_vm(self._session, vm_ref, vmdk_detach_config_spec)\n LOG.debug(\"Reconfigured VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\",\n {'instance_name': instance_name, 'disk_key': disk_key},\n instance=instance)", "def detachDisk(positive, alias, vmName):\n logger.info(\"Detaching disk %s from vm %s\", alias, vmName)\n disk_attachment = get_disk_attachment(vmName, alias, attr='name')\n return DISK_ATTACHMENTS_API.delete(disk_attachment, positive)", "def DetachDisk(self, disk: 'GoogleComputeDisk') -> None:\n\n gce_instance_client = self.GceApi().instances() # pylint: disable=no-member\n device_name = None\n for disk_dict in self.GetValue('disks'):\n if disk_dict['source'].split('/')[-1] == disk.name:\n device_name = disk_dict['deviceName']\n request = gce_instance_client.detachDisk(\n instance=self.name,\n project=self.project_id,\n zone=self.zone,\n deviceName=device_name)\n response = request.execute()\n self.BlockOperation(response, zone=self.zone)", "def detach_volume(self, host_path: str):\n del self.volumes[host_path]", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):", "def _detach_volume_fcd(self, connection_info, instance):\n vm_ref = vm_util.get_vm_ref(self._session, instance)\n data = connection_info['data']\n adapter_type = data['adapter_type']\n\n if adapter_type == constants.ADAPTER_TYPE_IDE:\n state = vm_util.get_vm_state(self._session, instance)\n if state != power_state.SHUTDOWN:\n raise exception.Invalid(_('%s does not support disk '\n 'hotplug.') % adapter_type)\n\n vm_util.detach_fcd(self._session, vm_ref, data['id'])", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):\n config = self.load_config()\n\n try:\n del self._mounts[instance['name']][mountpoint]\n except KeyError:\n pass\n\n volume_id = connection_info['data']['volume_id']\n\n operation = self.driver.instances().detachDisk(\n project=config['project'], zone=config['zone'], instance=self.vm_prefix + instance.uuid, deviceName=volume_id).execute()\n\n 
self.wait_for_operation(operation)", "def detach_pd(self, conn, host, pd):\n zone = self.get_zone(conn, host)\n pdhost = self.get_pd_host(conn, pd, zone)\n if pdhost == \"\":\n self.tracer.info(\n \"disk %s is already attached to %s(%s)\" % (pd, host, zone))\n elif pdhost == host:\n self.tracer.info(\"attempting to detach %s from %s(%s)\" % (pd, host, zone))\n operation = conn.instances().detachDisk(project=PROJECT, zone=zone, instance=host, deviceName=pd).execute()\n self.wait_for_operation(conn, operation, zone)\n if self.get_pd_host(conn, pd, zone) == \"\":\n self.tracer.info(\"successfully detached %s from %s(%s)\" % (pd, host, zone))", "def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()", "def detach(self):\r\n\r\n return self.driver.detach_volume(volume=self)", "def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None):\n raise NotImplementedError()", "def detach(self, **kw):\n\n kw_copy = deepcopy(kw)\n image_snap_or_device_spec = kw_copy.pop(\"image-snap-or-device-spec\", \"\")\n cmd = f\"{self.base_cmd} detach {image_snap_or_device_spec} {build_cmd_from_args(**kw_copy)}\"\n\n return self.execute_as_sudo(cmd=cmd)", "def detach(self, name):\n volume_info = self.cm.find_name(name)\n if volume_info and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if len(vms) == 0:\n Console.error(f\"{name} is not attached to any vm\")\n else:\n removed = []\n for vm in vms:\n result = self.unmount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" not in mounts.keys():\n removed.append(vm)\n for vm in removed:\n vms.remove(vm)\n result = self.update_volume_after_detach(volume_info, vms)\n return result[0]\n else:\n Console.error(\"volume does not exist or volume had been deleted\")", "def _delete_temp_disk(self, descriptor_ds_file_path, dc_ref):\n\n LOG.debug(\"Deleting temporary disk: %s.\", descriptor_ds_file_path)\n try:\n self.volumeops.delete_vmdk_file(\n descriptor_ds_file_path, dc_ref)\n except exceptions.VimException:\n LOG.warning(\"Error occurred while deleting temporary disk: %s.\",\n descriptor_ds_file_path, exc_info=True)", "def detach_volume(self, instance_name, mountpoint):\n return True", "def detach_volume(self, context, volume_id):\n # TODO(vish): refactor this into a more general \"unreserve\"\n # TODO(sleepsonthefloor): Is this 'elevated' appropriate?\n # self.db.volume_detached(context.elevated(), volume_id)\n self.db.volume_admin_metadata_delete(context.elevated(), volume_id,\n 'attached_mode')", "def eject(self) :\n success = False\n cmd = [self.hdiutil, \"detach\", self.myRamdiskDev]\n self.runWith.setCommand(cmd)\n self.runWith.communicate()\n retval, reterr, retcode = self.runWith.getNlogReturns()\n if not reterr:\n success = True\n self.runWith.getNlogReturns()\n\n return success" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return volume connector information.
def get_volume_connector(self, instance):
    iqn = volume_util.get_host_iqn(self._session, self._cluster)
    return {
        'ip': CONF.vmwareapi_host_ip,
        'initiator': iqn,
        'host': CONF.vmwareapi_host_ip
    }
[ "def get_volume_connector(self, instance):\n try:\n vm_ref = vm_util.get_vm_ref(self._session, instance)\n except exception.InstanceNotFound:\n vm_ref = None\n iqn = self._iscsi_get_host_iqn(instance)\n connector = {'ip': CONF.vmware.host_ip,\n 'initiator': iqn,\n 'host': CONF.vmware.host_ip}\n if vm_ref:\n connector['instance'] = vutil.get_moref_value(vm_ref)\n return connector", "def get_volume_connector(self, instance):\n props = {}\n # 'get_volume_connector' will be invoked during creation\n # of the partition and during deletion of the partition.\n # But 'wwpns' we can access only when partition is available.\n # During spawn flow 'get_volume_connector' function will be called\n # before 'spawn' function so to get 'wwpns' we first creating\n # the partition using 'prep_for_spawn' function so that\n # we can access 'wwpns'.(i.e - else part)\n # But during deletion 'get_volume_connector' will be called\n # after 'destroy' function which will delete the partition so\n # after that we can not get the 'wwpns'\n # In order to get 'wwpns' after 'destroy' function we are\n # saving 'wwpns' before deleting partition in 'destroy' function\n # in 'deleted_instance_wwpns_mapping' variable and using these 'wwpns'\n # in 'get_volume_connector'(i.e - if part)\n # after using these 'wwpns' we are removing these 'wwpns' from\n # 'deleted_instance_wwpns_mapping' variable because\n # we are not going to use these 'wwpns' any more after this.\n if instance.uuid in self.deleted_instance_wwpns_mapping:\n props['wwpns'] = self.deleted_instance_wwpns_mapping.pop(\n instance.uuid)\n else:\n inst = vm.PartitionInstance(instance, self._cpc)\n props['wwpns'] = inst.get_partition_wwpns()\n\n props['host'] = instance.uuid\n\n return props", "def initialize_connection(self, volume, connector, **kwargs):\n\n LOG.info(\"Connector is %s.\", connector)\n LOG.info(\"Volume is %s\",volume)\n connection_properties = dict(self.connection_properties)\n connection_properties['provider_id'] = volume['provider_id']\n return {'driver_volume_type': 'portworx',\n 'data': connection_properties}", "def initialize_connection(self, volume, connector):\n\n LOG.debug(\"Connector is %s.\", connector)\n connection_properties = dict(self.connection_properties)\n\n volname = self._id_to_base64(volume.id)\n connection_properties['scaleIO_volname'] = volname\n connection_properties['scaleIO_volume_id'] = volume.provider_id\n extra_specs = self._get_volumetype_extraspecs(volume)\n qos_specs = self._get_volumetype_qos(volume)\n storage_type = extra_specs.copy()\n storage_type.update(qos_specs)\n LOG.info(_LI(\"Volume type is %s.\"), storage_type)\n round_volume_size = self._round_to_num_gran(volume.size)\n iops_limit = self._get_iops_limit(round_volume_size, storage_type)\n bandwidth_limit = self._get_bandwidth_limit(round_volume_size,\n storage_type)\n LOG.info(_LI(\"iops limit is %s\"), iops_limit)\n LOG.info(_LI(\"bandwidth limit is %s\"), bandwidth_limit)\n connection_properties['iopsLimit'] = iops_limit\n connection_properties['bandwidthLimit'] = bandwidth_limit\n return {'driver_volume_type': 'scaleio',\n 'data': connection_properties}", "def getConnector(self):\r\n conntype= self.readReg(2, 1, False)[0]\r\n print \"Connector type:\", hex(conntype)\r\n return conntype", "def connect_info(self):\n return self._connect_info", "def GetConnectorType(self):\n return self._connector_type", "def get_volume(self):\n result = tuple()\n try:\n result = self.command('master-volume query')\n except:\n raise ReceiverError(\"Command query volume 
failed\", 'get_volume')\n finally:\n try:\n if len(result[1]) > 0:\n return str(int(result[1],16))\n except:\n raise ReceiverError(\"Volume data was bad and not sent back\", 'get_volume')", "def get_connector_properties(cls, root_helper, *args, **kwargs):\n execute = kwargs.get('execute') or priv_rootwrap.execute\n nvmf = NVMeOFConnector(root_helper=root_helper, execute=execute)\n ret = {}\n nqn = None\n uuid = nvmf._get_host_uuid()\n suuid = nvmf._get_system_uuid()\n if cls.nvme_present():\n nqn = utils.get_host_nqn()\n if uuid:\n ret['uuid'] = uuid\n if suuid:\n ret['system uuid'] = suuid # compatibility\n if nqn:\n ret['nqn'] = nqn\n ret['nvme_native_multipath'] = cls._set_native_multipath_supported()\n return ret", "def query_aws_volume_info(self):\n response = self.ec2_client.describe_volumes(Filters=[\n {'Name': 'tag:AXVolumeID', 'Values': [self.ax_volume_id]},\n {'Name': 'tag:KubernetesCluster', 'Values': [self.cluster_name_id]}\n ])\n\n for volume in response.get(\"Volumes\", None):\n for tag in volume[\"Tags\"]:\n if tag[\"Key\"] == \"KubernetesCluster\" and tag[\"Value\"] == self.cluster_name_id:\n return volume\n\n return None", "def connect_volume(self, connection_properties):\n\n sheepdog_handle = self._get_sheepdog_handle(connection_properties)\n return {'path': sheepdog_handle}", "def connection_details(self) -> Optional[Sequence['outputs.CompositionSpecToConnectionDetails']]:\n return pulumi.get(self, \"connection_details\")", "def connector(self):\n return self._connector", "def volume_info(self, volume):\n opts = dict(\n query=dict(\n volume_id_attributes=dict(\n name=volume\n )\n )\n )\n return self._item_to_volume(\n self.volume_get_iter(**opts)[\"netapp\"][\"results\"][\"attributes-list\"][\"volume-attributes\"])", "def connector(self):\n return self.__connector", "def virtual_channel_plugin_details(self):\n return self._virtual_channel_plugin_details", "def get_connector_properties(root_helper, *args, **kwargs):\n return {}", "def get_volume_info(host, disk_object, dc_obj):\n host_resource = get_host_resource_by_name(host)\n\n vol_id = disk_object.get_image_id()\n sd_id = disk_object.get_storage_domains().get_storage_domain()[0].get_id()\n image_id = disk_object.get_id()\n sp_id = dc_obj.get_id()\n\n args = {\n \"storagepoolID\": sp_id,\n \"storagedomainID\": sd_id,\n \"imageID\": image_id,\n \"volumeID\": vol_id,\n }\n\n return host_resource.vds_client(cmd=\"Volume.getInfo\", args=args)", "def getVolume(self):\n return _libsbml.Compartment_getVolume(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check that columns_lst is a subset of self.df.columns.names
def validate_col_lst(self, df, columns_lst): if columns_lst == []: raise ValueError("column_lst is empty") col_set = set(columns_lst) df_col_set = set(list(df)) if col_set - df_col_set != set(): msg = "col_lst has columns name that does not exists in the DataFrame columns:{}".format( str(col_set - df_col_set)) print(msg) raise ValueError(msg) return True
[ "def verify_columns_in_dataframe(df, columns):\n\n if not isinstance(columns, list):\n columns = [columns]\n return set(columns).issubset(df.columns)", "def _has_cols(trj: TrajaDataFrame, cols: list):\n return set(cols).issubset(trj.columns)", "def _check_columns(df: pd.DataFrame, names: typing.Sequence[str]) -> None:\n for expected in names:\n if expected not in df.columns:\n raise ValueError(f\"'{expected}' column not found in input\")\n return", "def _check_columns_exist(self, columns: List[str]):\n for col in columns:\n assert col in self.all_columns, f\"{col} is not a valid column.\"", "def check_columns_in_dataframe(df: pd.DataFrame, columns: Tuple[str]) -> None:\n\n for col in columns:\n if col not in df.columns:\n raise ValueError(f\"Column {col} is not in the dataframe.\")", "def get_column_names_set(df):\n return set(df.columns.values.tolist())", "def _validate_columns(self, names):\n if not is_list_like(names):\n raise ValueError(\"Columns should be list-like\")\n\n if len(set(names)) != len(names):\n raise ValueError(\"Duplicate column names\")\n\n if self._data and len(names) != len(self._data[0]):\n raise ValueError(\"Invalid columns length\")", "def _check_columns_unique(self, columns: List[str]):\n assert len(columns) == len(set(columns))", "def checkcolumnstest(chosen_columns, chosen_df):\n if not all([item in chosen_columns for item in chosen_df.columns]):\n raise ValueError('Columns do not match')", "def get_needed_columns(df, list_of_columns):\n return df[list_of_columns]", "def cols_valid(self,\n df: pd.DataFrame,\n req_cols: set) -> bool:\n missing_cols = req_cols.difference(df.columns)\n\n if len(missing_cols) > 0:\n logging.error(f\"{missing_cols} columns required but missing\")\n return False\n\n return True", "def verify_column_names_of_cdr_error_report_layer_3_table(self, column_name_list):\n if self.first_row_available:\n return self.verify_column_names_of_report_table(self.cdr_error_report_layer_3_table_id, column_name_list)\n else:\n return True", "def verify_column_names_of_cdr_error_report_layer_2_table(self, column_name_list):\n if self.first_row_available:\n return self.verify_column_names_of_report_table(self.cdr_error_report_layer_2_table_id, column_name_list)\n else:\n return True", "def verify_column_names_of_cdr_error_report_table(self, column_name_list):\n if self.first_row_available:\n return self.verify_column_names_of_report_table(self.cdr_error_report_table_id, column_name_list)\n else:\n return True", "def validate_columns(self) -> None:\n if self.attr_cols and not is_list_like(self.attr_cols):\n raise TypeError(\n f\"{type(self.attr_cols).__name__} is not a valid type for attr_cols\"\n )\n\n if self.elem_cols and not is_list_like(self.elem_cols):\n raise TypeError(\n f\"{type(self.elem_cols).__name__} is not a valid type for elem_cols\"\n )", "def _check_columns_with_table(table: Table, columns: Sequence[str]) -> Optional[bool]:\n for column in columns:\n if column not in table.c.keys():\n raise TypeError(f\"Specified column {column} did not exist on table {table}\")\n return True", "def _null_clean_check_valid_columns(self):\n valid_columns = ['imei_norm', 'imsi_norm']\n if self._perform_msisdn_import:\n valid_columns.append('msisdn_norm')\n if self._perform_rat_import:\n valid_columns.append('rat_norm')\n return valid_columns", "def __column_intersect(df, list_):\n return set(list_).intersection(set(df.columns.tolist()))", "def check_columns(self, column_names):\n expected_set = set(self.expected_columns)\n actual_set = set(column_names)\n 
diff = expected_set.difference(actual_set)\n if len(diff) != 0:\n raise ColumnNameException(str(list(diff)) + \": fields not available\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given an OU, find all the OUs within that OU...
def get_child_ous(logger, org_client, org_unit): logger.debug("Getting OUs for: %s", org_unit) result = [org_unit] # for this OU, get all the children... args = dict(ParentId=org_unit["Id"]) children = utils.generic_paginator(logger, org_client.list_organizational_units_for_parent, "OrganizationalUnits", **args) # update child paths and then call ourselves recursively to find all children for child in children: child["Path"] = "{}/{}".format(org_unit["Path"], child["Name"]).replace("//", "/") result.extend(get_child_ous(logger, org_client, child)) return result
[ "def search_ou(self, unit):\n if unit.startswith(\"ou=\"):\n ret = self.conn.search_s(\n unit,\n ldap.SCOPE_BASE,\n \"objectClass=organizationalUnit\"\n )\n else:\n ret = self.conn.search_s(\n ajan.config.ldap.base_dn,\n ldap.SCOPE_SUBTREE,\n \"(&(objectClass=organizationalUnit)(ou=%s))\" % unit\n )\n return ret", "def get_accounts_for_ou(logger, options, org_client, path):\n logger.debug(\"Getting accounts for OU: %s\", path)\n org_unit = get_ou_from_path(logger, org_client, path)\n ous = []\n if options.no_recursive:\n ous.append(org_unit)\n else:\n ous.extend(get_child_ous(logger, org_client, org_unit))\n\n result = []\n for org_unit in ous:\n args = {\"ParentId\":org_unit[\"Id\"]}\n accounts = utils.generic_paginator(logger, org_client.list_accounts_for_parent,\n \"Accounts\", **args)\n for acc in accounts:\n acc[\"Path\"] = org_unit[\"Path\"]\n if 'Status' in acc:\n if acc['Status'] != 'SUSPENDED':\n result.append(acc)\n else:\n logger.info(\"found suspended account %s, ignoring it.\" % acc)\n return result", "def list_ou(self, _):\n cn_re = re_compile(\"{[^}]+}\")\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n gpos = {}\n for gpo in results:\n gpos[gpo[\"cn\"]] = gpo[\"displayName\"]\n\n results = self.engine.query(self.engine.OU_FILTER())\n for result in results:\n print(result[\"distinguishedName\"])\n if \"gPLink\" in result:\n guids = cn_re.findall(result[\"gPLink\"])\n if len(guids) > 0:\n print(\"[gPLink]\")\n print(\"* {}\".format(\"\\n* \".join([gpos[g] if g in gpos else g for g in guids])))", "def query_ad_ou_structure(self):\n get_ou_structure_command = \"$OUs = Get-ADOrganizationalUnit -Filter * \\n $OUs.DistinguishedName\"\n #Run the powershell command\n cmd_raw_output = subprocess.run([\"powershell\", \"-Command\", get_ou_structure_command], stdout=PIPE, stderr=PIPE, check=True)\n #convert the powershell command result to a string\n cmd_output_as_string = cmd_raw_output.stdout.decode('utf-8').rstrip()\n #convert the powershell command result to a list split by the \\r\\n characters\n cmd_output_list = cmd_output_as_string.split(\"\\r\\n\")\n\n return cmd_output_list", "def get_ou(self, ou_name, ou_cls_name, base=\"\"):\n # имя OU в LDAP не уникально\n rez = []\n # attributes=[u'name',u'description',u'distinguishedName']\n if base == \"\":\n base = self.ldap_domain_name\n resp = self.conn.search(\n search_base=base,\n search_filter=\"(&(objectClass=organizationalUnit)\"\n + \"(name={ou_name}))\".format(ou_name=ou_name),\n attributes=ldap3.ALL_ATTRIBUTES,\n )\n\n if resp == False:\n raise OperationFailure(\n \"Search failed. 
Server meseage: {desc} - {msgs}.\".format(\n desc=self.conn.result[\"description\"],\n msgs=self.conn.result[\"message\"],\n )\n )\n else:\n mdl = getattr(sys.modules[__name__], ou_cls_name)\n cls = getattr(mdl, ou_cls_name)\n for itm in self.conn.entries:\n cls_vl = {}\n for cls_attr, ldap_attr in cls._FIELD_MAP.items():\n if ldap_attr in itm:\n l3_attr = getattr(itm, ldap_attr)\n cls_vl[cls_attr] = (\n \"\" if l3_attr.value is None else l3_attr.value\n )\n DN = str(getattr(itm, \"distinguishedName\").value)\n CN = str(getattr(itm, \"name\").value)\n BASE = DN.replace(\"OU=\" + CN + \",\", \"\", 1)\n cls_vl[\"org_unit\"] = BASE\n ou = cls(**cls_vl)\n rez.append(ou)\n return rez", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def _get_orgs(self):\n return self.api.get('/v2/organizations')", "def createOuInLDAP(ldir, ou):\n\n dn = 'ou=%s,%s' % (ou, ldir.ldap_base_creation)\n attrs = {'objectClass': ['top', 'organizationalUnit'],\n 'ou': ou}\n ldir.insertLDAP(dn, attrs)", "def find_tools(config):\n with get_ldap_conn(config) as conn:\n conn.search(\n 'ou=people,ou=servicegroups,dc=wikimedia,dc=org',\n '(cn=%s.*)' % PROJECT,\n ldap3.SEARCH_SCOPE_WHOLE_SUBTREE,\n attributes=['uidNumber', 'cn'],\n time_limit=5\n )\n\n users = []\n for resp in conn.response:\n attrs = resp['attributes']\n users.append((attrs['cn'][0], int(attrs['uidNumber'][0])))\n\n return users", "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "def test_get_ouis(self):\n pass", "def test_retrieve_l_organizations(self):\n pass", "def get_organizations(self):\n orgs = []\n if self.__document:\n for ent in self.__document.ents:\n lbl = ent.label_.upper()\n if lbl == 'ORG' or lbl == 'NORP':\n orgs.append(ent.text)\n return orgs", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def netapi32_NetGetJoinableOUs(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"lpServer\", \"lpDomain\", \"lpAccount\", \"lpPassword\", \"OUCount\", \"OUs\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _compute_dept_ou_domain(self):\n department_ids = []\n if self.operating_unit_id:\n self.env.cr.execute(\"\"\"\n SELECT id\n FROM hr_department\n WHERE operating_unit_id = %s \n AND parent_id IS NULL\n \"\"\"\n % (self.operating_unit_id.id))\n\n result = self.env.cr.fetchall()\n for res in result:\n department_id = res[0]\n self.env.cr.execute(\"\"\"\n WITH RECURSIVE\n subordinates AS(\n SELECT id, parent_id FROM hr_department WHERE id = %s\n UNION\n SELECT h.id, h.parent_id FROM hr_department h\n INNER JOIN subordinates s ON s.id = h.parent_id)\n SELECT * FROM subordinates\"\"\"\n % (department_id))\n result2 = self.env.cr.fetchall()\n for res2 in result2:\n department_ids.append(res2[0])\n self.dept_ou_domain = json.dumps(\n [('id', 'in', department_ids)]\n )", "def get_organizations(self, language=None, include_descendants=True):\n return self.get_reverse_related_page_extensions(\n \"organization\", language=language, include_descendants=include_descendants\n )", "def getAllRooms(z, opts):\n params = {}\n dmerge(params, parse_param('@attrs=uid'))\n dmerge(params, parse_param('@types=resources'))\n #dmerge(params, parse_param('@limit=5'))\n response = z.request('SearchDirectoryRequest', 
params=params, opts=opts)\n names = [item['name'] for item in response['SearchDirectoryResponse']['calresource']]\n return names" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a path, traverse Organizations OUs to locate the required OU...
def get_ou_from_path(logger, org_client, path): logger.debug("Getting OU from path: %s", path) current_ou = org_client.list_roots()["Roots"][0]["Id"] if path == "/": return {"Id":current_ou, "Path":path} for dir_name in path.split("/")[1:]: logger.debug("Getting OU from path: %s, looking for: %s", path, dir_name) found = False args = dict(ParentId=current_ou) children = utils.generic_paginator(logger, org_client.list_organizational_units_for_parent, "OrganizationalUnits", **args) for org_unit in children: if org_unit["Name"] == dir_name: current_ou = org_unit["Id"] found = True break if not found: raise ValueError("OU path not found") return {"Id":current_ou, "Path":path}
[ "def get_accounts_for_ou(logger, options, org_client, path):\n logger.debug(\"Getting accounts for OU: %s\", path)\n org_unit = get_ou_from_path(logger, org_client, path)\n ous = []\n if options.no_recursive:\n ous.append(org_unit)\n else:\n ous.extend(get_child_ous(logger, org_client, org_unit))\n\n result = []\n for org_unit in ous:\n args = {\"ParentId\":org_unit[\"Id\"]}\n accounts = utils.generic_paginator(logger, org_client.list_accounts_for_parent,\n \"Accounts\", **args)\n for acc in accounts:\n acc[\"Path\"] = org_unit[\"Path\"]\n if 'Status' in acc:\n if acc['Status'] != 'SUSPENDED':\n result.append(acc)\n else:\n logger.info(\"found suspended account %s, ignoring it.\" % acc)\n return result", "def search_ou(self, unit):\n if unit.startswith(\"ou=\"):\n ret = self.conn.search_s(\n unit,\n ldap.SCOPE_BASE,\n \"objectClass=organizationalUnit\"\n )\n else:\n ret = self.conn.search_s(\n ajan.config.ldap.base_dn,\n ldap.SCOPE_SUBTREE,\n \"(&(objectClass=organizationalUnit)(ou=%s))\" % unit\n )\n return ret", "def get_child_ous(logger, org_client, org_unit):\n logger.debug(\"Getting OUs for: %s\", org_unit)\n result = [org_unit]\n\n # for this OU, get all the children...\n args = dict(ParentId=org_unit[\"Id\"])\n children = utils.generic_paginator(logger, org_client.list_organizational_units_for_parent,\n \"OrganizationalUnits\", **args)\n\n # update child paths and then call ourselves recursively to find all children\n for child in children:\n child[\"Path\"] = \"{}/{}\".format(org_unit[\"Path\"], child[\"Name\"]).replace(\"//\", \"/\")\n result.extend(get_child_ous(logger, org_client, child))\n\n return result", "def build_account_path(self, ou_id, account_path, cache):\n current = self.list_parents(ou_id)\n\n # While not at the root of the Organization\n while current.get('Type') != \"ROOT\":\n # check cache for ou name of id\n if not cache.check(current.get('Id')):\n cache.add(\n current.get('Id'),\n self.describe_ou_name(\n current.get('Id')))\n ou_name = cache.check(current.get('Id'))\n account_path.append(ou_name)\n return self.build_account_path(\n current.get('Id'),\n account_path,\n cache\n )\n return Organizations.determine_ou_path(\n '/'.join(list(reversed(account_path))),\n self.describe_ou_name(\n self.get_parent_info().get(\"ou_parent_id\")\n )\n )", "def build_account_path(self, ou_id, account_path, cache):\n current = self.list_parents(ou_id)\n\n # While not at the root of the Organization\n while current.get(\"Type\") != \"ROOT\":\n # check cache for ou name of id\n if not cache.exists(current.get(\"Id\")):\n cache.add(\n current.get(\"Id\"),\n self.describe_ou_name(current.get(\"Id\")),\n )\n ou_name = cache.get(current.get(\"Id\"))\n account_path.append(ou_name)\n return self.build_account_path(current.get(\"Id\"), account_path, cache)\n return Organizations.determine_ou_path(\n \"/\".join(list(reversed(account_path))),\n self.describe_ou_name(self.get_parent_info().get(\"ou_parent_id\")),\n )", "def test_retrieve_l_organizations(self):\n pass", "def ldappath(path):\n\tparts=path.split(\".\")\n\t# print len(parts)\n\t# print parts\n\ti=len(parts)\n\t# print i\n\ti=i-1\n\tnewpath=\"\"\n\tfirst=\"cn=\"+parts[0]+\",\"\n\tlast=\"o=\"+parts[i]\n\tfor c in range(1,i):\n\t\t# print c\n\t\tnewpath=newpath+\"ou=\"+parts[c]+\",\"\n\t\t# print newpath\n\tfinalpath=first+newpath+last\n\t#print finalpath\n\treturn(finalpath)", "def parse_common_organization_path(path: str) -> Dict[str,str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return 
m.groupdict() if m else {}", "def parse_common_organization_path(path: str) -> Dict[str, str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return m.groupdict() if m else {}", "def get_organizations(self, language=None, include_descendants=True):\n return self.get_reverse_related_page_extensions(\n \"organization\", language=language, include_descendants=include_descendants\n )", "def _compute_dept_ou_domain(self):\n department_ids = []\n if self.operating_unit_id:\n self.env.cr.execute(\"\"\"\n SELECT id\n FROM hr_department\n WHERE operating_unit_id = %s \n AND parent_id IS NULL\n \"\"\"\n % (self.operating_unit_id.id))\n\n result = self.env.cr.fetchall()\n for res in result:\n department_id = res[0]\n self.env.cr.execute(\"\"\"\n WITH RECURSIVE\n subordinates AS(\n SELECT id, parent_id FROM hr_department WHERE id = %s\n UNION\n SELECT h.id, h.parent_id FROM hr_department h\n INNER JOIN subordinates s ON s.id = h.parent_id)\n SELECT * FROM subordinates\"\"\"\n % (department_id))\n result2 = self.env.cr.fetchall()\n for res2 in result2:\n department_ids.append(res2[0])\n self.dept_ou_domain = json.dumps(\n [('id', 'in', department_ids)]\n )", "def organizations(self):\n self.elements('organizations')", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def test_retrieve_l_organization(self):\n pass", "def _get_organizations(self):\n return self.__organizations", "def _get_orgs(self):\n return self.api.get('/v2/organizations')", "def get_organizations(self):\n orgs = []\n if self.__document:\n for ent in self.__document.ents:\n lbl = ent.label_.upper()\n if lbl == 'ORG' or lbl == 'NORP':\n orgs.append(ent.text)\n return orgs", "def save_organizations(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':organizations:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n organizations = u.organizations(first=100, after=end_cursor)\n else:\n organizations = u.organizations(first=100)\n if not organizations:\n return False\n while True:\n if organizations['data']['user']['organizations']['edges']:\n index = ''.join(['gh_organizations-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubOrganizations',\n document=organizations,\n login=user.login,\n path=path)\n has_next_page = organizations['data']['user']['organizations']['pageInfo']['hasNextPage']\n end_cursor = organizations['data']['user']['organizations']['pageInfo']['endCursor']\n if has_next_page:\n organizations = u.organizations(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':organizations:endCursor']), end_cursor)\n break\n else:\n break\n\n return True", "def list_ou(self, _):\n cn_re = re_compile(\"{[^}]+}\")\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n gpos = {}\n for gpo in results:\n gpos[gpo[\"cn\"]] = gpo[\"displayName\"]\n\n results = self.engine.query(self.engine.OU_FILTER())\n for result in results:\n print(result[\"distinguishedName\"])\n if \"gPLink\" in result:\n guids = cn_re.findall(result[\"gPLink\"])\n if len(guids) > 0:\n print(\"[gPLink]\")\n print(\"* {}\".format(\"\\n* \".join([gpos[g] if g in gpos else g for g in guids])))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a path, get all the AWS accounts within that part of an Organization...
def get_accounts_for_ou(logger, options, org_client, path): logger.debug("Getting accounts for OU: %s", path) org_unit = get_ou_from_path(logger, org_client, path) ous = [] if options.no_recursive: ous.append(org_unit) else: ous.extend(get_child_ous(logger, org_client, org_unit)) result = [] for org_unit in ous: args = {"ParentId":org_unit["Id"]} accounts = utils.generic_paginator(logger, org_client.list_accounts_for_parent, "Accounts", **args) for acc in accounts: acc["Path"] = org_unit["Path"] if 'Status' in acc: if acc['Status'] != 'SUSPENDED': result.append(acc) else: logger.info("found suspended account %s, ignoring it." % acc) return result
[ "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def get_aws_customers_account(self):\n filters = dict()\n filters['customer_id__in'] = self.get_customers_based_on_partner()\n filters['type'] = 'AWS'\n filters['active'] = 1\n\n return CloudAccounts.objects.filter(**filters)", "def handleList(self, confInfo):\n logger.info('Organization account list requested.')\n\n # Get the grand central account information for the organization account\n conf_handler_path = self.get_conf_handler_path_name('grand_central_aws_accounts', 'nobody')\n grand_central_aws_accounts_eai_response_payload = self.simple_request_eai(conf_handler_path, 'list', 'GET', get_args={'count': -1})\n\n account_list_payload = {'entry': []}\n\n for grand_central_aws_account in grand_central_aws_accounts_eai_response_payload['entry']:\n\n if grand_central_aws_account['content']['organization_master_account'] == '0':\n continue\n\n aws_secret_key_link_alternate = grand_central_aws_account['content'][\n 'aws_secret_key_link_alternate']\n aws_access_key = grand_central_aws_account['content'][\n 'aws_access_key']\n aws_account_id = grand_central_aws_account['content']['aws_account_id']\n\n passwords_conf_payload = self.simple_request_eai(aws_secret_key_link_alternate, 'list', 'GET')\n SECRET_KEY = passwords_conf_payload['entry'][0]['content']['clear_password']\n\n # Make call to AWS API endpoint\n client = boto3.client('organizations', aws_access_key_id=aws_access_key, aws_secret_access_key=SECRET_KEY)\n\n root_id = client.list_roots()['Roots'][0]['Id']\n ou_response = client.list_organizational_units_for_parent(ParentId=root_id)\n\n child_to_ou_map = {}\n\n for ou in ou_response['OrganizationalUnits']:\n ou_id = ou['Id']\n inner_response = client.list_children(ParentId=ou_id, ChildType='ACCOUNT')\n\n for child in inner_response['Children']:\n child_to_ou_map[child['Id']] = {'Id': ou['Id'], 'Name': ou['Name'], 'Arn': ou['Arn']}\n\n while 'NextToken' in inner_response:\n inner_response = client.list_children(NextToken=inner_response['NextToken'], ParentId=ou_id, ChildType='ACCOUNT')\n\n for child in inner_response['Children']:\n child_to_ou_map[child['Id']] = {'Id': ou['Id'], 'Name': ou['Name'], 'Arn': ou['Arn']}\n\n response = client.list_accounts()\n\n self.update_account_list_payload(account_list_payload, response, aws_account_id, child_to_ou_map)\n\n while 'NextToken' in response:\n response = client.list_accounts(NextToken=response['NextToken'])\n\n self.update_account_list_payload(account_list_payload, response, aws_account_id, child_to_ou_map)\n\n self.set_conf_info_from_eai_payload(confInfo, account_list_payload)", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def get_aws_accounts(self):\n accounts = []\n excel, sheets = self.parse_budget_file()\n\n if len(sheets) == 0:\n return accounts\n\n for sheet in sheets:\n try:\n # All the columns in the data frame loaded as string data type\n # This required because some of the AWS account number has preceding zeros\n sheet_df = pd.read_excel(excel, sheet_name=sheet, dtype=str)\n\n # Convert month field data type from string to float\n convert_dict = {int(self.current_month): float}\n sheet_df = sheet_df.astype(convert_dict)\n\n # drop last row which has total\n sheet_df.drop(sheet_df.tail(1).index, inplace=True)\n accounts.extend(list(sheet_df['AWS Account ID'].unique()))\n\n except 
Exception as error:\n self.logger.exception(\"Unable to read sheet name %s \\n Error %s\", sheet, error)\n # In case a sheet malformed, process other\n continue\n\n return accounts", "def _get_orgs(self):\n return self.api.get('/v2/organizations')", "def get_organizations(self):\n from savoy.core.people.models import Role\n organizations = Role.objects.filter(person=self)\n person_orgs = []\n for role in organizations:\n if role.organization not in person_orgs:\n person_orgs.append(role.organization)\n return person_orgs", "def search_scm_accounts(self, project, q=None):", "def organizations(self):\n self.elements('organizations')", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def test_get_namespaces_from_account(self):\n pass", "def test_get_namespaces_from_accounts(self):\n pass", "def _get_arns(self):\n client = self._get_client()\n\n account_arns = set()\n\n for role in list_roles(**self.conn_details):\n account_arns.add(role['Arn'])\n\n for user in list_users(**self.conn_details):\n account_arns.add(user['Arn'])\n\n for page in client.get_paginator('list_policies').paginate(Scope='Local'):\n for policy in page['Policies']:\n account_arns.add(policy['Arn'])\n\n for page in client.get_paginator('list_groups').paginate():\n for group in page['Groups']:\n account_arns.add(group['Arn'])\n\n result_arns = set()\n for arn in self.arn_list:\n if arn.lower() == 'all':\n return account_arns\n\n if arn not in account_arns:\n self.current_app.logger.warn(\"Provided ARN {arn} not found in account.\".format(arn=arn))\n continue\n\n result_arns.add(arn)\n\n self.current_app.logger.debug(\"got %d arns\", len(result_arns))\n return list(result_arns)", "def fetch_account_catalogs(account:str):\n for config in accounts:\n if account in config['streamers']:\n return config['catalogs']\n return", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def org_search():\n\n s3.prep = lambda r: r.method == \"search_ac\"\n\n return crud_controller(module, \"organisation\")", "def get_accounts(data_dir, **geth_kwargs):\n command, proc = spawn_geth(dict(\n data_dir=data_dir,\n suffix_args=['account', 'list'],\n **geth_kwargs\n ))\n stdoutdata, stderrdata = proc.communicate()\n\n if proc.returncode:\n if \"no keys in store\" in stderrdata:\n return tuple()\n else:\n raise ValueError(format_error_message(\n \"Error trying to list accounts\",\n command,\n proc.returncode,\n stdoutdata,\n stderrdata,\n ))\n accounts = parse_geth_accounts(stdoutdata)\n return accounts", "def parse_common_organization_path(path: str) -> Dict[str,str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return m.groupdict() if m else {}", "def parse_common_organization_path(path: str) -> Dict[str, str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return m.groupdict() if m else {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks globals() and builtins for the existence of the object name (used for StuWareSoftSystems' bootstrap)
def checkObjectInNameSpace(objectName): if objectName is None or not isinstance(objectName, basestring) or objectName == u"": return False if objectName in globals(): return True return objectName in dir(builtins)
[ "def _is_builtin(obj):\n\treturn obj.__class__.__module__ == 'builtins'", "def test_magicGlobalsBuiltins(self):\r\n self.flakes('__builtins__')", "def missing_global(name):\n return name not in globals()", "def _exists(name):\n return name in globals()", "def test_magicGlobalsName(self):\r\n self.flakes('__name__')", "def isbuiltin(object):\n if inspect.isbuiltin(object):\n return True\n\n return getattr(object, '__module__', None) == 'builtins'", "def global_exists(self, global_name):\n return self.evaluate('!(typeof %s === \"undefined\");' %\n global_name)", "def is_global(sym:str)->bool:\n return sym in globals() or sym in sys.modules", "def testFindsBuiltins(self):\r\n self.assertEqual('sys', modulefinder.get_module_filename('sys'))\r\n self.assertEqual('time', modulefinder.get_module_filename('time'))", "def has_global(node, name):\n return hasattr(node, \"globals\") and name in node.globals", "def check_is_builtin(cls) -> bool:\n return str(cls.check_package_path()).startswith('plugin/builtin')", "def is_builtins(self) -> bool:\n return self.source.startswith(self.builtins_import_string)", "def isbuiltin(object):\r\n return isinstance(object, types.BuiltinFunctionType)", "def in_builtin_module(self):\r\n return isinstance(self._module, compiled.CompiledObject)", "def test_core_object_types_global():\n for core_object_type in CORE_OBJECT_TYPES:\n core_object = get_object_from_string(core_object_type)\n assert core_object.__name__.lower() == core_object_type", "def is_builtin(f: 'function') -> bool:\n return hasattr(f, '__name__') and hasattr(builtins, f.__name__)", "def test_magicGlobalsPath(self):\r\n self.flakes('__path__', m.UndefinedName)\r\n self.flakes('__path__', filename='package/__init__.py')", "def test_global():\n global PATH, OS, collections, deque\n from os import path as PATH\n import os as OS\n import collections\n from collections import deque\n # make sure that these triggers unused-variable\n from sys import platform\n from sys import version as VERSION\n import this\n import re as RE", "def test_builtin_not_contains(self):\n self.assertFalse(\"foo\" in builtin)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pass a string in the format 'x.x.x'. Will check that this MacOSX version is at least that version. The 3rd micro number is optional
def isOSXVersionAtLeast(compareVersion): # type: (basestring) -> bool try: if not Platform.isOSX(): return False def convertVersion(convertString): _os_major = _os_minor = _os_micro = 0 _versionNumbers = [] for versionPart in StringUtils.splitIntoList(convertString, '.'): strippedPart = StringUtils.stripNonNumbers(versionPart, '.') if (StringUtils.isInteger(strippedPart)): _versionNumbers.append(Integer.valueOf(Integer.parseInt(strippedPart))) else: _versionNumbers.append(0) if len(_versionNumbers) >= 1: _os_major = max(0, _versionNumbers[0]) if len(_versionNumbers) >= 2: _os_minor = max(0, _versionNumbers[1]) if len(_versionNumbers) >= 3: _os_micro = max(0, _versionNumbers[2]) return _os_major, _os_minor, _os_micro os_major, os_minor, os_micro = convertVersion(System.getProperty("os.version", "0.0.0")) myPrint("DB", "MacOS Version number(s): %s.%s.%s" %(os_major, os_minor, os_micro)) if not isinstance(compareVersion, basestring) or len(compareVersion) < 1: myPrint("B", "ERROR: Invalid compareVersion of '%s' passed - returning False" %(compareVersion)) return False chk_os_major, chk_os_minor, chk_os_micro = convertVersion(compareVersion) myPrint("DB", "Comparing against Version(s): %s.%s.%s" %(chk_os_major, chk_os_minor, chk_os_micro)) if os_major < chk_os_major: return False if os_major > chk_os_major: return True if os_minor < chk_os_minor: return False if os_minor > chk_os_minor: return True if os_micro < chk_os_micro: return False return True except: myPrint("B", "ERROR: isOSXVersionAtLeast() failed - returning False") dump_sys_error_to_md_console_and_errorlog() return False
[ "def check_legitimate_ver(version):\n return re.match(\"^[0-9.]+$\", version)", "def check_regex(regex, version):\n os_version = version.split(' ')[0]\n os_version = os_version.split('-')[0]\n # If a version number is incomplete, extend it with zeros (e.g. 8 -> 8.0.0)\n dots = os_version.count('.')\n if dots > 2:\n raise Exception('Invalid version string: ' + os_version)\n while dots < 2:\n os_version += '.0'\n dots = os_version.count('.')\n return re.match(regex, os_version)", "def test_check_version():\n assert check_version('0.9.4-1', '0.9.4', '>=')\n assert check_version('3.0.0rc1', '3.0.0', '<')\n assert check_version('1.0', '1.0b2', '>')", "def validate_version(s):\n try:\n tpl = tuple(int(x) for x in s.split('.'))\n if len(tpl) != 3: raise ValueError\n except Exception:\n raise ValueError(\"HDF5 version string must be in X.Y.Z format\")", "def _is_python_version(s: str) -> bool:\n\n return s.startswith(\"2\") or s.startswith(\"3\")", "def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version", "def __check_nm_version(self):\n try:\n proxy = self.bus.get_object(\n self.system_service_name, \"/org/freedesktop/NetworkManager\")\n props = dbus.Interface(proxy, \"org.freedesktop.DBus.Properties\")\n version = props.Get(\"org.freedesktop.NetworkManager\", \"Version\")\n except dbus.exceptions.DBusException:\n version = \"0.8\"\n if re.match(r'^1\\.', version):\n self.nm_version = \"1.0\"\n return\n if re.match(r'^0\\.9', version):\n self.nm_version = \"0.9\"\n return\n if re.match(r'^0\\.8', version):\n self.nm_version = \"0.8\"\n return\n self.nm_version = Messages.unknown_version", "def match_version(version):\n\n if version in available_versions:\n return version\n\n matching_major_version = [i for i in available_versions.keys() if i.startswith(\".\".join(version.split(\".\")[:-1]))]\n\n if len(matching_major_version) == 0:\n raise VersionNotSupported\n\n warnings.warn(\n \"The selected model is not from the exact same version, only the same major version. 
It could lead to high error rate.\"\n )\n return matching_major_version[-1]", "def check_python_version(match, current=None):\n if current is None:\n current = list(sys.version_info[:3])\n if not isinstance(match, list):\n match = [match]\n for m in match:\n minimal = False\n if isinstance(m, float):\n m = str(m)\n if m.endswith(\"+\"):\n minimal = True\n m = m[:-1]\n # assert m[0].isdigit()\n # assert m[-1].isdigit()\n m = [int(x) for x in m.split(\".\")]\n current_len = current[: len(m)]\n # print(m, current, current_len)\n if minimal:\n if current_len >= m:\n return True\n else:\n if current_len == m:\n return True\n return False", "def is_new_osx():\n name = distutils.util.get_platform()\n if sys.platform != \"darwin\":\n return False\n elif name.startswith(\"macosx-10\"):\n minor_version = int(name.split(\"-\")[1].split(\".\")[1])\n if minor_version >= 7:\n return True\n else:\n return False\n else:\n return False", "def test_osx_version_number_value(self):\n \n running_version_number = get_osx_version()[0]\n \n # Check to make sure the returned valued is 10.11.1\n self.assertEqual(running_version_number, '10.11.1')", "def validate_version(s):\n m = re.match('(\\d+)\\.(\\d+)\\.(\\d+)$', s)\n if m:\n return tuple(int(x) for x in m.groups())\n raise ValueError(f\"HDF5 version string {s!r} not in X.Y.Z format\")", "def checkBakefileVersion(version):\n vcur = mk.vars['BAKEFILE_VERSION'].split('.')\n vreq = version.split('.')\n return vcur >= vreq", "def os_is_compatible(required_os_version: str) -> bool:\n\tcurrent_version = [int(c) for c in os_release().split('.')]\n\trequired_version = [int(c) for c in required_os_version.split('.')]\n\n\t# 10.13.6.2 is not (necessarily) compatible with 10.13.6\n\tif len(required_version) > len(current_version) and\\\n\t required_version[0:len(current_version)] == current_version:\n\t return False\n\n\t# Compare versions component-wise\n\tfor (c, r) in zip(current_version, required_version):\n\t\tif c < r:\n\t\t\treturn False\n\n\treturn True", "def check_ip_version(self, prefix):\n if ':' in prefix:\n return '6'\n return '4'", "def check_version(version='2.2'):\n err = \"PaddlePaddle version {} or higher is required, \" \\\n \"or a suitable develop version is satisfied as well. \\n\" \\\n \"Please make sure the version is good with your code.\".format(version)\n\n version_installed = [\n paddle_version.major, paddle_version.minor, paddle_version.patch,\n paddle_version.rc\n ]\n\n if version_installed == ['0', '0', '0', '0']:\n return\n\n version_split = version.split('.')\n\n length = min(len(version_installed), len(version_split))\n for i in six.moves.range(length):\n if version_installed[i] > version_split[i]:\n return\n if version_installed[i] < version_split[i]:\n raise Exception(err)", "def supports_version(self, required_version: str) -> bool:\n return parse_version(self.server_version) >= parse_version(required_version)", "def compatible_go_version(*, compiler_version: str, target_version: str) -> bool:\n if target_version == \"1.0\":\n return True\n\n def parse(v: str) -> tuple[int, int]:\n major, minor = v.split(\".\", maxsplit=1)\n return int(major), int(minor)\n\n return parse(target_version) <= parse(compiler_version)", "def rpn_version_check(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detect Intel x86 32bit system
def isIntelX86_32bit(): return String(System.getProperty("os.arch", "null").strip()).toLowerCase(Locale.ROOT) == "x86"
[ "def is_32bit(self):\n return self.machine in ['i386', 'i586', 'i686']", "def osarch_is_32_bit():\n return osarch_match(\"32-bit\")", "def osarch_is_32_bit():\n return osarch_match(\"32-bit\")", "def is_32bit(self) -> bool:\n return kernel32.IsWow64Process(self.handle)", "def osarch_is_ia32():\n return osarch_match(\"ia32\")", "def osarch_is_ia32():\n return osarch_match(\"ia32\")", "def check_32bit(pe): \n bits = True\n if not hex(pe.FILE_HEADER.Machine) == '0x14c':\n bits = False\n return bits", "def use32_bit_worker_process(self) -> bool:\n return pulumi.get(self, \"use32_bit_worker_process\")", "def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name", "def architecture():\n \n arch_dict = {'AMD64': 64, 'x86_64': 64, 'i386': 32, 'i686': 32, 'x86': 32}\n \n machine = platform.machine()\n return arch_dict.get(machine, None)", "def host_arch_cc():\n\n k = cc_macros()\n\n matchup = {\n '__x86_64__' : 'x64',\n '__i386__' : 'ia32',\n '__arm__' : 'arm',\n }\n\n rtn = 'ia32' # default\n\n for i in matchup:\n if i in k and k[i] != '0':\n rtn = matchup[i]\n break\n\n return rtn", "def host_architecture(self):\n return self._host_architecture", "def interpreter_architecture(): # pragma: no cover\n if hasattr(sys, \"maxsize\"):\n if sys.maxsize > 2**32:\n return 64\n else:\n return 32\n else:\n # Python < 2.6, not as accurate as the above\n if platform.architecture()[0] == \"64bits\":\n return 64\n else:\n return 32", "def is64bit():\r\n return platform.machine().endswith('64')", "def bitness():\n # see https://docs.python.org/2/library/platform.html#platform.architecture\n return '64-bit' if sys.maxsize > 2**32 else '32-bit'", "def osarch_is_amd64():\n return osarch_match(\"amd64\")", "def is_os_64bit():\n import platform\n return platform.machine().endswith('64')", "def platform_is_64bit():\n is64bit = sys.maxsize > 2 ** 32\n if sys.platform == \"cli\":\n is64bit = sys.executable.endswith(\"ipy64.exe\")\n return is64bit", "def host_arch_cc():\n\n k = cc_macros()\n\n matchup = {\n '__x86_64__': 'x64',\n '__i386__': 'ia32',\n '__arm__': 'arm',\n }\n\n rtn = 'ia32' # default\n\n for i in matchup:\n if i in k and k[i] != '0':\n rtn = matchup[i]\n break\n\n return rtn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grabs the MD defaultText font, reduces default size down to below 18, sets UIManager defaults (if runtime extension, will probably error, so I catch and skip)
def setDefaultFonts(): if MD_REF_UI is None: return # If a runtime extension, then this may fail, depending on timing... Just ignore and return... try: myFont = MD_REF.getUI().getFonts().defaultText except: myPrint("B","ERROR trying to call .getUI().getFonts().defaultText - skipping setDefaultFonts()") return if myFont is None: myPrint("B","WARNING: In setDefaultFonts(): calling .getUI().getFonts().defaultText has returned None (but moneydance_ui was set) - skipping setDefaultFonts()") return if myFont.getSize()>18: try: myFont = myFont.deriveFont(16.0) myPrint("B", "I have reduced the font size down to point-size 16 - Default Fonts are now set to: %s" %(myFont)) except: myPrint("B","ERROR - failed to override font point size down to 16.... will ignore and continue. Font set to: %s" %(myFont)) else: myPrint("DB", "Attempting to set default font to %s" %myFont) try: UIManager.getLookAndFeelDefaults().put("defaultFont", myFont ) # https://thebadprogrammer.com/swing-uimanager-keys/ UIManager.put("CheckBoxMenuItem.acceleratorFont", myFont) UIManager.put("Button.font", myFont) UIManager.put("ToggleButton.font", myFont) UIManager.put("RadioButton.font", myFont) UIManager.put("CheckBox.font", myFont) UIManager.put("ColorChooser.font", myFont) UIManager.put("ComboBox.font", myFont) UIManager.put("Label.font", myFont) UIManager.put("List.font", myFont) UIManager.put("MenuBar.font", myFont) UIManager.put("Menu.acceleratorFont", myFont) UIManager.put("RadioButtonMenuItem.acceleratorFont", myFont) UIManager.put("MenuItem.acceleratorFont", myFont) UIManager.put("MenuItem.font", myFont) UIManager.put("RadioButtonMenuItem.font", myFont) UIManager.put("CheckBoxMenuItem.font", myFont) UIManager.put("OptionPane.buttonFont", myFont) UIManager.put("OptionPane.messageFont", myFont) UIManager.put("Menu.font", myFont) UIManager.put("PopupMenu.font", myFont) UIManager.put("OptionPane.font", myFont) UIManager.put("Panel.font", myFont) UIManager.put("ProgressBar.font", myFont) UIManager.put("ScrollPane.font", myFont) UIManager.put("Viewport.font", myFont) UIManager.put("TabbedPane.font", myFont) UIManager.put("Slider.font", myFont) UIManager.put("Table.font", myFont) UIManager.put("TableHeader.font", myFont) UIManager.put("TextField.font", myFont) UIManager.put("Spinner.font", myFont) UIManager.put("PasswordField.font", myFont) UIManager.put("TextArea.font", myFont) UIManager.put("TextPane.font", myFont) UIManager.put("EditorPane.font", myFont) UIManager.put("TabbedPane.smallFont", myFont) UIManager.put("TitledBorder.font", myFont) UIManager.put("ToolBar.font", myFont) UIManager.put("ToolTip.font", myFont) UIManager.put("Tree.font", myFont) UIManager.put("FormattedTextField.font", myFont) UIManager.put("IconButton.font", myFont) UIManager.put("InternalFrame.optionDialogTitleFont", myFont) UIManager.put("InternalFrame.paletteTitleFont", myFont) UIManager.put("InternalFrame.titleFont", myFont) except: myPrint("B","Failed to set Swing default fonts to use Moneydance defaults... sorry") myPrint("DB",".setDefaultFonts() successfully executed...") return
[ "def get_default_font():\r\n return _font_defaultname", "def _set_default_font(cls):\n if platform.system() == \"Linux\":\n for family in (\"DejaVu Sans\", \"Noto Sans\", \"Nimbus Sans\"):\n if family in tk.font.families():\n logger.debug(\"Setting default font to: '%s'\", family)\n tk.font.nametofont(\"TkDefaultFont\").configure(family=family)\n tk.font.nametofont(\"TkHeadingFont\").configure(family=family)\n tk.font.nametofont(\"TkMenuFont\").configure(family=family)\n break\n return tk.font.nametofont(\"TkDefaultFont\").configure()[\"family\"]", "def defaultFont(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def fontscheme6(root):\n default_font = ('trebuchet ms', 12, 'normal bold')\n root.option_add('*Font', default_font)", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, message:\n print \"Cannot load the default font\"\n raise SystemExit, message\n return f", "def FontSetSizeMedium() -> None:\n pass", "def set_default_font(self):\n\n # Save the system default font information before we make any changes.\n fontname = default_fontname = self.GetFont().GetFaceName()\n fontsize = default_fontsize = self.GetFont().GetPointSize()\n\n # If requested, override the font name to use. Note that:\n # - the MS Windows default font appears to be the same as Tahoma\n # - Arial tends to be narrower and taller than Tahoma.\n # - Verdana tends to be wider and shorter than Tahoma.\n if len(sys.argv) > 1:\n if '--tahoma' in sys.argv[1:]:\n fontname = \"Tahoma\"\n elif '--arial' in sys.argv[1:]:\n fontname = \"Arial\"\n elif '--verdana' in sys.argv[1:]:\n fontname = \"Verdana\"\n\n fontsize = choose_fontsize(fontname=fontname)\n\n # If requested, override the font point size to use.\n if len(sys.argv) > 1:\n if '--12pt' in sys.argv[1:]:\n fontsize = 12\n elif '--11pt' in sys.argv[1:]:\n fontsize = 11\n elif '--10pt' in sys.argv[1:]:\n fontsize = 10\n elif '--9pt' in sys.argv[1:]:\n fontsize = 9\n elif '--8pt' in sys.argv[1:]:\n fontsize = 8\n elif '--7pt' in sys.argv[1:]:\n fontsize = 7\n elif '--6pt' in sys.argv[1:]:\n fontsize = 6\n\n # Set the default font for this and all child windows. The font of the\n # frame's title bar is not affected (which is a good thing). However,\n # setting the default font does not affect the font used in the frame's\n # menu bar or menu items (which is not such a good thing because the\n # menu text size be different than the size used by the application's\n # other widgets). 
The menu font cannot be changed by wxPython.\n self.SetFont(wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL, False,\n fontname))\n\n # If requested, display font and miscellaneous platform information.\n if len(sys.argv) > 1 and '--platform' in sys.argv[1:]:\n print(\"*** Platform =\", wx.PlatformInfo)\n print(\"*** Default font is %s Chosen font is %s\"\n % (default_fontname, self.GetFont().GetFaceName()))\n print(\"*** Default point size = %d Chosen point size = %d\"\n % (default_fontsize, self.GetFont().GetPointSize()))\n display_fontsize(fontname=fontname)", "def set_font_size(*args):\n size = font_size.get()\n message_inp.configure(font=f'TKDefault {size}')", "def fontscheme8(root):\n default_font = ('verdana', 14, 'normal')\n root.option_add('*Font', default_font)", "def set_font(self):\n # broken: ad (Andorra), lk (Sri Lanka), brai (Braille)\n # ?!?: us:chr\n\n self.font = \"Helvetica\"\n\n # Load fonts from ttf-aboriginal-sans package\n\n # us:chr\n if self.variant == \"chr\":\n self.font = \"Aboriginal Sans\"\n\n # Load fonts from:\n # ttf-indic-otf, ttf-khmer, ttf-lohit-fonts, ttf-myanmar3\n # ttf-thaana-fonts, ttf-tlwg\n\n # Font: Akaash\n if self.layout == \"bd\":\n self.font = \"Akaash\"\n\n # Font: Gari\n if self.layout == \"np\" or self.layout == \"in\":\n self.font = \"Gargi\"\n\n # Font: KhmerOS\n if self.layout == \"kh\":\n self.font = \"KhmerOS\"\n\n # Font: Bengali\n if self.variant == \"ben_probhat\" or self.variant == \"ben\":\n self.font = \"Lohit Bengali\"\n\n # Font: Padmaa\n if self.variant == \"guj\": # not all keys\n self.font = \"Padmaa\"\n\n # Font: Punjabi\n if self.variant == \"guru\" or self.variant == \"jhelum\":\n self.font = \"Lohit Punjabi\"\n\n # Font: Kannada\n if self.variant == \"kan\":\n self.font = \"Lohit Kannada\"\n\n # Font: Malayalam\n if self.variant == \"mal\" or self.variant == \"mal_lalitha\":\n self.font = \"Malayalam\"\n\n # Font: Tamil\n if self.variant == \"tam_keyboard_with_numerals\" or self.variant == \"tam\":\n self.font = \"Lohit Tamil\"\n\n # Font: TSCu Times\n lst = [\"tam_TAB\", \"tam_TSCII\", \"tam_unicode\"]\n for i in lst:\n if self.variant == i:\n self.font = \"TSCu_Times\"\n\n # Font: Telugu\n if self.variant == \"tel\":\n self.font = \"Lohit Telugu\"\n\n # Font: Oriya\n lst = [\"af\", \"ara\", \"am\", \"cn\", \"ge\", \"gr\", \"gn\", \"ir\", \"iq\", \"ie\", \"il\", \"la\", \"ma\", \"pk\", \"lk\", \"sy\"]\n for i in lst:\n if self.layout == i:\n self.font = \"Oriya\"\n\n lst = [\"geo\", \"urd-phonetic3\", \"urd-phonetic\", \"urd-winkeys\"]\n for i in lst:\n if self.variant == i:\n self.font = \"Oriya\"\n\n if self.variant == \"ori\":\n self.font = \"Lohit Oriya\"\n\n # Font: Mv Boli\n if self.layout == \"mv\":\n self.font = \"MVBoli\"\n\n # Font: Myanmar\n if self.layout == \"mm\":\n self.font = \"Myanmar3\"\n\n # Font: Tlwg\n if self.layout == \"th\":\n self.font = \"Tlwg Mono\"", "def set_font(self):\n size = 0\n if self.data.get(\"size\"):\n try:\n size = self.data[\"size\"].get()\n except tk.TclError:\n size = 0\n if int(size) > 100: size = 0\n if not size: size = 12\n\n if self.data.get(\"font_family\"):\n family = self.data[\"font_family\"]\n self.font_entry.delete(0,tk.END)\n self.font_entry.insert(0,family)\n else:\n family = \"Arial\"\n\n self.test_entry.config(font=(family, size, \"\"))", "def _adjust_font(self):\n if not self.text_font:\n self.text_font = \"Helv\"\n return\n valid_fonts = (\"Cour\", \"TiRo\", \"Helv\", \"ZaDb\")\n for f in valid_fonts:\n if self.text_font.lower() == f.lower():\n self.text_font = 
f\n return\n self.text_font = \"Helv\"\n return", "def _ensure_default_font_is_valid(self):\n default_font = self._font_face()\n if default_font not in read_fonts():\n sys_font = wx.SystemSettings.GetFont(wx.SYS_ANSI_FIXED_FONT)\n self.settings[PLUGIN_NAME]['font face'] = sys_font.GetFaceName()", "def fontDescender(self):\n font = AppKit.NSFont.fontWithName_size_(self._font, self._fontSize)\n if font is None:\n ff = self._fallbackFont or _FALLBACKFONT\n warnings.warn(\"font: %s is not installed, back to the fallback font: %s\" % (self._font, ff))\n font = AppKit.NSFont.fontWithName_size_(ff, self._fontSize)\n return font.descender()", "def _init_fonts(self) -> None:\n families_set: Set[str]\n families_set = set(tkinter.font.families())\n families_set_lower = set(f.lower() for f in families_set)\n sans_family=(list(filter(lambda f: f in families_set, [\n \"DejaVu Sans\",\n \"Bitstream Vera Sans\",\n \"Verdana\", # Gee, guess where they got the name of that font from\n \"Helvetica\",\n \"\",\n ])))[0]\n mono_family=(list(filter(lambda f: f.lower() in families_set_lower, [\n \"DejaVu Sans Mono\",\n \"Bitstream Vera Sans Mono\",\n \"Lucida Console\",\n \"Courier New\",\n \"Courier\",\n \"\",\n ])))[0]\n\n self.font_size = 12\n\n self.font_default = tkinter.font.nametofont(\"TkDefaultFont\")\n self.font_default.configure(\n family=sans_family,\n size=self.font_size,\n weight=tkinter.font.BOLD,\n )\n\n self.font_entry = tkinter.font.nametofont(\"TkTextFont\")\n self.font_entry.configure(\n family=mono_family,\n size=self.font_size,\n weight=tkinter.font.BOLD,\n )\n\n self.font_fixed = tkinter.font.nametofont(\"TkFixedFont\")\n self.font_fixed.configure(\n family=mono_family,\n size=self.font_size,\n #weight=tkinter.font.BOLD,\n )", "def setTTFont(font='default'):\n if font == 'default':\n font = 'Times New Roman' \n dislin.winfnt(font)", "def FontSetSizeSmall() -> None:\n pass", "def test_configs_font(\n self):\n root = Tk()\n custom = font.Font(root, family='Helvetica', size=12)\n self.assertEqual(custom.cget('family'), 'Helvetica')\n fontSelect.font_style(custom, 'Times')\n self.assertEqual(custom.cget('family'), 'Times')\n fontSelect.font_size(custom, 18)\n self.assertEqual(custom.cget('size'), 18)", "def get_font_size(self):\r\n while self.font_size > 12:\r\n self.date_label.config(font=(\"SFUIText\", self.font_size, \"bold\"))\r\n self.date_label.update()\r\n self.date_label_width = self.date_label.winfo_width()\r\n self.date_label_height = self.date_label.winfo_height()\r\n if self.date_label_width > self.target_width or self.date_label_height > self.target_height:\r\n self.font_size -= 1\r\n else:\r\n #self.logger.debug(f'Target widget width {self.target_width}')\r\n #self.logger.debug(f'Real widget width {int(self.date_label_width)}')\r\n #self.logger.debug(f'Target widget height {self.target_height}')\r\n #self.logger.debug(f'Real widget height {int(self.date_label_height)}')\r\n break" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sets up Client Properties for JFileChooser() to behave as required >> Mac only
def setJFileChooserParameters(_jf, lReportOnly=False, lDefaults=False, lPackagesT=None, lApplicationsT=None, lOptionsButton=None, lNewFolderButton=None): myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()") if not Platform.isOSX(): return if not isinstance(_jf, JFileChooser): return _PKG = "JFileChooser.packageIsTraversable" _APP = "JFileChooser.appBundleIsTraversable" _OPTIONS = "JFileChooser.optionsPanelEnabled" _NEWFOLDER = "JFileChooser.canCreateDirectories" # JFileChooser defaults: https://violetlib.org/vaqua/filechooser.html # "JFileChooser.packageIsTraversable" default False >> set "true" to allow Packages to be traversed # "JFileChooser.appBundleIsTraversable" default False >> set "true" to allow App Bundles to be traversed # "JFileChooser.optionsPanelEnabled" default False >> set "true" to allow Options button # "JFileChooser.canCreateDirectories" default False >> set "true" to allow New Folder button if debug or lReportOnly: myPrint("B", "Parameters set: ReportOnly: %s, Defaults:%s, PackagesT: %s, ApplicationsT:%s, OptionButton:%s, NewFolderButton: %s" %(lReportOnly, lDefaults, lPackagesT, lApplicationsT, lOptionsButton, lNewFolderButton)) txt = ("Before setting" if not lReportOnly else "Reporting only") for setting in [_PKG, _APP, _OPTIONS, _NEWFOLDER]: myPrint("DB", "%s: '%s': '%s'" %(pad(txt,14), pad(setting,50), _jf.getClientProperty(setting))) if lReportOnly: return if lDefaults: _jf.putClientProperty(_PKG, None) _jf.putClientProperty(_APP, None) _jf.putClientProperty(_OPTIONS, None) _jf.putClientProperty(_NEWFOLDER, None) else: if lPackagesT is not None: _jf.putClientProperty(_PKG, lPackagesT) if lApplicationsT is not None: _jf.putClientProperty(_APP, lApplicationsT) if lOptionsButton is not None: _jf.putClientProperty(_OPTIONS, lOptionsButton) if lNewFolderButton is not None: _jf.putClientProperty(_NEWFOLDER, lNewFolderButton) for setting in [_PKG, _APP, _OPTIONS, _NEWFOLDER]: myPrint("DB", "%s: '%s': '%s'" %(pad("After setting",14), pad(setting,50), _jf.getClientProperty(setting))) return
[ "def setFileDialogParameters(lReportOnly=False, lDefaults=False, lSelectDirectories=None, lPackagesT=None):\n\n myPrint(\"D\", \"In \", inspect.currentframe().f_code.co_name, \"()\")\n\n if not Platform.isOSX(): return\n\n _TRUE = \"true\"\n _FALSE = \"false\"\n\n _DIRS_FD = \"apple.awt.fileDialogForDirectories\" # When True you can select a Folder (rather than a file)\n _PKGS_FD = \"apple.awt.use-file-dialog-packages\" # When True allows you to select a 'bundle' as a file; False means navigate inside the bundle\n # \"com.apple.macos.use-file-dialog-packages\" # DEPRECATED since Monterrey - discovered this about MD2022.5(4090) - refer: java.desktop/sun/lwawt/macosx/CFileDialog.java\n\n # FileDialog defaults\n # \"apple.awt.fileDialogForDirectories\" default \"false\" >> set \"true\" to allow Directories to be selected\n # \"apple.awt.use-file-dialog-packages\" default \"true\" >> set \"false\" to allow access to Mac 'packages'\n\n if debug or lReportOnly:\n myPrint(\"B\", \"Parameters set: ReportOnly: %s, Defaults:%s, SelectDirectories:%s, PackagesT:%s\" % (lReportOnly, lDefaults, lSelectDirectories, lPackagesT))\n txt = (\"Before setting\" if not lReportOnly else \"Reporting only\")\n for setting in [_DIRS_FD, _PKGS_FD]: myPrint(\"DB\", \"%s: '%s': '%s'\" %(pad(txt,14), pad(setting,50), System.getProperty(setting)))\n if lReportOnly: return\n\n if lDefaults:\n System.setProperty(_DIRS_FD,_FALSE)\n System.setProperty(_PKGS_FD,_TRUE)\n else:\n if lSelectDirectories is not None: System.setProperty(_DIRS_FD, (_TRUE if lSelectDirectories else _FALSE))\n if lPackagesT is not None: System.setProperty(_PKGS_FD, (_TRUE if lPackagesT else _FALSE))\n\n for setting in [_DIRS_FD, _PKGS_FD]: myPrint(\"DB\", \"After setting: '%s': '%s'\" %(pad(setting,50), System.getProperty(setting)))\n\n return", "def properties(self, event=None):\n \n PropertiesWindow(self.configfilename, self)", "def initPaths(self):\n popUp = Tk()\n popUp.geometry('200x200')\n popUp.title('Configure Setup')\n\n # FIXME: Canceling on fileChooser causes path abnormalities\n interPathButton = Button(\n popUp, text='Set interpreter path',\n command=self.setInterpreterPath)\n srcPathButton = Button(popUp, text='Set source path',\n command=self.setSourceTopLvlDirectory)\n\n interPathButton.pack()\n srcPathButton.pack()\n\n popUp.mainloop()", "def _filepicker(self):\n fileChooser = JFileChooser()\n fileChooser.setCurrentDirectory(File(System.getProperty(\"user.home\")))\n result = fileChooser.showOpenDialog(self.this)\n isApproveOption = result == JFileChooser.APPROVE_OPTION\n if isApproveOption:\n selectedFile = fileChooser.getSelectedFile()\n self._omnibar.setText(selectedFile.getAbsolutePath())\n return isApproveOption", "def initialize(self):\n super(QtFileDialog, self).initialize()\n shell = self.shell_obj\n self.set_mode(shell.mode)\n self.set_multi_select(shell.multi_select)\n self.set_directory(shell.directory)\n self.set_filename(shell.filename)\n self.set_filters(shell.filters)\n self.set_selected_filter(shell.selected_filter)\n self.widget.setViewMode(QFileDialog.Detail)", "def __init__(self, datadir=None, options=None, file=None, parent=None):\n SoftwareProperties.__init__(self, options=options, datadir=datadir)\n gtk.window_set_default_icon_name(\"software-properties\")\n\n SimpleGladeApp.__init__(self, datadir+\"glade/main.glade\",\n None, domain=\"software-properties\")\n\n if parent:\n self.window_main.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)\n self.window_main.show()\n 
self.window_main.set_transient_for(parent)\n\n # If externally called, reparent to external application.\n self.options = options\n if options and options.toplevel != None:\n self.window_main.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)\n self.window_main.show()\n toplevel = gtk.gdk.window_foreign_new(int(options.toplevel))\n if (toplevel):\n \tself.window_main.window.set_transient_for(toplevel)\n if options and options.open_tab:\n self.notebook_main.set_current_page(int(options.open_tab))\n\n # Show what we have early\n self.window_main.show()\n\n # used to store the handlers of callbacks\n self.handlers = []\n\n # Put some life into the user interface:\n self.init_auto_update()\n self.show_auto_update_level()\n # Setup the key list\n self.init_keys()\n self.show_keys()\n # Setup the ISV sources list\n self.init_isv_sources()\n self.show_isv_sources()\n # Setup and show the distro elements\n self.show_distro()\n\n # Show the import/replace sources.list dialog if a file different\n # to the default sources.list was specified \n # NOTE: If the file path points to the default sources.list the user\n # perhaps assumed that s-p would act like a normal editor.\n # We have got some bug reports from users calling\n # \"sudo software-properties-gtk /etc/apt/sources.list\" from the\n # command line.\n if file != None and \\\n os.path.abspath(file) != \"%s%s\" % (apt_pkg.Config.FindDir(\"Dir::Etc\"),\n apt_pkg.Config.Find(\"Dir::Etc::sourcelist\")):\n self.open_file(file)", "def __init__(self):\n super(PrefsDialog, self).__init__(gladefile('prefs.glade'),\n root='config-window')\n self.add_callbacks(PrefsCallbacks())\n\n self.client = gconf.client_get_default()\n\n # setting evtbox title bg\n eventbox = self.get_widget('eventbox-title')\n eventbox.modify_bg(gtk.STATE_NORMAL,\n eventbox.get_colormap().alloc_color(\"#ffffff\"))\n\n # images\n ipath = pixmapfile('guake-notification.png')\n self.get_widget('image_logo').set_from_file(ipath)\n\n # the first position in tree will store the keybinding path in gconf,\n # and the user doesn't worry with this, lest hide that =D\n model = gtk.TreeStore(str, str, object, bool)\n treeview = self.get_widget('treeview-keys')\n treeview.set_model(model)\n treeview.set_rules_hint(True)\n treeview.connect('button-press-event', self.start_editing)\n\n renderer = gtk.CellRendererText()\n column = gtk.TreeViewColumn('keypath', renderer, text=0)\n column.set_visible(False)\n treeview.append_column(column)\n\n renderer = gtk.CellRendererText()\n column = gtk.TreeViewColumn(_('Action'), renderer, text=1)\n column.set_property('expand', True)\n treeview.append_column(column)\n\n renderer = gtk.CellRendererAccel()\n renderer.set_property('editable', True)\n\n renderer.connect('accel-edited', self.on_key_edited, model)\n renderer.connect('accel-cleared', self.on_key_cleared, model)\n\n column = gtk.TreeViewColumn(_('Shortcut'), renderer)\n column.set_cell_data_func(renderer, self.cell_data_func)\n column.set_property('expand', False)\n treeview.append_column(column)\n\n self.populate_shell_combo()\n self.populate_keys_tree()\n self.load_configs()\n self.get_widget('config-window').hide()\n\n # Preview when selecting a bgimage\n self.selection_preview = gtk.Image()\n self.file_filter = gtk.FileFilter()\n self.file_filter.add_pattern(\"*.jpg\")\n self.file_filter.add_pattern(\"*.png\")\n self.file_filter.add_pattern(\"*.svg\")\n self.file_filter.add_pattern(\"*.jpeg\")\n self.bgfilechooser = self.get_widget('background_image')\n 
self.bgfilechooser.set_preview_widget(self.selection_preview)\n self.bgfilechooser.set_filter(self.file_filter)\n self.bgfilechooser.connect('update-preview', self.update_preview,\n self.selection_preview)", "def saveOptions(self):\n\n self.options.windowGeom = StringVar()\n self.options.windowGeom.set(self.master.geometry())\n self.options.saveToDisk(os.path.join(self.optionspath,'uligo.opt'),\n lambda: showwarning(_('Save options'), _('IO Error')))", "def command_browse_callback(self):\n self.command_file.set(tkFileDialog.askopenfilename())\n self.config['RobotSettings']['command_file'] = self.command_file.get()", "def test_options_darwin(self):\n self.assertTrue(\n pynput.mouse.Listener(\n darwin_test=True,\n win32_test=False,\n xorg_test=False)._options['test'])", "def get_config_dialog(self):", "def client_options_window(master):\n top = Toplevel(master)\n top.title(\"Connection options\")\n top.protocol(\"WM_DELETE_WINDOW\", lambda: optionDelete(top))\n top.grab_set()\n Label(top, text=\"Server IP:\").grid(row=0)\n location = Entry(top)\n location.grid(row=0, column=1)\n location.focus_set()\n Label(top, text=\"Port:\").grid(row=1)\n port = Entry(top)\n port.grid(row=1, column=1)\n go = Button(top, text=\"Connect\", command=lambda:\n client_options_go(location.get(), port.get(), top))\n go.grid(row=2, column=1)", "def readInConfigFileDlg( self ):\n pass", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def pkg_app_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._pkg_app_path_var.set(filename)", "def on_files_selected(self, files):\n shell = self.shell_obj\n first_file = files[0] if files else u''\n with guard(self, 'setting_directory'):\n with guard(self, 'setting_filename'):\n shell.directory, shell.filename = os.path.split(first_file)\n shell._paths = files", "def __showPathPickerDialog(self):\n if self.__mode == E5PathPickerModes.NoMode:\n return\n \n if self.__mode == E5PathPickerModes.CustomMode:\n self.pickerButtonClicked.emit()\n return\n \n self.aboutToShowPathPickerDialog.emit()\n \n windowTitle = self.__windowTitle\n if not windowTitle:\n if self.__mode == E5PathPickerModes.OpenFileMode:\n windowTitle = self.tr(\"Choose a file to open\")\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n windowTitle = self.tr(\"Choose files to open\")\n elif self.__mode in [\n E5PathPickerModes.SaveFileMode,\n E5PathPickerModes.SaveFileEnsureExtensionMode,\n E5PathPickerModes.SaveFileOverwriteMode]:\n windowTitle = self.tr(\"Choose a file to save\")\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n windowTitle = self.tr(\"Choose a directory\")\n \n directory = self._editorText()\n if not directory and self.__defaultDirectory:\n directory = self.__defaultDirectory\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n directory = os.path.expanduser(directory.split(\";\")[0])\n else:\n directory = os.path.expanduser(directory)\n if not os.path.isabs(directory) and self.__defaultDirectory:\n directory = os.path.join(self.__defaultDirectory, directory)\n directory = Utilities.fromNativeSeparators(directory)\n \n if self.__mode == E5PathPickerModes.OpenFileMode:\n path = E5FileDialog.getOpenFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n paths = E5FileDialog.getOpenFileNames(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = 
\";\".join([Utilities.toNativeSeparators(path)\n for path in paths])\n elif self.__mode == E5PathPickerModes.SaveFileMode:\n path = E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.SaveFileEnsureExtensionMode:\n path, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(\n self,\n windowTitle,\n directory,\n self.__filters,\n None,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n if path:\n ext = QFileInfo(path).suffix()\n if not ext:\n ex = selectedFilter.split(\"(*\")[1].split(\")\")[0]\n if ex:\n path += ex\n elif self.__mode == E5PathPickerModes.SaveFileOverwriteMode:\n path = E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.ShowDirsOnly))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n elif self.__mode == E5PathPickerModes.DirectoryShowFilesMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.DontUseNativeDialog))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n \n if path:\n self._setEditorText(path)\n self.pathSelected.emit(path)", "def saveInConfigFileDlg( self ):\n pass", "def on_browse(self, event):\r\n dlg = wx.DirDialog(self, \"Choose a directory:\")\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self.local_dir_tc.SetValue(dlg.GetPath())\r\n dlg.Destroy()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sets up System Properties for FileDialog() to behave as required >> Mac only
def setFileDialogParameters(lReportOnly=False, lDefaults=False, lSelectDirectories=None, lPackagesT=None):

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")

    if not Platform.isOSX(): return

    _TRUE = "true"
    _FALSE = "false"

    _DIRS_FD = "apple.awt.fileDialogForDirectories"   # When True you can select a Folder (rather than a file)
    _PKGS_FD = "apple.awt.use-file-dialog-packages"   # When True allows you to select a 'bundle' as a file; False means navigate inside the bundle
    # "com.apple.macos.use-file-dialog-packages"      # DEPRECATED since Monterrey - discovered this about MD2022.5(4090) - refer: java.desktop/sun/lwawt/macosx/CFileDialog.java

    # FileDialog defaults
    # "apple.awt.fileDialogForDirectories" default "false" >> set "true" to allow Directories to be selected
    # "apple.awt.use-file-dialog-packages" default "true" >> set "false" to allow access to Mac 'packages'

    if debug or lReportOnly:
        myPrint("B", "Parameters set: ReportOnly: %s, Defaults:%s, SelectDirectories:%s, PackagesT:%s" % (lReportOnly, lDefaults, lSelectDirectories, lPackagesT))
        txt = ("Before setting" if not lReportOnly else "Reporting only")
        for setting in [_DIRS_FD, _PKGS_FD]: myPrint("DB", "%s: '%s': '%s'" %(pad(txt,14), pad(setting,50), System.getProperty(setting)))
        if lReportOnly: return

    if lDefaults:
        System.setProperty(_DIRS_FD,_FALSE)
        System.setProperty(_PKGS_FD,_TRUE)
    else:
        if lSelectDirectories is not None: System.setProperty(_DIRS_FD, (_TRUE if lSelectDirectories else _FALSE))
        if lPackagesT is not None: System.setProperty(_PKGS_FD, (_TRUE if lPackagesT else _FALSE))

    for setting in [_DIRS_FD, _PKGS_FD]: myPrint("DB", "After setting: '%s': '%s'" %(pad(setting,50), System.getProperty(setting)))

    return
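A hedged usage sketch, assuming the same Jython context (java.lang.System available and an AWT FileDialog constructed elsewhere); the point is the set/restore pattern around showing the dialog.

    setFileDialogParameters(lSelectDirectories=True, lPackagesT=False)   # allow folder selection; keep bundles opaque
    # ... construct and show the java.awt.FileDialog here ...
    setFileDialogParameters(lDefaults=True)                              # restore the Apple defaults afterwards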
[ "def system_settings(open_files_list: Any, config: Config) -> None:\r\n\r\n try:\r\n if open_files_list.selectedItems()[0].text() != 'No DAT files added yet':\r\n config.system_name = dat_details[open_files_list.selectedItems()[0].text()]['system_name']\r\n main_window.ui.labelSystemSettings.setText(f'These settings are only for <b>{config.system_name}</b>.')\r\n else:\r\n return\r\n except:\r\n return\r\n\r\n main_window.ui.listWidgetSystemAvailableLanguages.clear()\r\n main_window.ui.listWidgetSystemSelectedLanguages.clear()\r\n main_window.ui.listWidgetSystemAvailableRegions.clear()\r\n main_window.ui.listWidgetSystemSelectedRegions.clear()\r\n main_window.ui.listWidgetSystemVideoStandards.clear()\r\n\r\n select_checkboxes(system_exclude_checkboxes, False)\r\n select_checkboxes(system_options_checkboxes, False)\r\n\r\n main_window.ui.lineEditSystemOptions1G1RPrefix.clear()\r\n main_window.ui.lineEditSystemOptions1G1RSuffix.clear()\r\n main_window.ui.lineEditSystemOptionsTrace.clear()\r\n\r\n main_window.ui.frameSystemOptions1G1RPrefix.hide()\r\n main_window.ui.frameSystemOptionsTrace.hide()\r\n\r\n # Enable the system settings\r\n main_window.ui.tabWidgetSystemSettings.setEnabled(True)\r\n\r\n # Create the system config file if it's missing\r\n if not pathlib.Path(f'{config.system_settings_path}/{config.system_name}.yaml').is_file():\r\n try:\r\n with open(pathlib.Path(f'{config.system_settings_path}/template.yaml'), 'r', encoding='utf-8') as template_file:\r\n template_str: list[str] = template_file.readlines()\r\n with open(pathlib.Path(f'{config.system_settings_path}/{config.system_name}.yaml'), 'w', encoding='utf-8') as system_config_file:\r\n system_config_file.writelines(template_str)\r\n except OSError as e:\r\n eprint(f'\\n{Font.error_bold}* Error: {Font.end}{str(e)}\\n')\r\n raise\r\n\r\n # Pull the system settings\r\n import_system_settings(\r\n config,\r\n config.system_name,\r\n SYSTEM_LANGUAGE_ORDER_KEY,\r\n SYSTEM_REGION_ORDER_KEY,\r\n SYSTEM_VIDEO_ORDER_KEY,\r\n SYSTEM_LIST_PREFIX_KEY,\r\n SYSTEM_LIST_SUFFIX_KEY,\r\n SYSTEM_OVERRIDE_EXCLUDE_KEY,\r\n SYSTEM_OVERRIDE_INCLUDE_KEY,\r\n SYSTEM_FILTER_KEY,\r\n SYSTEM_EXCLUSIONS_OPTIONS_KEY)\r\n\r\n # Set the system paths UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_user_path_settings:\r\n main_window.ui.checkBoxSystemOverridePaths.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverridePaths.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverridePaths,\r\n [\r\n main_window.ui.buttonChooseSystemOutput,\r\n main_window.ui.labelSelectSystemOutput,\r\n main_window.ui.labelSystemOutputFolder,\r\n main_window.ui.buttonChooseSystemCloneList,\r\n main_window.ui.labelSelectSystemCloneList,\r\n main_window.ui.labelSystemCloneList,\r\n main_window.ui.buttonChooseSystemMetadataFile,\r\n main_window.ui.labelSelectSystemMetadataFile,\r\n main_window.ui.labelSystemMetadataFile,\r\n main_window.ui.buttonClearSystemCloneList,\r\n main_window.ui.buttonClearSystemMetadataFile,\r\n main_window.ui.buttonClearSystemOutput\r\n ]\r\n )\r\n\r\n # Populate the paths\r\n if config.system_output:\r\n main_window.ui.labelSystemOutputFolder.setText(config.system_output)\r\n main_window.system_output_folder = str(config.system_output)\r\n else:\r\n main_window.ui.labelSystemOutputFolder.setText(qtc.QCoreApplication.translate('MainWindow', output_not_found, None)) # type: ignore\r\n main_window.system_output_folder = ''\r\n\r\n if config.system_clone_list:\r\n 
main_window.ui.labelSystemCloneList.setText(config.system_clone_list)\r\n main_window.system_clone_list = str(config.system_clone_list)\r\n else:\r\n main_window.ui.labelSystemCloneList.setText(qtc.QCoreApplication.translate('MainWindow', clone_list_not_found, None)) # type: ignore\r\n main_window.system_clone_list = ''\r\n\r\n if config.system_metadata_file:\r\n main_window.ui.labelSystemMetadataFile.setText(config.system_metadata_file)\r\n main_window.system_metadata_file = str(config.system_metadata_file)\r\n else:\r\n main_window.ui.labelSystemMetadataFile.setText(qtc.QCoreApplication.translate('MainWindow', metadata_file_not_found, None)) # type: ignore\r\n main_window.system_metadata_file = ''\r\n\r\n # Set the system regions UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_region_order_user:\r\n main_window.ui.checkBoxSystemOverrideRegions.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideRegions.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideRegions,\r\n [\r\n main_window.ui.buttonSystemRegionAllLeft,\r\n main_window.ui.buttonSystemRegionAllRight,\r\n main_window.ui.buttonSystemRegionDown,\r\n main_window.ui.buttonSystemRegionLeft,\r\n main_window.ui.buttonSystemRegionRight,\r\n main_window.ui.buttonSystemRegionUp,\r\n main_window.ui.listWidgetSystemAvailableRegions,\r\n main_window.ui.listWidgetSystemSelectedRegions,\r\n main_window.ui.buttonSystemDefaultRegionOrder\r\n ])\r\n\r\n # Populate the system regions\r\n region_order_user: list[str] = [x for x in config.region_order_user if x != 'United Kingdom']\r\n region_order_default: list[str] = [x for x in config.region_order_default if x != 'United Kingdom']\r\n config.system_region_order_user = [x for x in config.system_region_order_user if x != 'United Kingdom']\r\n\r\n if config.system_region_order_user:\r\n main_window.ui.listWidgetSystemSelectedRegions.addItems([str(x) for x in config.system_region_order_user if x != {'override': 'true'} and x != {'override': 'false'}])\r\n main_window.ui.listWidgetSystemAvailableRegions.addItems([x for x in region_order_default if x not in config.system_region_order_user])\r\n else:\r\n main_window.ui.checkBoxSystemOverrideRegions.setChecked(False)\r\n main_window.ui.listWidgetSystemSelectedRegions.addItems([x for x in region_order_user])\r\n main_window.ui.listWidgetSystemAvailableRegions.addItems([x for x in region_order_default if x not in region_order_user])\r\n\r\n # Populate the system languages\r\n system_languages_user: list[str] = []\r\n\r\n # Add languages to the languages lists\r\n if config.system_languages_user_found:\r\n for languages in config.system_language_order_user:\r\n for key, value in config.languages.items():\r\n if languages == value:\r\n system_languages_user.append(key)\r\n\r\n main_window.ui.listWidgetSystemSelectedLanguages.addItems(system_languages_user)\r\n main_window.ui.listWidgetSystemAvailableLanguages.addItems(sorted([x for x in config.languages if x not in system_languages_user]))\r\n else:\r\n main_window.ui.checkBoxSystemOverrideLanguages.setChecked(False)\r\n\r\n main_window.ui.listWidgetSystemAvailableLanguages.addItems(sorted([x for x in config.languages]))\r\n\r\n # Set the system languages UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_language_order_user:\r\n main_window.ui.checkBoxSystemOverrideLanguages.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideLanguages.setChecked(False)\r\n\r\n 
system_enable(\r\n main_window.ui.checkBoxSystemOverrideLanguages,\r\n [\r\n main_window.ui.buttonSystemLanguageAllLeft,\r\n main_window.ui.buttonSystemLanguageAllRight,\r\n main_window.ui.buttonSystemLanguageDown,\r\n main_window.ui.buttonSystemLanguageLeft,\r\n main_window.ui.buttonSystemLanguageRight,\r\n main_window.ui.buttonSystemLanguageUp,\r\n main_window.ui.listWidgetSystemAvailableLanguages,\r\n main_window.ui.listWidgetSystemSelectedLanguages,\r\n ])\r\n\r\n # Set the system video standards UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_video_order_user:\r\n main_window.ui.checkBoxSystemOverrideVideo.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideVideo.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideVideo,\r\n [\r\n main_window.ui.buttonSystemVideoStandardDown,\r\n main_window.ui.buttonSystemVideoStandardUp,\r\n main_window.ui.listWidgetSystemVideoStandards\r\n ])\r\n\r\n # Populate the system video standards\r\n if config.system_video_order_user:\r\n main_window.ui.listWidgetSystemVideoStandards.addItems([str(x) for x in config.system_video_order_user if x != {'override': 'true'} and x != {'override': 'false'}])\r\n else:\r\n main_window.ui.listWidgetSystemVideoStandards.setEnabled(False)\r\n main_window.ui.listWidgetSystemVideoStandards.addItems([x for x in config.video_order_default])\r\n\r\n # Set the system exclusions and options UI enabled/disabled depending on override state\r\n if {'override exclusions': 'true'} in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOverrideExclusions.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideExclusions.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideExclusions,\r\n system_exclude_checkboxes\r\n + [\r\n main_window.ui.buttonSystemDeselectAllExclude,\r\n main_window.ui.buttonSystemSelectAllExclude\r\n ]\r\n )\r\n\r\n if {'override options': 'true'} in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOverrideOptions.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideOptions.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideOptions,\r\n [main_window.ui.scrollAreaSystemOptions]\r\n )\r\n\r\n # Populate exclusions and options\r\n if config.system_exclusions_options:\r\n if 'r' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsPreferRegions.setChecked(True)\r\n if 'e' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsIncludeHashless.setChecked(True)\r\n if 'z' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsModernPlatforms.setChecked(True)\r\n if 'y' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsDemoteUnlicensed.setChecked(True)\r\n if 'nooverrides' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsDisableOverrides.setChecked(True)\r\n if 'removesdat' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsRemovesDat.setChecked(True)\r\n if 'log' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsKeepRemove.setChecked(True)\r\n if 'originalheader' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsOriginalHeader.setChecked(True)\r\n if 'warnings' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsReportWarnings.setChecked(True)\r\n if 'warningpause' in config.system_exclusions_options: 
main_window.ui.checkBoxSystemOptionsPauseWarnings.setChecked(True)\r\n if 'nodtd' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsBypassDTD.setChecked(True)\r\n if 'singlecpu' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsDisableMultiCPU.setChecked(True)\r\n # Show the associated lineEdit later, as it takes a while for the checkbox to be enabled\r\n if 'listnames' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptions1G1RNames.setChecked(True)\r\n if 'd' in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOptionsDisable1G1R.setChecked(True)\r\n main_window.ui.checkBoxSystemOptionsLegacy.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsLegacy.setEnabled(False)\r\n if 'legacy' in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOptionsLegacy.setChecked(True)\r\n main_window.ui.checkBoxSystemOptionsSplitRegions.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsSplitRegions.setEnabled(False)\r\n main_window.ui.checkBoxSystemOptionsDisable1G1R.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsDisable1G1R.setEnabled(False)\r\n if 'regionsplit' in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOptionsSplitRegions.setChecked(True)\r\n main_window.ui.checkBoxSystemOptionsLegacy.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsLegacy.setEnabled(False)\r\n\r\n system_excludes = [x for x in config.system_exclusions_options if 'exclude' in x and x != {'exclude': ''}]\r\n\r\n if system_excludes:\r\n system_exclude = system_excludes[0]['exclude']\r\n if 'a' in system_exclude: main_window.ui.checkBoxSystemExcludeApplications.setChecked(True)\r\n if 'A' in system_exclude: main_window.ui.checkBoxSystemExcludeAudio.setChecked(True)\r\n if 'b' in system_exclude: main_window.ui.checkBoxSystemExcludeBadDumps.setChecked(True)\r\n if 'B' in system_exclude: main_window.ui.checkBoxSystemExcludeBIOS.setChecked(True)\r\n if 'c' in system_exclude: main_window.ui.checkBoxSystemExcludeCoverdiscs.setChecked(True)\r\n if 'D' in system_exclude: main_window.ui.checkBoxSystemExcludeAddOns.setChecked(True)\r\n if 'd' in system_exclude: main_window.ui.checkBoxSystemExcludeDemos.setChecked(True)\r\n if 'e' in system_exclude: main_window.ui.checkBoxSystemExcludeEducational.setChecked(True)\r\n if 'g' in system_exclude: main_window.ui.checkBoxSystemExcludeGames.setChecked(True)\r\n if 'k' in system_exclude: main_window.ui.checkBoxSystemExcludeMIA.setChecked(True)\r\n if 'm' in system_exclude: main_window.ui.checkBoxSystemExcludeManuals.setChecked(True)\r\n if 'M' in system_exclude: main_window.ui.checkBoxSystemExcludeMultimedia.setChecked(True)\r\n if 'o' in system_exclude: main_window.ui.checkBoxSystemExcludeBonusDiscs.setChecked(True)\r\n if 'p' in system_exclude: main_window.ui.checkBoxSystemExcludePirate.setChecked(True)\r\n if 'P' in system_exclude: main_window.ui.checkBoxSystemExcludePreproduction.setChecked(True)\r\n if 'r' in system_exclude: main_window.ui.checkBoxSystemExcludePromotional.setChecked(True)\r\n if 'u' in system_exclude: main_window.ui.checkBoxSystemExcludeUnlicensed.setChecked(True)\r\n if 'v' in system_exclude: main_window.ui.checkBoxSystemExcludeVideo.setChecked(True)\r\n\r\n if config.system_user_prefix: main_window.ui.lineEditSystemOptions1G1RPrefix.setText(config.system_user_prefix)\r\n if config.system_user_suffix: main_window.ui.lineEditSystemOptions1G1RSuffix.setText(config.system_user_suffix)\r\n\r\n system_trace = [x for x in 
config.system_exclusions_options if 'trace' in x]\r\n\r\n if system_trace:\r\n # Show the associated lineEdit later, as it takes a while for the checkbox to be enabled\r\n system_trace_str = system_trace[0]['trace']\r\n main_window.ui.checkBoxSystemOptionsTrace.setChecked(True)\r\n main_window.ui.lineEditSystemOptionsTrace.setText(system_trace_str)\r\n\r\n if config.system_exclude:\r\n main_window.ui.textEditSystemExclude.setText('\\n'.join(config.system_exclude))\r\n else:\r\n main_window.ui.textEditSystemExclude.clear()\r\n\r\n if config.system_include:\r\n main_window.ui.textEditSystemInclude.setText('\\n'.join(config.system_include))\r\n else:\r\n main_window.ui.textEditSystemInclude.clear()\r\n\r\n if config.system_filter:\r\n main_window.ui.textEditSystemFilterInclude.setText('\\n'.join([str(x) for x in config.system_filter if x != {'override': 'true'} and x != {'override': 'false'}]))\r\n else:\r\n main_window.ui.textEditSystemFilterInclude.clear()\r\n\r\n # Show lineEdits for certain options if checked\r\n show_hide(main_window.ui.checkBoxSystemOptions1G1RNames, main_window.ui.frameSystemOptions1G1RPrefix)\r\n show_hide(main_window.ui.checkBoxSystemOptionsTrace, main_window.ui.frameSystemOptionsTrace)\r\n\r\n # Set the post filters UI enabled/disabled depending on override state\r\n if config.system_filter:\r\n if {'override': 'true'} in config.system_filter:\r\n main_window.ui.checkBoxSystemOverridePostFilter.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverridePostFilter.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverridePostFilter,\r\n [\r\n main_window.ui.textEditSystemFilterInclude\r\n ])\r\n\r\n # Populate the post filters\r\n if config.system_filter:\r\n main_window.ui.textEditSystemFilterInclude.setText('\\n'.join([str(x) for x in config.system_filter if x != {'override': 'true'} and x != {'override': 'false'}]))\r\n else:\r\n main_window.ui.textEditSystemFilterInclude.clear()", "def pkg_app_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._pkg_app_path_var.set(filename)", "def os_set(self):\n if self.mod:\n path_startup = fr\"C:\\Users\\{environ['USER']}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\"\n copyfile(self.path_file, path_startup)", "def initialize(self):\n super(QtFileDialog, self).initialize()\n shell = self.shell_obj\n self.set_mode(shell.mode)\n self.set_multi_select(shell.multi_select)\n self.set_directory(shell.directory)\n self.set_filename(shell.filename)\n self.set_filters(shell.filters)\n self.set_selected_filter(shell.selected_filter)\n self.widget.setViewMode(QFileDialog.Detail)", "def persist_macos(self) -> None:", "def test_options_darwin(self):\n self.assertTrue(\n pynput.mouse.Listener(\n darwin_test=True,\n win32_test=False,\n xorg_test=False)._options['test'])", "def __init__(self, datadir=None, options=None, file=None, parent=None):\n SoftwareProperties.__init__(self, options=options, datadir=datadir)\n gtk.window_set_default_icon_name(\"software-properties\")\n\n SimpleGladeApp.__init__(self, datadir+\"glade/main.glade\",\n None, domain=\"software-properties\")\n\n if parent:\n self.window_main.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)\n self.window_main.show()\n self.window_main.set_transient_for(parent)\n\n # If externally called, reparent to external application.\n self.options = options\n if options and options.toplevel != None:\n self.window_main.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)\n self.window_main.show()\n toplevel = 
gtk.gdk.window_foreign_new(int(options.toplevel))\n if (toplevel):\n \tself.window_main.window.set_transient_for(toplevel)\n if options and options.open_tab:\n self.notebook_main.set_current_page(int(options.open_tab))\n\n # Show what we have early\n self.window_main.show()\n\n # used to store the handlers of callbacks\n self.handlers = []\n\n # Put some life into the user interface:\n self.init_auto_update()\n self.show_auto_update_level()\n # Setup the key list\n self.init_keys()\n self.show_keys()\n # Setup the ISV sources list\n self.init_isv_sources()\n self.show_isv_sources()\n # Setup and show the distro elements\n self.show_distro()\n\n # Show the import/replace sources.list dialog if a file different\n # to the default sources.list was specified \n # NOTE: If the file path points to the default sources.list the user\n # perhaps assumed that s-p would act like a normal editor.\n # We have got some bug reports from users calling\n # \"sudo software-properties-gtk /etc/apt/sources.list\" from the\n # command line.\n if file != None and \\\n os.path.abspath(file) != \"%s%s\" % (apt_pkg.Config.FindDir(\"Dir::Etc\"),\n apt_pkg.Config.Find(\"Dir::Etc::sourcelist\")):\n self.open_file(file)", "def user_safety_config():\n\n\tprint_section_header(\"USER SAFETY\", Fore.BLUE)\n\n\tif prompt_yes_no(top_line=\"-> Lock Mac as soon as screen saver starts?\",\n\t bottom_line=\"If your screen is black or on screensaver mode, you'll be prompted for a password to login every time.\"):\n\t\tprint_confirmation(\"Configuring account lock on screensaver...\")\n\t\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\t\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\n\tif prompt_yes_no(top_line=\"-> Display all file extensions?\",\n\t bottom_line=\"This prevents malware from disguising itself as another file type.\"):\n\t\tprint_confirmation(\"Configuring display of all file extensions...\")\n\t\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\n\tif prompt_yes_no(top_line=\"-> Disable saving to the cloud by default?\",\n\t bottom_line=\"This prevents sensitive documents from being unintentionally stored on the cloud.\"):\n\t\tprint_confirmation(\"Disabling cloud saving by default...\")\n\t\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\n\tif prompt_yes_no(top_line=\"-> Show hidden files in Finder?\",\n\t bottom_line=\"This lets you see all files on the system without having to use the terminal.\"):\n\t\tprint_confirmation(\"Displaying hidden files in Finder...\")\n\t\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\n\t# Reset finder after messing with it.\n\tprint_confirmation(\"Resetting Finder to finalize changes...\")\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def on_customize_inpudat( self, event ):\n if platform.system() == 'Windows':\n test=os.system(\"start \"+self.project['filepaths']['marxan_input'])\n if test==1:\n marxanconpy.warn_dialog(\n \"Your computer does not have a default editor for the select file. 
In Windows File Explorer, double click on a the selected file, You will be asked to set the default program (notepad, notepad++, etc). After that MC will be able to open the file in the default editor\")\n elif platform.system() == \"Darwin\":\n os.system(\"open -t \" + self.project['filepaths']['marxan_input'])", "def _filepicker(self):\n fileChooser = JFileChooser()\n fileChooser.setCurrentDirectory(File(System.getProperty(\"user.home\")))\n result = fileChooser.showOpenDialog(self.this)\n isApproveOption = result == JFileChooser.APPROVE_OPTION\n if isApproveOption:\n selectedFile = fileChooser.getSelectedFile()\n self._omnibar.setText(selectedFile.getAbsolutePath())\n return isApproveOption", "def set_spec_file(self):\n self.specfile = select_file(os.getcwd())\n if self.specfile is not None:\n self.spec_file_button.setStyleSheet(\"Text-align:left\")\n self.spec_file_button.setText(self.specfile)\n else:\n self.specfile = None\n self.spec_file_button.setText('')\n if self.is_exp_exists() or self.is_exp_set():\n self.set_experiment()", "def properties(self, event=None):\n \n PropertiesWindow(self.configfilename, self)", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def load_debug_darwin_settings(conf):\n\tv = conf.env\n\tconf.load_darwin_common_settings()", "def system_properties(self):\r\n return dict(self._get_system_properties(self.java))", "def initPaths(self):\n popUp = Tk()\n popUp.geometry('200x200')\n popUp.title('Configure Setup')\n\n # FIXME: Canceling on fileChooser causes path abnormalities\n interPathButton = Button(\n popUp, text='Set interpreter path',\n command=self.setInterpreterPath)\n srcPathButton = Button(popUp, text='Set source path',\n command=self.setSourceTopLvlDirectory)\n\n interPathButton.pack()\n srcPathButton.pack()\n\n popUp.mainloop()", "def readInConfigFileDlg( self ):\n pass", "def saveInConfigFileDlg( self ):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This triggers MD to firePreferencesUpdated().... Hopefully refreshing Home Screen Views too
def fireMDPreferencesUpdated():
    myPrint("DB", "In ", inspect.currentframe().f_code.co_name, "()" )

    class FPSRunnable(Runnable):
        def __init__(self): pass

        def run(self):
            myPrint("DB",".. Inside FPSRunnable() - calling firePreferencesUpdated()...")
            myPrint("B","Triggering an update to the Summary/Home Page View")
            MD_REF.getPreferences().firePreferencesUpdated()

    if not SwingUtilities.isEventDispatchThread():
        myPrint("DB",".. Not running within the EDT so calling via FPSRunnable()...")
        SwingUtilities.invokeLater(FPSRunnable())
    else:
        myPrint("DB",".. Already running within the EDT so calling FPSRunnable() naked...")
        FPSRunnable().run()
    return
[ "def on_show_prefs(self):\n self.init_tv_trackers()\n client.updatorr.get_config().addCallback(self.config_to_ui)", "def on_apply_prefs(self):\n trackers_settings = {}\n\n for row in self.trackers_data_model:\n trackers_settings[row[0]] = {'login': row[2], 'password': row[3]}\n\n config = {\n 'walk_period': self.glade.get_widget('walk_period').get_text(),\n 'trackers_settings': trackers_settings\n }\n client.updatorr.set_config(config)", "def refresh_window(self):", "def preferencesChanged(self):\n self.__logViewer.preferencesChanged()", "def onPreferences(self, event):\n sdlg = GlobalSettingsDialog(None)\n sdlg.setSettings(self.globalSession.getSettings())\n sdlg.ShowModal()\n self.globalSession.setSettings(sdlg.getSettings())", "def on_settings_updated(self):\n\t\t\n\t\tpersist.debug(\"Settings Updated\", persist.settings.settings)\n\n\t\tself.reset()\n\t\tself.initOpen()", "def onReload(self):\n return", "def refresh(self):\n self.screen.refresh()\n self.title_bar.refresh()\n self.main_output.refresh()\n self.main_input.refresh()", "def onUpdated(self):", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def refresh(self) -> None:\n self.ids.retract_button.disabled = False\n self.ids.save_button.disabled = False\n self.profile_name = controller.active_profile\n logger.info(controller.values)\n if controller.values['PORT'] == controller.values['STARBOARD']:\n self.ids.invert_button.disabled = True\n else:\n self.ids.invert_button.disabled = False", "def refresh(self):\n pass", "def on_settings(self):\n\n # Pull the current app state from the relay Observer object\n status, interval, ntfc_status, ntfc_state = settings_state.get_state()\n\n # Pass it to the Observable object in order to render the Settings window\n settings_changed, update_interval, ntfc_changed, ntfc_selected = render_settings_window(\n status, interval, ntfc_status, ntfc_state, settings_state)\n\n # Register any state changes\n settings_state.update_state(settings_changed, update_interval, ntfc_changed, ntfc_selected)\n\n # If the interval has changed, reprogram scheduler to run at the new interval\n if settings_state.intrvl_change_trig:\n modify_scheduler(JOB_ID, settings_state.settings_interval)\n\n if settings_state.notification_change_trig:\n NewsIndicator.notifications = False if not settings_state.notification_state else True", "def preferencesChanged(self):\n # reload the APIs\n self.apisManager.reloadAPIs()\n \n # reload editor settings\n for editor in self.editors:\n zoom = editor.getZoom()\n editor.readSettings()\n editor.zoomTo(zoom)\n \n # reload the autosave timer setting\n self.autosaveInterval = Preferences.getEditor(\"AutosaveInterval\")\n if len(self.editors):\n if (\n self.autosaveTimer.isActive() and\n self.autosaveInterval == 0\n ):\n self.autosaveTimer.stop()\n elif (\n not self.autosaveTimer.isActive() and\n self.autosaveInterval > 0\n ):\n self.autosaveTimer.start(self.autosaveInterval * 60000)\n \n self.__enableSpellingActions()", "def ui_update_game_view_data(self):\n\n if self.running_game.is_set():\n self.log(\"UI asks 'update data'.\")\n self.ask_interface(\"update_tables\", self.get_current_data())\n self.ask_interface(\"update_figures\", self.get_current_data())", "def _full_screen_switch_hook(self):\n pass", "def showPrefs ():\n log (\"showPrefs\")\n panel = RvPreferencesPanel()\n panel.setMinimumSize (450, 100)\n if not panel.showModalDialog():\n return\n\n panel.rvPrefs.saveToDisk()\n\n # XXX panel.rvPrefs.saveToRoot()\n global rvmon\n if (rvmon) :\n 
rvmon.updateFromPrefs()", "def _refresh(self):\n # if we have all the values we need to hookup to the URL\n for key in self.DBMSettings.keys():\n if not key.startswith(LOCALCHAR):\n self.DBMSettings[key] = self._urldict()[key]", "def update_ui(self):\n # main data\n self.lAcc.setText(self.settings.ACCOUNT)\n # self.lExcessLiquidity.setText(str(self.ibkrworker.app.excessLiquidity))\n # self.lSma.setText(str(self.ibkrworker.app.sMa))\n if hasattr(self.ibkrworker.app, 'smaWithSafety'):\n self.lSma.setText(str(round(self.ibkrworker.app.smaWithSafety, 1)))\n else:\n self.lSma.setText(str(round(self.ibkrworker.app.sMa, 1)))\n self.lMarketValue.setText(str(self.ibkrworker.app.netLiquidation))\n self.lblAvailTrades.setText(str(self.ibkrworker.app.tradesRemaining))\n self.lcdPNL.display(self.ibkrworker.app.dailyPnl)\n if self.ibkrworker.app.dailyPnl > 0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(51, 153, 51))\n self.lcdPNL.setPalette(palette)\n elif self.ibkrworker.app.dailyPnl < 0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(255, 0, 0))\n self.lcdPNL.setPalette(palette)\n\n total_positions_value = 0\n for p in self.ibkrworker.app.openPositions.values():\n if hasattr(p, 'Value'):\n total_positions_value += p[\"Value\"]\n self.lPositionsTotalValue.setText(str(round(total_positions_value, 1)))\n\n self.update_open_positions()\n self.update_live_candidates()\n self.update_open_orders()\n\n # everything disabled for safety - is now enabled\n self.chbxProcess.setEnabled(True)\n self.btnSettings.setEnabled(True)\n\n self.update_session_state()\n\n if not self.uiTimer.isActive():\n self.update_console(\"UI resumed.\")\n self.uiTimer.start(int(self.settings.INTERVALUI) * 1000) # reset the ui timer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will detect and then run the codeblock on the EDT
def genericSwingEDTRunner(ifOffEDTThenRunNowAndWait, ifOnEDTThenRunNowAndWait, codeblock, *args):
    isOnEDT = SwingUtilities.isEventDispatchThread()
    # myPrint("DB", "** In .genericSwingEDTRunner(), ifOffEDTThenRunNowAndWait: '%s', ifOnEDTThenRunNowAndWait: '%s', codeblock: '%s', args: '%s'" %(ifOffEDTThenRunNowAndWait, ifOnEDTThenRunNowAndWait, codeblock, args))
    myPrint("DB", "** In .genericSwingEDTRunner(), ifOffEDTThenRunNowAndWait: '%s', ifOnEDTThenRunNowAndWait: '%s', codeblock: <codeblock>, args: <args>" %(ifOffEDTThenRunNowAndWait, ifOnEDTThenRunNowAndWait))
    myPrint("DB", "** In .genericSwingEDTRunner(), isOnEDT:", isOnEDT)

    class GenericSwingEDTRunner(Runnable):

        def __init__(self, _codeblock, arguments):
            self.codeBlock = _codeblock
            self.params = arguments

        def run(self):
            myPrint("DB", "** In .genericSwingEDTRunner():: GenericSwingEDTRunner().run()... about to execute codeblock.... isOnEDT:", SwingUtilities.isEventDispatchThread())
            self.codeBlock(*self.params)
            myPrint("DB", "** In .genericSwingEDTRunner():: GenericSwingEDTRunner().run()... finished executing codeblock....")

    _gser = GenericSwingEDTRunner(codeblock, args)

    if ((isOnEDT and not ifOnEDTThenRunNowAndWait) or (not isOnEDT and not ifOffEDTThenRunNowAndWait)):
        myPrint("DB", "... calling codeblock via .invokeLater()...")
        SwingUtilities.invokeLater(_gser)
    elif not isOnEDT:
        myPrint("DB", "... calling codeblock via .invokeAndWait()...")
        SwingUtilities.invokeAndWait(_gser)
    else:
        myPrint("DB", "... calling codeblock.run() naked...")
        _gser.run()

    myPrint("DB", "... finished calling the codeblock via method reported above...")
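An illustrative call pattern, assuming a hypothetical Swing JLabel named myLabel from the surrounding script; the two calls show the fire-and-forget path and the synchronous path.

    def _setLabelText(label, txt):
        label.setText(txt)

    # fire-and-forget: always queued onto the EDT via invokeLater()
    genericSwingEDTRunner(False, False, _setLabelText, myLabel, "updated")

    # run now and wait: invokeAndWait() when off the EDT, a direct .run() when already on it
    genericSwingEDTRunner(True, True, _setLabelText, myLabel, "updated")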
[ "def takeControl(self):\n mainloop()", "def swingRun(self, isCancelled: bool) -> None:\n ...", "def run(self) -> None:\n self.mainloop()", "def mainloop(self):\n self.window.mainloop()", "def run():\n gui = GUI()\n gui.mainloop()", "def main_loop(self):\n\n self.window.mainloop()", "def mainloop(self):\n self.root.mainloop()", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def mainloop(self):\n self.master.mainloop()", "def start_event_loop(self):", "def run(self):\n if self.okay:\n ExtLoopWin32.run()", "def run_main_loop():\n mainloop = GObject.MainLoop()", "def run(self):\n self.cmdloop()", "def run(self):\n\t\tlogger.info(\"GUI thread started\")\n\t\twhile not self._stop.isSet():\n\t\t\ttry:\n\t\t\t\tfunction, args, kwargs = self._queueIn.get(0.01) #NOTE this may be a bad thing\n\t\t\t\tfunction(*args, **kwargs)\n\t\t\texcept Queue.Empty:\n\t\t\t\tpass", "def showEvent(self, e):\n # start simulation when showing window\n self.start_timer()", "def _run_delayed_gui_load_code(self):\n #Stop the timer.\n self._delayed_gui_timer.stop()\n print(f'_run_delayed_gui_load_code() called!')\n # Try to select the first combo item after they've loaded\n self.ui.contextSelectorWidget._trySelectFirstComboItem()", "def startEventHandling():\n if not _nativeThreading:\n if _graphicsManager._handlingEvents == 'No':\n _graphicsManager._handlingEvents = 'Yes'\n _graphicsManager.mainLoop()", "def run(self):\r\n glutMainLoop()", "def on_run_clicked(self):\n self.start_threading()\n self.stepping = False\n self.step_event.set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement your canvas drawing logic here, returning False will stop the rendering, returning True will continue it
def draw(self, canvas) -> bool: return False
[ "def on_draw(self, widget, cr):\n #print \"starting to draw\"\n if self.double_buffer is not None:\n self.draw_tiles()\n cr.set_source_surface(self.double_buffer, 0.0, 0.0)\n cr.paint()\n else:\n print('Invalid double buffer')\n #print \"done drawing\"\n return False", "def draw_animation(self, canvas, animation_tick) -> bool:\n return False", "def paint(self, canvas):\n raise Exception(\"Not implemented\")", "def _prepare_draw(self, view=None):\n\n if self._changed['pos']:\n self.pos_buf.set_data(self._pos)\n self._changed['pos'] = False\n\n if self._changed['color']:\n self.color_buf.set_data(self._color)\n self._program.vert['color'] = self.color_buf\n self._changed['color'] = False\n\n return True", "def do(self, canvas):", "def _draw(self, canvas, options):\n pass # must override in subclass", "def draw (self):\n screen = self.screen\n dirty = False\n for display in self.displays:\n dirty |= display.draw(screen)\n return dirty", "def _draw(self):\r\n if self.changed or self.alwaysDirty:\r\n self.on_draw()\r\n self.changed = False\r\n return", "def draw(self):\r\n if not self.stopped:\r\n super().draw()\r\n self.next_frame()", "def save_drawing_if_necessary(self):\n\n app_doc_data = AppDocData.instance()\n if app_doc_data.activeDrawing and app_doc_data.activeDrawing.modified:\n #if QMessageBox.Yes == QMessageBox.question(self, self.tr(\"Question\"),\n # self.tr(\"Do you want to save drawing?\"),\n # QMessageBox.Yes | QMessageBox.No):\n # self.actionSaveCliked()\n # return True\n if QMessageBox.Ignore == QMessageBox.question(self, self.tr('Continue?'),\n self.tr('Changes may not have been saved.'),\n QMessageBox.Ignore | QMessageBox.Cancel):\n return False\n return True", "def isOnCanvas(self, x, y):\n return 0 <= x < self.width and 0 <= y < self.height", "def update(self):\n actualCanvas = self._canvas\n \n self._canvas = self._buffer\n canvas = self._buffer\n \n canvas.fill_style = self._backgroundColor\n canvas.fill_rect(0, 0, self._width, self._height)\n \n shouldDrawCells = False\n for element in self._visible:\n if element == constants.VISIBLE_CELL:\n shouldDrawCells = True\n continue\n self.drawEnvironment(element)\n \n \n if shouldDrawCells:\n self.drawCells()\n \n super().update()\n \n actualCanvas.draw_image(self._canvas)\n \n self._canvas = actualCanvas", "def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)", "def testdraw():", "def shouldDrawVideoFrame(self):\r\n return self.getTimeToNextFrameDraw() <= 0.0", "def draw(self):\n if self.draw_no_delay:\n self.fig.canvas.draw()\n else:\n self.fig.canvas.draw_idle()", "def handle_draw( self, brush ):\n raise NotImplementedError(\n \"classes that subclass Canvas must implement a handle_draw method\" )", "def draw(self):\n # IMPLEMENT ME\n if self._mssg is not None:\n self._mssg.draw(self.view)\n if not self._game is None: \n self._game.draw(self.view)\n if self._state==STATE_ACTIVE: \n self._game.drawBall(self.view)\n if self._mssg2 is not None:\n self._mssg2.draw(self.view)\n if self._mssg3 is not None:\n self._mssg3.draw(self.view)", "def draw(self):\n # IMPLEMENT ME\n \"\"\"\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\n if 
self.getState() == STATE_INACTIVE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)\n if self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED or self.getState() == STATE_ACTIVE or self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\"\"\"\n if not self.getText() is None:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement your canvas animation drawing logic here, returning False will stop the rendering, returning True will continue it
def draw_animation(self, canvas, animation_tick) -> bool: return False
[ "def draw(self, canvas) -> bool:\n return False", "def draw(self):\r\n if not self.stopped:\r\n super().draw()\r\n self.next_frame()", "def on_draw(self, widget, cr):\n #print \"starting to draw\"\n if self.double_buffer is not None:\n self.draw_tiles()\n cr.set_source_surface(self.double_buffer, 0.0, 0.0)\n cr.paint()\n else:\n print('Invalid double buffer')\n #print \"done drawing\"\n return False", "def draw(self):\n if self.draw_no_delay:\n self.fig.canvas.draw()\n else:\n self.fig.canvas.draw_idle()", "def do(self, canvas):", "def _prepare_draw(self, view=None):\n\n if self._changed['pos']:\n self.pos_buf.set_data(self._pos)\n self._changed['pos'] = False\n\n if self._changed['color']:\n self.color_buf.set_data(self._color)\n self._program.vert['color'] = self.color_buf\n self._changed['color'] = False\n\n return True", "def animation_action(self):\r\n if self.animation:\r\n self.check_termination()\r\n if self.endOfSearch:\r\n return\r\n self.canvas.after(self.delay, self.animation_action)", "def paint(self, canvas):\n raise Exception(\"Not implemented\")", "def shouldDrawVideoFrame(self):\r\n return self.getTimeToNextFrameDraw() <= 0.0", "def redraw(self):\r\n if self.__drawing_queued == False: #if we are moving, then there is a timeout somewhere already\r\n self.__drawing_queued = True\r\n self._last_frame_time = dt.datetime.now()\r\n gobject.timeout_add(1000 / self.framerate, self.__redraw_loop)", "def EndDraw(self):\r\n\r\n pass", "def _draw(self):\r\n if self.changed or self.alwaysDirty:\r\n self.on_draw()\r\n self.changed = False\r\n return", "def _update_anim(self):\n if self._skip_frames > 1:\n # Do not render while _skip_frames is > 1\n self._skip_frames -= 1\n else:\n # Render frame\n self._visualization.taskMgr.step()\n # Calculate number of frames that need to be skipped\n self._skip_frames = int(1 / self._fps / self._dt)", "def update(self):\r\n if self.state != PYGWIDGETS_ANIMATION_PLAYING:\r\n return False\r\n returnValue = False # typical return value\r\n\r\n # The job here is to figure out the index of the image to show\r\n # and the matching elapsed time threshold for the current image\r\n self.elapsed = (time.time() - self.animationPlayingStartTime)\r\n\r\n if self.elapsed > self.elapsedStopTime: # anim finished\r\n if self.loop: # restart the animation\r\n self.animationPlayingStartTime = time.time()\r\n self.nextElapsedThreshold = self.endTimesList[0]\r\n self.index = 0\r\n else: # not looping\r\n self.nIterationsLeft = self.nIterationsLeft - 1\r\n if self.nIterationsLeft == 0: # done\r\n self.state = PYGWIDGETS_ANIMATION_STOPPED\r\n if self.callBack is not None: # if there is a callBack\r\n self.callBack(self.nickname) # do it\r\n returnValue = True # animation has ended\r\n\r\n else: # another iteration - start over again\r\n self.animationPlayingStartTime = time.time()\r\n self.nextElapsedThreshold = self.endTimesList[0]\r\n if self.showFirstImageAtEnd:\r\n self.index = 0 # show first image\r\n else:\r\n self.index = len(self.imagesList) - 1 # show last image\r\n\r\n elif self.elapsed > self.nextElapsedThreshold:\r\n # Time to move on to next picture\r\n self.index = self.index + 1\r\n self.nextElapsedThreshold = self.endTimesList[self.index]\r\n\r\n return returnValue", "def drawStar(duration):\n # START CODE HERE #\n\n\n pass\n # END CODE HERE # (remove the pass statement)", "def update(self):\n actualCanvas = self._canvas\n \n self._canvas = self._buffer\n canvas = self._buffer\n \n canvas.fill_style = self._backgroundColor\n canvas.fill_rect(0, 0, 
self._width, self._height)\n \n shouldDrawCells = False\n for element in self._visible:\n if element == constants.VISIBLE_CELL:\n shouldDrawCells = True\n continue\n self.drawEnvironment(element)\n \n \n if shouldDrawCells:\n self.drawCells()\n \n super().update()\n \n actualCanvas.draw_image(self._canvas)\n \n self._canvas = actualCanvas", "def draw(self):\n # IMPLEMENT ME\n if self._mssg is not None:\n self._mssg.draw(self.view)\n if not self._game is None: \n self._game.draw(self.view)\n if self._state==STATE_ACTIVE: \n self._game.drawBall(self.view)\n if self._mssg2 is not None:\n self._mssg2.draw(self.view)\n if self._mssg3 is not None:\n self._mssg3.draw(self.view)", "def _draw(self, canvas, options):\n pass # must override in subclass", "def draw(self):\n # IMPLEMENT ME\n \"\"\"\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\n if self.getState() == STATE_INACTIVE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)\n if self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED or self.getState() == STATE_ACTIVE or self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\"\"\"\n if not self.getText() is None:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Respond to theme load ins here
def load_theme_values(self): pass
[ "def manageTheme():", "def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()", "def setup(self, theme: Theme):", "def onStartup(event):\n\n plugins = getPlugins()\n\n for themeDirectory in iterDirectoriesOfType(THEME_RESOURCE_NAME):\n if themeDirectory.directory in reload_paths: # only for sauna.reload!\n pluginSettings = getPluginSettings(themeDirectory, plugins)\n\n for name, plugin in plugins:\n plugin.onDiscovery(themeDirectory.__name__,\n pluginSettings[name],\n pluginSettings)", "def _on_modules_load(self):", "def _post_load(self):\n pass", "def on_load(self):\n self.__init__()", "def handle_reload_toolbox(self):", "def on_loaded(self):\n pass", "def on_pre_enter(self, *args):\r\n for theme in self.themes:\r\n chosen = self.app.user_data['theme_name'] == theme\r\n self.add_theme_choice(theme, get_theme_palette(theme), chosen)", "def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")", "def OnThemeChange(self, msg):\n self.__InitImageList()", "def on_startup(self) -> None:\n ...", "def _load_themes(self):\n # create a theme definition object for each theme, this will be\n # used to generate the theme in tkinter along with any assets\n # at run-time\n if USER_DEFINED:\n DEFINED_THEMES.update(USER_DEFINED)\n theme_settings = {\"themes\": DEFINED_THEMES}\n for name, definition in theme_settings[\"themes\"].items():\n self.register_theme(\n ThemeDefinition(\n name=name,\n themetype=definition[\"type\"],\n font=definition.get(\"font\") or DEFAULT_FONT,\n colors=Colors(**definition[\"colors\"]),\n )\n )", "def refresh(self):\n self._themes = {}\n for theme in starchain(ldr(self.app) for ldr in self.loaders):\n if self.valid_app_id(theme.application):\n self.themes[theme.identifier] = theme\n self.register_theme_assets()", "def setTheme(self):\n pass", "def custom_setup(self):\r\n pass", "def packaged_themes_loader(app):\n themes_path = os.path.join(app.root_path, 'themes')\n if os.path.exists(themes_path):\n return load_themes_from(themes_path)\n else:\n return ()", "def _install_content_handlers(self):\n @self.app.before_first_request\n def init_content():\n self.content.initialize(self.app.config)\n self.content.load()\n\n if self.freezing:\n return\n\n @self.app.before_request\n def auto_update_content():\n # avoid reloading on static files:\n if request.endpoint == 'static':\n return\n\n # reload on explicit view requests only (e.g. not favicons):\n if request.endpoint in self.app.view_functions:\n self.content.load()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts a setup mode that is used for moving, resizing and other various changes that the user might setup
def start_setup(self, setup_type):
    # Persist the user preferences when we end our setup
    if (self.setup_type != "" and not setup_type):
        self.setup_type = setup_type
        rect = self.canvas.get_rect()
        self.x = int(rect.x)
        self.y = int(rect.y)
        self.width = int(rect.width)
        self.height = int(rect.height)
        self.preferences.persist_preferences({
            self.id + '_x': self.x,
            self.id + '_y': self.y,
            self.id + '_width': self.width,
            self.id + '_height': self.height
        })
    # Start the setup state
    elif self.setup_type != setup_type:
        self.setup_type = setup_type
        if (self.setup_type == "position"):
            x, y = ctrl.mouse_pos()
            self.canvas.move(x, y)
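A short usage sketch of the expected call sequence; widget is a hypothetical stand-in for whatever object carries this method (e.g. a Talon HUD widget instance).

    widget.start_setup("position")   # enter positioning mode; the canvas jumps to the mouse cursor
    # ... the user drags the widget to a new spot ...
    widget.start_setup("")           # leaving setup persists x/y/width/height via the preferences object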
[ "def start_setup(self):\n self.all_buttons_inactive()\n self.b1.active = True\n self.b6.active = True\n self.b7.active = True\n self.all_dice_inactive()\n self.all_dice_roll()", "def _setup(self):\n # Start the application (if not already running).\n if not self._active:\n self._window.switch_to()\n # Set the window color, this will be transparent in saved images.\n glClearColor(VERY_LIGHT_GREY, VERY_LIGHT_GREY, VERY_LIGHT_GREY, 0)\n # Reset the transformation state.\n # Most of this is already taken care of in Pyglet.\n #glMatrixMode(GL_PROJECTION)\n #glLoadIdentity()\n #glOrtho(0, self.width, 0, self.height, -1, 1)\n #glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n # Enable line anti-aliasing.\n glEnable(GL_LINE_SMOOTH)\n # Enable alpha transparency.\n glEnable(GL_BLEND)\n glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)\n #glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n self._window.dispatch_events()\n self._window.set_visible(True)\n self._active = True\n self.clear()\n self.setup()", "def set_display_setup(self, mode):\n self.command(f\"DISPLAY {mode}\")", "def startup(self):\n self.screen.launch_screen()", "def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")", "def _didPressQuickSetup(self, *args):\n if(uib.GetUserConfirmation(\"Quick Scene Setup\", \"This will set some of your nParticle attribute values within Maya \"\n + \"(friction, self collision etc) to get the swarm up and running.\\n\"\n + \"***This includes changing the nucleus space scale and enabling it's ground plane.***\"\n + \"\\nContinue?\")):\n self.delegate.quickSceneSetup()", "def setup_pymol():\n pymol.finish_launching() # Prevent threading errors\n # Configure global settings\n cmd.set('scene_buttons', 1)\n cmd.set('matrix_mode', 1)\n cmd.set('movie_panel', 1)\n # Configure quality settings\n cmd.mset(\"1 x500\")\n cmd.set('ray_trace_frames', 1)\n cmd.viewport(800, 800)", "def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)", "def _display_setup(self):\r\n display_file = \"{}/display.json\".format(self.settings_dir)\r\n with open(display_file) as json_file:\r\n win_settings = json.load(json_file)\r\n self.win = visual.Window(**win_settings)\r\n framerate = self.win.fps()\r\n self.frame_duration = 1.0/framerate\r\n self.mouse = event.Mouse(visible=False, win=self.win)", "def SetupWindow(self):\n\n #VM ADDED\n pti.infant_tobii_controller.leapCalibrateBaby()\n\n # Important to do this first because it gets the windows in the correct order for focus etc.\n\n #VM CHANGED SELF.WIN FULLSCR TO TRUE\n if self.stimPres:\n # Stimulus presentation window\n self.win = visual.Window((self.screenWidth, self.screenHeight), fullscr=True, screen=self.screenIndex, allowGUI=False,\n units='pix', color=self.screenColor)\n self.dummyThing = visual.Circle(self.win, size=1, color=self.win.color) # This is for fixing a display glitch in PsychoPy3 involving multiple windows of different sizes.\n # Coder window\n self.win2 = visual.Window((400, 400), fullscr=False, screen=self.expScreenIndex, allowGUI=True, units='pix', waitBlanking=False,\n rgb=[-1, -1, -1])\n\n #VM ADDED\n self.controller = pti.infant_tobii_controller(self.win)\n self.controller.start_recording('data/toerase2.tsv', embed_event = True)\n\n if self.stimPres:\n tempText = visual.TextStim(self.win2, text=\"Loading Stimuli\", pos=[0, 0], color='white', bold=True, height=40)\n tempText.draw()\n 
self.win2.flip()\n # Step 1: Load and present \"startImage\"\n if self.startImage is not '':\n self.dummyThing.draw()\n tempStim = self.stimList[self.startImage]\n tempStimObj = visual.ImageStim(self.win, tempStim['stimLoc'], size=[self.movieWidth, self.movieHeight])\n tempStimObj.draw()\n self.win.flip() # This should now be on the screen until the first attngetter\n self.stimDict = {x: [] for x in self.stimNames.keys()} # This holds all the loaded movies.\n self.counters = {x: 0 for x in self.stimNames.keys()} # list of counters, one per index of the dict, so it knows which movie to play\n tempCtr = {x: 0 for x in self.stimNames.keys()}\n for i in self.actualTrialOrder:\n # Adjust for hab sub-trials. Looks for a very specific set of traits, which could occur, but...shouldn't.\n if '.' in i:\n tempI = i\n while '.' in tempI:\n tempI = tempI[tempI.index('.')+1:]\n i = tempI\n x = tempCtr[i] # Changed so hab trials get the same treatment as everything else.\n if x < len(self.stimNames[i]):\n tempStim = self.stimList[self.stimNames[i][x]]\n if tempStim['stimType'] == 'Movie':\n tempStimObj = visual.MovieStim3(self.win, tempStim['stimLoc'],\n size=[self.movieWidth, self.movieHeight], flipHoriz=False,\n flipVert=False, loop=False)\n elif tempStim['stimType'] == 'Image':\n tempStimObj = visual.ImageStim(self.win, tempStim['stimLoc'],\n size=[self.movieWidth, self.movieHeight])\n elif tempStim['stimType'] == 'Audio':\n tempStimObj = sound.Sound(tempStim['stimLoc'])\n else: # The eternal problem of audio/image pair. Just creates an object that's a dict of audio and image.\n audioObj = sound.Sound(tempStim['audioLoc'])\n imageObj = visual.ImageStim(self.win, tempStim['imageLoc'],\n size=[self.movieWidth, self.movieHeight])\n tempStimObj = {'Audio': audioObj, 'Image': imageObj}\n tempAdd = {'stimType':tempStim['stimType'], 'stim':tempStimObj}\n self.stimDict[i].append(tempAdd)\n tempCtr[i] += 1\n\n if len(list(self.playAttnGetter.keys())) > 0:\n for i in list(self.attnGetterList.keys()):\n if self.attnGetterList[i]['stimType'] == 'Audio':\n self.attnGetterList[i]['file'] = sound.Sound(self.attnGetterList[i]['stimLoc'])\n else:\n self.attnGetterList[i]['file'] = visual.MovieStim3(self.win, self.attnGetterList[i]['stimLoc'],\n size=[self.movieWidth, self.movieHeight],\n flipHoriz=False, flipVert=False, loop=False)\n if self.endImage is not '': # Load image for end of experiment, if needed.\n tempStim = self.stimList[self.endImage]\n self.endImageObject = visual.ImageStim(self.win, tempStim['stimLoc'], size=[self.movieWidth, self.movieHeight])\n else:\n self.endImageObject = None\n self.keyboard = self.key.KeyStateHandler()\n self.win2.winHandle.push_handlers(self.keyboard)\n if self.stimPres:\n self.win.winHandle.push_handlers(self.keyboard)\n self.baseSize = 40 # Base size of all attention-getters, in pixels\n self.attnGetterSquare = visual.Rect(self.win, height=self.baseSize, width=self.baseSize, pos=[self.testOffset + 0, 0], fillColor='black')\n self.attnGetterCross = visual.ShapeStim(self.win, vertices='cross', size=self.baseSize, pos=[self.testOffset + 0, 0], fillColor='black')\n\n numVertices = 10\n starRad = self.baseSize #This creates a large but static rotating star. 
It does not loom.\n starVerts = []\n for x in range(0,numVertices):\n if x % 2 == 1:\n tempRad = starRad*.55 # How much to draw in between the \"points\"\n else:\n tempRad = starRad\n tempVert = [tempRad*sin((2*pi)/numVertices * x), tempRad*cos((2*pi)/numVertices * x)]\n starVerts.append(tempVert)\n\n self.attnGetterStar = visual.ShapeStim(self.win, vertices=starVerts, pos=[self.testOffset + 0, 0], fillColor='black')\n\n self.statusSquareA = visual.Rect(self.win2, height=80, width=80,\n pos=[self.statusOffset - 60, self.statusOffsetY + 0],\n fillColor='black') # These two appear on the status screen window.\n self.statusSquareB = visual.Rect(self.win2, height=80, width=80,\n pos=[self.statusOffset + 60, self.statusOffsetY + 0], fillColor='black')\n self.statusTextA = visual.TextStim(self.win2, text=\"\", pos=[self.statusOffset - 60, self.statusOffsetY + 0],\n color='white', bold=True, height=30)\n self.statusTextB = visual.TextStim(self.win2, text=\"\", pos=[self.statusOffset + 60, self.statusOffsetY + 0],\n color='white', bold=True, height=30)\n self.trialText = visual.TextStim(self.win2, text=\"Trial no: \", pos=[-100, 150], color='white')\n self.readyText = visual.TextStim(self.win2, text=\"Trial not active\", pos=[-25, 100], color='white')\n \n #VM ADDED\n print('experiment starting...')\n \n self.doExperiment() # Get this show on the road!", "def mode_started(self):\n self.game.coils.backboxR.enable()\n self.game.coils.backboxG.enable()\n self.game.coils.backboxB.enable()\n pass", "def prepare_screening(self):\r\n self.prepared_folder(folder_name=\"dock\")\r\n self.prepare_receptors()\r\n self.prepare_ligands()", "def onClick(self):\n self.app.setActiveMode(\"start\")", "def setup_callback():\n self.setup_window.deiconify()", "def setup(self):\n self.grid = GridForm(self.screen, self.title, 1, 1)\n self.content = self.create_content()\n self.grid.add(self.content, 0, 0)\n for key in self.hotkeys:\n self.grid.addHotKey(key)\n if self.timer:\n self.grid.setTimer(self.timer)", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def setup(self):\n print(\"------------POZYX POSITIONING V1.1 -------------\")\n print(\"NOTES: \")\n print(\"- No parameters required.\")\n print()\n print(\"- System will auto start configuration\")\n print()\n print(\"- System will auto start positioning\")\n print()\n self.pozyx.printDeviceInfo(self.remote_id)\n print()\n print(\"------------POZYX POSITIONING V1.1 --------------\")\n print()\n self.pozyx.clearDevices(self.remote_id)\n\n self.setAnchorsManual()\n self.printPublishConfigurationResult()", "def switch(self):\n self.fullscreen = not (self.fullscreen)\n self.setScreenMode()", "def setup(self):\n print(\"------------POZYX POSITIONING V1.1 -------------\")\n print(\"NOTES: \")\n print(\"- No parameters required.\")\n print()\n print(\"- System will auto start configuration\")\n print()\n print(\"- System will auto start positioning\")\n print()\n if self.remote_id is None:\n self.pozyx.printDeviceInfo(self.remote_id)\n else:\n for device_id in [None, self.remote_id]:\n self.pozyx.printDeviceInfo(device_id)\n 
print()\n print(\"------------POZYX POSITIONING V1.1 --------------\")\n print()\n\n self.setAnchorsManual(save_to_flash=False)\n self.printPublishConfigurationResult()\n self.pozyx.setPositionFilter(self.filter_type, self.filter_strength, self.remote_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract bbox info from file name.
def get_bbox(fname):
    fname = fname.split('_')  # fname -> list
    i = fname.index('bbox')
    return map(float, fname[i+1:i+5])  # m
[ "def get_bbox(fname):\r\n fname = fname.split('_') # fname -> list\r\n i = fname.index('bbox')\r\n return list(map(float, fname[i+1:i+5])) # m\r", "def format_bbox_file(self, img_name, data):\r\n\r\n with open(self.bboxes_local, 'w+') as fbbox:\r\n # remove path\r\n bboxes = data.split(' ')[1:]\r\n for i in range(0, len(bboxes), 4):\r\n cur_bbox = bboxes[i:i+4]\r\n fbbox.write(img_name + ' ' + ' '.join(cur_bbox) + '\\n')", "def get_bbox_data(self):\r\n with open(self.bboxes_local, 'r') as fbbox:\r\n data = fbbox.read()\r\n\r\n return data", "def test_get_bounding_box(self):\n\n # Note there are two possible correct values of bbox depending on\n # the version of gdal:\n # http://trac.osgeo.org/gdal/wiki/rfc33_gtiff_pixelispoint\n\n # Get gdal version number\n x = gdal.VersionInfo('').replace('dev', '').split()\n y = x[1].split('.')[:2]\n z = ''.join(y) # Turn into number and\n if z.endswith(','):\n z = z[:-1] # Remove trailing comma\n\n # Reference bbox for vector data\n ref_bbox = {'tsunami_building_exposure.shp': [150.15238387897742,\n -35.71084183517241,\n 150.18779267086208,\n -35.70131768155173]}\n\n # Select correct reference bbox for rasters\n if float(z) < 17:\n ref_bbox['Earthquake_Ground_Shaking_clip.tif'] = [99.3641696,\n -2.2031806,\n 102.2411696,\n -0.0041806]\n else:\n ref_bbox['Earthquake_Ground_Shaking_clip.tif'] = [99.36,\n -2.199,\n 102.237,\n 0.0]\n\n for filename in ['Earthquake_Ground_Shaking_clip.tif',\n 'tsunami_building_exposure.shp']:\n abspath = os.path.join(TESTDATA, filename)\n bbox = get_bounding_box(abspath)\n msg = ('Got bbox %s from filename %s, but expected %s '\n % (str(bbox), filename, str(ref_bbox[filename])))\n assert numpy.allclose(bbox, ref_bbox[filename]), msg\n\n # Check the conversions\n bbox_string = bboxlist2string(bbox)\n\n # Check the check :-)\n check_bbox_string(bbox_string)\n\n # Check that it works for layer objects instantiated from file\n L = read_layer(abspath)\n L_bbox = L.get_bounding_box()\n msg = ('Got bbox %s from filename %s, but expected %s '\n % (str(L_bbox), filename, str(ref_bbox[filename])))\n assert numpy.allclose(L_bbox, ref_bbox[filename]), msg\n\n # Check that it works for layer objects instantiated from data\n if L.is_raster:\n D = Raster(data=L.get_data(),\n projection=L.get_projection(),\n geotransform=L.get_geotransform())\n elif L.is_vector:\n D = Vector(data=L.get_data(),\n projection=L.get_projection(),\n geometry=L.get_geometry())\n else:\n msg = 'Unexpected layer object: %s' % str(L)\n raise RuntimeError(msg)\n\n # Check that get_bounding_box works for data instantiated layers\n D_bbox = D.get_bounding_box()\n msg = ('Got bbox %s from layer %s, but expected %s '\n % (str(D_bbox), str(D), str(L_bbox)))\n assert numpy.allclose(D_bbox, L_bbox), msg", "def read_in_bb_file():\n with open(\"text/bounding_boxes.txt\", 'r') as f:\n bbs = f.readlines()\n bb_dict = {}\n for line in bbs:\n spl = line.strip().split(\",\")\n city = spl[0].title()\n place_name = city + \", \" + spl[1]\n lats_longs = [(spl[2], spl[3]), (spl[4], spl[5])]\n bb_dict[place_name] = lats_longs\n return bb_dict", "def get_bb(frame_annots, segm_name):\n my_left_seg = frame_annots[segm_name]\n if len(my_left_seg):\n x_min, y_min = my_left_seg.min(0)\n x_max, y_max = my_left_seg.max(0)\n bbox = np.array([int(x_min), int(y_min),\n int(x_max), int(y_max)])\n else:\n bbox = None\n return bbox", "def parse_blobname(filename):\n match = re.search(r'(?P<fileset>\\S+?)_(?P<vmname>\\S+?)_(?P<type>full|incr)_(?P<start>\\d{8}_\\d{6})\\.tar.gz', filename)\n if 
match is None:\n return None\n\n (fileset, is_full, start_timestamp, vmname) = (\n match.group('fileset'),\n Naming.type_str_is_full(match.group('type')),\n match.group('start'),\n match.group('vmname'))\n\n return fileset, is_full, start_timestamp, vmname", "def read_bounding_boxes(filename):\n f = open(filename)\n objects = []\n weight = 0\n height = 0\n for line in f:\n print(line)\n first_word = line.split(';')[0]\n if first_word == \"Dimensions\":\n weight = line.split(';')[1]\n height = line.split(';')[2]\n if first_word == \"Object\":\n objects.append((line.split(';')[1], line.split(';')[2], line.split(';')[4],\n line.split(';')[5], line.split(';')[6], line.split(';')[7]))\n return weight, height, objects", "def export_bbox_with_object_name(object_name, xml_path):\n bboxs = []\n tree = ET.parse(xml_path)\n root = tree.getroot()\n for objects in root.findall('object'):\n if objects[0].text == object_name:\n bboxs.append([int(objects[4][0].text),\n int(objects[4][1].text),\n int(objects[4][2].text),\n int(objects[4][3].text)])\n return bboxs", "def get_bbox_label(self, index):\n frame = self.current_samples[index]\n lidar_file = os.path.join(self.root, frame)\n assert os.path.exists(lidar_file)\n # point labels not used here, bboxes instead \n _, _, bbox = data_utils.load_h5(lidar_file, bbox=True)\n # transform single bbox annotation in list for compability reasons (dataset can be extended with >1 bboxes per frame)\n bbox_list = np.reshape(bbox, (1,-1)) \n bbox_obj_list = [object3d.Object3d(box, gt=True) for box in bbox_list]\n return bbox_list", "def calc_extra_info_bboxes(bboxes):\n for bbox in bboxes:\n bbox['left'] = bbox['rect'][0]\n bbox['right'] = bbox['rect'][0] + bbox['rect'][2]\n bbox['top'] = bbox['rect'][1]\n bbox['bot'] = bbox['rect'][1] + bbox['rect'][3]\n bbox['w'] = bbox['rect'][2]\n bbox['h'] = bbox['rect'][3]\n bbox['x_center'] = (bbox['left'] + bbox['right']) / 2\n bbox['y_center'] = (bbox['top'] + bbox['bot']) / 2", "def find_bbox(pred_file_path: str, train_file_path: str) -> Dict:\n\n f_pred = open(pred_file_path, \"r\")\n pred_result = f_pred.readlines()\n f_pred.close()\n\n img_index = get_img_index(pred_result)\n\n img_names = get_image_names(train_file_path)\n\n if len(img_index) - 1 != len(img_names):\n return \"There is mismatch between the number of predictions and the number of images.\"\n\n # Create dictionary with the img name as the key and the bbox information as values.\n target_labels = [\"TableCaption\", \"TableBody\", \"TableFootnote\", \"Paragraph\", \"Table\"]\n result = {}\n for i, name in enumerate(img_names):\n key = name\n start = img_index[i] + 1\n end = img_index[i + 1]\n unfiltered_value = pred_result[start:end]\n filtered_value = [\n v for v in unfiltered_value if v.split(\":\")[0] in target_labels\n ]\n result[key] = filtered_value\n\n return result", "def parseBoundaryField(fn):\n content = getFileContent(fn)\n if content is not None:\n return parseBoundaryContent(content)\n else:\n return None", "def get_str_bbox(self, s):\n return self.get_str_bbox_and_descent(s)[:4]", "def get_bounding_boxes(image_path):\n csv_path = get_csv_path_from_image_path(image_path)\n try:\n df = pd.read_csv(csv_path, encoding='ISO-8859-1')\n except ParserError:\n print(f\"Fatal Error while parsing file {csv_path}. 
bounding boxes cannot be retrieved for {image_path}!\")\n return\n image_name = get_image_name(image_path)\n image_data = df.loc[df.Image == image_name, [\"Unicode\", \"X\", \"Y\", \"Width\", \"Height\"]]\n return image_data", "def load_annotations(path, img_w, img_h):\n bboxes = []\n with open(path, 'r') as file:\n for row in file:\n _, xc , yc, w, h = row.split()\n xc = float(xc)*img_w\n yc = float(yc)*img_h\n w = float(w)*img_w\n h = float(h)*img_h\n bboxes.append([xc - w/2 , yc - h/2, xc + w/2 , yc + h/2])\n\n return bboxes", "def get_bbox(bbox):\n xmin, ymin, w, h = bbox\n xmin = round(xmin)\n ymin = round(ymin)\n xmax = round(xmin + w) - 1\n ymax = round(ymin + h) - 1\n return [xmin, ymin, xmax, ymax]", "def parse_bbox(self, bbox):\n bbox_coords = bbox.split(',')\n if len(bbox_coords) == 4:\n try:\n # convert coordinates to numbers\n bbox_coords = [float(c) for c in bbox_coords]\n # check min <= max\n if (bbox_coords[0] <= bbox_coords[2] and\n bbox_coords[1] <= bbox_coords[3]):\n return bbox_coords\n except ValueError:\n # conversion failed\n pass\n\n # invalid bbox\n return None", "def extract_geotiff_bounding_box(geotiff_path):\n # Extract bounding box\n with rio.open(geotiff_path) as src:\n bounding_box = src.bounds\n\n return bounding_box" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract EPSG number from file name.
def get_proj(fname):
    fname = fname.split('_')  # fname -> list
    i = fname.index('epsg')
    return fname[i+1]
[ "def extract_document_name(file_name):\n lines = reader.read_file_line('data/458/ids/' + file_name)\n name = converter.convert_to_latin(lines[1])\n return name.split(':')[0]", "def visit_from_file_name(filename):\n expr = re.compile(r\"\\d{4}(?:\\d+)\")\n res = expr.search(filename)\n if res is None:\n return None\n return res.group()", "def extract_id(name):\n return name.replace(\"../images/\", \"\").replace('_free_energy.png', '')", "def url_to_gid(url):\n match = re.search(GID_P, url)\n if match:\n return url[match.start():match.end()]\n else:\n print(\"[ERROR] Could not find gid in '{}'\".format(url), file=sys.stderr)\n return url", "def extract_name(filename):\n name = os.path.splitext(os.path.basename(filename))[0]\n pattern = \"([0-9a-zA-Z_\\-\\.]+)_[0-9]+_[0-9]+$\"\n g = re.search(pattern, name)\n if g is not None:\n name = g.groups()[0]\n return name", "def get_num_from_file(file_name):\n basename = file_name.partition('.')[0]\n first, second = basename.split('_')\n num = second.replace(\"genome\", '')\n num = num[1:]\n return int(num)", "def _extract(img_name: str) -> int:\n\n img_id = os.path.splitext(img_name)[0]\n img_id = os.path.basename(img_id)\n img_id = img_id.split('_')[-1]\n\n try:\n img_id = int(img_id)\n except ValueError:\n img_id = -1\n\n return img_id", "def _get_id_of_contour(filename):\n # The id is the 3 section of the filename\n contour_id = os.path.basename(filename).split('-')[2]\n # Remove leading zeros from the id\n return str(int(contour_id))", "def get_filename(url):\n path = urlparse.urlparse(url).path\n last_component = path.split('/')[-1]\n return last_component", "def deconstruct_filename(filename):\n decon = (filename.split('.')[0]).split('-')\n return (int(decon[0]), int(decon[1]))", "def get_radius_from_grfile(grfile, default=0):\n match = re.findall('(\\d+)', grfile)\n if len(match) > 0 and str(grfile).endswith(str(match[-1]) + '.gr'):\n return int(match[-1])\n return default", "def parseFilename(self, filename):\r\n match = self.filename_regex.match(filename)\r\n if match is None:\r\n # TODO?: Raise exception?\r\n '''print \"Filename\", filename, \"unrecognized!\"'''\r\n return None\r\n lat = int(match.group(2))\r\n lon = int(match.group(4))\r\n if match.group(1) == \"S\":\r\n lat = -lat\r\n if match.group(3) == \"W\":\r\n lon = -lon\r\n return lat, lon", "def get_document_id(file_name):\n doc,id,ending = file_name.split(\".\")\n return id", "def get_yearsuffix_from_filepath(filepath):\n re_match = re.search(r\"(\\d{4})\", filepath)\n if re_match:\n year = re_match.group(1)\n return year[2:] \n else: return None", "def get_location(filename):\r\n\tlocation = filename.split('_')[2][8:]\r\n\treturn location", "def parse_num(path):\n nbasename = path.basename.lower()\n if nbasename.startswith(nprefix):\n try:\n return int(nbasename[len(nprefix) :])\n except ValueError:\n pass", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def filename(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4] + '/' + tail[:-3] + 'shp'", "def get_epsg_code(self):\n crs = self.img.crs\n epsg = crs['init']\n epsg_code = int(epsg[5:])\n return epsg_code" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all 2d '/variable' names in the HDF5.
def get_grid_names(fname):
    with h5py.File(fname, 'r') as f:
        vnames = [k for k in f.keys() if f[k].ndim == 2]
    return vnames
[ "def load_varnames_from_hdf5(fname, h5path='/'):\n def walk(group, node_type=h5py.Dataset):\n for node in list(group.values()):\n if isinstance(node, node_type):\n yield node\n\n h5file = h5py.File(fname, mode='r')\n varlist = []\n try:\n h5group = h5file.require_group(h5path)\n\n for node in walk(h5group):\n varlist.append(node.name)\n\n except:\n log.error('ERROR reading .hdf5: {0}'.fpath)\n log.error(sys.exc_info())\n\n h5file.close()\n\n return varlist", "def variable_names(self):\n return self.defining_polynomial().parent().variable_names()", "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def get_variable_names(self):\n \n return [x['variable'] for x in self.variable_list]", "def getOthVarNames( self ):\n\n if self.othVarNames:\n return self.othVarNames.keys()\n\n n = self.adb.get( \"nOthVars\" )\n for indx in range( n ):\n name = self.adb.get( \"othVarName\",\n indx ) \n self.othVarNames[ name ] = indx\n\n return self.othVarNames.keys()", "def getOhcVarNames( self ):\n\n if self.ohcVarNames:\n return self.ohcVarNames.keys()\n \n n = self.adb.get( \"nOhcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ohcVarName\",\n indx ) \n self.ohcVarNames[name] = indx\n\n return self.ohcVarNames.keys()", "def get_node_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_nod_var\"][:]]", "def _get_var_h5(self, var, selection=[]):\n \n self.openh5()\n \n values = {}\n var_replaced = var.replace('.', '_dot_')\n if selection == []:\n selection = [n._v_name for n in self.h5.listNodes('/')]\n \n for node in self.h5.iterNodes('/'):\n try:\n # move on if this node is NOT in the selection\n selection.index(node._v_name)\n try:\n # look up the variable in this node\n array = self.h5.getNode(node, name=var_replaced)\n values[node._v_name] = array.read()\n except(tbl.NoSuchNodeError):\n # either the node is Metadata, or this variable does not\n # exist in this node (perfectly possible and normal)\n pass \n #raise tbl.NoSuchNodeError(var + \" not found in node \" + node._v_pathname)\n except(ValueError):\n # it's a node that was not in the selection\n if self.verbose:\n print \" node not selected: \", node._v_name\n pass\n \n \n \n self.h5.close()\n return values", "def get_variables(self, names: Sequence[str]) -> List[types.NestedArray]:", "def variables(model):\r\n return model.keys()", "def variables(model):\n return model.keys()", "def getOeiVarNames( self ):\n\n if self.oeiVarNames:\n return self.oeiVarNames.keys()\n\n n = self.adb.get( \"nOeiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oeiVarName\",\n indx ) \n self.oeiVarNames[name] = indx\n\n return self.oeiVarNames.keys()", "def variables(model: Model) -> AbstractSet[str]:\r\n assert is_model(model)\r\n return model.keys()", "def get_variable_names(self):\n return list(self.symtable.keys())", "def getVariableList(dataset):\n variables = [v for v in dataset.variables.keys() if v not in dataset.dimensions.keys()]\n for d in dataset.dimensions.keys():\n try:\n variables.pop(variables.index(dataset.variables[d].getncattr(\"bounds\")))\n except:\n pass\n return variables", "def variables(model: Model) -> AbstractSet[str]:\n assert is_model(model)\n return model.keys()", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]", "def get_dataset_keys(filename):\n dataset_keys = []\n def walk(name, obj):\n if type(obj) == h5py._hl.dataset.Dataset:\n dataset_keys.append(name)\n\n with h5py.File(filename, 'r') as file:\n 
file.visititems(walk)\n\n return dataset_keys", "def get_element_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_elem_var\"][:]]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that initializing a Matern1/2 kernel with 0 lengthscale raises an exception
def test_matern_zero_lengthscale(matern):
    with pytest.raises(ValueError) as exp:
        matern(lengthscale=0.0, variance=1.0, output_dim=1)
    assert exp.value.args[0].find("lengthscale must be positive.") >= 0
[ "def testZeroInput(self):\n nb.rescale_length(2.0)\n nb.rescale_length(0)\n self.assertEqual(2.0, nb.rscale)", "def test_nonpositive_nu_raises_exception(nu):\n with pytest.raises(ValueError):\n kernels.Matern(input_dim=1, nu=nu)", "def test_ard_init_scalar(D):\n kernel_1 = gpflow.kernels.SquaredExponential(lengthscale=2.3)\n kernel_2 = gpflow.kernels.SquaredExponential(lengthscale=np.ones(D) * 2.3)\n lengthscale_1 = kernel_1.lengthscale.read_value()\n lengthscale_2 = kernel_2.lengthscale.read_value()\n assert np.allclose(lengthscale_1, lengthscale_2, atol=1e-10)", "def init_kernel(cls, m):\n pass", "def test_kernel_matrices(self):\n matrices, kernels = kernel_matrices(3, 5, kernel_function=RBF, length_scale=0.6)\n\n self.assertEqual(len(matrices), len(kernels))\n self.assertEqual(len(matrices), 10)\n for i in range(len(matrices)):\n self.assertEqual(matrices[i].shape, (5, 3))\n self.assertEqual(kernels[i].length_scale, 0.6)\n\n with self.assertRaises(RuntimeError) as context:\n kernel_matrices(6, 5, kernel_function=RBF, length_scale=0.6)\n self.assertEqual(context.exception.args[0], \"order must be larger than 1 and less than dim which is 5 but 6 \"\n \"was given.\")", "def testKernelShape(self):\n\n snt.Conv3D(output_channels=10, kernel_shape=[3, 4, 5], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=3, name=\"conv1\")\n\n with self.assertRaisesRegexp(snt.Error, \"Invalid kernel shape.*\"):\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3, 3, 3], name=\"conv1\")", "def testNoFeatureColumnsOrKernelMappers(self):\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier()", "def test_check_init_params(self):\n x = ds.array([[0, 0], [0, 1], [1, 0]], block_size=(3, 2))\n with self.assertRaises(ValueError):\n gm = GaussianMixture(init_params='')\n gm.fit(x)", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def test_init_array_except(self):\n mat = self.rand_matrix(4, 4)\n self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])\n self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])\n self.assertRaises(QiskitError, Operator, mat, input_dims=5)", "def test_nu_large_recovers_rbf_kernel(x0: np.ndarray, x1: np.ndarray, input_dim: int):\n lengthscale = 1.25\n kernmat_rbf = kernels.ExpQuad(lengthscale=lengthscale, input_dim=input_dim)\n kernmat_matern = kernels.Matern(lengthscale=lengthscale, nu=15, input_dim=input_dim)\n np.testing.assert_allclose(\n kernmat_rbf(x0, x1),\n kernmat_matern(x0, x1),\n err_msg=\"RBF and Matern kernel are not equivalent for nu=infty.\",\n rtol=0.05,\n atol=0.01,\n )", "def testInvalidKernelMapper(self):\n\n class DummyKernelMapper(object):\n\n def __init__(self):\n pass\n\n feature = layers.real_valued_column('feature')\n kernel_mappers = {feature: [DummyKernelMapper()]}\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier(\n feature_columns=[feature], kernel_mappers=kernel_mappers)", "def testNegativeInput(self):\n nb.rescale_length(2.0)\n nb.rescale_length(-1.0)\n self.assertEqual(2.0, nb.rscale)", "def __init__(self, kernel, centers, n_label, mem_gb,\n n_subsample=None, q=None, bs=None,\n metric='accuracy', scale=.5, seed=1):\n \n n, d = centers.shape\n if n_subsample is None:\n if n < 100000:\n n_subsample = min(2000, n)\n else:\n 
n_subsample = 12000\n\n mem_bytes = (mem_gb - 0.6) * 1024**3 # preserve 600MB\n # Has a factor 3 due to tensorflow implementation.\n bsizes = np.arange(n_subsample)\n mem_usages = ((d + 2 * n_label + 3 * bsizes) * n + n_subsample * 1000) * 4\n mG = np.sum(mem_usages < mem_bytes) # device-dependent batch size\n\n # Calculate batch/step size for improved EigenPro iteration.\n np.random.seed(seed)\n pinx = np.random.choice(n, n_subsample, replace=False).astype('int32')\n kf, gap, s1, beta = pre_eigenpro_f(\n centers[pinx], kernel, q, n, mG, alpha=.95, seed=seed)\n new_s1 = s1 / gap\n\n if bs is None:\n bs = min(np.int32(beta / new_s1 + 1), mG)\n\n if bs < beta / new_s1 + 1:\n \teta = bs / beta\n elif bs < n:\n \teta = 2 * bs / (beta + (bs - 1) * new_s1)\n else:\n \teta = 0.95 * 2 / new_s1\n eta = scale * eta\n\n print(\"n_subsample=%d, mG=%d, eta=%.2f, bs=%d, s1=%.2e, beta=%.2f\" %\n (n_subsample, mG, eta, bs, s1, beta))\n eta = np.float32(eta * n_label) \n print('d',d)\n # Assemble kernel model.\n ix = Input(shape=(d+1,), dtype='float32', name='indexed-feat')\n x, index = utils.separate_index(ix) # features, sample_id\n kfeat = KernelEmbedding(kernel, centers,\n input_shape=(d,))(x)\n\n y = Dense(n_label, input_shape=(n,),\n activation='linear',\n kernel_initializer='zeros',\n use_bias=False)(kfeat)\n model = Model(ix, y)\n print(model.summary())\n model.compile(\n loss='mse',\n optimizer=PSGD(pred_t=y, index_t=index, eta=eta,\n eigenpro_f=asm_eigenpro_f(kf, kfeat, pinx)),\n metrics=[metric])\n print(model,'model')\n self.n_label = n_label\n self.seed = seed\n self.bs = bs\n self.model = model", "def test_scaler_2d_arrays(self):\n rng = np.random.RandomState(0)\n X = rng.randn(4, 5)\n X[:, 0] = 0.0 # first feature is always of zero\n\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X, copy=True)\n self.assertFalse(np.any(np.isnan(X_scaled)))\n\n assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n # Check that X has been copied\n self.assertTrue(X_scaled is not X)\n\n # check inverse transform\n X_scaled_back = scaler.inverse_transform(X_scaled)\n self.assertTrue(X_scaled_back is not X)\n self.assertTrue(X_scaled_back is not X_scaled)\n assert_array_almost_equal(X_scaled_back, X)\n\n X_scaled = scale(X, axis=1, with_std=False)\n self.assertFalse(np.any(np.isnan(X_scaled)))\n assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])\n X_scaled = scale(X, axis=1, with_std=True)\n self.assertFalse(np.any(np.isnan(X_scaled)))\n assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])\n # Check that the data hasn't been modified\n self.assertTrue(X_scaled is not X)\n\n X_scaled = scaler.fit(X).transform(X, copy=False)\n self.assertFalse(np.any(np.isnan(X_scaled)))\n assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n # Check that X has not been copied\n self.assertTrue(X_scaled is X)\n\n X = rng.randn(4, 5)\n X[:, 0] = 1.0 # first feature is a constant, non zero feature\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X, copy=True)\n self.assertFalse(np.any(np.isnan(X_scaled)))\n assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])\n assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])\n # Check that X has not been copied\n self.assertTrue(X_scaled is not X)\n\n # Same thing for sparse matrices...\n X = 
scipy.sparse.coo_matrix((np.random.random((12,)),\n ([i for i in range(12)],\n [int(i / 3) for i in range(12)])))\n X = X.tocsr()\n scaler = StandardScaler()\n X_scaled = scaler.fit(X).transform(X, copy=False)\n\n self.assertFalse(np.any(np.isnan(X_scaled.data)))\n assert_array_almost_equal(\n [X_scaled.data[X_scaled.indptr[i]:X_scaled.indptr[i + 1]].mean()\n for i in range(X_scaled.shape[1])],\n np.zeros((4, ), dtype=np.float64))\n assert_array_almost_equal(np.sqrt([\n X_scaled.data[X_scaled.indptr[i]:X_scaled.indptr[i + 1]].var()\n for i in range(X_scaled.shape[1])]),\n np.ones((4, ), dtype=np.float64))\n\n # Because we change the sparse format to csc, we cannot assert that\n # the matrix did not change!\n # self.assertTrue(X_scaled is X)\n # Check that the matrix is still sparse\n self.assertEqual(len(X.indices), 12)", "def test_conv2d_out_of_range_scale():\n np.random.seed(0)\n\n input_sc = 1024\n kernel_sc = 1024\n output_sc = 1\n\n model, _ = _get_model(\n (1, 4, 4, 4),\n 1,\n 1,\n 0,\n input_sc,\n 0,\n kernel_sc,\n 0,\n output_sc,\n \"none\",\n (1, 1),\n (1, 1),\n 1,\n \"uint8\",\n 8,\n \"HWIO\",\n )\n model = tei.make_ethosn_composite(model, \"ethos-n.qnn_conv2d\")\n mod = tei.make_ethosn_partition(model)\n\n expected_err_msg = (\n \"Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)\"\n )\n tei.test_error(mod, {}, expected_err_msg)", "def test_multidim_init_fails_initval(self):\n initval = np.ones(2)\n with self.assertRaises(TypeError):\n linode.LinearODE(t0=0., tmax=1., params=1.0,\n initval=initval, initval_unc=2.1)", "def __init__(self, x, filter_shape, bias=True, stride=1, pad_size=0, pad_mode='CONSTANT',\n is_training=True, niter=1, stop_grad_sigma=False,\n name='sn_conv2d', filler=('msra', 0., 1.), update_collection=None):\n super(SpecNormConv2d, self).__init__(name, update_collection)\n # inputs\n self.inputs.append(x)\n in_shape = x.shape.as_list()\n if len(filter_shape) == 3:\n # get chn_in from input tensor\n kin = in_shape[-1]\n kout = filter_shape[-1]\n filter_shape[-1] = kin\n filter_shape.append(kout)\n kh, kw, kin, kout = filter_shape\n with tf.variable_scope(name) as scope:\n # padding\n padding = 'VALID'\n if pad_size == -1:\n # 'SAME' padding\n if pad_mode == 'CONSTANT':\n padding = 'SAME'\n else:\n w_in = in_shape[-2]\n if w_in % stride == 0:\n pad_size_both = max(kw - stride, 0)\n else:\n pad_size_both = max(kw - (w_in % stride), 0)\n if pad_size_both > 0:\n pad_size = pad_size_both / 2\n x = tf.pad(x, [[0,0], [pad_size, pad_size_both-pad_size],\n [pad_size, pad_size_both-pad_size], [0,0]], pad_mode)\n elif pad_size > 0:\n # pad_size padding on both sides of each dimension\n x = tf.pad(x, [[0,0], [pad_size, pad_size], [pad_size, pad_size], [0,0]], pad_mode)\n # initializer for convolutional kernel\n initializer = None\n if filler[0] == 'uniform':\n initializer = tf.random_uniform_initializer(filler[1], filler[2])\n elif filler[0] == 'msra':\n fan_in = kh * kw * kin\n stdev = np.sqrt(2. 
/ ((filler[1]**2 + filler[2]**2) * fan_in))\n initializer = tf.truncated_normal_initializer(0., stdev)\n elif filler[0] == 'gaussian':\n initializer = tf.truncated_normal_initializer(filler[1], filler[2])\n else:\n raise ValueError('Invalid filler type: %s' % (filler[0]))\n # params\n weight = tf.get_variable('weight', shape=filter_shape, dtype=TF_DTYPE, initializer=initializer)\n self.params.append(weight)\n # update_params\n u = tf.get_variable('u', [1, kout], dtype=TF_DTYPE,\n initializer=tf.truncated_normal_initializer(), trainable=False)\n sigma = tf.get_variable('sigma', [], dtype=TF_DTYPE,\n initializer=tf.constant_initializer(1.), trainable=False)\n self.update_params.extend([u, sigma])\n # normalize weight\n if is_training:\n weight_normalized, u_new, sigma_new = spec_norm_weight(weight, u, niter, stop_grad_sigma)\n else:\n weight_normalized = weight / sigma\n u_new, sigma_new = None, None\n # udpate_ops\n def get_update_ops(update_collection=update_collection):\n if self._update_ops is None:\n self._update_ops = list()\n with tf.name_scope(scope.original_name_scope):\n with tf.name_scope(update_collection, default_name='default'):\n self._update_ops.extend([u.assign(u_new), sigma.assign(sigma_new)])\n return self._update_ops\n if is_training:\n self.update_ops_getter = get_update_ops\n # conv2d\n y = tf.nn.conv2d(x, weight_normalized, [1, stride, stride, 1], padding=padding)\n # add channel-wise bias\n if bias:\n b = tf.get_variable('bias', shape=kout, dtype=TF_DTYPE, initializer=tf.constant_initializer(0.))\n self.params.append(b)\n y = tf.nn.bias_add(y, b)\n # outputs\n self.outputs.append(y)\n self.print_info(LAYERS_VERBOSE)", "def __init__(self, kernel_size):\r\n super().__init__()\r\n self.kernel_size = kernel_size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that initializing a Matern1/2 kernel with 0 variance raises an exception
def test_matern12_zero_variance(matern):
    with pytest.raises(ValueError) as exp:
        matern(lengthscale=1.0, variance=0.0, output_dim=1)
    assert exp.value.args[0].find("variance must be positive.") >= 0
[ "def test_nonpositive_nu_raises_exception(nu):\n with pytest.raises(ValueError):\n kernels.Matern(input_dim=1, nu=nu)", "def test_sample_zero_cov(self):\n for mean, cov in self.normal_params:\n with self.subTest():\n dist = prob.Normal(mean=mean, cov=0 * cov, random_state=1)\n dist_sample = dist.sample(size=1)\n assert_str = \"Draw with kernels zero does not match mean.\"\n if isinstance(dist.mean(), linops.LinearOperator):\n self.assertAllClose(\n dist_sample, dist.mean().todense(), msg=assert_str\n )\n else:\n self.assertAllClose(dist_sample, dist.mean(), msg=assert_str)", "def test_check_reg_covar(self):\n x = ds.array([[0, 0], [0, 1], [1, 0]], block_size=(3, 2))\n with self.assertRaises(ValueError):\n gm = GaussianMixture(reg_covar=-0.1)\n gm.fit(x)", "def test_check_initial_parameters(self):\n x = ds.array([[0, 0], [0, 1], [1, 0]], block_size=(3, 2))\n with self.assertRaises(ValueError):\n gm = GaussianMixture(weights_init=[1, 2])\n gm.fit(x)\n with self.assertRaises(ValueError):\n gm = GaussianMixture(means_init=[1, 2])\n gm.fit(x)\n with self.assertRaises(ValueError):\n gm = GaussianMixture(precisions_init=[1, 2],\n covariance_type='full')\n gm.fit(x)\n with self.assertRaises(ValueError):\n gm = GaussianMixture(precisions_init=[1, 2],\n covariance_type='tied')\n gm.fit(x)\n with self.assertRaises(ValueError):\n gm = GaussianMixture(precisions_init=[1, 2],\n covariance_type='diag')\n gm.fit(x)\n with self.assertRaises(ValueError):\n gm = GaussianMixture(precisions_init=[1, 2],\n covariance_type='spherical')\n gm.fit(x)\n with self.assertRaises(ValueError):\n gm = GaussianMixture(means_init=[[1, 2, 3]],\n precisions_init=[[1, 2], [3, 4]],\n covariance_type='tied')\n gm.fit(x)", "def test_check_init_params(self):\n x = ds.array([[0, 0], [0, 1], [1, 0]], block_size=(3, 2))\n with self.assertRaises(ValueError):\n gm = GaussianMixture(init_params='')\n gm.fit(x)", "def testBiasInitializerIsZeroByDefault(self):\n\n conv1 = snt.Conv3D(\n output_channels=5,\n kernel_shape=3,\n stride=1)\n\n conv1(tf.placeholder(tf.float32, [5, 10, 10, 10, 7]))\n\n with self.test_session():\n tf.variables_initializer([conv1.w, conv1.b]).run()\n\n self.assertAllClose(\n conv1.b.eval(),\n np.zeros([5], dtype=np.float32))", "def test_normal_dimension_mismatch(self):\n for mean, cov in [\n (0, [1, 2]),\n (np.array([1, 2]), np.array([1, 0])),\n (np.array([[-1, 0], [2, 1]]), np.eye(3)),\n ]:\n with self.subTest():\n err_msg = \"Mean and kernels mismatch in normal distribution did not raise a ValueError.\"\n with self.assertRaises(ValueError, msg=err_msg):\n assert prob.Normal(mean=mean, cov=cov)", "def testNoFeatureColumnsOrKernelMappers(self):\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier()", "def testKernelShape(self):\n\n snt.Conv3D(output_channels=10, kernel_shape=[3, 4, 5], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=3, name=\"conv1\")\n\n with self.assertRaisesRegexp(snt.Error, \"Invalid kernel shape.*\"):\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3, 3, 3], name=\"conv1\")", "def test_init_array_except(self):\n mat = self.rand_matrix(4, 4)\n self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])\n self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])\n self.assertRaises(QiskitError, Operator, mat, input_dims=5)", "def test_kernel_matrices(self):\n matrices, kernels = kernel_matrices(3, 5, kernel_function=RBF, length_scale=0.6)\n\n 
self.assertEqual(len(matrices), len(kernels))\n self.assertEqual(len(matrices), 10)\n for i in range(len(matrices)):\n self.assertEqual(matrices[i].shape, (5, 3))\n self.assertEqual(kernels[i].length_scale, 0.6)\n\n with self.assertRaises(RuntimeError) as context:\n kernel_matrices(6, 5, kernel_function=RBF, length_scale=0.6)\n self.assertEqual(context.exception.args[0], \"order must be larger than 1 and less than dim which is 5 but 6 \"\n \"was given.\")", "def test_kernel_matrix(kernel, sample):\n sample = [ele for ele in sample] # consumed several times\n\n potato = KernelMethod(kernel)\n mat = potato.matrix(sample)\n assert np.all(np.linalg.eigvals(mat) > 0) or np.isclose(\n [np.min(np.linalg.eigvals(mat))], [0]\n )", "def test_check_covariance_type(self):\n x = ds.array([[0, 0], [0, 1], [1, 0]], block_size=(3, 2))\n with self.assertRaises(ValueError):\n gm = GaussianMixture(covariance_type='')\n gm.fit(x)", "def test_param_cov_with_uncertainties(self, fitter):\n fitter = fitter()\n\n a = 2\n b = 100\n\n with NumpyRNGContext(_RANDOM_SEED):\n x = np.linspace(0, 1, 100)\n # y scatter is amplitude ~1 to make sure covariance is\n # non-negligible\n y = x * a + b + np.random.normal(size=len(x))\n sigma = np.random.normal(loc=1, scale=0.1, size=len(x))\n\n # compute the ordinary least squares covariance matrix\n # accounting for measurement uncertainties `sigma`\n X = np.vstack([x, np.ones(len(x))]).T\n inv_N = np.linalg.inv(np.diag(sigma) ** 2)\n cov = np.linalg.inv(X.T @ inv_N @ X)\n beta = cov @ X.T @ inv_N @ y.T\n\n # now do the non-linear least squares fit\n mod = models.Linear1D(a, b)\n\n with pytest.warns(AstropyUserWarning, match=r\"Model is linear in parameters\"):\n fmod = fitter(mod, x, y, weights=sigma**-1)\n\n assert_allclose(fmod.parameters, beta.ravel())\n assert_allclose(cov, fitter.fit_info[\"param_cov\"])", "def init_kernel(cls, m):\n pass", "def white(input_dim,variance=1.):\r\n part = parts.white.White(input_dim,variance)\r\n return kern(input_dim, [part])", "def test_calculate_variance_covariance_zero_division_shape(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.0)\n self.assertAlmostEqual(_var_covar[0][0], 0.006105992)\n self.assertAlmostEqual(_var_covar[0][1], 0.03925982)\n self.assertAlmostEqual(_var_covar[1][0], 0.03925982)\n self.assertAlmostEqual(_var_covar[1][1], -0.7475704)", "def test_uninitialized():\n random_state = check_random_state(0)\n mvn = MVN(random_state=random_state)\n assert_raises(ValueError, mvn.sample, 10)\n assert_raises(ValueError, mvn.to_probability_density, np.ones((1, 1)))\n assert_raises(ValueError, mvn.marginalize, np.zeros(0))\n assert_raises(ValueError, mvn.condition, np.zeros(0), np.zeros(0))\n assert_raises(ValueError, mvn.predict, np.zeros(0), np.zeros(0))\n assert_raises(ValueError, mvn.to_ellipse)\n mvn = MVN(mean=np.ones(2), random_state=random_state)\n assert_raises(ValueError, mvn.sample, 10)\n assert_raises(ValueError, mvn.to_probability_density, np.ones((1, 1)))\n assert_raises(ValueError, mvn.marginalize, np.zeros(0))\n assert_raises(ValueError, mvn.condition, np.zeros(0), np.zeros(0))\n assert_raises(ValueError, mvn.predict, np.zeros(0), np.zeros(0))\n assert_raises(ValueError, mvn.to_ellipse)", "def testBiasInitializerIsZeroByDefault(self):\n\n conv1 = snt.Conv3DTranspose(\n output_channels=7,\n kernel_shape=3,\n stride=1)\n\n conv1(tf.placeholder(tf.float32, [7, 10, 10, 10, 5]))\n\n with self.test_session():\n tf.variables_initializer([conv1.w, conv1.b]).run()\n\n self.assertAllClose(\n 
conv1.b.eval(),\n np.zeros([7], dtype=np.float32))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the assertion fires for a negative delta time
def test_to_delta_time_positive_difference(with_tf_random_seed, np_time_points):
    time_points = tf.constant(np_time_points, dtype=default_float())
    with pytest.raises(InvalidArgumentError) as exp:
        to_delta_time(time_points)
    assert exp.value.message.find("Condition x >= y") >= 0
[ "def testNegative(self):\n self.assertRaises(hf.NegativeDeltaError, hf.humanize, \n datetime(2011, 12, 31, 23, 59, 0),\n datetime(2012, 1, 1, 0, 0, 0))\n self.assertRaises(hf.NegativeDeltaError, hf.humanize,\n datetime(2012, 1, 1, 11, 59, 0),\n datetime(2012, 1, 1, 12, 0, 0))", "def test_timeout_elapsed_no_exception(self):\n deadline = Deadline(-MS)\n timeout = deadline.timeout(raise_if_elapsed=False)\n self.assertGreater(timeout, -2 * MS)\n self.assertLess(timeout, -MS)", "def test_negative_timedelta(self):\n @converters.wrap\n def inner_test(param: datetime.timedelta):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(\n lambda: inner_test(param='-60'), 3117\n )", "def test_certain_uncertain():\n\n delta = test_loop()\n u_delta = test_uncertain_loop()\n print\n print delta\n print u_delta\n assert np.all(delta[:, :2] != u_delta[:, :2])\n assert np.all(delta[:, 2] == u_delta[:, 2])\n uncertain_time_delay = 3\n u_delta = test_uncertain_loop(uncertain_time_delay)\n print u_delta\n assert np.all(delta[:, :2] == u_delta[:, :2])", "def test_validate_delta():\n with pytest.raises(ValueError):\n validate_delta(1.1)\n\n with pytest.raises(ValueError):\n validate_delta(-0.1)\n\n assert validate_delta(0.1) == 0.1", "def test_sub_timedate(self):\n t1 = Timestamp.utcnow()\n t2 = t1 - timedelta(days=1)\n td = t1 - t2\n self.assertIsInstance(td, timedelta)\n self.assertEqual(t2 + td, t1)", "def test_past(self):\n\n # Using seconds()\n start = time.time()\n pause.seconds(-5)\n end = time.time()\n self.assertEqual(int(end - start), 0)\n\n # Using until()\n start = time.time()\n pause.until(time.time() - 10)\n end = time.time()\n self.assertEqual(int(end - start), 0)", "def test_subtract_all_args_less_zero(self):\n try:\n self.assertEqual(subtract(-18, -5), -13)\n except Exception as error:\n print(error)", "def test_absolute_time(self):\n dt = datetime.datetime.now() + datetime.timedelta(0,1)\n job = self._scheduler.add_date_job(self._callback, dt)\n self.assert_event_triggered(dt)", "def test_time(self):\n self.assertEqual(type(float(0)), type(self.backup.time), \"backup.time should be a float number\")\n # The following test fails if test_make_backup() is run:\n # self.assertAlmostEqual(time.time(), self.backup.time, places=0 )", "def test_subtract_less_zero(self):\n result = calculation.subtract(-5,-6)\n self.assertEqual(result,1)", "def assertTimeGap(self, time1, time2, delta):\n if time1 < time2:\n self.assertLess(time2 - time1, timedelta(seconds=delta))\n else:\n self.assertLess(time1 - time2, timedelta(seconds=delta))", "def test_time_gap(self):\n return self._test_time_gap", "def test_sub_timedelta(self):\n t1 = Timestamp.utcnow()\n t2 = t1 - timedelta(days=1)\n if t1.month != t2.month:\n self.assertEqual(calendar.monthrange(t2.year, t2.month)[1], t2.day)\n else:\n self.assertEqual(t1.day - 1, t2.day)\n self.assertIsInstance(t2, Timestamp)", "def test_no_deltas(self):\r\n cmd = self.run(self.job(deltas=None), [\r\n self.filename('1d'),\r\n self.filename('5d'),\r\n ])\r\n assert cmd.backend.match([])", "def test_subtract_more_zero(self):\n result = calculation.subtract(10,5)\n self.assertEqual(result,5)", "def test_subtract_all_args_greater_zero(self):\n try:\n self.assertEqual(subtract(30, 16), 15)\n except Exception as error:\n print(f'Got error in {inspect.stack()[0][3]}, {error}')", "def test_bad_interval(self):\n # Intentionally set a small interval (3 min) to fail.\n interval = np.timedelta64(3, 'm')\n 
self.assertFalse(utils.check_timestamps(self.times, interval))", "def test_time(self):\n now = time.time()\n pause.seconds(5)\n end = time.time()\n\n # True if 5 seconds have passed\n diff = int(end - now)\n self.assertEqual(diff, 5)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the derivative of the logpdf with respect to the parameters.
def log_pdf_derivative(x):
    return gs.autodiff.jacobian(log_pdf_at_x(x))(base_point)
[ "def log_derivative(x):\n der = derivative(log,x,dx=1e-9)\n return der", "def logpdf(self, x):\n reg = self(x)\n nelem = tf.cast(tf.size(x), x.dtype)\n logz = nelem * (-math.log(self.stddev) - 0.5*math.log(2.0*math.pi))\n ll = -reg + self.weight*logz # weight already in reg\n\n return ll", "def gradient_logpdf(self, x):\n grads = self._gradient_unnormalized_loglikelihood(x) + \\\n self.prior.gradient_logpdf(x)\n\n # nan grads are result from -inf logpdf\n # return np.where(np.isnan(grads), 0, grads)[0]\n return grads", "def logpdf(x, dfn, dfd):\n\n with mp.extradps(5):\n x = mp.mpf(x)\n if x <= 0:\n return -mp.inf\n dfn = mp.mpf(dfn)\n dfd = mp.mpf(dfd)\n r = dfn / dfd\n hdfn = dfn / 2\n hdfd = dfd / 2\n lp = (hdfn * (mp.log(dfn) - mp.log(dfd))\n + xlogy(hdfn - 1, x)\n - xlog1py(hdfn + hdfd, r*x)\n - logbeta(hdfn, hdfd))\n return lp", "def logpdf(self, x):\n if self.transform is not None:\n x = self.transform(x) \n return (-self.alpha-1)*np.log(x) - (self.beta/float(x))", "def get_log_pdf(self, x):\n if self.dist==None:\n return self.logpdf\n else:\n return torch.log(self.get_pdf(x))", "def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S", "def logpdf(self, x, *args, **kwargs):\n return self.scipy_distribution.logpdf(x, *args, **kwargs, **self.scipy_distribution_arguments)", "def logPdf(self,x):\n logPdf = np.log(self.pdf(x))\n return logPdf", "def logpdf(self, X) -> np.ndarray:\n return self.dist.logpdf(self.inv_trans(X))", "def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)", "def logp_grad(self, xs, ys, fs, **kwargs):", "def logtrapzexp(lnf, dx):\n\n lnfdx1 = lnf[:-1]\n lnfdx2 = lnf[1:]\n if isinstance(dx, (int, float)):\n C = np.log(dx / 2.0)\n elif isinstance(dx, (list, np.ndarray)):\n if len(dx) != len(lnf) - 1:\n raise ValueError(\n \"Step size array must have length one less than the function length\"\n )\n\n lndx = np.log(dx)\n lnfdx1 = lnfdx1.copy() + lndx\n lnfdx2 = lnfdx2.copy() + lndx\n C = -np.log(2.0)\n else:\n raise TypeError(\"Step size must be a single value or array-like\")\n\n return C + logsumexp([logsumexp(lnfdx1), logsumexp(lnfdx2)])", "def logpdf(self, x):\n return self._unnormalized_loglikelihood(x) + self.prior.logpdf(x)", "def loglikelihood_dgamma(parameters, X):\n a = parameters[0]\n loc = parameters[1]\n scale = parameters[2]\n likelihood = dgamma.pdf(X, a, loc, scale)\n loglik = -sum(np.log(likelihood))\n return loglik", "def exp_pdf(x, l):\n return l * np.exp(-l * (x - 1))", "def dlogit(beta, Y, X):\n \n dlogit = sum(inverselogit(beta, Y, X)*(1 - inverselogit(beta, Y, X)))\n \n return(dlogit)", "def logpdf(self, X) -> np.ndarray:\n raise NotImplementedError", "def logistic_logpdf(x, *, mean, logscale):\n z = (x - mean) * torch.exp(-logscale)\n return z - logscale - 2 * F.softplus(z)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Compute the derivative of the innerproduct matrix. Compute the derivative of the innerproduct matrix of the Fisher information metric at the tangent space at base point.
def inner_product_derivative_matrix(self, base_point):
    def pdf(x):
        """Compute pdf at a fixed point on the support.

        Parameters
        ----------
        x : float, shape (,)
            Point on the support of the distribution
        """
        return lambda point: self.information_manifold.point_to_pdf(point)(x)

    def _function_to_integrate(x):
        pdf_x = pdf(x)
        pdf_x_at_base_point = pdf_x(base_point)
        pdf_x_derivative = gs.autodiff.jacobian(pdf_x)
        pdf_x_derivative_at_base_point = pdf_x_derivative(base_point)
        return (
            1
            / (pdf_x_at_base_point**2)
            * (
                2
                * pdf_x_at_base_point
                * gs.einsum(
                    "...ij, ...k -> ...ijk",
                    gs.autodiff.jacobian(pdf_x_derivative)(base_point),
                    pdf_x_derivative_at_base_point,
                )
                + gs.einsum(
                    "...i, ...j, ...k -> ...ijk",
                    pdf_x_derivative_at_base_point,
                    pdf_x_derivative_at_base_point,
                    pdf_x_derivative_at_base_point,
                )
            )
        )

    return quad_vec(_function_to_integrate, *self.support)[0]
[ "def inner_product_derivative_matrix(self, base_point=None):\n return gs.autodiff.jacobian_vec(self.metric_matrix)(base_point)", "def inner_product_derivative_matrix(self, base_point):\n\n def pdf(x):\n \"\"\"Compute pdf at a fixed point on the support.\n\n Parameters\n ----------\n x : float, shape (,)\n Point on the support of the distribution\n \"\"\"\n return lambda point: gs.squeeze(self._space.point_to_pdf(point)(x), axis=-1)\n\n def _function_to_integrate(x):\n pdf_x = pdf(x)\n (\n pdf_x_at_base_point,\n pdf_x_derivative_at_base_point,\n pdf_x_hessian_at_base_point,\n ) = gs.autodiff.value_jacobian_and_hessian(pdf_x)(base_point)\n\n return gs.einsum(\n \"...,...ijk->...ijk\",\n 1 / (pdf_x_at_base_point**2),\n gs.einsum(\n \"...,...ijk->...ijk\",\n pdf_x_at_base_point,\n gs.einsum(\n \"...ki,...j->...ijk\",\n pdf_x_hessian_at_base_point,\n pdf_x_derivative_at_base_point,\n )\n + gs.einsum(\n \"...kj,...i->...ijk\",\n pdf_x_hessian_at_base_point,\n pdf_x_derivative_at_base_point,\n ),\n )\n - gs.einsum(\n \"...i, ...j, ...k -> ...ijk\",\n pdf_x_derivative_at_base_point,\n pdf_x_derivative_at_base_point,\n pdf_x_derivative_at_base_point,\n ),\n )\n\n return quad_vec(_function_to_integrate, *self.support)[0]", "def integrability_tensor_derivative(\n self,\n horizontal_vec_x,\n horizontal_vec_y,\n nabla_x_y,\n tangent_vec_e,\n nabla_x_e,\n base_point,\n ):\n raise NotImplementedError", "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where they can be\r\n for i in np.arange(1,n-1 ): # for all but the first and last row\r\n M[i,i-1 ] = ( x_p[i] - x_p[i-1] ) / 6.\r\n M[i,i] = ( x_p[i+1] - x_p[i-1] ) / 3.\r\n M[i,i+1] = ( x_p[i+1] - x_p[i] ) /6.\r\n d[i,0 ] = ( y_p[i+1] - y_p[i] ) / ( x_p[i+1] - x_p[i] ) - ( y_p[i] - y_p[i-1] ) / ( x_p[i] - x_p[i-1] )\r\n \r\n M[0,0],M[-1,-1] = 1.,1. 
# compactly sets the BCs\r\n \r\n LU = lu.LU_decomp(M) # solves the matrix equations\r\n return lu.FB_sub(LU.Low, LU.Upp, d) # find and return 2nd derivatives\r", "def _derX(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c])\n return dfdx", "def _2ndderiv_xyz(self,x,y,z,i,j):\n return -np.pi*self._rhoc_M*self.a**3*self._b*self._c *\\\n _2ndDerivInt(x,y,z,self._a2,self._b2*self._a2,self._c2*self._a2,self.n,i,j)", "def det2x2(a):\n return a[..., 0, 0] * a[..., 1, 1] - a[..., 1, 0] * a[..., 0, 1]", "def _derY(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c])\n return dfdy", "def _matrix_delaunay_dy(self, PHI):\n return self.gradMy.dot(PHI)", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def _compute_deriv(self):\n\n # Connect trend data points with spline to get derivative at each point\n tck = interpolate.splrep(self.xinterp, self.trend, s=0.0)\n self.deriv = interpolate.splev(self.xinterp, tck, der=1)\n\n # compute derivative of polynomial at each interpolated data point\n # we need to reverse order of polynomial coefficients for input into poly1d\n poly = numpy.poly1d(self.params[self.numpoly - 1 :: -1])\n pd = numpy.polyder(poly)\n self.deriv += pd(self.xinterp - self.timezero)", "def derivative_matrix(g):\n\n def _(g):\n B = g.B[0].grad\n N = g.N[0]\n P = g.dec.P(1)\n H = np.vstack(P(B(i)) for i in range(N)).T\n return H\n\n return _(g), _(g.dual)", "def derivative(f, t):\n dfdt = np.empty_like(f)\n\n for i in range(2):\n t_i = t[i]\n t1 = t[0]\n t2 = t[1]\n t3 = t[2]\n t4 = t[3]\n t5 = t[4]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4])\n\n for i in range(2, len(t) - 2):\n t1 = t[i - 2]\n t2 = t[i - 1]\n t3 = t[i]\n t4 = t[i + 1]\n t5 = t[i + 2]\n h1 = t1 - t3\n h2 = t2 - t3\n h4 = t4 - t3\n h5 = t5 - t3\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n dfdt[i] = 
(-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2]\n + ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[i]\n + ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1]\n - ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2])\n\n for i in range(len(t) - 2, len(t)):\n t_i = t[i]\n t1 = t[-5]\n t2 = t[-4]\n t3 = t[-3]\n t4 = t[-2]\n t5 = t[-1]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1])\n\n return dfdt", "def derivative_matrix(self):\n from sage.matrix.constructor import matrix\n [a,b,c,d,e,f] = self.coefficients()\n return matrix([[ 2*a , b , c ],\n [ b , 2*d , e ],\n [ c , e , 2*f ]])", "def compute_derivs_matrices(vecs, adv_vecs, dt):\n return (adv_vecs - vecs)/(1.*dt)", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def second_derivative(self, h):\n grid = np.pad(self.grid, (1, 1), 'constant')[:, :, 1:-1]\n\n grid[0, :] = grid[1, :]\n grid[-1, :] = grid[-2, :]\n grid[:, 0] = grid[:, 1]\n grid[:, -1] = grid[:, -2]\n\n\n above = grid[0:-2, 1:-1]\n below = grid[2:, 1:-1]\n left = grid[1:-1, 0:-2]\n right = grid[1:-1, 2:]\n center = grid[1:-1, 1:-1]\n dxx = self.D_xx * (left + right - 2 * center) / (h ** 2)\n dyy = self.D_yy * (above + below - 2 * center) / (h ** 2)\n return dxx + dyy", "def derv(self, t, y):\n x = y[0];\n xc = y[1];\n n = y[2];\n\n Bhat = self.G * (1.0 - n) * self.alpha0(t) * (1 - 0.4 * x) * (1 - 0.4 * xc);\n\n dydt = np.zeros(3)\n\n dydt[0] = sp.pi / 12.0 * (xc + Bhat);\n dydt[1] = sp.pi / 12.0 * (self.mu * (xc - 4.0 / 3.0 * pow(xc, 3.0)) - x * (\n pow(24.0 / (0.99669 * self.taux), 2.0) + self.kparam * Bhat));\n dydt[2] = 60.0 * (self.alpha0(t) * (1.0 - n) - self.delta * n);\n\n return (dydt)", "def deriv(y, t, L1, L2, m1, m2):\n theta1, z1, theta2, z2 = y\n c, s = np.cos(theta1 - theta2), np.sin(theta1 - theta2)\n theta1dot = z1\n z1dot = (m2 * g * np.sin(theta2) * c - m2 * s * (L1 * z1 ** 2 * c + L2 * z2 ** 2) -\n (m1 + m2) * g * np.sin(theta1)) / L1 / (m1 + m2 * s ** 2)\n theta2dot = z2\n z2dot = ((m1 + m2) * (L1 * z1 ** 2 * s - g * np.sin(theta2) + g * np.sin(theta1) * c) +\n m2 * L2 * z2 ** 2 * s * c) / L2 / (m1 + m2 * s ** 2)\n return theta1dot, z1dot, theta2dot, z2dot" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the cost function given a set of features / values, and the values for our thetas.
def compute_cost(features, values, theta):
    # your code here
    error = (values - features.dot(theta))
    cost = error.dot(error)
    return cost
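A minimal usage sketch of the cost above, with illustrative NumPy arrays (the data here is made up, not taken from the dataset). Note that this version returns the plain sum of squared errors, without the 1/(2m) scaling some of the negatives below apply.

import numpy as np

features = np.array([[1.0, 2.0], [1.0, 3.0], [1.0, 4.0]])  # m x n design matrix
values = np.array([5.0, 7.0, 9.0])                          # m observed values
theta = np.array([1.0, 2.0])                                # n parameters

error = values - features.dot(theta)  # residual vector
cost = error.dot(error)               # sum of squared errors, as compute_cost returns
print(cost)                           # 0.0, since theta fits these points exactly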
[ "def compute_cost(features, values, theta):\n m = len(values)\n sum_of_square_errors = numpy.square(numpy.dot(features, theta) - values).sum()\n cost = sum_of_square_errors / (2*m)\n\n return cost", "def calculate_cost_function(self, test_set, theta_list, regularizar=True):\n n = len(test_set.index) # número de linhas\n cost_function = 0\n\n for _,entry in test_set.iterrows():\n y_k = []\n for i in self.output_columns:\n y_k.append(entry[i])\n f_k = self.predict(entry.drop(self.output_columns)) \n # y_k e f_k são listas (com as saídas esperadas pra cada neuronio na camada de saída)\n cummulative_sum = 0\n for i in range(len(f_k)):\n cummulative_sum += (-y_k[i] * np.log(f_k[i])) - ((1-y_k[i]) * np.log(1-f_k[i]))\n\n cost_function += cummulative_sum\n\n cost_function = cost_function/n\n\n theta_sum = 0\n for i in theta_list:\n theta_sum += (np.square(i[:,1:])).sum() # não somo o bias, tiro a primeira coluna\n\n if regularizar:\n cost_function += (self.reg_factor / (2*n)) * theta_sum\n\n return float(cost_function)", "def _cost_function(self):\n return np.sum((self._inp.dot(self._bias) - self._pred) ** 2) / (2 * self._data_set_len)", "def set_cost_functions(self):\n\n # Create functions and function variables for calculating the cost\n Q = ca.MX.sym('Q', self.Nx - 1, self.Nx - 1)\n P = ca.MX.sym('P', self.Nx - 1, self.Nx - 1)\n R = ca.MX.sym('R', self.Nu, self.Nu)\n\n x = ca.MX.sym('x', self.Nx)\n xr = ca.MX.sym('xr', self.Nx)\n u = ca.MX.sym('u', self.Nu)\n\n # Prepare variables\n p = x[0:3]\n v = x[3:6]\n q = x[6:10]\n w = x[10:]\n\n pr = xr[0:3]\n vr = xr[3:6]\n qr = xr[6:10]\n wr = xr[10:]\n\n # Calculate errors\n ep = p - pr\n ev = v - vr\n ew = w - wr\n eq = 0.5 * inv_skew(ca.mtimes(r_mat(qr).T, r_mat(q))\n - ca.mtimes(r_mat(q).T, r_mat(qr)))\n\n e_vec = ca.vertcat(*[ep, ev, eq, ew])\n\n # Calculate running cost\n ln = ca.mtimes(ca.mtimes(e_vec.T, Q), e_vec) \\\n + ca.mtimes(ca.mtimes(u.T, R), u)\n\n self.running_cost = ca.Function('ln', [x, xr, Q, u, R], [ln],\n self.fun_options)\n\n # Calculate terminal cost\n V = ca.mtimes(ca.mtimes(e_vec.T, P), e_vec)\n self.terminal_cost = ca.Function('V', [x, xr, P], [V],\n self.fun_options)\n\n return", "def _atomic_cost_function(self):\n\n self._TotalEnergy, AllEnergies = self._Net.energy_of_all_atomic_networks()\n with _tf.name_scope(\"cost_function\"):\n self._EnergyCost = _tf.divide(self.cost_for_network(\n self._TotalEnergy, self._OutputLayer, self.CostFunType),sum(self.NumberOfAtomsPerType))\n Cost = self._EnergyCost\n\n # add force cost\n if self.UseForce:\n self._OutputForce, AllForces = self._Net.force_of_all_atomic_networks(\n self)\n self._ForceCost = self.ForceCostParam* _tf.divide(\n self.cost_for_network(\n self._OutputForce, self._OutputLayerForce, self.CostFunType),\n sum(self.NumberOfAtomsPerType))\n Cost += self._ForceCost\n\n trainableVars = _tf.trainable_variables()\n regVars=[]\n for var in trainableVars:\n shape=var.get_shape()\n if shape[-1]!=1 and not(\"bias\" in var.name):\n regVars.append(var)\n if self.Regularization == \"L1\":\n\n l1_regularizer = _tf.contrib.layers.l1_regularizer(\n scale=self.RegularizationParam, scope=None)\n self._RegLoss = _tf.contrib.layers.apply_regularization(\n l1_regularizer, regVars)\n Cost += self._RegLoss\n elif self.Regularization == \"L2\":\n l2_regularizer=_tf.contrib.layers.l2_regularizer(\n scale=self.RegularizationParam, scope=None)\n self._RegLoss = _tf.contrib.layers.apply_regularization(\n l2_regularizer, regVars)\n Cost += self._RegLoss\n\n # Create tensor for energy difference 
calculation\n self._dE_Fun = _tf.divide(_tf.abs(self._TotalEnergy - self._OutputLayer),sum(self.NumberOfAtomsPerType))\n _tf.summary.scalar(\"delta_e\",_tf.reduce_mean(self._dE_Fun))\n\n return Cost", "def cost_func(weights)->float:\n\n cost = 0\n for ith_element in training_set:\n cost += math.pow(hypothesis_value(weights, ith_element[:-1]) - ith_element[-1], 2)\n return cost / 2", "def set_cost_functions(self):\n\n # Create functions and function variables for calculating the cost\n Q = ca.MX.sym('Q', self.Nx - 1, self.Nx - 1)\n P = ca.MX.sym('P', self.Nx - 1, self.Nx - 1)\n R = ca.MX.sym('R', self.Nu, self.Nu)\n\n x = ca.MX.sym('x', self.Nx)\n xr = ca.MX.sym('xr', self.Nx)\n u = ca.MX.sym('u', self.Nu)\n\n # Prepare variables\n p = x[0:3]\n v = x[3:6]\n q = x[6:10]\n w = x[10:]\n\n pr = xr[0:3]\n vr = xr[3:6]\n qr = xr[6:10]\n wr = xr[10:]\n\n # Calculate errors\n ep = p - pr\n ev = v - vr\n ew = w - wr\n eq = 0.5 * inv_skew(ca.mtimes(r_mat(qr).T, r_mat(q))\n - ca.mtimes(r_mat(q).T, r_mat(qr)))\n\n if not self.formation:\n e_vec = ca.vertcat(*[ep, ev, eq, ew])\n\n # Calculate running cost\n ca.mtimes(ca.mtimes(e_vec.T, Q), e_vec)\n ln = ca.mtimes(ca.mtimes(e_vec.T, Q), e_vec) \\\n + ca.mtimes(ca.mtimes(u.T, R), u)\n\n self.running_cost = ca.Function('ln', [x, xr, Q, u, R], [ln],\n self.fun_options)\n\n # Calculate terminal cost\n V = ca.mtimes(ca.mtimes(e_vec.T, P), e_vec)\n self.terminal_cost = ca.Function('V', [x, xr, P], [V],\n self.fun_options)\n else:\n if self.role == 'leader':\n # Set leader cost functions\n pass\n elif self.role == 'local_leader':\n # Set follower cost functions\n pass\n else:\n pass\n\n return", "def run_with_cost(self, inputs, outputs):\n f_dict = {self.input_placeholder: inputs, self.output_placeholder: outputs}\n _, cost = self.sess.run([self.optimizer, self.cost_function], feed_dict=f_dict)\n return cost", "def _construct_compute_costs(self):\n outputs = [self.joint_cost, self.nll_cost, self.kld_cost_1, \\\n self.kld_cost_2, self.reg_cost]\n func = theano.function(inputs=[ self.Xd, self.Xc, self.Xm ], \\\n outputs=outputs)\n return func", "def eval_cost(self, params, **kwargs):\n raise NotImplementedError", "def costFun(self, x):\n\ttmp = x.reshape(self.inp_shape)\n\tc = np.float64(self.calcCost(np.asarray(tmp,dtype=np.float32))) + self.alpha * np.dot(x.T, x)\n\treturn c", "def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd):\n X = np.reshape(params[:num_movies*num_features], (num_movies, num_features))\n Theta = np.reshape(params[num_movies*num_features:], (num_users, num_features))\n\n # J=sum((X*Theta'-Y)^2) where R[i,j]==1\n h = X.dot(Theta.T)-Y\n M = h**2\n J = (M*R).sum()/2\n reg = lbd/2*((X**2).sum()+(Theta**2).sum())\n J = J+reg\n\n X_grad = (h*R).dot(Theta)+lbd*X\n Theta_grad = (h*R).T.dot(X)+lbd*Theta\n\n grad = np.r_[X_grad.flatten(), Theta_grad.flatten()]\n return J, grad", "def calculate_cost(theta_values, data):\n population = data[:,0]\n prices = data[:,1]\n total_error = 0\n for i in range(0,len(population)):\n x = array([[1],[population[i]]])\n hypothesis = theta_values.dot(x).flatten() \n squared_error = (hypothesis - prices[i])**2\n total_error += squared_error\n return .5*total_error/len(population) #division by m is just a scaling factor since we're only interested in whether this function is minimized", "def cost_function(self, config_samples):\n cost = self.work_tracker(config_samples)\n return cost", "def setup(self, cost_function1,cost_function2, tf_compatible):\n if tf_compatible and self.tf_sess is None:\n 
raise RuntimeError(\"Cannot pass in a tf.Tensor-valued cost function without passing in a TensorFlow \"\n \"session into the constructor\")\n\n self.tf_compatible = tf_compatible\n\n if not tf_compatible:\n self.cost_function = cost_function1\n else:\n def continue_optimization(t, mean, var, best_val, best_sol, goal_pos, cost_choice):\n return tf.cond(\n tf.equal(cost_choice, 0),\n lambda: tf.logical_and(tf.less(t, self.max_iters), tf.reduce_max(var) > self.epsilon),\n lambda: tf.logical_and(tf.less(t, 2), tf.reduce_max(var) > self.epsilon)\n )\n\n def sample_random_trajectories( n_samples, n_dims, time_horizon):\n samples = []\n sigma = 3\n steps_per_basis = 5\n max_basis = time_horizon//5\n trajectory_generator = GRBFTrajectory(n_dims, sigma, steps_per_basis, max_basis)\n for _ in range(n_samples):\n m = 2. * np.random.random(n_dims*max_basis) - 1.\n traj = trajectory_generator.trajectory(m)\n sample = []\n for ac in traj:\n for x in ac:\n sample.append(x)\n samples.append(sample)\n return tf.print( tf.convert_to_tensor( samples), [\"GBRF\"])\n # ~ return tf.convert_to_tensor([[ 2*random.random()-1 for _ in range(7*time_horizon)] for _ in range(n_samples)], dtype=tf.float32)\n \n def iteration(t, mean, var, best_val, best_sol, goal_pos, cost_choice):\n\n lb_dist, ub_dist = mean - self.lb, self.ub - mean\n constrained_var = tf.minimum(tf.minimum(tf.square(lb_dist / 2), tf.square(ub_dist / 2)), var)\n \n \n samples = tf.cond(\n tf.equal(cost_choice, 0),\n lambda: tf.truncated_normal([self.popsize, self.sol_dim], mean, tf.sqrt(constrained_var)),\n lambda: tf.truncated_normal([500, self.sol_dim], mean, tf.sqrt(constrained_var))\n )\n \n # ~ samples = tf.cond(\n # ~ tf.equal(mean[0],0.),\n # ~ # to replace with a generation coming from a DMP\n # ~ lambda: sample_random_trajectories(self.popsize,7,self.sol_dim//7),\n # ~ lambda: tf.truncated_normal([self.popsize, self.sol_dim], mean, tf.sqrt(constrained_var))\n # ~ )\n\n costs = tf.cond(\n tf.equal(cost_choice, 0),\n lambda: cost_function1(samples, goal_pos),\n lambda: cost_function2(samples, goal_pos)\n )\n\n\n values, indices = tf.nn.top_k(-costs, k=self.num_elites, sorted=True)\n\n best_val, best_sol = tf.cond(\n tf.less(-values[0], best_val),\n lambda: (-values[0], samples[indices[0]]),\n lambda: (best_val, best_sol)\n )\n\n elites = tf.gather(samples, indices)\n\n new_mean = tf.reduce_mean(elites, axis=0)\n new_var = tf.reduce_mean(tf.square(elites - new_mean), axis=0)\n\n mean = self.alpha * mean + (1 - self.alpha) * new_mean\n var = self.alpha * var + (1 - self.alpha) * new_var\n\n return t + 1, mean, var, best_val, best_sol, goal_pos, cost_choice\n\n with self.tf_sess.graph.as_default():\n self.num_opt_iters, self.mean, self.var, self.best_val, self.best_sol, _, _ = tf.while_loop(\n cond=continue_optimization, body=iteration,\n loop_vars=[0, self.init_mean, self.init_var, float(\"inf\"),\n self.init_mean, self.init_goal_pos, self.init_cost_choice]\n )", "def costFun(self, S, x):", "def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, reg_lambda, returnCostOnly=False,\n returnGradOnly=False):\n\n # Unfold the U and W matrices from params\n X = params[0:num_movies * num_features].reshape((num_movies, num_features))\n Theta = params[num_movies * num_features:].reshape((num_users, num_features))\n\n errors = (X.dot(Theta.T) - Y) * R\n J = 1 / 2 * np.sum(np.sum(errors ** 2))\n\n penalty = (reg_lambda / 2) * (np.sum(np.sum(Theta ** 2)) + np.sum(np.sum(X ** 2)))\n J = J + penalty\n\n X_grad = errors.dot(Theta) + reg_lambda * 
X\n Theta_grad = errors.T.dot(X) + reg_lambda * Theta\n\n grad = np.r_[X_grad.flatten(), Theta_grad.flatten()]\n\n if returnGradOnly:\n return grad.flatten()\n if returnCostOnly:\n return J\n\n return J, grad", "def compute_functional_and_gradients(self):\n x = self.x.as_numpy_array()\n self.f = self.target.compute_functional(x)\n self.g = self.target.compute_gradients(x)\n return self.f, flex.double(self.g)", "def _cost_method(self, *args, **kwargs):\n\n return np.sum([operator.cost(data) for operator, data in\n zip(self.operators, args[0])])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate yj = rj + gamma * max_a Q(s1, a), or yj = rj for a terminating state. This is the target value used to train the neural network, and it uses the target network to make the predictions.
def get_target(self, batch):
    # initialise array to store yj values
    target = np.zeros((len(batch[0]), self.num_actions))

    # loop over samples in the minibatch
    for j in range(len(batch[0])):
        a0_i = self.action_str2idx(batch[1][j])
        r0 = batch[2][j]
        done = batch[3][j]
        s1 = batch[4][j]

        # if terminating state
        if done:
            target[j, a0_i] = r0
        else:
            qs_target = self.target_Qmodel.predict(s1)
            target[j, a0_i] = r0 + self.gamma * np.max(qs_target)

    return target
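A standalone sketch of the same target rule for a single transition; gamma, the reward, and the target-network output below are illustrative values, not taken from the agent class above.

import numpy as np

gamma = 0.99
r0, done = 1.0, False
qs_target = np.array([[0.2, 0.7, 0.1]])  # target network prediction for s1

# yj = rj for a terminal state, otherwise rj + gamma * max_a Q_target(s1, a)
yj = r0 if done else r0 + gamma * np.max(qs_target)
print(yj)  # ~1.693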
[ "def calc_target_q(self, **kwargs):\n feed_dict = {\n self.obs_input: kwargs['obs'],\n self.feat_input: kwargs['feature']\n }\n\n \n t_q, e_q = self.sess.run([self.t_q, self.e_q], feed_dict=feed_dict)\n act_idx = np.argmax(e_q, axis=1)\n q_values = t_q[np.arange(len(t_q)), act_idx]\n q_values=q_values.reshape(-1)\n q_v=[]\n i=0\n # pdb.set_trace()\n for k in 1.-np.array(kwargs['dones']):\n if k:\n q_v.append(q_values[i])\n i+=1\n else:\n q_v.append(0)\n q_v=np.array(q_v)\n target_q_value = kwargs['rewards'] + q_v* self.gamma\n \n return target_q_value", "def _build_target_q_op(self):\n # Get the maximum Q-value across the actions dimension.\n replay_next_qt_max = tf.reduce_max(\n self._replay_next_target_net_outputs.q_values, 1)\n\n # Calculate the Bellman target value.\n # Q_t = R_t + \\gamma^N * Q'_t+1\n # where,\n # Q'_t+1 = \\argmax_a Q(S_t+1, a)\n # (or) 0 if S_t is a terminal state,\n # and\n # N is the update horizon (by default, N=1).\n # Here, R_t is augmented for AL:\n # R_t = R_t + Q_target(s_t, a_t) - max_a Q_target(s_t, a),\n replay_target_q = tf.reduce_max(\n self._replay_target_net_outputs.q_values,\n axis=1,\n name='replay_max_target_q')\n\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n replay_target_q_al = tf.reduce_sum(\n self._replay_target_net_outputs.q_values * replay_action_one_hot,\n axis=1,\n name='replay_chosen_target_q_al')\n\n if self._clip > 0.:\n al_bonus = self._alpha * tf.clip_by_value(\n tf.nn.relu(replay_target_q_al - replay_target_q),\n 0., self._clip)\n else:\n al_bonus = self._alpha * tf.nn.relu(\n replay_target_q_al - replay_target_q)\n\n rewards = self._replay.rewards + al_bonus\n\n update_target = rewards + self.cumulative_gamma * replay_next_qt_max * (\n 1. - tf.cast(self._replay.terminals, tf.float32))\n\n return (al_bonus, update_target)", "def Q_net(self, state):\n\t\tif not self._prediction_made: \n\t\t\tQ = tf.matmul(tf.nn.relu( tf.matmul(state, self.weights_hidden) + self.bias_hidden ), self.weights_out) + self.bias_out \n\t\t\tself._Qval = Q\t\n\t\t\tself._prediction_made = True\n\t\treturn self._Qval", "def evaluate(self, state):\n with torch.no_grad():\n target_value = self.target_model(state).max(1)[0].view(1, 1)\n return target_value", "def _build_target_q_op(self):\n q_values_next = self.online_convnet(self._replay.next_states)\n best_actions = tf.math.argmax(tf.squeeze(q_values_next), axis=1)\n q_values_next_target = self.target_convnet(self._replay.next_states)\n bb = tf.stack([np.arange(best_actions.get_shape().as_list()[0]), tf.squeeze(best_actions)], axis=-1)\n return self._replay.rewards + self.cumulative_gamma * \\\n tf.gather_nd(tf.squeeze(q_values_next_target), bb) * (\n 1. - tf.cast(self._replay.terminals, tf.float32))", "def predict_target(self, state):\r\n return np.max(self.sess.run(self.target_q_values, {self.target_state: state}).flatten())", "def __getAndUpdateLearningRate(self):\n rate = self.gamma_0 / (1 + (self.gamma_0 / self.d) * self.t)\n self.t += 1\n return rate", "def _build_target_q_op(self):\n # Get the maximum Q-value across the actions dimension for each head.\n replay_next_qt_max = tf.reduce_max(\n self._replay_next_target_net_outputs.q_networks, axis=1)\n is_non_terminal = 1. 
- tf.cast(self._replay.terminals, tf.float32)\n is_non_terminal = tf.expand_dims(is_non_terminal, axis=-1)\n rewards = tf.expand_dims(self._replay.rewards, axis=-1)\n return rewards + (\n self.cumulative_gamma * replay_next_qt_max * is_non_terminal)", "def train_replay(self):\n\n if len(self.memory) < self.train_start:\n return\n\n if self.epsilon > self.epsilon_end:\n self.epsilon -= self.epsilon_decay_step\n\n mini_batch = random.sample(self.memory, self.batch_size)\n\n history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n next_history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n\n # Initialize the Value targets to optimize\n v_target = np.zeros((self.batch_size,))\n\n action, reward, dead = [], [], []\n\n for i in range(self.batch_size):\n history[i] = np.float32(mini_batch[i][0] / 255.)\n next_history[i] = np.float32(mini_batch[i][3] / 255.)\n action.append(mini_batch[i][1])\n reward.append(mini_batch[i][2])\n dead.append(mini_batch[i][4])\n\n # current state-action values Q(st, at)\n q_outputs = self.q_duelling_part.predict(history)\n\n # TD-values for updating the networks coming from the target model\n if self.target_model is True:\n v_target_value = self.target_v_duelling_part.predict(next_history)\n elif self.target_model is False:\n v_target_value = self.v_duelling_part.predict(next_history)\n\n q_targets = []\n\n for i in range(self.batch_size):\n if dead[i]:\n v_target[i] = reward[i]\n q_outputs[i][action[i]] = reward[i]\n\n else:\n v_target[i] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n q_outputs[i][action[i]] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n\n q_targets.append(q_outputs[i][action[i]])\n\n self.optimizer([history, action, q_targets]) # optimize the state-action-value head\n self.v_duelling_part.fit(history, v_target, epochs=1, verbose=0) # optimize the state-value head", "def learn(self):\r\n \r\n # take a mini-batch from replay experience\r\n cur_batch_size = min(len(self.replay_exp), self.batch_size)\r\n mini_batch = random.sample(self.replay_exp, cur_batch_size)\r\n \r\n # batch data\r\n sample_states = np.ndarray(shape = (cur_batch_size, self.state_size)) # replace 128 with cur_batch_size\r\n sample_actions = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_rewards = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_next_states = np.ndarray(shape = (cur_batch_size, self.state_size))\r\n sample_dones = np.ndarray(shape = (cur_batch_size, 1))\r\n\r\n temp=0\r\n for exp in mini_batch:\r\n sample_states[temp] = exp[0]\r\n sample_actions[temp] = exp[1]\r\n sample_rewards[temp] = exp[2]\r\n sample_next_states[temp] = exp[3]\r\n sample_dones[temp] = exp[4]\r\n temp += 1\r\n \r\n \r\n sample_qhat_next = self.brain_target.predict(sample_next_states)\r\n \r\n # set all Q values terminal states to 0\r\n sample_qhat_next = sample_qhat_next * (np.ones(shape = sample_dones.shape) - sample_dones)\r\n # choose max action for each state\r\n sample_qhat_next = np.max(sample_qhat_next, axis=1)\r\n \r\n sample_qhat = self.brain_policy.predict(sample_states)\r\n \r\n for i in range(cur_batch_size):\r\n a = sample_actions[i,0]\r\n sample_qhat[i,int(a)] = sample_rewards[i] + self.gamma * sample_qhat_next[i]\r\n \r\n q_target = sample_qhat\r\n \r\n self.brain_policy.fit(sample_states, q_target, epochs = 1, verbose = 0)\r\n \r\n \r\n \r\n \"\"\"\r\n \r\n for state, action, reward, next_state, done in mini_batch:\r\n target_Q_s_a = 0 # new target for 
Q(s,a)\r\n state = np.reshape(state, [1, state_size])\r\n next_state = np.reshape(next_state, [1, state_size])\r\n \r\n # if it is not the terminal state\r\n if not done:\r\n qhat_next = self.brain_target.predict(next_state) # estimate Q(s',a')\r\n target_Q_s_a = reward + self.gamma * np.amax(qhat_next[0]) # because the output is m * n, so we need to consider the dimension [0]\r\n else:\r\n target_Q_s_a = reward\r\n \r\n target_output = self.brain_policy.predict(state) # we will replace target of Q(s,a) for specific a later\r\n target_output[0][action] = target_Q_s_a # new target for state s and action a\r\n \r\n self.brain_policy.fit(state, target_output, epochs = 1, verbose = 0)\r\n \r\n \"\"\"", "def back_propagation(self, x_input, target):\n if self.output is None:\n return\n # 誤差逆伝搬では順伝搬で計算したニューロン出力値を使う\n u_hidden = self.output['u_hidden']\n y_hidden = self.output['y_hidden']\n y_output = self.output['y_output']\n\n # 隠れ層 - 出力層間の重みを更新\n # 出力層の活性化関数は恒等関数なので、φ_o'(u_k) = 1\n delta_o = []\n for y_output_k, target_k in zip(y_output, target):\n delta_o.append(y_output_k - target_k)\n\n for j, y_hidden_j in enumerate(y_hidden):\n for k, delta_o_k in enumerate(delta_o):\n self.params['W_HIDDEN'][j][k] += -self.LEARNING_RATE * y_hidden_j * delta_o_k\n\n # 入力層 - 隠れ層間の重みを更新\n # φ_h'(u_j)はReLUの微分\n delta_relu = [value > 0 for value in u_hidden]\n # delta_w1_tmpは Σ_k{ δ_output_k * w_jk } * φ_h'(u_j) までの計算\n delta_w1_dot = self._poor_dot(delta_o, self.params['W_HIDDEN'])\n delta_w1_tmp = []\n for delta_relu_j, delta_w1_dot_j in zip(delta_relu, delta_w1_dot):\n delta_w1_tmp.append(delta_relu_j * delta_w1_dot_j)\n\n for i, x_input_i in enumerate(x_input):\n for j, delta_w1_j in enumerate(delta_w1_tmp):\n self.params['W_INPUT'][i][j] += -self.LEARNING_RATE * x_input_i * delta_w1_j", "def calculate_reward(self):\n # Update the GD model and perform gradient descent optimisation on the examples\n emb_indices = np.array([i for i in range(self.BATCH_SIZE)])\n self.reset_GD_model_embedding()\n\n self.GD_model.fit(\n x=[emb_indices],\n y=[self.target_params, self.target_pcs],\n batch_size=self.BATCH_SIZE,\n epochs=self.opt_iter,\n verbose=0\n )\n\n # Get the new predicted parameters and compute the MSE\n # @TODO: should this be the PC MSE instead?\n new_pred_params = np.squeeze(self.GD_model.get_layer(\"pred_embedding\").get_weights())\n #params_mse = (np.square(self.target_params - new_pred_params)).mean(axis=-1)\n params_mse = (np.square(A2CEnv.centred_mod((self.target_params - new_pred_params), np.pi))).mean(axis=-1)\n\n # Convert the MSE into a 'reward' and determine whether each example is finished or not\n #reward = np.squeeze(np.array([self.reward_factor * np.exp(-params_mse/self.reward_scale)]))\n reward = np.squeeze(np.array(np.exp(-np.sqrt(params_mse)/self.reward_scale) - 0.2))\n #reward = -params_mse\n self.done = np.array((params_mse <= self.epsilon) | (self.steps_taken >= (self.step_limit - 1)) | (self.done))\n reward[self.done] = 0.0 # no reward given to finished examples\n #print(\"reward shape: \" +str(reward.shape))\n\n return reward, self.done", "def update_predict_network(self):\n states, actions, rewards, new_states, is_terminals = self.memory.sample(self.batch_size)\n\n preprocessed_states, preprocessed_new_states = self.preprocessor.process_batch(states, new_states)\n\n actions = self.preprocessor.process_action(actions)\n # update network\n q_values = self.cal_target_q_values(preprocessed_new_states)\n max_q_values = np.max(q_values, axis=1)\n max_q_values[is_terminals] = 0.0\n targets 
= rewards + self.gamma * max_q_values\n targets = np.expand_dims(targets, axis=1)\n\n self.q_network.train_on_batch([preprocessed_states, actions], targets)\n if self.num_steps % self.target_update_freq ==0:\n print(\"Update target network at %d steps\" % self.num_steps)\n self.update_target_network()", "def update(Q, target_Q, opt, samples, gamma=0.99, target_type='double_dqn'):\n xp = Q.xp\n obs = xp.asarray([sample[0] for sample in samples], dtype=np.float32)\n action = xp.asarray([sample[1] for sample in samples], dtype=np.int32)\n reward = xp.asarray([sample[2] for sample in samples], dtype=np.float32)\n done = xp.asarray([sample[3] for sample in samples], dtype=np.float32)\n obs_next = xp.asarray([sample[4] for sample in samples], dtype=np.float32)\n # Predicted values: Q(s,a)\n y = F.select_item(Q(obs), action)\n # Target values: r + gamma * max_b Q(s',b)\n with chainer.no_backprop_mode():\n if target_type == 'dqn':\n next_q = F.max(target_Q(obs_next), axis=1)\n elif target_type == 'double_dqn':\n next_q = F.select_item(target_Q(obs_next),\n F.argmax(Q(obs_next), axis=1))\n else:\n raise ValueError('Unsupported target_type: {}'.format(target_type))\n target = reward + gamma * (1 - done) * next_q\n loss = mean_clipped_loss(y, target)\n Q.cleargrads()\n loss.backward()\n opt.update()", "def train(self, min_epochs=1.0, max_epochs=10,\n batch_size=128, loss_limit=0.015,\n learning_rate=1e-3):\n print(\"Optimizing Neural Network to better estimate Q-values ...\")\n print(\"\\tLearning-rate: {0:.1e}\".format(learning_rate))\n print(\"\\tLoss-limit: {0:.3f}\".format(loss_limit))\n print(\"\\tMax epochs: {0:.1f}\".format(max_epochs))\n # Prepare the probability distribution for sampling the replay-memory.\n self.replay_memory.prepare_sampling_prob(batch_size=batch_size)\n # Number of optimization iterations corresponding to one epoch.\n iterations_per_epoch = self.replay_memory.num_used / batch_size\n # Minimum number of iterations to perform.\n min_iterations = int(iterations_per_epoch * min_epochs)\n # Maximum number of iterations to perform.\n max_iterations = int(iterations_per_epoch * max_epochs)\n # Buffer for storing the loss-values of the most recent batches.\n loss_history = np.zeros(100, dtype=float)\n clone_model_update_frequency = int(max_iterations / 10)\n for i in range(max_iterations):\n\n if i % clone_model_update_frequency == 0:\n self.clone_network()\n\n # Randomly sample a batch of states and target Q-values\n # from the replay-memory. 
These are the Q-values that we\n # want the Neural Network to be able to estimate.\n state_batch, q_values_batch, next_states_batch, rewards_batch, end_episode_batch = self.replay_memory.random_batch()\n # Create a feed-dict for inputting the data to the TensorFlow graph.\n # Note that the learning-rate is also in this feed-dict.\n\n target_q = self.get_cloned_q_values(next_states_batch)\n #target_q_max = np.argmax(target_q, axis=1)\n #target_q = np.array(rewards_batch) + ((1 - np.array(end_episode_batch)) * (self.replay_memory.discount_factor * np.array(target_q)))\n #target_q = rewards_batch + (self.replay_memory.discount_factor * target_q)\n\n for i in range(len(end_episode_batch)):\n if end_episode_batch[i]:\n shape = np.shape(target_q[i])\n target_q[i] = np.full(shape, rewards_batch[i])\n else:\n target_q[i] = target_q[i] * self.replay_memory.discount_factor\n target_q[i] = target_q[i] + rewards_batch[i]\n\n\n feed_dict = {self.x: state_batch,\n self.target_q_values: target_q,\n self.learning_rate: learning_rate}\n # Perform one optimization step and get the loss-value.\n loss_val, _ = self.sess.run([self.loss, self.train_step],\n feed_dict=feed_dict)\n # Shift the loss-history and assign the new value.\n # This causes the loss-history to only hold the most recent values.\n loss_history = np.roll(loss_history, 1)\n loss_history[0] = loss_val\n # Calculate the average loss for the previous batches.\n loss_mean = np.mean(loss_history)\n # Print status.\n pct_epoch = i / iterations_per_epoch\n msg = \"\\tIteration: {0} ({1:.2f} epoch), Batch loss: {2:.4f}, Mean loss: {3:.4f}\"\n msg = msg.format(i, pct_epoch, loss_val, loss_mean)\n print_progress(msg)\n\n # Stop the optimization if we have performed the required number\n # of iterations and the loss-value is sufficiently low.\n if i > min_iterations and loss_mean < loss_limit:\n break\n # Print newline.\n print()", "def _build_target_q_op(self):\n targets = []\n for gamma, target_q in zip(self.gammas,\n self._replay_next_target_net_outputs.q_values):\n # Get the maximum Q-value across the actions dimension.\n replay_next_qt_max = tf.reduce_max(target_q, 1)\n\n # Calculate the Bellman target value.\n # Q_t = R_t + \\gamma^N * Q'_t+1\n # where,\n # Q'_t+1 = \\argmax_a Q(S_t+1, a)\n # (or) 0 if S_t is a terminal state,\n # and\n # N is the update horizon (by default, N=1).\n cumulative_gamma = math.pow(gamma, self.update_horizon)\n n_step_reward = self._build_discounted_n_step_rewards(gamma)\n targets.append(n_step_reward + cumulative_gamma * replay_next_qt_max *\n (1. 
- tf.cast(self._replay.terminals, tf.float32)))\n return targets", "def result(self):\r\n # TODO: how about xcurrent?\r\n return self.best.get() + (\r\n self.countevals, self.countiter, self.gp.pheno(self.mean), self.gp.scales * self.sigma * self.sigma_vec * self.dC**0.5)", "def value_iteration(self):\n #Create a utility function of the environment shape\n gamma = 0.9\n epsilon = 0.01\n iteration = 0\n\n #create a utility function that matches the size of the number of states\n u = np.zeros(self.env.observation_space.n, dtype=float)\n\n u_copy = u.copy()\n\n #Create the reward grid\n reward = np.array([state_map.get(sublist) for state in frozen_lake.MAPS[self.env.spec._kwargs.get('map_name')] for sublist in state])\n\n T = self.frozen_transition()\n\n graph_list = list()\n\n #keep track of the convergence\n policy_convergence = list()\n\n while True:\n delta = 0\n iteration += 1\n u = u_copy.copy()\n graph_list.append(u)\n start_time = time()\n for s in range(self.env.observation_space.n):\n r = reward[s]\n v = np.zeros((1, self.env.observation_space.n), dtype=float)\n v[0, s] = 1.0\n u_copy[s] = self.return_state_utility(v, T, u, r, gamma)\n delta = max(delta, np.abs(u_copy[s] - u[s]))\n policy_convergence.append({'iter': iteration, 'delta': delta})\n if delta < epsilon * (1 - gamma) / gamma:\n print(\"Total Iterations: {}\".format(iteration))\n print(\"=================== VALUE ITERATION RESULT ==================\")\n print(\"Iterations: \" + str(iteration))\n print(\"Delta: \" + str(delta))\n print(\"Gamma: \" + str(gamma))\n print(\"Epsilon: \" + str(epsilon))\n print(\"Time to converge: {} seconds\".format(time() - start_time))\n print(\"===================================================\")\n utility_reshape = np.reshape(u, (int(np.sqrt(self.env.observation_space.n)), int(np.sqrt(self.env.observation_space.n))))\n print (np.array(utility_reshape, dtype=float))\n print(\"===================================================\")\n break\n\n return u", "def getQValue(self, state, action):\n \"\"\" YOUR CODE HERE \"\"\"\n # initialilze q value to zero\n q_value = 0\n # get all the features for state, action\n features = self.featExtractor.getFeatures(state, action, self)\n # step through each feature, multiply by appropriate weight\n for feature in features:\n q_value += features[feature] #* self.weights[feature]\n return q_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Small function to build the correct argtypes for the LibXC computers
def _build_comute_argtype(num_nd, num_nd_write):
    ret = [_xc_func_p, ctypes.c_size_t]
    ret += [_ndptr] * num_nd
    ret += [_ndptr_w] * num_nd_write
    return tuple(ret)
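A hedged usage sketch: the handle name `core` and the specific entry point are assumptions for illustration, not taken from the module above. For a LibXC compute routine that reads two arrays and writes one (e.g. a GGA energy evaluation taking rho and sigma and filling zk), the wiring would look roughly like:

# Illustrative: for a routine that reads 2 arrays and writes 1, the returned
# tuple is (xc_func_p, c_size_t, ndptr, ndptr, ndptr_w), matching a call of
# the form xc_gga_exc(func, np, rho, sigma, zk). `core` is assumed to be the
# loaded libxc shared-library handle.
core.xc_gga_exc.argtypes = _build_comute_argtype(2, 1)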
[ "def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args", "def determine_arg_locations(self, arg_types): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def convert_to_impl_arguments(self):\n\n # Create local shadows, for convenience.\n args = self.args\n toolchain = self.toolchain\n\n cmake = CMake(args=args,\n toolchain=self.toolchain)\n\n impl_args = [\n \"--workspace\", self.workspace.source_root,\n \"--build-dir\", self.workspace.build_root,\n \"--install-prefix\", args.install_prefix,\n \"--host-target\", args.host_target,\n \"--stdlib-deployment-targets={}\".format(\n \" \".join(args.stdlib_deployment_targets)),\n \"--host-cc\", toolchain.cc,\n \"--host-cxx\", toolchain.cxx,\n \"--darwin-xcrun-toolchain\", args.darwin_xcrun_toolchain,\n \"--darwin-deployment-version-osx=%s\" % (\n args.darwin_deployment_version_osx),\n \"--darwin-deployment-version-ios=%s\" % (\n args.darwin_deployment_version_ios),\n \"--darwin-deployment-version-tvos=%s\" % (\n args.darwin_deployment_version_tvos),\n \"--darwin-deployment-version-watchos=%s\" % (\n args.darwin_deployment_version_watchos),\n \"--cmake\", toolchain.cmake,\n \"--llvm-build-type\", args.llvm_build_variant,\n \"--swift-build-type\", args.swift_build_variant,\n \"--swift-stdlib-build-type\", args.swift_stdlib_build_variant,\n \"--lldb-build-type\", args.lldb_build_variant,\n \"--foundation-build-type\", args.foundation_build_variant,\n \"--libdispatch-build-type\", args.libdispatch_build_variant,\n \"--libicu-build-type\", args.libicu_build_variant,\n \"--xctest-build-type\", args.build_variant,\n \"--llbuild-build-type\", args.build_variant,\n \"--swift-enable-assertions\", str(args.swift_assertions).lower(),\n \"--swift-stdlib-enable-assertions\", str(\n args.swift_stdlib_assertions).lower(),\n \"--swift-analyze-code-coverage\", str(\n args.swift_analyze_code_coverage).lower(),\n \"--llbuild-enable-assertions\", str(\n args.llbuild_assertions).lower(),\n \"--lldb-assertions\", str(\n args.lldb_assertions).lower(),\n \"--cmake-generator\", args.cmake_generator,\n \"--cross-compile-append-host-target-to-destdir\", str(\n args.cross_compile_append_host_target_to_destdir).lower(),\n \"--build-jobs\", str(args.build_jobs),\n \"--lit-jobs\", str(args.lit_jobs),\n \"--common-cmake-options=%s\" % ' '.join(\n shlex.quote(opt) for opt in cmake.common_options()),\n \"--build-args=%s\" % ' '.join(\n shlex.quote(arg) for arg in cmake.build_args()),\n \"--dsymutil-jobs\", str(args.dsymutil_jobs),\n '--build-swift-libexec', str(args.build_swift_libexec).lower(),\n '--swift-enable-backtracing', str(args.swift_enable_backtracing).lower(),\n '--build-swift-remote-mirror', str(args.build_swift_remote_mirror).lower(),\n ]\n\n # Compute any product specific cmake arguments.\n #\n # NOTE: The sum(list(...)) is b/c compute_product_pipelines returns a\n # tuple of lists of which the first is the build-script-impl products\n # and the second is the non-build-script-impl-products. 
It guarantees\n # that when we concatenate these two lists together we get a valid\n # dependency graph.\n for product_class in sum(list(self.compute_product_pipelines()[0]), []):\n if not product_class.is_build_script_impl_product():\n continue\n\n product_name = product_class.product_name()\n product_source_name = product_class.product_source_name()\n source_dir = self.workspace.source_dir(product_source_name)\n\n if not os.path.exists(source_dir):\n fatal_error(\n \"can't find source directory for %s \"\n \"(tried %s)\" % (product_name, source_dir))\n\n product = product_class(\n args=args,\n toolchain=self.toolchain,\n source_dir=source_dir,\n # FIXME: This is incorrect since it always assumes the host\n # target I think?\n build_dir=self.workspace.build_dir(\n args.host_target, product_name))\n cmake_opts = product.cmake_options\n\n # FIXME: We should be using shlex.quote here but we run into issues\n # with build-script-impl/cmake not being happy with all of the\n # extra \"'\" in the strings. To fix this easily, we really need to\n # just invoke cmake from build-script directly rather than futzing\n # with build-script-impl. This makes even more sense since there\n # really isn't a security issue here.\n if cmake_opts:\n impl_args += [\n \"--{}-cmake-options={}\".format(\n product_name, ' '.join(cmake_opts))\n ]\n\n if args.build_toolchain_only:\n impl_args += [\n \"--build-toolchain-only=1\"\n ]\n\n if args.build_stdlib_deployment_targets:\n impl_args += [\n \"--build-stdlib-deployment-targets\", \" \".join(\n args.build_stdlib_deployment_targets)]\n if args.cross_compile_hosts:\n impl_args += [\n \"--cross-compile-hosts\", \" \".join(args.cross_compile_hosts)]\n if args.cross_compile_deps_path is not None:\n impl_args += [\n \"--cross-compile-deps-path=%s\" % args.cross_compile_deps_path\n ]\n\n if args.test_paths:\n impl_args += [\"--test-paths\", \" \".join(args.test_paths)]\n\n if toolchain.ninja:\n impl_args += [\"--ninja-bin=%s\" % toolchain.ninja]\n if args.distcc:\n impl_args += [\n \"--distcc\",\n \"--distcc-pump=%s\" % toolchain.distcc_pump\n ]\n if args.sccache:\n args.cmake_c_launcher = toolchain.sccache\n args.cmake_cxx_launcher = toolchain.sccache\n\n # *NOTE* We use normal cmake to pass through tsan/ubsan options. We do\n # NOT pass them to build-script-impl.\n if args.enable_asan:\n impl_args += [\"--enable-asan\"]\n # If we are on linux, disable leak detection when running ASAN. We\n # have a separate bot that checks for leaks.\n if platform.system() == 'Linux':\n os.environ['ASAN_OPTIONS'] = 'detect_leaks=0'\n if args.enable_ubsan:\n impl_args += [\"--enable-ubsan\"]\n\n # If we have lsan, we need to export our suppression list. The actual\n # passing in of the LSAN flag is done via the normal cmake method. 
We\n # do not pass the flag to build-script-impl.\n if args.enable_lsan:\n supp_file = os.path.join(SWIFT_SOURCE_ROOT, SWIFT_REPO_NAME,\n \"utils\",\n \"lsan_leaks_suppression_list.txt\")\n os.environ['LSAN_OPTIONS'] = 'suppressions={}'.format(supp_file)\n if args.verbose_build:\n impl_args += [\"--verbose-build\"]\n if args.install_symroot:\n impl_args += [\n \"--install-symroot\", os.path.abspath(args.install_symroot)\n ]\n if args.install_destdir:\n impl_args += [\n \"--install-destdir\", os.path.abspath(args.install_destdir)\n ]\n\n if args.skip_build:\n impl_args += [\"--skip-build\"]\n if not args.build_benchmarks:\n impl_args += [\"--skip-build-benchmarks\"]\n\n if args.swift_disable_dead_stripping:\n args.extra_cmake_options.append('-DSWIFT_DISABLE_DEAD_STRIPPING:BOOL=TRUE')\n if args.build_backdeployconcurrency:\n args.extra_cmake_options.append(\n '-DSWIFT_BACK_DEPLOY_CONCURRENCY:BOOL=TRUE')\n\n swift_syntax_src = os.path.join(self.workspace.source_root,\n \"swift-syntax\")\n args.extra_cmake_options.append(\n '-DSWIFT_PATH_TO_SWIFT_SYNTAX_SOURCE:PATH={}'.format(swift_syntax_src))\n\n if args.build_early_swiftsyntax:\n impl_args += [\"--swift-earlyswiftsyntax\"]\n\n # Then add subproject install flags that either skip building them /or/\n # if we are going to build them and install_all is set, we also install\n # them.\n conditional_subproject_configs = [\n (args.build_llvm, \"llvm\"),\n (args.build_swift, \"swift\"),\n (args.build_foundation, \"foundation\"),\n (args.build_xctest, \"xctest\"),\n (args.build_lldb, \"lldb\"),\n (args.build_llbuild, \"llbuild\"),\n (args.build_libcxx, \"libcxx\"),\n (args.build_libdispatch, \"libdispatch\"),\n (args.build_libicu, \"libicu\"),\n (args.build_libxml2, 'libxml2'),\n (args.build_zlib, 'zlib'),\n (args.build_curl, 'curl')\n ]\n for (should_build, string_name) in conditional_subproject_configs:\n if not should_build and not self.args.infer_dependencies:\n impl_args += [\"--skip-build-{}\".format(string_name)]\n elif self.install_all:\n impl_args += [\"--install-{}\".format(string_name)]\n\n if args.build_swift_dynamic_stdlib:\n impl_args += [\"--build-swift-dynamic-stdlib\"]\n if args.build_swift_static_stdlib:\n impl_args += [\"--build-swift-static-stdlib\"]\n if args.build_swift_stdlib_unittest_extra:\n impl_args += [\"--build-swift-stdlib-unittest-extra\"]\n if args.build_swift_dynamic_sdk_overlay:\n impl_args += [\"--build-swift-dynamic-sdk-overlay\"]\n if args.build_swift_static_sdk_overlay:\n impl_args += [\"--build-swift-static-sdk-overlay\"]\n\n if not args.build_android:\n impl_args += [\"--skip-build-android\"]\n if not args.build_clang_tools_extra:\n impl_args += [\"--skip-build-clang-tools-extra\"]\n\n if not args.test and not args.long_test and not args.stress_test:\n impl_args += [\"--skip-test-swift\"]\n if not args.test:\n impl_args += [\n \"--skip-test-lldb\",\n \"--skip-test-llbuild\",\n \"--skip-test-xctest\",\n \"--skip-test-foundation\",\n \"--skip-test-libdispatch\",\n \"--skip-test-libicu\",\n ]\n if args.build_runtime_with_host_compiler:\n impl_args += [\"--build-runtime-with-host-compiler\"]\n if args.validation_test:\n impl_args += [\"--validation-test\"]\n if args.long_test:\n impl_args += [\"--long-test\"]\n if args.stress_test:\n impl_args += [\"--stress-test\"]\n if args.skip_local_build:\n impl_args += [\"--skip-local-build\"]\n if args.only_executable_test:\n impl_args += [\"--only-executable-test\"]\n if not args.benchmark:\n impl_args += [\"--skip-test-benchmarks\"]\n if args.android:\n impl_args += [\n 
\"--android-arch\", args.android_arch,\n \"--android-ndk\", args.android_ndk,\n \"--android-api-level\", args.android_api_level,\n ]\n # If building natively on an Android host, only pass the API level.\n if StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget\n .host_target().name):\n impl_args += [\"--android-api-level\", args.android_api_level]\n if args.android_deploy_device_path:\n impl_args += [\n \"--android-deploy-device-path\",\n args.android_deploy_device_path,\n ]\n\n if platform.system() == 'Darwin':\n impl_args += [\n \"--toolchain-prefix\",\n targets.darwin_toolchain_prefix(\n args.install_prefix),\n \"--host-lipo\", toolchain.lipo,\n ]\n\n # Isolate build from the system; Darwin toolchains build against SDKs.\n # For additional isolation, disable pkg-config. Homebrew's pkg-config\n # prioritizes CommandLineTools paths, resulting in compile errors.\n args.extra_cmake_options += [\n '-DCMAKE_IGNORE_PATH=/usr/lib;/usr/local/lib;/lib',\n '-DPKG_CONFIG_EXECUTABLE=/usr/bin/false',\n ]\n\n if toolchain.libtool is not None:\n impl_args += [\n \"--host-libtool\", toolchain.libtool,\n ]\n if args.native_clang_tools_path is not None:\n impl_args += [\n \"--native-clang-tools-path=%s\" % args.native_clang_tools_path\n ]\n if args.native_llvm_tools_path is not None:\n impl_args += [\n \"--native-llvm-tools-path=%s\" % args.native_llvm_tools_path\n ]\n if args.native_swift_tools_path is not None:\n impl_args += [\n \"--native-swift-tools-path=%s\" % args.native_swift_tools_path\n ]\n\n # If we have extra_swift_args, combine all of them together and then\n # add them as one command.\n if args.extra_swift_args:\n impl_args += [\n \"--extra-swift-args=%s\" % ';'.join(args.extra_swift_args)\n ]\n\n # Enable macCatalyst\n if args.maccatalyst:\n (args.extra_cmake_options\n .append('-DSWIFT_ENABLE_MACCATALYST:BOOL=TRUE'))\n if args.maccatalyst_ios_tests:\n impl_args += [\"--darwin-test-maccatalyst-ios-like=1\"]\n\n # Provide a fixed backtracer path, if required\n if args.swift_runtime_fixed_backtracer_path is not None:\n impl_args += [\n '--swift-runtime-fixed-backtracer-path=%s' %\n args.swift_runtime_fixed_backtracer_path\n ]\n\n # If we have extra_cmake_options, combine all of them together and then\n # add them as one command.\n if args.extra_cmake_options:\n impl_args += [\n \"--extra-cmake-options=%s\" % ' '.join(\n shlex.quote(opt) for opt in args.extra_cmake_options)\n ]\n\n if args.lto_type is not None:\n impl_args += [\n \"--llvm-enable-lto=%s\" % args.lto_type,\n \"--swift-tools-enable-lto=%s\" % args.lto_type\n ]\n if args.llvm_max_parallel_lto_link_jobs is not None:\n impl_args += [\n \"--llvm-num-parallel-lto-link-jobs=%s\" %\n min(args.llvm_max_parallel_lto_link_jobs, args.build_jobs)\n ]\n if args.swift_tools_max_parallel_lto_link_jobs is not None:\n impl_args += [\n \"--swift-tools-num-parallel-lto-link-jobs=%s\" %\n min(args.swift_tools_max_parallel_lto_link_jobs,\n args.build_jobs)\n ]\n\n if args.bootstrapping_mode is not None:\n impl_args += [\n \"--bootstrapping=%s\" % args.bootstrapping_mode,\n ]\n\n impl_args += args.build_script_impl_args\n\n if args.dry_run:\n impl_args += [\"--dry-run\"]\n\n if args.reconfigure:\n impl_args += [\"--reconfigure\"]\n\n if args.clang_profile_instr_use:\n impl_args += [\n \"--clang-profile-instr-use=%s\" %\n os.path.abspath(args.clang_profile_instr_use)\n ]\n\n if args.lit_args:\n impl_args += [\"--llvm-lit-args=%s\" % args.lit_args]\n\n if args.coverage_db:\n impl_args += [\n \"--coverage-db=%s\" %\n 
os.path.abspath(args.coverage_db)\n ]\n\n if args.llvm_install_components:\n impl_args += [\n \"--llvm-install-components=%s\" % args.llvm_install_components\n ]\n\n # On non-Darwin platforms, build lld so we can always have a\n # linker that is compatible with the swift we are using to\n # compile the stdlib.\n #\n # This makes it easier to build target stdlibs on systems that\n # have old toolchains without more modern linker features.\n #\n # On Darwin, only build lld if explicitly requested using --build-lld.\n should_build_lld = (platform.system() != 'Darwin' or args.build_lld)\n if not should_build_lld:\n impl_args += [\n \"--skip-build-lld\"\n ]\n\n if not args.clean_libdispatch:\n impl_args += [\n \"--skip-clean-libdispatch\"\n ]\n\n if not args.clean_foundation:\n impl_args += [\n \"--skip-clean-foundation\"\n ]\n\n if not args.clean_xctest:\n impl_args += [\n \"--skip-clean-xctest\"\n ]\n\n if not args.clean_llbuild:\n impl_args += [\n \"--skip-clean-llbuild\"\n ]\n\n if args.llvm_ninja_targets:\n impl_args += [\n \"--llvm-ninja-targets=%s\" % ' '.join(args.llvm_ninja_targets)\n ]\n\n if args.llvm_ninja_targets_for_cross_compile_hosts:\n impl_args += [\n \"--llvm-ninja-targets-for-cross-compile-hosts=%s\" %\n ' '.join(args.llvm_ninja_targets_for_cross_compile_hosts)\n ]\n\n if args.darwin_symroot_path_filters:\n impl_args += [\n \"--darwin_symroot_path_filters=%s\" %\n ' '.join(args.darwin_symroot_path_filters)\n ]\n\n # Compute the set of host-specific variables, which we pass through to\n # the build script via environment variables.\n host_specific_variables = self.compute_host_specific_variables()\n impl_env = {}\n for (host_target, options) in host_specific_variables.items():\n for (name, value) in options.items():\n # We mangle into an environment variable we can easily evaluate\n # from the `build-script-impl`.\n impl_env[\"HOST_VARIABLE_{}__{}\".format(\n host_target.replace(\"-\", \"_\"), name)] = value\n\n return (impl_env, impl_args)", "def read_cmd_arguments(no_of_layers, no_of_kernels):\n\n\tconfig = sys.argv[1]\n\tparam = genfromtxt(config, delimiter=',')\n\tprint(param)\n\n\tk_type = genfromtxt('kernels.csv', delimiter=',')\n\n\treturn param, k_type", "def MacroArgTypes(self,name,ixArg):\n if name not in self.macros['list']:\n raise Exception('Program Bug -- name \"%s\" is not a macro' % name);\n ix = self.macros['list'].index(name);\n return self.macros['args'][ix][ixArg][1:];", "def generate_python_argument_types(argtypes: Union[List, str], outdir: str, prefix: str = 'sc', types=None):\n if type(argtypes) is str:\n argtypes = json.load(open(argtypes, 'r'))\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n type_to_args = collections.defaultdict(set)\n for arg in argtypes:\n argtype = argtypes[arg]\n if types is not None:\n if argtype not in types:\n continue\n type_to_args[argtype].add(arg)\n for argtype in type_to_args:\n real_args = sorted(list(type_to_args[argtype]))\n arguments_to_python(real_args, argtype, outdir, prefix)", "def get_args_type(java_args):\n if len(java_args) == 0:\n return 'JNIEnv* env, jobject thiz'\n jargs = java_args.lower()\n args = jargs.split(', ')\n # print 'arg count:', len(args)\n full_arg = 'JNIEnv* env, jobject thiz, '\n i = 1\n for java_arg in args:\n java_type = java_arg.split(' ')[0]\n full_arg += get_jni_type(java_type)\n full_arg += ' arg'\n full_arg += str(i)\n full_arg += ', '\n i += 1\n\n return full_arg[:-2]", "def _create_args(self, func_args):\n self.llvm_ret_type = self._from_ctype(self.signature.ret_type)\n 
self.llvm_arg_types = \\\n [self._from_ctype(a) for a in self.signature.arg_ctypes]", "def parse_abc_args(abc_args):\n abc_flow_type = \"iterative_bb\"\n abc_run_args = \"\"\n lut_size = None\n use_old_latches_restoration_script = False\n if \"iterative_bb\" in abc_args:\n abc_flow_type = \"iterative_bb\"\n del abc_args[\"iterative_bb\"]\n if \"blanket_bb\" in abc_args:\n abc_flow_type = \"blanket_bb\"\n del abc_args[\"blanket_bb\"]\n if \"once_bb\" in abc_args:\n abc_flow_type = \"once_bb\"\n del abc_args[\"once_bb\"]\n if \"use_old_latches_restoration_script\" in abc_args:\n use_old_latches_restoration_script = True\n del abc_args[\"use_old_latches_restoration_script\"]\n if \"lut_size\" in abc_args:\n lut_size = abc_args[\"lut_size\"]\n del abc_args[\"lut_size\"]\n\n for arg, value in abc_args.items():\n if isinstance(value, bool) and value:\n abc_run_args += [\"--\" + arg]\n elif isinstance(value, (str, int, float)):\n abc_run_args += [\"--\" + arg, str(value)]\n else:\n pass\n return abc_args, abc_flow_type, lut_size, abc_run_args, use_old_latches_restoration_script", "def dev_args(devnames):\n devc = len(devnames)\n devnames_type = ctypes.c_char_p * devc\n devnames_arg = devnames_type()\n for idx, val in enumerate(devnames):\n devnames_arg[idx] = (val + chr(0)).encode('ascii')\n return ctypes.c_int(devc), ctypes.cast(\n devnames_arg, ctypes.POINTER(ctypes.c_char_p)\n )", "def Args(parser):\n flags.AddPlatformArg(parser, managed_only=True)", "def arg_type(self):\n\n arg_type = self.ctype\n\n if 'int' in arg_type:\n arg_type = 'int'\n\n if self.is_list:\n arg_type = 'list of {}'.format(arg_type)\n\n if 'required' in self.qualifiers:\n arg_type = \"{}, optional\".format(arg_type)\n\n return arg_type", "def process(self, tool_args, pytype_args):\n # Override in subclasses", "def parse_arguments(args):", "def dev_args(self,devnames):\n devc = len(devnames)\n devnames_type = ctypes.c_char_p * devc\n devnames_arg = devnames_type()\n for idx, val in enumerate(devnames):\n devnames_arg[idx] = (val + chr(0)).encode('ascii')\n return ctypes.c_int(devc), ctypes.cast(\n devnames_arg, ctypes.POINTER(ctypes.c_char_p)\n )", "def _container_specification_args(self):\n args = [\n \"--distro=\" + self._distro,\n \"--release=\" + self._distro_version,\n \"--local\"\n ]\n\n if self._distro_arch:\n args.append(\"--arch=\" + self._distro_arch)\n\n return args", "def __init__(self, host, port, quiet, **kwargs):\n super(Bentham32, self).__init__(kwargs['lib_path'], 'cdll', host, port, quiet)\n\n self.lib.BI_automeasure.restype = c_int\n self.lib.BI_automeasure.argtypes = [POINTER(c_double)]\n self.lib.BI_autorange.restype = c_int\n self.lib.BI_autorange.argtypes = []\n self.lib.BI_build_group.restype = c_int\n self.lib.BI_build_group.argtypes = []\n self.lib.BI_build_system_model.restype = c_int\n self.lib.BI_build_system_model.argtypes = [c_char_p, c_char_p]\n self.lib.BI_close.restype = c_int\n self.lib.BI_close.argtypes = []\n self.lib.BI_close_shutter.restype = c_int\n self.lib.BI_close_shutter.argtypes = []\n self.lib.BI_component_select_wl.restype = c_int\n self.lib.BI_component_select_wl.argtypes = [c_char_p, c_double, POINTER(c_long)]\n self.lib.BI_get.restype = c_int\n self.lib.BI_get.argtypes = [c_char_p, c_int, c_int, POINTER(c_double)]\n self.lib.BI_get_c_group.restype = c_int\n self.lib.BI_get_c_group.argtypes = [POINTER(c_int)]\n self.lib.BI_get_component_list.restype = c_int\n self.lib.BI_get_component_list.argtypes = [c_char_p]\n self.lib.BI_get_group.restype = c_int\n 
self.lib.BI_get_group.argtypes = [c_int, c_char_p]\n self.lib.BI_get_hardware_type.restype = c_int\n self.lib.BI_get_hardware_type.argtypes = [c_char_p, POINTER(c_int)]\n self.lib.BI_get_mono_items.restype = c_int\n self.lib.BI_get_mono_items.argtypes = [c_char_p, c_char_p]\n self.lib.BI_get_no_of_dark_currents.restype = c_int\n self.lib.BI_get_no_of_dark_currents.argtypes = [POINTER(c_int)]\n self.lib.BI_get_zero_calibration_info.restype = c_int\n self.lib.BI_get_zero_calibration_info.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]\n self.lib.BI_group_add.restype = c_int\n self.lib.BI_group_add.argtypes = [c_char_p, c_int]\n self.lib.BI_group_remove.restype = c_int\n self.lib.BI_group_remove.argtypes = [c_char_p, c_int]\n self.lib.BI_initialise.restype = c_int\n self.lib.BI_initialise.argtypes = []\n self.lib.BI_load_setup.restype = c_int\n self.lib.BI_load_setup.argtypes = [c_char_p]\n self.lib.BI_measurement.restype = c_int\n self.lib.BI_measurement.argtypes = [POINTER(c_double)]\n self.lib.BI_multi_autorange.restype = c_int\n self.lib.BI_multi_autorange.argtypes = []\n self.lib.BI_multi_get_no_of_dark_currents.restype = c_int\n self.lib.BI_multi_get_no_of_dark_currents.argtypes = [c_int, POINTER(c_int)]\n self.lib.BI_multi_get_zero_calibration_info.restype = c_int\n self.lib.BI_multi_get_zero_calibration_info.argtypes = [c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]\n self.lib.BI_multi_initialise.restype = c_int\n self.lib.BI_multi_initialise.argtypes = []\n self.lib.BI_multi_measurement.restype = c_int\n self.lib.BI_multi_measurement.argtypes = [POINTER(c_double)]\n self.lib.BI_multi_select_wavelength.restype = c_int\n self.lib.BI_multi_select_wavelength.argtypes = [c_double, POINTER(c_long)]\n self.lib.BI_multi_zero_calibration.restype = c_int\n self.lib.BI_multi_zero_calibration.argtypes = [c_double, c_double]\n self.lib.BI_park.restype = c_int\n self.lib.BI_park.argtypes = []\n self.lib.BI_read.restype = c_int\n self.lib.BI_read.argtypes = [c_char_p, c_short, POINTER(c_short), c_char_p]\n self.lib.BI_report_error.restype = c_int\n self.lib.BI_report_error.argtypes = []\n self.lib.BI_save_setup.restype = c_int\n self.lib.BI_save_setup.argtypes = [c_char_p]\n self.lib.BI_select_wavelength.restype = c_int\n self.lib.BI_select_wavelength.argtypes = [c_double, POINTER(c_long)]\n self.lib.BI_send.restype = c_int\n self.lib.BI_send.argtypes = [c_char_p, c_char_p]\n self.lib.BI_set.restype = c_int\n self.lib.BI_set.argtypes = [c_char_p, c_int, c_int, c_double]\n self.lib.BI_trace.restype = c_int\n self.lib.BI_trace.argtypes = [c_int]\n self.lib.BI_use_group.restype = c_int\n self.lib.BI_use_group.argtypes = [c_int]\n self.lib.BI_version.restype = None\n self.lib.BI_version.argtypes = [c_char_p]\n self.lib.BI_zero_calibration.restype = c_int\n self.lib.BI_zero_calibration.argtypes = [c_double, c_double]\n self.lib.BI_camera_get_zero_calibration_info.restype = c_int\n self.lib.BI_camera_get_zero_calibration_info.argtypes = [c_char_p, POINTER(c_double), POINTER(c_double), POINTER(c_double)]\n self.lib.BI_camera_measurement.restype = c_int\n self.lib.BI_camera_measurement.argtypes = [c_char_p, c_int, POINTER(c_double)]\n self.lib.BI_camera_zero_calibration.restype = c_int\n self.lib.BI_camera_zero_calibration.argtypes = [c_char_p, c_double, c_double]\n self.lib.BI_delete_group.restype = c_int\n self.lib.BI_delete_group.argtypes = [c_int]\n self.lib.BI_display_advanced_window.restype = c_int\n self.lib.BI_display_advanced_window.argtypes = [c_char_p, 
HINSTANCE]\n self.lib.BI_display_setup_window.restype = c_int\n self.lib.BI_display_setup_window.argtypes = [c_char_p, HINSTANCE]\n self.lib.BI_get_log.restype = c_int\n self.lib.BI_get_log.argtypes = [c_char_p]\n self.lib.BI_get_log_size.restype = c_int\n self.lib.BI_get_log_size.argtypes = [POINTER(c_int)]\n self.lib.BI_get_max_bw.restype = c_int\n self.lib.BI_get_max_bw.argtypes = [c_int, c_double, c_double, POINTER(c_double)]\n self.lib.BI_get_min_step.restype = c_int\n self.lib.BI_get_min_step.argtypes = [c_int, c_double, c_double, POINTER(c_double)]\n self.lib.BI_get_n_groups.restype = c_int\n self.lib.BI_get_n_groups.argtypes = [POINTER(c_int)]\n self.lib.BI_get_str.restype = c_int\n self.lib.BI_get_str.argtypes = [c_char_p, c_int, c_int, c_char_p]\n self.lib.BI_Mapped_Logging.restype = None\n self.lib.BI_Mapped_Logging.argtypes = [c_int]\n self.lib.BI_multi_automeasure.restype = c_int\n self.lib.BI_multi_automeasure.argtypes = [POINTER(c_double)]\n self.lib.BI_multi_park.restype = c_int\n self.lib.BI_multi_park.argtypes = []\n self.lib.BI_start_log.restype = c_int\n self.lib.BI_start_log.argtypes = [c_char_p]\n self.lib.BI_stop_log.restype = c_int\n self.lib.BI_stop_log.argtypes = [c_char_p]", "def getOpArgumentsInternalTypes(self):\n\t\traise Exception(\"Abstract method IOperation.getOpArgumentsInternalTypes not implemented in: \" + str(self))", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the LibXCFunctional family.
def get_family(self):
    return self._family
[ "def device_family(self):\n return self._dll.JLINKARM_GetDeviceFamily()", "def get_device_family(self, strict = False):\r\n\t\tc = self.get_device_category(self.category_device_family, strict)\r\n\t\tif c == None:\r\n\t\t\treturn None\r\n\t\treturn c.get_value()", "def read_device_family(self):\n family = ctypes.c_int()\n\n result = self._lib.NRFJPROG_read_device_family(ctypes.byref(family))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)\n\n return DeviceFamily(family.value).name", "def family(self):", "def get_family_name(self):\n return self.family_name", "def GetFamily(*args, **kwargs):\n return _gdi_.Font_GetFamily(*args, **kwargs)", "def msix_package_family_name(self) -> Optional[str]:\n return pulumi.get(self, \"msix_package_family_name\")", "def GetFamilyType(self):\r\n\r\n return self._family_type", "def model_family(self) -> str:\n return self._model_family", "def GetFamilyString(*args, **kwargs):\n return _gdi_.Font_GetFamilyString(*args, **kwargs)", "def getFamilyName(self):\n return _libsbml.ModelCreator_getFamilyName(self)", "def test_02_GetFamilyObj1(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_1\n l_obj = FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-02-A - Family'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_1)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 1)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'Insteon_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.Insteon')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'Insteon_xml')", "def get_xtag_family(self, primary, secondary):\n return self.xtag_mapping.get((primary, secondary))", "def family(self) -> str:\n return self._learner.family + \"-chanceconstrained\"", "def search_family(self, family):", "def navigation_type(self):\n return 'Family'", "def get_nh_family(self):\n return int(self.get('nhr_family'))", "def get_family_attribute_types(self):\n return list(self.family_attributes)", "def family_name(self):\n return FAMILY_NAME" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the VV10 (b, C) coefficients
def get_vv10_coef(self):
    if self._nlc_b is False:
        raise ValueError("get_vv10_coeff can only be called on -V functionals.")
    return (self._nlc_b, self._nlc_C)
[ "def coefficients(self):\n\t return self.coef_['x']", "def coefficients(self):\r\n return self.coef_['x']", "def coefficients(self) :\n raise NotImplementedError", "def b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients):\n\tBCoefficients = np.array([\t((y2-y1)/(x2-x1)-CCoefficients[0]*(x2-x1) - DCoefficients[0]*((x2-x1)**2)), \\\n\t\t\t\t\t\t\t\t((y3-y2)/(x3-x2)-CCoefficients[1]*(x3-x2) - DCoefficients[1]*((x3-x2)**2)) \t]).astype(float)\n\treturn(BCoefficients)", "def polynomial_coefficients(self):\n x = self.predictions_vector()\n if self.svd == None:\n X = self.prediction_matrix()\n self.svd = svd(X)\n U,s,Vh = self.svd\n \n n = self.get_signal_count(s)\n \n invs = 1.0/self.bias_filter(s,n)\n \n invSig = np.diag(invs[:n])\n a = np.dot(Vh.T[:,:n],\n np.dot(invSig,\n np.dot(U[:,:n].T, x)\n )\n )\n return a", "def coefficients(self) :\n return self.__coefficients", "def test_coefficients(self):\n\n coefs = self.cs.coefficients\n\n self.assertEqual(coefs, (1, 0, 1, 0, 0, -1))", "def _retrieve_cwv_coefficients(self, subrange):\n b0 = COLUMN_WATER_VAPOR[subrange].b0\n b1 = COLUMN_WATER_VAPOR[subrange].b1\n b2 = COLUMN_WATER_VAPOR[subrange].b2\n b3 = COLUMN_WATER_VAPOR[subrange].b3\n b4 = COLUMN_WATER_VAPOR[subrange].b4\n b5 = COLUMN_WATER_VAPOR[subrange].b5\n b6 = COLUMN_WATER_VAPOR[subrange].b6\n b7 = COLUMN_WATER_VAPOR[subrange].b7\n\n cwv_coefficients = (b0,\n b1,\n b2,\n b3,\n b4,\n b5,\n b6,\n b7)\n\n return cwv_coefficients", "def coefficients(self):\n\t\tpolynomials=self.dom.getElementsByTagName('polynomial')\n\t\tassert len(polynomials)==2, \"Was expecitng two sets of coefficients\"\n\t\t# I'm assuming the low T comes first. \n\t\tassert (float(polynomials[0].getElementsByTagName('bound')[0].firstChild.data) < \n\t\t float(polynomials[1].getElementsByTagName('bound')[0].firstChild.data) ) # check ordering assumption\n\t\thighTcoeffNodes=polynomials[1].getElementsByTagName('coefficient')\n\t\tlowTcoeffNodes=polynomials[0].getElementsByTagName('coefficient')\n\t\thighTcoeffs=[]\n\t\tfor coef in highTcoeffNodes:\n\t\t\tname=coef.getAttribute('label')\n\t\t\tvalue=float(coef.firstChild.data)\n\t\t\thighTcoeffs.append(value)\n\t\tlowTcoeffs=[]\n\t\tfor coef in lowTcoeffNodes:\n\t\t\tname=coef.getAttribute('label')\n\t\t\tvalue=float(coef.firstChild.data)\n\t\t\tlowTcoeffs.append(value)\n\t\tallCoeffs=numpy.array([lowTcoeffs, highTcoeffs])\n\t\treturn allCoeffs", "def _compute_coeffs(W, tol_svd, b, option):\n\tif option == 'partial':\n\t\tU, S, VT = LA.svd(W)\n\telse:\n\t\tU, S, VT = LA.svd(W, full_matrices=False)\n\n\t# Make sure the condition number is not too high (!!! 
\"low\" Lu)\n\tindices = S < tol_svd\n\tS[indices] = tol_svd\n\tSinv = np.diag(1/S)\n\tV = VT.T\n\t# Get the coefficients\n\tlambda_0 = reduce(np.dot, [V, Sinv, U.T, b])\n\treturn (lambda_0)", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def get_coeff(self):\n\t\treturn self.coeff.round(6).transpose()[0]", "def langevin_coefficients(\n temperature,\n dt,\n friction,\n masses):\n vscale = np.exp(-dt*friction)\n if friction == 0:\n fscale = dt\n else:\n fscale = (1-vscale)/friction\n kT = BOLTZ * temperature\n nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale\n invMasses = 1.0/masses\n sqrtInvMasses = np.sqrt(invMasses)\n\n ca = vscale\n cb = fscale*invMasses\n cc = nscale*sqrtInvMasses\n return ca, cb, cc", "def coefficients(self):\n\t\tcoefficientsets=self.dom.getElementsByTagName('coefficients')\n\t\tassert len(coefficientsets)==1, \"there should only be one set of coefficients\"\n\t\tcoeffs=coefficientsets[0]\n\t\thighTcoeffNodes=coeffs.getElementsByTagName('range_1000_to_Tmax')[0].getElementsByTagName('coef')\n\t\tlowTcoeffNodes=coeffs.getElementsByTagName('range_Tmin_to_1000')[0].getElementsByTagName('coef')\n\t\thighTcoeffs=[]\n\t\tfor coef in highTcoeffNodes:\n\t\t\tname=coef.getAttribute('name')\n\t\t\tvalue=float(coef.firstChild.data.replace(' ','').replace('D','E'))\n\t\t\thighTcoeffs.append(value)\n\t\tlowTcoeffs=[]\n\t\tfor coef in lowTcoeffNodes:\n\t\t\tname=coef.getAttribute('name')\n\t\t\tvalue=float(coef.firstChild.data.replace(' ','').replace('D','E'))\n\t\t\tlowTcoeffs.append(value)\n\t\tallCoeffs=numpy.array([lowTcoeffs, highTcoeffs])\n\t\treturn allCoeffs", "def getCoefficients( MOvect ):\n for ind in range(len(MOvect)):\n currMOv=re.split('\\n| ', MOvect[ind].strip().split(\"------------ ---------------\\n\")[-1])\n elements=[]\n orbital_nr=[]\n #print(MOvect[ind])\n for i in range(len(currMOv)):\n if currMOv[i].split()==[]:\n #print(i)\n break\n elements.append(currMOv[i].split())\n orbital_nr.append(int(elements[i][0]))\n\n #resort elemnts by index\n index=np.argsort(orbital_nr)\n if ind==0:\n coeff=np.zeros(( len(MOvect),len(elements) ))\n #fill elements into matrix\n for i in range(len(elements)):\n coeff[ind][i]=float(elements[index[i]][1])\n #print(coeff[ind][i])\n return coeff", "def circuit(V, I0, L, C, alpha, beta):\n Vdot = [V[0], V[0]/(psi(np.pi(/2))) # first and second derivative of V\n return Vdot[1] - (1/C) * (alpha - 3*gamma*V[0]**2)*Vdot[0] + 1/(L*C)*V[0]", "def Bescoef(k,r,a,n):\n \n kr , ka = k*r , k*a \n coef = -(spec.jvp(n,ka,n=1)/spec.h1vp(n,ka,n=1))*spec.hankel1(n,kr)\n \n return coef", "def polynomial_model(z, c0, c1, c2, c3, c4, c5):\n c = np.asarray([c0, c1, c2, c3, c4, c5])\n Z = np.power(np.asarray([z]).T, np.arange(6)).T \n return c.dot(Z)", "def bridageAcVi(v,coef):\r\n if ((v==3) & (coef==1)) | ((v==0) & (coef==-1)): return 0\r\n return coef" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the names of all external parameters
def get_ext_param_names(self):
    num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)

    ret = []
    for p in range(num_param):
        tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)
        ret.append(tmp.decode("UTF-8"))

    return ret
[ "def param_names(self) -> List[str]:", "def _getAllParamNames(self):\r\n names=copy.deepcopy(self._paramNamesSoFar)\r\n #get names (or identifiers) for all contained loops\r\n for thisLoop in self.loops:\r\n theseNames, vals = self._getLoopInfo(thisLoop)\r\n for name in theseNames:\r\n if name not in names:\r\n names.append(name)\r\n return names", "def get_param_names(self):\n return list(self.params.keys())", "def known_parameters(self) -> List[str]:\n return self._known_parameters", "def parameters(self):\r\n pyname = self.pyname\r\n if isinstance(pyname, pynames.ImportedName):\r\n pyname = pyname._get_imported_pyname()\r\n if isinstance(pyname, pynames.DefinedName):\r\n pyobject = pyname.get_object()\r\n if isinstance(pyobject, pyobjects.AbstractFunction):\r\n return pyobject.get_param_names()", "def get_ext_param_descriptions(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_description(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def GetAllParameterNames(self):\n callResult = self._Call(\"GetAllParameterNames\", )\n\n if callResult is None:\n return None\n\n return callResult", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def _get_parameter_keys(self) -> list:\n return list(self._get_parameters().keys())", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def getNamedParams(self):\n return self.namedParams.items()", "def param(self):\n parameters = []\n for layer in self.layers:\n parameters.extend(layer.param)\n return parameters", "def names_free(self) -> List[str]:\n return [p.name for p in self.parameters_free]", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def get_global_parameters(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the descriptions of all external parameters
def get_ext_param_descriptions(self):
    num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)

    ret = []
    for p in range(num_param):
        tmp = core.xc_func_info_get_ext_params_description(self.xc_func_info, p)
        ret.append(tmp.decode("UTF-8"))

    return ret
[ "def get_resource_params():\n return Parameter.list()", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def show_parameters(self):\n print(self.mk_summary())", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def known_parameters(self) -> List[str]:\n return self._known_parameters", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def get_ext_param_names(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def show_all_params(self):\n for key, val in self.information.items():\n print (\"{} : {}\".format(key,val))", "def param_names(self) -> List[str]:", "def print_params(self):\n print(self._list_params())", "def param_strs(self):\n name_len = max(len(p.name) for p in self)\n value_len = max(len(p.value_str) for p in self.params.values())\n units_len = max(len(p.units) for p in self.params.values())\n return [(p.name.ljust(name_len), p.value_str.ljust(value_len),\n p.units.ljust(units_len), p.__doc__)\n for p in self.params.values() if p]", "def get_global_parameters(self):", "def print_params(env) -> None:\n dict_pretty_print(env.config['parameters'])", "def get_all_component_parameters(self) -> Dict[str, Any]:\n return self._node[\"app_data\"][\"component_parameters\"]", "def get_parameters(self, module: RLModule) -> Sequence[ParamType]:", "def show_param(self):\n\n print '__ell_pot = '#, self.__ell_gravlens\n print '__b_sis = '#, self.__b_sis\n print '__z_lens = ', self.__z_lens", "def param(self):\n parameters = []\n for layer in self.layers:\n parameters.extend(layer.param)\n return parameters", "def showParams(self):\n for x in self.parameters():\n if type(x['params']) == list:\n for tens in x['params']:\n print(tens.shape,tens.is_leaf)\n else:\n print(x['params'].shape,x['params'].is_leaf)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the default values of all external parameters.
def get_ext_param_default_values(self):
    num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)

    ret = []
    for p in range(num_param):
        tmp = core.xc_func_info_get_ext_params_default_value(self.xc_func_info, p)
        ret.append(tmp)

    return ret
[ "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def get_default_values(self, entity, params):\n return entity.default_values", "def get_default_values(self, *args):\n def _getdefval(idx):\n return _proc.getlongannodefval(\n self._env._e, self._cplex._lp, idx)\n return _aux.apply_freeform_one_arg(\n _getdefval, self._conv, self.get_num(), args)", "def get_default_parameters(self):\n\n return DottedDict(self.default_parameters)", "def get_default_model_parameters(self):\n\n return(self._default_model_parameters)", "def default_parameters(self):\n return self._vertex.default_parameters", "def get_default_values(self, *args):\n def _getdefval(idx):\n return _proc.getdblannodefval(\n self._env._e, self._cplex._lp, idx)\n return _aux.apply_freeform_one_arg(\n _getdefval, self._conv, self.get_num(), args)", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def get_default_args(func):\n\tsignature = inspect.signature(func)\n\treturn {k: v.default\n\t\tfor k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}", "def get_defaultvalues(host):\n return get_obj_defaultvalues(OBJT_HOST, host)", "def default_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"default_values\")", "def Params_defaultParams(): # real signature unknown; restored from __doc__\n pass", "def get_default_params():\n\n with IOTools.open_file(os.path.join(os.path.dirname(__file__),\n \"defaults.yml\")) as inf:\n result = yaml.load(inf, Loader=RoundTripLoader)\n return result", "def parameter_defaults(func: Callable) -> dict[str, Any]:\n signature = inspect.signature(func)\n return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}", "def get_default_args(func):\n signature = inspect.signature(func)\n return {\n k: v.default\n for k, v in signature.parameters.items()\n if v.default is not inspect.Parameter.empty\n }", "def default_params():\n return {\n \"optimizer.name\": \"Adam\",\n \"optimizer.learning_rate\": 1e-4,\n \"optimizer.params\": {}, # Arbitrary parameters for the optimizer\n \"optimizer.lr_decay\": {\n \"decay_type\": None,\n \"decay_steps\": 100,\n \"decay_rate\": 0.99,\n \"start_decay_at\": 0,\n \"stop_decay_at\": sys.maxsize,\n \"min_learning_rate\": 1.0e-9,\n \"staircase\": False,\n \"patience\": None, # for loss_decay\n \"dmodel\": None, # for noam_decay\n \"scale\": 1.0 # for noam_decay\n },\n \"optimizer.clip_gradients\": 1.0,\n \"optimizer.sync_replicas\": 0,\n \"optimizer.sync_replicas_to_aggregate\": 0,\n }", "def load_defaults_for_env_var(self):\n self.parameters = self.defaults", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the density threshold below which the functional will not be evaluated.
def set_dens_threshold(self, dens_threshold):
    if dens_threshold < 0:
        raise ValueError("The density threshold cannot be smaller than 0.")
    core.xc_func_set_dens_threshold(self.xc_func, ctypes.c_double(dens_threshold))
[ "def update_threshold(self, threshold):\n self.mf.set_threshold(self.cm.estimate)", "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def setDropThreshold(self, dropThreshold): # real signature unknown; restored from __doc__\n pass", "def clear_density(self):\n self._density = None", "def blank_threshold(self, value):\n self._internal.set_blank_threshold(float(value))", "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def setThreshold(self, threshold):\n self.t = threshold", "def threshold(self, threshold):\n\n self._threshold = threshold", "def setThreshold(self, thresh):\n\t\tif thresh <= 0:\n\t\t\traise ValueError(\"Threshold value must be > 0\")\n\t\t\n\t\tself.threshold = thresh", "def set_fmask(self):\n # Potentially can configure thresholds per sensor\n self.fmask_cloudprob = 22.5\n # Threshold for water.\n # NB: This seems to miss some clouds over water (which end up having\n # about 35-40% probability, not >50%)\n self.fmask_wclr_max = 50", "def _change_point_density(self, point_density):\n self.volCanvas.setParam('lores_density', point_density)", "def auto_cutoff(water_d):\n return .9/np.sqrt(np.average(water_d))", "def SetThreshold (self,VolumeNode, min, max):\n DisplayNode = VolumeNode.GetScalarVolumeDisplayNode()\n DisplayNode.SetApplyThreshold(True)\n DisplayNode.SetThreshold(min,max)", "def _fix_propensity(self):\n if self.propensity is not None:\n num_bad_prop = np.sum((self.propensity*(1-self.propensity)) == 0)\n if num_bad_prop > 0:\n self.propensity[self.propensity == 0] += self.eps\n self.propensity[self.propensity == 1] -= self.eps\n warnings.warn(\"Propensity scores has {} number of 0s or 1s.\"\n .format(num_bad_prop))", "def density(self, density):\n\n self._density = density", "def setPowerIfNecessary(self):\n if self.p.power == 0 and self.p.powerDensity > 0:\n self.setPowerFromDensity()", "def remove_noise(self, thr):\n if thr >= 0:\n mask = self.data_pred > thr\n self.data_pred = mask_predictions(self.data_pred, mask)", "def unsetThresholdLevel(self):\n return _libsbml.Input_unsetThresholdLevel(self)", "def query_under_threshold(self):\n volts = self.daq_object.point_measure()\n if volts > self.threshold:\n raise ac_excepts.ThresholdError(\n self.error_text + ' is over threshold.', self.query_threshold)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the number of columns and rows required to divide an image into ``n`` parts. Return a tuple of integers in the format (num_columns, num_rows)
def calc_columns_rows(n):
    num_columns = int(ceil(sqrt(n)))
    num_rows = int(ceil(n / float(num_columns)))
    return (num_columns, num_rows)
[ "def compute_nrows_ncolumns(nplots):\n n_rows = int(np.sqrt(nplots)) + (np.sqrt(nplots) != int(np.sqrt(nplots))) * 1\n n_columns = int(nplots / n_rows) + (nplots / n_rows != int(nplots / n_rows)) * 1\n return n_rows, n_columns", "def get_fig_dimension(n_subplots):\n if int(np.sqrt(n_subplots) + 0.5) ** 2 == n_subplots:\n # perfect square number\n n_cols = int(np.sqrt(n_subplots))\n else:\n n_cols = int(np.floor(np.sqrt(n_subplots))) + 1 \n \n if n_subplots % n_cols == 0:\n n_rows = int(n_subplots / n_cols)\n else:\n n_rows = int(np.floor(n_subplots / n_cols)) + 1 \n \n return n_cols, n_rows", "def calculate_grid_dimensions(num_items, num_columns=None):\n if num_columns is None:\n num_rows_columns = int(math.ceil(math.sqrt(num_items)))\n return num_rows_columns, num_rows_columns\n else:\n num_rows = int(math.ceil(num_items / num_columns))\n return num_rows, num_columns", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def get_size(self):\n count_cols = self.max_col + 1 - self.min_col\n count_rows = self.max_row + 1 - self.min_row\n return count_cols, count_rows", "def getLayoutDimensions(n, pref=\"height\"):\n nopt = np.sqrt(n)\n inoptw = int(nopt)\n inopth = int(nopt)\n while inoptw * inopth < n:\n if pref == \"width\":\n inoptw += 1\n if inoptw * inopth > (n - inopth):\n inoptw -= 1\n inopth += 1\n else:\n inopth += 1\n if inoptw * inopth > (n - inoptw):\n inopth -= 1\n inoptw += 1\n\n return (inopth, inoptw)", "def CalcRowsCols(self):\n nitems = len(self.GetChildren())\n rows = self.GetRows()\n cols = self.GetCols()\n assert rows != 0 or cols != 0, \"Grid sizer must have either rows or columns fixed\"\n if cols != 0:\n rows = (nitems + cols - 1) / cols\n elif rows != 0:\n cols = (nitems + rows - 1) / rows\n return (rows, cols)", "def int_n_components(nbcolumns, n_components):\n if n_components < 1 or (isinstance(n_components, float) and n_components == 1.0):\n n_components = min(max(int(nbcolumns * n_components), 1), nbcolumns - 1)\n else:\n n_components = min(max(int(n_components), 1), nbcolumns - 1)\n\n return n_components", "def getImageDimensions(self) -> (int, int):\n binning = self.getBinning()\n dim_x, dim_y = self.getCameraDimensions()\n\n dim_x = int(dim_x / binning)\n dim_y = int(dim_y / binning)\n\n return dim_x, dim_y", "def calculate_number_of_source_images(img: Image.Image) -> tuple:\n img_width = img.size[0]\n img_height = img.size[1]\n source_images_width = int(math.ceil(img_width / settings.SOURCE_IMAGE_WIDTH_HEIGHT))\n source_images_height = int(\n math.ceil(img_height / settings.SOURCE_IMAGE_WIDTH_HEIGHT)\n )\n\n return source_images_width, source_images_height", "def slice_size(self):\n width = self.image.shape[1]\n height = self.image.shape[0]\n return width, height", "def size(self):\r\n rows = len(self.pixels[0])\r\n cols = len(self.pixels[0][0])\r\n return (rows, cols)", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def part(n):\n return part_count(n, current_num=n)", "def getPlotRowsAndCols(maxCols: int, numPlots: int = 0) -> Tuple[int, int]:\n if numPlots <= maxCols:\n ncols = numPlots\n nrows = 1\n else:\n ncols = maxCols\n nrows = int(np.ceil(1.0 * numPlots / maxCols))\n return nrows, ncols", "def count_rectangles(m: int, n: int) -> int:\n return m * (m + 1) * n * (n + 1) // 4", "def _get_dimensions(self):\n corners = []\n for module 
in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]", "def calculate_layout(num_axes, n_rows=None, n_cols=None):\n if n_rows is not None and n_cols is not None:\n raise ValueError(\n 'cannot derive number of rows/columns if both values provided')\n if n_rows is None and n_cols is None:\n n_cols = 2\n if n_rows is None:\n n_rows = max(1, math.ceil(num_axes / n_cols))\n else:\n n_cols = max(1, math.ceil(num_axes / n_rows))\n return n_rows, n_cols", "def get_grid_shape(num_examples):\n height = int(numpy.floor(numpy.sqrt(num_examples)))\n width = int(numpy.ceil(num_examples * 1. / height))\n\n return (height, width)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate combined size of tiles.
def get_combined_size(tiles):
    # TODO: Refactor calculating layout to avoid repetition.
    columns, rows = calc_columns_rows(len(tiles))
    tile_size = tiles[0].image.size
    return (tile_size[0] * columns, tile_size[1] * rows)
[ "def tile_size_2d(self):\n return 32.0, 32.0", "def poss_tile_sizes(self):\n\n path_raw = self.raw_path\n\n for img in os.listdir(path_raw + \"/image\"):\n read_img = cv2.imread(path_raw + \"/image/\" + img, -1)\n y,x = read_img.shape\n\n break\n\n size = 16\n\n while size < max([y, x]) / 2 + 16:\n\n x_tile = math.ceil(x / size)\n y_tile = math.ceil(y / size)\n\n x_overlap = (np.abs(x - x_tile * size)) / (x_tile - 1)\n y_overlap = (np.abs(y - y_tile * size)) / (y_tile - 1)\n\n if (x_overlap.is_integer() and y_overlap.is_integer()) and (x_tile * y_tile) % 2 == 0:\n print(\"tile size (px):\", size, \"number of tiles: \", x_tile * y_tile)\n\n size += 16", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def get_tilesize(self, sampling):\n xsize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n ysize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n return xsize, ysize", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def tile_size(self) -> int:\n return self._tile_size", "def tile_size(self):\n return self._tile_size", "def getNumTiles(self):\n return self.height * self.width", "def _calculate_mask_size(self):\n width_in_meters = self._map_boundaries.max_x - self._map_boundaries.min_x\n height_in_meters = self._map_boundaries.max_y - self._map_boundaries.min_y\n width_in_pixels = int(width_in_meters * self._pixels_per_meter)\n height_in_pixels = int(height_in_meters * self._pixels_per_meter)\n return height_in_pixels, width_in_pixels", "def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)", "def get_tile_size(self) -> int:\n return self.tile_size.spin.value()", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()", "def _get_tile_size() -> int:\n return octree_config['octree']['tile_size'] if octree_config else 256", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def calculate_min_max_tiles(self):", "def _calculate_room_size(self):\n config = self.game.config\n\n short_side = min(config.map_height, config.map_width)\n\n largest_room_size = 0\n total_size = 0\n total_corridor_len = self.corridor_length * (self.grid_size - 1)\n for check_size in range(3, short_side, 2):\n all_rooms_len = check_size * self.grid_size\n rooms_and_corridors = all_rooms_len + total_corridor_len\n if rooms_and_corridors <= short_side:\n largest_room_size = check_size\n total_size = rooms_and_corridors\n else:\n break\n\n return largest_room_size, total_size", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corner_idx)\n corners.append(tile.opp_corner_idx)\n corners = np.stack(corners)\n\n # Find extremes\n min_yx = corners.min(axis=0)\n max_yx = 
corners.max(axis=0)\n\n size = max_yx - min_yx\n centre = -min_yx\n return tuple(size), centre", "def calculateTierSize(imageWidth, imageHeight, tileSize=256):\n tierSizeInTiles = []\n while (imageWidth > tileSize or imageHeight > tileSize):\n tileWidth = float(imageWidth) / tileSize\n tileHeight = float(imageHeight) / tileSize\n tierSizeInTiles.append([math.ceil(tileWidth), math.ceil(tileHeight)])\n tileSize += tileSize\n tierSizeInTiles.append([1.0, 1.0]) \n tierSizeInTiles.reverse() \n return tierSizeInTiles", "def calculate_size(self):\n top_left_y = 0\n top_left_x = 0\n\n bottom_right_y = 1\n bottom_right_x = 1\n\n # TODO: calculate the correct bounds of the threat zone.\n\n raise NotImplementedError\n\n # if there is a sight_range for this map_obstacle then increase the size of the zone.\n if self.sight_range > 0:\n top_left_y += self.sight_range\n top_left_x += self.sight_range\n bottom_right_y += self.sight_range\n bottom_right_x += self.sight_range\n\n top_left = (top_left_y, top_left_x)\n bottom_right = (bottom_right_y, bottom_right_x)\n\n height = bottom_right_y - top_left_y\n width = bottom_right_x - top_left_x\n\n self.top_left_y = top_left_y\n self.top_left_x = top_left_x\n self.bottom_right_y = bottom_right_y\n self.bottom_right_x = bottom_right_x\n self.height = height\n self.width = width\n\n return (top_left, bottom_right, height, width)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
``tiles`` Tuple of ``Image`` instances. ``width`` Optional, width of combined image. ``height`` Optional, height of combined image. ``Image`` instance.
def join(tiles, width=0, height=0):
    # Don't calculate size if width and height are provided
    # this allows an application that knows what the
    # combined size should be to construct an image when
    # pieces are missing.
    if width > 0 and height > 0:
        im = Image.new("RGBA", (width, height), None)
    else:
        im = Image.new("RGBA", get_combined_size(tiles), None)
    columns, rows = calc_columns_rows(len(tiles))
    for tile in tiles:
        try:
            im.paste(tile.image, tile.coords)
        except IOError:
            # do nothing, blank out the image
            continue
    return im
[ "def combine_images(images: list) -> Image:\n img_width = images[0][0].width\n img_height = images[0][0].height\n new_size = (img_width * len(images[0]), img_height * len(images))\n new_image = Image.new('RGB', new_size)\n\n # Add all the images from the grid to the new, blank image\n for rowindex, row in enumerate(images):\n for colindex, image in enumerate(row):\n location = (colindex * img_width, rowindex * img_height)\n new_image.paste(image, location)\n\n return new_image", "def tiles2images(tiles: List[np.ndarray], im_shape: tuple, h: int, w: int):\n im_height, im_width, channels = im_shape\n n_h = math.ceil(im_height / h)\n n_w = math.ceil(im_width / w)\n im = []\n for y_index in range(n_h):\n im_row = tiles[y_index * n_w:(y_index + 1) * n_w]\n dw = im_width % w\n im_row[-1] = im_row[-1][:, -dw:]\n im_row = np.concatenate(im_row, axis=1)\n im.append(im_row)\n dh = im_height % h\n im[-1] = im[-1][-dh:, :]\n im = np.concatenate(im, axis=0)\n return im", "def tile_images(imgs, rows, cols, padding=0):\n assert imgs.dim()==4\n N,C,H,W = list(imgs.size())\n assert C in [3,1]\n assert N==rows*cols\n tiled = imgs.new().resize_(C, (H+padding)*rows-padding, (W+padding)*cols-padding).fill_(0)\n for i in xrange(N):\n x = (i % cols)*(W+padding)\n y = (i / cols)*(H+padding)\n tiled[:, y:y+H, x:x+W] = imgs[i]\n return tiled", "def pack_images( images ):\n width = images[0].size[0]\n height = images[0].size[1]\n\n\n out_width = min([len(images), 10]) * width\n out_height = ((len(images)-1) / 10 + 1) * height\n\n out_image = Image.new(\"RGBA\", (out_width,out_height))\n\n x = 0\n y = 0\n for im in images:\n out_image.paste( im, (x*width, y*height) )\n x += 1\n if x == 10:\n y += 1\n x = 0\n return out_image", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def combine_pictures(images):\n widths, heights = zip(*(i.size for i in images))\n\n total_width = sum(widths)\n max_height = max(heights)\n\n new_im = Image.new('RGB', (total_width, max_height))\n\n x_offset = 0\n for im in images:\n new_im.paste(im, (x_offset, 0))\n x_offset += im.size[0]\n\n new_im.save('test.jpg')\n\n return True", "def _assemble_tiles(self,images,X,Y,Z,C,T):\n self._buffer_supertile(X[0][0],X[0][1])\n \n if X[-1][0] - self._tile_x_offset > self._TILE_SIZE:\n split_ind = 0\n while X[split_ind][0] - self._tile_x_offset < self._TILE_SIZE:\n split_ind += 1\n else:\n split_ind = len(X)\n \n # Untile the data\n num_rows = Y[0][1] - Y[0][0]\n num_cols = X[0][1] - X[0][0]\n num_tiles = len(X)\n \n for ind in range(split_ind):\n r_min = Y[ind][0]-self._tile_y_offset\n r_max = Y[ind][1]-self._tile_y_offset\n c_min = X[ind][0]-self._tile_x_offset\n c_max = X[ind][1]-self._tile_x_offset\n self._pixel_buffer[r_min:r_max,c_min:c_max] = images[ind,:,:,0]\n \n if split_ind != num_tiles:\n self._buffer_supertile(X[-1][0],X[-1][1])\n for ind in range(split_ind,num_tiles):\n r_min = Y[ind][0]-self._tile_y_offset\n r_max = Y[ind][1]-self._tile_y_offset\n c_min = X[ind][0]-self._tile_x_offset\n c_max = X[ind][1]-self._tile_x_offset\n 
self._pixel_buffer[r_min:r_max,c_min:c_max] = images[ind,:,:,0]\n \n self._tile_last_column = c_max\n \n return True", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def get_combined_size(tiles):\n # TODO: Refactor calculating layout to avoid repetition.\n columns, rows = calc_columns_rows(len(tiles))\n tile_size = tiles[0].image.size\n return (tile_size[0] * columns, tile_size[1] * rows)", "def mergeTiles(tileDir, targetPath, tierSizeInTiles, zoom, width, height, tileSize):\n print \"Merge image tiles ...\"\n mergeImage = Image.new(\"RGB\", (width, height))\n for tileCoordX in range(0, int(tierSizeInTiles[zoom][0])):\n for tileCoordY in range(0, int(tierSizeInTiles[zoom][1])):\n filename = \"%(zoom)s-%(tileCoordX)s-%(tileCoordY)s.jpg\"%{\n \"zoom\": zoom,\n \"tileCoordX\": tileCoordX,\n \"tileCoordY\": tileCoordY\n }\n file = os.path.join(tileDir, filename)\n mergeImage.paste(Image.open(file), (tileCoordX * tileSize, tileCoordY * 256))\n mergeImage.save(targetPath)\n return targetPath", "def generate_images(levels: List[LevelType], width: int = 10, height: int = 10) -> None:\n _clear()\n image_width, image_height = width * CELL_WIDTH, height * CELL_HEIGHT\n\n with Progress() as progress:\n task = progress.add_task(\n total=len(levels), description=\"[bold yellow]Images generation...\")\n\n for i, level in enumerate(levels):\n image = Image.new(\n 'RGBA', (image_width, image_height), (0, 0, 0, 0))\n for _i, row in enumerate(level):\n for _j, cell_type in enumerate(row):\n if cell_type is None:\n continue\n\n left = _j * CELL_WIDTH\n upper = _i * CELL_HEIGHT\n right = left + CELL_WIDTH\n lower = upper + CELL_HEIGHT\n\n image.paste(CELL_SPRITES[cell_type],\n box=(left, upper, right, lower))\n image.save(f'{output_dir}/{i}.png')\n progress.advance(task)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def pack_image_nest(cls, imgs):\n assert rpack is not None, \"You need to install rectangle-packer first!\"\n\n imgs = nest.flatten(imgs)\n if len(imgs) == 0:\n return\n\n # first get all images' sizes (w,h)\n sizes = [(i.shape[1], i.shape[0]) for i in imgs]\n # call rpack for an approximate solution: [(x,y),...] 
positions\n positions = rpack.pack(sizes)\n # compute the height and width of the enclosing rectangle\n H, W = 0, 0\n for size, pos in zip(sizes, positions):\n H = max(H, pos[1] + size[1])\n W = max(W, pos[0] + size[0])\n\n packed_img = np.full((H, W, 3), 255, dtype=np.uint8)\n for pos, img in zip(positions, imgs):\n packed_img[pos[1]:pos[1] + img.shape[0], pos[0]:pos[0] +\n img.shape[1], :] = img.data\n return cls(packed_img)", "def _stitch_images(*args):\n assert len(args) == 2\n lines, height, width, channels = args[0].shape\n min = 0\n stack = args[0]\n # print([(type(x), x.shape) for x in args])\n # print(np.min(args[0]), np.max(args[0]), args[0].dtype, args[0].mean())\n # print(np.min(args[1]), np.max(args[1]), args[1].dtype, args[1].mean())\n if len(args) > 1:\n for i in range(len(args) - 1):\n stack = np.concatenate((stack, args[i + 1]), axis=2)\n # stack - array of lines of pictures (arr_0[0], arr_1[0], ...)\n # concatenate lines in one picture (height = tile_h * #lines)\n picture_lines = stack.reshape(lines * height, stack.shape[2], channels)\n picture_lines = np.hstack((\n picture_lines,\n np.ones((lines * height, 2, channels), dtype=np.uint8) * min)) # pad 2 pixels\n\n # slice/reshape to have better image proportions\n return picture_lines, height", "def loadImages(file, tiles):\n array = []\n image = wx.Image(file, wx.BITMAP_TYPE_PNG)\n \n width = image.GetWidth() / tiles\n height = image.GetHeight()\n \n for i in xrange(tiles):\n array.append(image.GetSubImage(wx.Rect(i*width, 0, width, height)))\n \n return array", "def tile(img):\n rows, cols, res = img.rows, img.cols, img.res\n pixels, pixsize = img.pixels, channels[img.pixtype] # assumes 8-bit channels\n width, height = cols/res, rows/res\n\n def tiled(x, y):\n h = (x + width/2.0) % width # horz, vert offset from top left\n v = (height/2.0 - y) % height \n r, c = int(v*res), int(h*res)\n offset = (cols*r + c)*pixsize\n return pixels[offset:offset+pixsize]\n return (tiled, img.pixtype)", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW / self.TileWidth, TileIH / self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x + 1), self.TileHeight * (y + 1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n tileString = tile.tostring()\n if not self.TileDict.has_key(tileString):\n self.TileDict[tileString] = len(self.List) - 1", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def join_tiles(tiles):\n out = None\n for tile, metadata in tiles:\n if out is None:\n out = np.zeros(metadata['original_shape'], dtype=tile.dtype)\n\n start = np.asarray(metadata['origin'])\n end = start + np.asarray(tile.shape)\n slices = [slice(start[i], end[i]) for i in range(tile.ndim)]\n\n out[tuple(slices)] += tile\n\n return out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine column and row position for filename.
def get_image_column_row(filename):
    row, column = os.path.splitext(filename)[0][-5:].split("_")
    return (int(column) - 1, int(row) - 1)
[ "def position(file_, pattern):\n pattern = pattern[1:-1]\n pattern = pattern.replace('(', '\\(')\n pattern = pattern.replace(')', '\\)')\n file_obj = open(file_, 'rU')\n for line_number, line in enumerate(file_obj):\n m = re.search(pattern, line)\n if m is not None:\n return line_number, m.pos\n file_obj.close()\n return 0, 0", "def get_position(self, number):\n for rowidx, row in enumerate(self.numbers):\n for colidx, num in enumerate(row):\n if num == number:\n return rowidx, colidx", "def extract_row_and_col_number(self, entry):\n\n row_col_string = entry.split(\"_\")\n row = int(row_col_string[0][1:])\n col = int(row_col_string[1][1:])\n return row, col", "def position(self):\n self.updatePosition()\n if self.lineLengths:\n line, col = len(self.lineLengths), self.lineLengths[-1]\n else:\n line, col = 1,0\n return (line, col)", "def _errpos(self, fpos):\r\n filename, string = self._includestack[-1]\r\n return filename, srow(string, fpos), scol(string, fpos)", "def getpos(self):\n return self.lineno, self.offset", "def _get_file_and_line():\n code, f = _get_caller()\n if not code:\n return '<unknown>', 0\n return code.co_filename, f.f_lineno", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def source_position(self) -> Tuple[int, int]:\n return self.templated_file.get_line_pos_of_char_pos(\n self.source_slice.start, source=True\n )", "def find_cursor_at_pos(tu, filename, line, col):\n loc = tu.get_location(filename, (line, col))\n return get_smallest_cursor_containing(tu.cursor, loc)", "def get_col_left_loc(colNum):\r\n return (colNum*width) + offset", "def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos - i + len(line) + 1\n break\n return (cursor_col, cursor_row)", "def _get_header_position(header_row: List[str], column_title: str) -> int:\n for pos, column in enumerate(header_row):\n if column_title.lower() in column.lower():\n return pos\n\n raise Exception(\"Expected column header not found for {}\".format(column_title))", "def get_pos(filename):\n # command prior to start in separate process to read from ethport:\n # sudo tcpdump -i eth0 udp port 61557 -A >>test3.txt\n\n x = file(filename,'r').read()\n x = x.splitlines()\n last = -1\n proper_read = False\n while not(proper_read):\n y = x[last] # assumes there will be a successful read\n try:\n if y[0] == '.':\n msg = y.split(',')\n xpos =int(msg[6])\n ypos =int(msg[7])\n zpos =int(msg[8])\n proper_read = True\n else:\n last += -1\n except:\n last += -1\n\n return xpos, ypos, zpos", "def _get_file_info(filename):\n filename = os.path.split(filename)[-1]\n filename = filename[:str.rfind(filename, '.jsonl.gz')]\n _, mode, idx = filename.split('_')\n return mode, idx", "def find_tok_column(self, lexpos):\n found = self.lexer.lexdata.rfind('\\n', 0, lexpos)\n column = lexpos - max(0, found)\n return column", "def xFileInfo(filename):\n delim = getDelimiter(filename)\n f = open(filename, 'r')\n reader = csv.reader(f, delimiter=delim)\n num_rows = 0\n for (row_i, row) in enumerate(reader):\n if row_i == 0: #ignore empty strings (e.g. 
at end of row)\n num_cols = len([val for val in row if val])\n num_rows += 1\n f.close()\n return (num_rows, num_cols)", "def guess_lineno(file):\n offset = file.tell()\n file.seek(0)\n startpos = 0\n lineno = 1\n # looks like file.read() return bytes in python3\n # so I need more complicated algorithm here\n while True:\n line = file.readline()\n if not line:\n break\n endpos = file.tell()\n if startpos <= offset < endpos:\n break\n lineno += 1\n file.seek(offset)\n return lineno", "def location(self):\n # Store the current position.\n stored_position = self.file.tell()\n\n # Return to the file's beginning and create an empty list of lengths.\n self.file.seek(0)\n linelengths = []\n\n i = 0\n\n # Occupy the list with cumulative line lengths.\n for line in self.file:\n i += 1\n if len(linelengths) == 0:\n linelengths.append(len(line))\n else:\n linelengths.append(len(line) + linelengths[-1])\n\n num_line = i\n\n current_line = ''\n current_position = ''\n\n # Return to the stored (current) position within the file.\n self.file.seek(stored_position)\n\n # Return line and position by comparing current position to the list.\n for n in range(num_line):\n if n == 0:\n if self.file.tell() <= linelengths[n]:\n current_line = n + 1\n current_position = self.file.tell()\n elif(self.file.tell() <= linelengths[n] and self.file.tell() >\n linelengths[n-1]):\n current_line = n + 1\n current_position = self.file.tell() - linelengths[n-1]\n\n return [current_line, current_position]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open all images in a directory. Return tuple of Tile instances.
def open_images_in(directory):
    files = [
        filename
        for filename in os.listdir(directory)
        if "_" in filename and not filename.startswith("joined")
    ]
    tiles = []
    if len(files) > 0:
        i = 0
        for file in files:
            pos = get_image_column_row(file)
            im = Image.open(os.path.join(directory, file))
            position_xy = [0, 0]
            count = 0
            for a, b in zip(pos, im.size):
                position_xy[count] = a * b
                count = count + 1
            tiles.append(
                Tile(
                    image=im,
                    position=pos,
                    number=i + 1,
                    coords=position_xy,
                    filename=file,
                )
            )
            i = i + 1
    return tiles
[ "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def get_images(directory=None):\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n\n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def get_images(directory=None):\r\n if directory == None:\r\n directory = os.getcwd() # Use working directory if unspecified\r\n image_list = [] # Initialize aggregaotrs\r\n file_list = []\r\n directory_list = os.listdir(directory) # Get list of files\r\n \r\n for entry in directory_list:\r\n \r\n absolute_filename = os.path.join(directory, entry)\r\n \r\n try:\r\n \r\n image = PIL.Image.open(absolute_filename)\r\n \r\n file_list += [entry]\r\n \r\n image_list += [image]\r\n \r\n except IOError:\r\n \r\n pass # do nothing with errors tying to open non-images\r\n \r\n return image_list, file_list", "def get_images(directory=None):\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def loadImages(file, tiles):\n array = []\n image = wx.Image(file, wx.BITMAP_TYPE_PNG)\n \n width = image.GetWidth() / tiles\n height = image.GetHeight()\n \n for i in xrange(tiles):\n array.append(image.GetSubImage(wx.Rect(i*width, 0, width, height)))\n \n return array", "def openImage(self, fn):\n\n #get the image, and make sure it's pixel dimensions are consistent\n #tilesets have 1 spacing between each tile,\n #so adding 1 should give a multiple of the tilesize+1\n tilesetImage = data.getImage(fn)\n tilesetImage.set_colorkey(self.transparency)\n \n data.check(((tilesetImage.get_width()+1)%(self.tileSize[0]+1))==0, fn)\n data.check(((tilesetImage.get_height()+1)%(self.tileSize[1]+1))==0, fn)\n dimensions = ((tilesetImage.get_width()+1)/(self.tileSize[0]+1),\n (tilesetImage.get_height()+1)/(self.tileSize[1]+1))\n\n #iterate over each tile, cutting it out and adding to our list\n #go across each row in turn to get index numbering correct\n self.tiles = []\n for y in range(0, dimensions[1]):\n for x in range(0, dimensions[0]):\n tile = tilesetImage.subsurface((x*(self.tileSize[0]+1), y*(self.tileSize[1]+1), self.tileSize[0], self.tileSize[1]))\n self.tiles.append(tile)\n\n #calculate offset\n self.tileOffset = ((globs.TILESIZE[0]-self.tileSize[0])/2,\n 
globs.TILESIZE[1]-self.tileSize[1])", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW / self.TileWidth, TileIH / self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x + 1), self.TileHeight * (y + 1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n tileString = tile.tostring()\n if not self.TileDict.has_key(tileString):\n self.TileDict[tileString] = len(self.List) - 1", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def iter_images(root_dir_path: str) -> (str, np.array, str):\n for i, path in enumerate(glob.iglob(f\"{root_dir_path}/**/*.jpg\")):\n name = path.split(\"/\")[-1][:-4]\n img = imageio.imread(path)\n yield name, img, path\n return i", "def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images", "def get_files(directory):\n imgs = os.listdir(directory)\n return imgs", "def list_open_images():\n\tid = 0\n\tif len(open_images) > 0:\n\t\tfor i in open_images:\n\t\t\tfilename = os.path.basename(i.get_filename())\n\t\t\tprint(id, filename)\n\t\t\tid += 1\n\telse:\n\t\tprint(\"No open images\")", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def _walk_images(self, directory):\n filenames = next(os.walk(directory))[2]\n for filename in filenames:\n if is_supported(filename):\n yield filename", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def load_images(self):\n for f in glob.glob('pix/weather/*.png'):\n base = os.path.splitext(os.path.basename(f))[0]\n self.images[base] = itk.PhotoImage(file=f)", "def glob_directory_images(directory: Path) -> list[Path]:\n return (\n list(directory.glob(\"*.png\"))\n + list(directory.glob(\"*.jpg\"))\n + list(directory.glob(\"*.tif\"))\n )", "def get_existing_images(directory):\n validate_directory(directory)\n directory += '/'\n try:\n return listdir(directory)\n except:\n mkdir(directory)\n return []", "def _open_images(self):\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select image(s)...',\n directory=self._last_visited_dir, # home dir by default\n )\n self._add_files(filenames)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a resource has a title, it should be included in the string representation.
def test_str_with_title(media_resource_factory):
    resource = media_resource_factory(title="Test Resource")
    assert str(resource) == f"{resource.id} ({resource.title})"
[ "def get_resource_title(self, context):\n resource = context.resource\n if resource is None:\n return ''\n if resource['Title']:\n return resource['Title'].value\n else:\n return 'Resource: %s' % resource['LinkID'].value", "def resource_link_title(self):\n return self.request.POST.get(\"resource_link_title\", self.resource_link_id)", "def res_title(self):\n return self.get(\"res_title\", default=None, decode=True)", "def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"", "def title_string(self):\n return self.title", "def __repr_title(self):\n return (\n self.title if not self.done\n else '̶'.join(c for c in self.title)\n )", "def test_string_representation_from_title(self):\n asset = HtmlAsset()\n asset.save()\n title = \"Test Asset Title\"\n translation = HtmlAssetTranslation(title=title, asset=asset)\n translation.save()\n self.assertEqual(unicode(asset), asset.title)", "def get_title(self):\n return self.display_title and self.title or \"\"", "def get_title(self):\n\n if \"title_prefix\" in self:\n return \"{} : {}\".format(self.title_prefix, self.title)\n else:\n return self.title", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def test_display_title_with_no_explicit_title(self):\n asset = HtmlAsset()\n asset.save()\n body = \"Eboney Brown's kids range from age one to age nine, so it helps that her daycare center, Early Excellence, is just a short walk from Wyatt-Edison Charter School, where her older kids go. Early Excellence, which welcomes guests with a mural of a white Denver skyline set against a backdrop of blue mountains, serves families from as far away as Thornton and Green Valley Ranch. And many of those families, says Early Excellence director Jennifer Luke, are caught in a cycle of poverty and depend daily on regional transportation. \\\"I know they can't put a bus on every corner,\\\" says Luke, who knows many parents who combine public transportation with long walks - year round, no matter the weather.\"\n translation = HtmlAssetTranslation(body=body, asset=asset)\n translation.save()\n self.assertEqual(asset.display_title(),\n truncatewords(striptags(asset.body), 4))", "def test_string_rep_render_is_title(self):\n render = Render.objects.first()\n self.assertTrue(str(render) == render.title)", "def is_title(self, key):\n return self.fields[key].title == key", "def item_title(self, item):\n return item.title", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def _get_title(self, book_data: Dict[Any, Any]) -> str:\n title = book_data.get('title')\n subtitle = book_data.get('subtitle')\n if subtitle:\n title = f\"{title}: {subtitle}\"\n return title", "def get_title(self):\n\t\treturn self.title", "def get_title(self):\n # self.title can be a Struct (for SurveyDetails) or just a string\n if isinstance(self.title, Struct):\n return self.title.text\n else:\n return self.title" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Media resources should be ordered by creation time, ascending.
def test_ordering(media_resource_factory):
    m1 = media_resource_factory()
    m2 = media_resource_factory()

    assert list(models.MediaResource.objects.all()) == [m1, m2]
[ "def get_recent_media(self):\n medialist = get(self.token,\n '/users/{}/media/recent'.format(self.id))\n for data in medialist:\n media = Media(data)\n yield media", "def sort(self):\n cfg = config.ConfigSingleton()\n\n working_dir = cfg.get('Paths', 'working_dir', default = '')\n if working_dir == '':\n logger.error('working_dir not set')\n raise ValueError\n else:\n working_dir = Path(working_dir)\n\n regex = cfg.get('Sorting', 'sorting_tag_regex', default = '')\n if regex == '':\n logger.error('Missing sorting_tag_regex in config')\n raise ValueError\n\n sub = cfg.get('Sorting', 'sorting_tag_sub', default = '')\n if regex == '':\n logger.error('Missing sorting_tag_sub in config')\n raise ValueError\n\n errors = []\n\n mediafiles = self.__medialist.get_mediafiles()\n # scan all mediafiles\n for mediafile in mediafiles:\n if mediafile.is_deleted():\n # skip \"deleted files\n continue\n logger.debug('Scanning \"{}\"'.format(mediafile.get_name()))\n # sorting tag for this mediafile\n target = ''\n source = mediafile.get_primary_source()\n source.load()\n for tag in source.get_taglist().get_tags():\n target, n = re.subn(regex, sub, tag)\n if n > 0:\n # we found a sorting tag\n logger.debug('Matched target \"{}\"'.format(target))\n source.unload()\n break\n else:\n target = ''\n source.unload()\n\n # no sorting tag found\n if target == '':\n errors.append('No sorting tag found in \"{}\"'.format(\n source.get_path()))\n # skip to next mediafile\n continue\n\n # create destination\n destination = working_dir / Path(target)\n # create the target directory\n try:\n destination.mkdir(parents = True, exist_ok = True)\n except PermissionError:\n errors.append('Insufficient permissions to create \"{}\"'.format(\n destination))\n # skip to next mediafile\n continue\n except FileExistsError:\n errors.append('\"{}\" is not a directory'.format(destination))\n # skip to next mediafile\n continue\n\n # move the file\n mediafile.move(destination)\n\n # display errors\n errors = list(set(errors))\n for error in errors:\n logger.info(error)\n if len(errors) > 0:\n self.__ui.display_message(\"\\n\".join(errors))\n # rescan working dir\n logger.debug('Initiate re-scan of \"{}\"'.format(str(working_dir)))\n self.__ui.set_working_dir(str(working_dir))", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def get_media_list(path, page=0, items=25):\n pass", "def test_get_resources_ordered(db_session):\n query_params = {\n \"sort\": \"-album_id,title\"\n }\n parser = ModelQueryParamParser(query_params)\n album_resource = AlbumResource(session=db_session)\n result = album_resource.get_collection(\n filters=parser.parse_filters(album_resource.model),\n sorts=parser.parse_sorts()\n )\n assert len(result) == 347\n assert result[0][\"album_id\"] == 347", "def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)", "def test_get_top_n_attachment_resources(self):\n pass", "def files_since( self, dt ):\n 
new_files = []\n \n # sort local media files by modify date, descending\n listed = sorted(\n os.scandir( self._media_path_local ),\n key=lambda item: item.stat().st_mtime,\n reverse = True\n )\n \n # get timestamp from passed datetime object\n ts = dt.timestamp()\n \n # step through sorted files, bail as soon as we reach one older than ts\n for item in listed:\n if item.stat().st_mtime <= ts:\n break\n new_files.append( item.name )\n \n return new_files", "def organize(self):\n for media in self:\n self.move(media)", "def ordered_images(self):\n return self.images.order_by('story_images__id')", "def get_merged_media(self):\n list_media_names = list(self.media.keys())\n list_media_names.sort(key=lambda medium_name: medium_name.lower())\n \n media_names_joined = '/'.join(list_media_names)\n \n # TODO: Manage the one below\n #if media_names_joined == 'OPAC B-27':\n #import pdb; pdb.set_trace()\n return media_names_joined", "def list_media(storage, filter_list):\n results = []\n total = 0\n try:\n for media in storage.listdir('.')[1]:\n if not media.endswith('/') and media != \"\":\n location = storage.url(media).split('?')[0]\n total += 1\n if not filter_list or location in filter_list:\n results += [\n {'location': location,\n 'tags': MediaTag.objects.filter(\n location=location).values_list(\n 'tag', flat=True)\n }]\n except OSError:\n LOGGER.exception(\n \"Unable to list objects in %s.\", storage.__class__.__name__)\n except S3ResponseError:\n LOGGER.exception(\n \"Unable to list objects in %s bucket.\", storage.bucket_name)\n return {'count': total, 'results': results}", "def get_media(self):\n return [media for dataset in self.datasets for media in dataset.media]", "def get_apid_objects(queue, media_base, args, absolute=False):\n work_lock = Lock()\n work_queue = Queue()\n\n readers = os.cpu_count()\n read_lock = Lock()\n read_queue = Queue()\n read_processes = []\n for number in range(readers):\n read_process = Process(\n target=read_apids, args=(read_queue, read_lock, work_queue, work_lock)\n )\n read_process.start()\n read_processes.append(read_process)\n\n logging.info(\"Collecting APID object information\")\n file_list = []\n file_total = 0\n for file_object in os.scandir(media_base + \"/metadata/apid\"):\n if not file_object.is_file():\n continue\n file_list.append({\"fileName\": file_object.path})\n file_total = file_total + 1\n\n read_lock.acquire()\n for item in file_list:\n read_queue.put(item)\n for item in read_processes:\n read_queue.put({\"exit\": True})\n read_lock.release()\n\n index = 100000\n apid_image_map = {}\n apid_screenshot_map = {}\n apid_full_map = {}\n object_map = {}\n image_cache = {}\n item_count = 0\n if absolute:\n image_base = \"{0}\".format(media_base) + \"/media/{0}\"\n else:\n image_base = \"./media/{0}\"\n while True:\n work_lock.acquire()\n if not work_queue.empty():\n metadata = work_queue.get()\n work_lock.release()\n else:\n work_lock.release()\n time.sleep(0.01)\n continue\n\n item_count = item_count + 1\n apid_full_map.update({metadata[\"apid\"]: metadata})\n if \"image\" in metadata:\n if metadata[\"image\"] not in image_cache:\n base_name = metadata[\"image\"].split(\"/media/\").pop(1)\n image_name = image_base.format(base_name)\n image_extension = image_name.split(\".\").pop(-1)\n\n object_id = \"@M{0}@\".format(index)\n object_entry = [\n \"0 {0} OBJE\".format(object_id),\n \"1 FILE {0}\".format(image_name),\n \"1 FORM {0}\".format(image_extension),\n \"1 TYPE document\",\n ]\n\n object_map.update({object_id: object_entry})\n 
image_cache.update({metadata[\"image\"]: object_id})\n index = index + 1\n else:\n object_id = image_cache[metadata[\"image\"]]\n apid_image_map.update({metadata[\"apid\"]: object_id})\n if \"screenshot\" in metadata:\n base_name = os.path.basename(metadata[\"screenshot\"])\n image_name = image_base.format(\"apid\") + \"/\" + base_name\n image_extension = image_name.split(\".\").pop(-1)\n\n if \"title\" in metadata and metadata[\"title\"] != \"\":\n title = metadata[\"title\"]\n else:\n title = \"Ancestry.com Source Record, {0}\".format(metadata[\"apid\"])\n\n object_id = \"@M{0}@\".format(index)\n object_entry = [\n \"0 {0} OBJE\".format(object_id),\n \"1 FILE {0}\".format(image_name),\n \"1 FORM {0}\".format(image_extension),\n \"1 TITL {0}\".format(title),\n \"1 REFN {0}\".format(metadata[\"apid\"]),\n ]\n\n if \"url\" in metadata and metadata[\"url\"] != \"\":\n object_entry.append(\"1 NOTE {0}\".format(metadata[\"url\"]))\n\n object_map.update({object_id: object_entry})\n index = index + 1\n apid_screenshot_map.update({metadata[\"apid\"]: object_id})\n\n if item_count == file_total:\n break\n\n for read_process in read_processes:\n read_process.join()\n queue.put((apid_image_map, apid_screenshot_map, apid_full_map, object_map))\n logging.info(\"APID object collection completed\")", "def recent_media(self):\r\n return media.RecentMedia(self)", "def test_get_top_n_attachment_resources1(self):\n pass", "def generates_media(self):\n return self._generates_media", "def rotate_thumbs(queryset, direction):\r\n\tfor media in queryset:\r\n\t\tpath = os.path.join(settings.DATA_DIR, '%04d' % media.year,\r\n\t\t\t\t'%02d' % media.month, '%02d' % media.day, media.name)\r\n\r\n\t\tdir = 1 if direction == 'rccw' else -1\r\n\t\tprint 'Rotate', path, dir\r\n\r\n\t\tim = Image.open(path)\r\n\t\tim = im.transpose(Image.ROTATE_90 if dir == 1 else Image.ROTATE_270)\r\n\t\tim.save(path)\r\n\t\t# Save the rotation applied to the media record\r\n\t\t# (so that we can display the original rotated too)\r\n\t\tmedia.rotation = (media.rotation + dir) % 4\r\n\t\tmedia.save()", "def location_medias_top_v1(\n self, location_pk: int, amount: int = 21\n ) -> List[Media]:\n return self.location_medias_v1(location_pk, amount, tab_key=\"ranked\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a media resource has both an image and YouTube video ID specified then cleaning it should throw an error.
def test_clean_both_image_and_youtube_id(image):
    resource = models.MediaResource(image=image, youtube_id="dQw4w9WgXcQ")

    with pytest.raises(ValidationError):
        resource.clean()
[ "def test_clean_no_image_or_youtube_id():\n resource = models.MediaResource()\n\n with pytest.raises(ValidationError):\n resource.clean()", "def test_clean_only_youtube_id():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n resource.clean()", "def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()", "def clean_up_media(chat_id, input_type, output_type):\n if input_media_exist(chat_id, input_type):\n os.remove(f\"./input_media/{chat_id}.{input_type}\")\n if output_media_exist(chat_id, output_type):\n os.remove(f\"./output_media/{chat_id}.{output_type}\")", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_video_delete(self):\n v1, v2 = make_video(media_id='1234'), make_video(media_id='2345')\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n set_resources_and_sync([v1])\n self.assertIsNone(mpmodels.MediaItem.objects.get(id=i1.id).deleted_at)\n self.assertIsNotNone(mpmodels.MediaItem.objects_including_deleted.get(id=i2.id).deleted_at)\n self.assertFalse(mpmodels.MediaItem.objects.filter(id=i2.id).exists())", "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def test_get_thumbnail_url_video_no_oembed(self):\n url = 'http://fakedomain.com/uploads/video.m4v'\n asset = create_external_asset(type='video', title='', url=url)\n self.assertEqual(asset.get_thumbnail_url(), None)", "def clean_video_id(self):\n failed = False\n d = self.cleaned_data\n service = d.get('service')\n # Get the video id and clear whitespace on either side.\n video_id = d.get('video_id', '').strip()\n\n # Validate using YouTube's API:\n if service == 'youtube':\n url = ('http://gdata.youtube.com/feeds/api/videos/{}?alt=json'.\n format(video_id))\n data = requests.get(url)\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Validate using Vimeo's API:\n elif service == 'vimeo':\n data = requests.get('http://vimeo.com/api/v2/video/{}.json'.\n format(video_id))\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Respond based on the outcome.\n if failed:\n message = _(\"Couldn't validate video id using {} API. 
Please \"\n \"verify it exists and check for \"\n \"typos.\".format(service))\n raise forms.ValidationError(message)\n\n return video_id", "def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()", "def test_remote_media_thumbnail_normally_unblocked(self) -> None:\n response = self.make_request(\n \"GET\",\n f\"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100\",\n shorthand=False,\n )\n self.assertEqual(response.code, 200)", "def clean(self):\n cleaned_data = super(VideoReplaceForm, self).clean()\n video_file = self.cleaned_data.get('video_file')\n video_url = self.cleaned_data.get('video_url')\n\n if not video_file and not video_url:\n raise forms.ValidationError(_(\"Video must be a file or url.\"))\n\n if video_file and video_url:\n raise forms.ValidationError(\n _(\"A video cannot have both a file and a url.\"))\n\n return cleaned_data", "def test_empty_media(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['inputs']['files'][0]['mediaTypes'] = []\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n del manifest['job']['interface']['inputs']['files'][0]['mediaTypes']\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def test_no_video_image(self):\n edx_video_id = 'test1'\n get_videos_url = reverse_course_url('videos_handler', self.course.id)\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n with make_image_file(\n dimensions=(settings.VIDEO_IMAGE_MIN_WIDTH, settings.VIDEO_IMAGE_MIN_HEIGHT),\n ) as image_file:\n self.client.post(video_image_upload_url, {'file': image_file}, format='multipart')\n\n val_image_url = get_course_video_image_url(course_id=self.course.id, edx_video_id=edx_video_id)\n\n response = self.client.get_json(get_videos_url)\n self.assertEqual(response.status_code, 200)\n response_videos = json.loads(response.content.decode('utf-8'))[\"videos\"]\n for response_video in response_videos:\n if response_video['edx_video_id'] == edx_video_id:\n self.assertEqual(response_video['course_video_image_url'], val_image_url)\n else:\n self.assertEqual(response_video['course_video_image_url'], None)", "def media_failed(self, failure, request, info):\n return failure", "def item_media_failed(self, failure, item, request, info):\n return failure", "def clean(self):\n 
cleaned_data = super(VideoCreateForm, self).clean()\n video_file = self.cleaned_data.get('video_file')\n video_url = self.cleaned_data.get('video_url')\n\n\n if not video_file and not video_url:\n raise forms.ValidationError(_(\"Video must be a file or url.\"))\n\n if video_file and video_url:\n raise forms.ValidationError(\n _(\"A video cannot have both a file and a url.\"))\n\n return cleaned_data", "def validate_video(form, video):\n if \"youtube.com\" in video.data or \"youtu.be\" in video.data or \"vimeo.com\" in video.data:\n try:\n video_test = requests.get(video.data)\n if video_test.status_code != 200:\n raise ValidationError('Invalid video URL, please try again.')\n except:\n raise ValidationError('Invalid video URL, please try again.')\n else:\n raise ValidationError('Invalid video URL, please try again.')", "def test_recreate_deleted_item(self):\n v1 = make_video(media_id='1234', title='testing')\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')\n i1.delete()\n\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a media resource does not encapsulate any media, cleaning it should throw an error.
def test_clean_no_image_or_youtube_id():
    resource = models.MediaResource()

    with pytest.raises(ValidationError):
        resource.clean()
[ "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()", "def clean_up_media(chat_id, input_type, output_type):\n if input_media_exist(chat_id, input_type):\n os.remove(f\"./input_media/{chat_id}.{input_type}\")\n if output_media_exist(chat_id, output_type):\n os.remove(f\"./output_media/{chat_id}.{output_type}\")", "def test_clean_both_image_and_youtube_id(image):\n resource = models.MediaResource(image=image, youtube_id=\"dQw4w9WgXcQ\")\n\n with pytest.raises(ValidationError):\n resource.clean()", "def delete(self):\n self._cc._delete(\"/1/media/%s\" % self.media_id)\n self.__dict__.clear()", "def test_clean_only_youtube_id():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n resource.clean()", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_empty_media(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['inputs']['files'][0]['mediaTypes'] = []\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n del manifest['job']['interface']['inputs']['files'][0]['mediaTypes']\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def item_media_failed(self, failure, item, request, info):\n return failure", "def test_media_attribute_blows_up():\n with pytest.raises(AssertionError):\n MediaBag().media", "def media_failed(self, failure, request, info):\n return failure", "async def _apply_media_retention_rules(self) -> None:\n # Purge remote media\n if self._media_retention_remote_media_lifetime_ms is not None:\n # Calculate a threshold timestamp derived from the configured lifetime. 
Any\n # media that has not been accessed since this timestamp will be removed.\n remote_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_remote_media_lifetime_ms\n )\n\n logger.info(\n \"Purging remote media last accessed before\"\n f\" {remote_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_remote_media(\n before_ts=remote_media_threshold_timestamp_ms\n )\n\n # And now do the same for local media\n if self._media_retention_local_media_lifetime_ms is not None:\n # This works the same as the remote media threshold\n local_media_threshold_timestamp_ms = (\n self.clock.time_msec() - self._media_retention_local_media_lifetime_ms\n )\n\n logger.info(\n \"Purging local media last accessed before\"\n f\" {local_media_threshold_timestamp_ms}\"\n )\n\n await self.delete_old_local_media(\n before_ts=local_media_threshold_timestamp_ms,\n keep_profiles=True,\n delete_quarantined_media=False,\n delete_protected_media=False,\n )", "def test_delete_media(self) -> None:\n\n number_media = 5\n other_user_tok = self.login(\"user\", \"pass\")\n media_ids = self._create_media_for_user(other_user_tok, number_media)\n\n # Test if the file exists\n local_paths = []\n for media_id in media_ids:\n local_path = self.filepaths.local_media_filepath(media_id)\n self.assertTrue(os.path.exists(local_path))\n local_paths.append(local_path)\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(number_media, channel.json_body[\"total\"])\n self.assertEqual(number_media, len(channel.json_body[\"deleted_media\"]))\n self.assertCountEqual(channel.json_body[\"deleted_media\"], media_ids)\n\n # Test if the file is deleted\n for local_path in local_paths:\n self.assertFalse(os.path.exists(local_path))", "def test_user_has_no_media_DELETE(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(0, channel.json_body[\"total\"])\n self.assertEqual(0, len(channel.json_body[\"deleted_media\"]))", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... 
Done.')", "def test_media_attribute_is_fine_after_being_set():\n b = MediaBag()\n b.media = None\n assert b.media is None", "def clean_media(self):\n # Percentage part\n all_medium = list(self.media.values())\n only_percentage_medium = list(filter(lambda medium: medium.is_percentage, all_medium))\n only_percentage_medium.sort(key=lambda medium: medium.amount_int)\n \n total_percentage = 0\n \n list_ok_medium = []\n for media in only_percentage_medium:\n total_percentage += media.amount_int\n if total_percentage > 100:\n break\n list_ok_medium.append(media)\n \n for media in all_medium:\n if media not in list_ok_medium:\n del self.media[media.name]\n \n all_medium_name = list(self.media.keys())\n # Duplicates\n for medium_name in all_medium_name:\n lowered_medium_name = medium_name.lower()\n # Kubota duplicate, kubota stem cell is different\n if 'kubota' in lowered_medium_name and 'stem' not in lowered_medium_name:\n self.media[\"Kubota's Hepatoblast\"] = self.media.pop(medium_name)\n \n # CM duplicates\n if 'cm1' in lowered_medium_name or 'cm2' in lowered_medium_name or 'cm' == lowered_medium_name:\n self.media[\"CM\"] = self.media.pop(medium_name)\n\n # M87 Duplicates\n if 'm87' in lowered_medium_name:\n self.media[\"M87\"] = self.media.pop(medium_name)\n \n # WIT-P Duplicates\n if 'witp' in lowered_medium_name or 'wit_p' in lowered_medium_name or 'wit-p' in lowered_medium_name:\n self.media[\"WIT-P\"] = self.media.pop(medium_name)\n \n # BEGM duplicates\n if 'begm' in lowered_medium_name:\n self.media[\"BEGM\"] = self.media.pop(medium_name)\n \n # Pancreas Organoid duplicates\n if 'pancreas organoid' in lowered_medium_name:\n self.media[\"Pancreas Organoid\"] = self.media.pop(medium_name)\n \n # Endothelial Growth Medium duplicates (should all be EGM)\n if 'endothelial growth medium' in lowered_medium_name:\n self.media[\"EGM\"] = self.media.pop(medium_name)\n \n #null management\n medias = self.media.keys()\n if \"null\" in self.media.keys():\n if len(medias) == 2:\n del self.media['null']\n # Update the other media to 100%\n remaining_media = list(self.media.copy().keys())[0]\n self.media[remaining_media].amount = '100%'\n #else:\n #print(len(medias))", "def cleanup_old_backups(self):\n print(\"Cleaning Old Backups for media files\")\n\n file_list = utils.get_backup_file_list(\n self.get_databasename(),\n self.get_servername(),\n 'media.tar.gz',\n self.storage\n )\n\n for backup_date, filename in file_list[0:-dbbackup_settings.CLEANUP_KEEP_MEDIA]:\n if int(backup_date.strftime(\"%d\")) != 1:\n print(\" Deleting: %s\" % filename)\n self.storage.delete_file(filename)", "def clean_error(self):\r\n return self._arm.clean_error()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleaning a media resource that only has an image should do nothing.
def test_clean_only_image(image):
    resource = models.MediaResource(image=image)

    resource.clean()
[ "def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True", "def test_clean_no_image_or_youtube_id():\n resource = models.MediaResource()\n\n with pytest.raises(ValidationError):\n resource.clean()", "def clean_up_media(chat_id, input_type, output_type):\n if input_media_exist(chat_id, input_type):\n os.remove(f\"./input_media/{chat_id}.{input_type}\")\n if output_media_exist(chat_id, output_type):\n os.remove(f\"./output_media/{chat_id}.{output_type}\")", "def on_delete_pre(sender, instance, **kwargs):\n if instance.big_image and os.path.isfile(instance.big_image.path):\n os.remove(instance.big_image.path)\n if instance.min_image and os.path.isfile(instance.min_image.path):\n os.remove(instance.min_image.path)", "def applyMorphologicalCleaning(self, image):", "def clean_resources(self):\n self.operations.clean_server(images=self.args.all)", "def clear_renders(self, media_id, owner_username, node):\n\t\ttry:\n\t\t\tmedia_id = validation.media_id(media_id)\n\t\t\towner_username = validation.username(owner_username)\n\t\t\tvalidation.required(node, 'node')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"Clearing renders for image [%s] in user [%s]'s account\" % (media_id, owner_username))\n\n\t\t@stack\n\t\tdef do_clear(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tpaths = result[1]\n\t\t\tdl = []\n\t\t\tfor path in paths:\n\t\t\t\tself.log.debug(\"running delete on [%s.jpg]\" % path)\n\t\t\t\tdl.append(self._delete_binary(\"%s.jpg\" % path))\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: \"success\")\n\t\t\treturn dList\n\n\n\t\td = self._generate_render_paths(media_id, node, owner_username)\n\t\td.addCallback(do_clear)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage))\n\t\treturn d", "def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._image)\n self._image = None", "def get_clean_image(image):\n if not image:\n return \"\"\n if \"music@\" in image:\n # fix for embedded images\n thumbcache = xbmc.getCacheThumbName(image).replace(\".tbn\", \".jpg\")\n thumbcache = \"special://thumbnails/%s/%s\" % (thumbcache[0], thumbcache)\n if not xbmcvfs.exists(thumbcache):\n xbmcvfs.copy(image, thumbcache)\n image = thumbcache\n if image and \"image://\" in image:\n image = image.replace(\"image://\", \"\")\n image = urllib.unquote(image.encode(\"utf-8\"))\n if image.endswith(\"/\"):\n image = image[:-1]\n if not isinstance(image, unicode):\n image = image.decode(\"utf8\")\n return image", "def except_image_only(resource):\n if resource.image is None:\n raise FeatureExtractionError(resource, 400, 'Image resource is required')\n if resource.mask:\n raise FeatureExtractionError(resource, 400, 'Mask resource is not accepted')\n if resource.gobject:\n raise FeatureExtractionError(resource, 400, 'Gobject resource is not accepted')", "def delete_image(self, image):", "def test_clean_both_image_and_youtube_id(image):\n resource = models.MediaResource(image=image, youtube_id=\"dQw4w9WgXcQ\")\n\n with pytest.raises(ValidationError):\n resource.clean()", "def strip(self):\n result = library.MagickStripImage(self.wand)\n if not result:\n self.raise_exception()", "def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._pixmapHandle)\n self._pixmapHandle = 
None\n self.zoom=-1\n self.scene.clear()", "def test_remote_media_thumbnail_normally_unblocked(self) -> None:\n response = self.make_request(\n \"GET\",\n f\"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100\",\n shorthand=False,\n )\n self.assertEqual(response.code, 200)", "def clear_thumbnails(self):", "def test_delete_namespaced_image_stream(self):\n pass", "def check_files(self):\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def test_delete_collection_namespaced_image_stream(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleaning a media resource that only has a YouTube video ID should do nothing.
def test_clean_only_youtube_id():
    resource = models.MediaResource(youtube_id="dQw4w9WgXcQ")

    resource.clean()
[ "def test_clean_no_image_or_youtube_id():\n resource = models.MediaResource()\n\n with pytest.raises(ValidationError):\n resource.clean()", "def test_clean_both_image_and_youtube_id(image):\n resource = models.MediaResource(image=image, youtube_id=\"dQw4w9WgXcQ\")\n\n with pytest.raises(ValidationError):\n resource.clean()", "def clean_video_id(self):\n failed = False\n d = self.cleaned_data\n service = d.get('service')\n # Get the video id and clear whitespace on either side.\n video_id = d.get('video_id', '').strip()\n\n # Validate using YouTube's API:\n if service == 'youtube':\n url = ('http://gdata.youtube.com/feeds/api/videos/{}?alt=json'.\n format(video_id))\n data = requests.get(url)\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Validate using Vimeo's API:\n elif service == 'vimeo':\n data = requests.get('http://vimeo.com/api/v2/video/{}.json'.\n format(video_id))\n # Ensure we can parse the JSON data.\n try:\n json = simplejson.loads(data.text)\n # If not, mark this as a failure.\n except ValueError:\n failed = True\n\n # Respond based on the outcome.\n if failed:\n message = _(\"Couldn't validate video id using {} API. Please \"\n \"verify it exists and check for \"\n \"typos.\".format(service))\n raise forms.ValidationError(message)\n\n return video_id", "def test_video_delete(self):\n v1, v2 = make_video(media_id='1234'), make_video(media_id='2345')\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n set_resources_and_sync([v1])\n self.assertIsNone(mpmodels.MediaItem.objects.get(id=i1.id).deleted_at)\n self.assertIsNotNone(mpmodels.MediaItem.objects_including_deleted.get(id=i2.id).deleted_at)\n self.assertFalse(mpmodels.MediaItem.objects.filter(id=i2.id).exists())", "def test_get_thumbnail_url_video_no_oembed(self):\n url = 'http://fakedomain.com/uploads/video.m4v'\n asset = create_external_asset(type='video', title='', url=url)\n self.assertEqual(asset.get_thumbnail_url(), None)", "def test_type_youtube():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n assert resource.type == models.MediaResource.TYPE_YOUTUBE", "def test_video_removal(self):\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n self._assert_video_removal(self.url, edx_video_id, 1)", "def play_youtube(self, media_id):\n pass", "def clean_up_media(chat_id, input_type, output_type):\n if input_media_exist(chat_id, input_type):\n os.remove(f\"./input_media/{chat_id}.{input_type}\")\n if output_media_exist(chat_id, output_type):\n os.remove(f\"./output_media/{chat_id}.{output_type}\")", "def clear_all_youtube_links(self):\n bool_matching_songs = self.Music['path'].str.contains(\"https://\")\n matching_songs = self.Music[bool_matching_songs]\n\n youtube_links = matching_songs['path'].to_list()\n\n for link in youtube_links:\n self.tags.pop(link, None)\n\n self.Music = self.Music[~bool_matching_songs]", "def youtube_video_id(self, link):\n #http://stackoverflow.com/questions/2964678/jquery-youtube-url-validation-with-regex/10315969#10315969\n yrx = r\"^(?:https?:\\/\\/)?(?:www\\.)?(?:youtu\\.be\\/|youtube\\.com\\/(?:embed\\/|v\\/|watch\\?v=|watch\\?.+&v=))((\\w|-){11})(?:\\S+)?$\"\n import re\n match = 
re.search(yrx, link)\n if match:\n yid = match.group(1)\n else:\n yid = None\n\n return yid", "def test_no_media(self):\n with patched_client() as jwclient:\n # No matter how videos are searched, return none\n jwclient.videos.list.return_value = self.LIST_RESPONSE_WITH_NOTHING\n r = self.client.get(reverse('smsjwplatform:embed', kwargs={'media_id': 34}))\n self.assertEqual(r.status_code, 404)", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_clean_only_image(image):\n resource = models.MediaResource(image=image)\n\n resource.clean()", "def play_youtube(self, media_id):\n raise NotImplementedError()", "def allow_video(self, video_id: str):\n video_to_unflag = self._video_library.get_video(video_id)\n if video_to_unflag is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif video_to_unflag.flag_reason == \"\":\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n video_to_unflag.flag_reason = \"\"\n print(f\"Successfully removed flag from video: {video_to_unflag.title}\")", "def test_parse_youtube_empty(self):\r\n self.assertEqual(VideoDescriptor._parse_youtube(''),\r\n {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()", "def test_parse_youtube_empty(self):\r\n self.assertEqual(\r\n VideoDescriptor._parse_youtube(''),\r\n {'0.75': '',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''}\r\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a media resource has an image, its type property should indicate it's an image.
def test_type_image(image):
    resource = models.MediaResource(image=image)

    assert resource.type == models.MediaResource.TYPE_IMAGE
[ "def is_image(media_node):\n return media_node['__typename'] == JinstaScrape.IMAGE_TYPENAME", "def is_image(content_type):\n return content_type == \"image/jpeg\" or content_type == \"image/png\"", "def isphoto(self):\n return self.media_type == Photos.PHAssetMediaTypeImage", "def test_get_media_info_image(self):\n img = image()\n info_img, info_format = _get_media_info(img.pk, 'image')\n eq_(img.pk, info_img.pk)\n eq_('jpeg', info_format)", "def has_media(self):\r\n if self.image:\r\n return True\r\n return False", "def picture(result):\n media = result.entities.get('media')\n if media:\n return media[0].get('type') == u'photo'\n return False", "def resource_type(self, resource_type):\n allowed_values = [\"image\"]\n if resource_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `resource_type` ({0}), must be one of {1}\"\n .format(resource_type, allowed_values)\n )\n\n self._resource_type = resource_type", "def SourceIsStorageMediaImage(self):\n return self._scan_context.source_type in [\n self._scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,\n self._scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE]", "def _is_image_attempt(self, message):\n img_attempt = False\n if 'attachments' in message:\n img_attempt = message['attachments'][0]['type'] == 'image'\n elif 'image' in message:\n img_attempt = True\n return img_attempt", "def post_image(context, *args, **kwargs):\n obj = context['object']\n fallback = \"%s%s\" % (settings.STATIC_URL, kwargs.get('placeholder')) if kwargs.has_key('placeholder') else ''\n\n if kwargs.has_key('save'):\n context[kwargs['save']] = ''\n\n if not obj:\n return '<img src=\"%s\" />' % fallback\n attrs = kwargs.get('attrs', '') \n size = kwargs.get('size', 'content') # size can be [width]x[height] autocrop :)\n img = None\n if isinstance(obj, MediaContent):\n img = obj\n elif kwargs.has_key('title'):\n iqs = obj.media_content.filter(mimetype__startswith=\"image\", title=kwargs['title'])\n if iqs.exists():\n img = iqs[0]\n elif kwargs.has_key('thumbnail_only'):\n iqs = obj.media_content.get_thumbnail()\n if iqs.exists():\n img = iqs[0]\n elif kwargs.has_key('no_thumbnail'):\n img = obj.media_content.exclude(thumbnail_only=True).first()\n elif kwargs.has_key('gallery_only'):\n iqs = obj.media_content.get_gallery()\n if iqs.exists():\n img = iqs[0]\n elif kwargs.has_key('no_gallery'):\n img = obj.media_content.exclude(gallery_only=True).first()\n else:\n img = obj.get_first_image()\n\n if not img or not isinstance(img, MediaContent) or not img.content:\n if fallback:\n return mark_safe('<img src=\"%s\" %s />' % (fallback, attrs))\n elif isinstance(img, basestring):\n return mark_safe('<img src=\"%s\" />' % img)\n else:\n return ''\n \n if size == 'gallery' and img.gallery:\n img_url = img.gallery.url\n elif size == 'thumbnail' and img.thumbnail:\n img_url = img.thumbnail.url\n elif len(size.split('x')) == 2:\n width, height = size.split('x')\n content_path = img.content.path.split('.')\n content_url = img.content.url.split('/')\n crop_path = '%s_%s.%s' % (''.join(content_path[:-1]), size, content_path[-1])\n crop_url = '%s/%s' % ('/'.join(content_url[:-1]), crop_path.split('/')[-1])\n try:\n with open(crop_path):\n pass\n except IOError:\n try:\n image = Image.open(img.content.path)\n thumb = ImageOps.fit(image, [int(x) for x in size.split('x')], Image.ANTIALIAS, 0, (0.5, 0.0))\n thumb.save(crop_path, 'JPEG', quality=90)\n except IOError:\n img_alt = \"imagen no disponible\"\n img_title = img_alt\n img_url = \"http://placehold.it/%sx%s&text=%s\" % 
(width, height, img_title)\n img_tag = \"<!-- no disponible %(origin_src)s --><img src=\\\"%(src)s\\\" %(attrs)s alt=\\\"%(alt)s\\\" title=\\\"%(title)s\\\"/>\"\n return mark_safe(img_tag % {'origin_src':img.content.path, 'src': img_url, 'alt': img_alt, 'title': img_title, 'attrs': attrs })\n\n img_url = crop_url\n else:\n img_url = img.content.url\n\n img_alt = img.content.name\n img_title = img.title or obj.title\n img_tag = \"<img src=\\\"%(src)s\\\" %(attrs)s alt=\\\"%(alt)s\\\" title=\\\"%(title)s\\\"/>\"\n if kwargs.get('just_url', False):\n return img_url\n result = mark_safe(img_tag % {'src': img_url, 'alt': img_alt, 'title': img_title, 'attrs': attrs })\n if kwargs.has_key('save'):\n context[kwargs['save']] = result\n return '<!---->'\n return result", "def img_mime_type(img):\n if img.format:\n ext = \".\" + img.format\n return mimetypes.types_map.get(ext.lower())\n return None", "def test_badge_should_have_image(self):\n\n badge = self.get_sample_badge()\n # It's a string, even though it often looks like a URL\n self.assertIsInstance(badge.image, str)", "def isImage(filepath):\n filepath = os.path.normpath(filepath)\n return magic.from_file(filepath, mime = True).split('/')[0] == 'image'", "def is_url_image(image_url):\n image_formats = (\"image/png\", \"image/jpeg\", \"image/jpg\")\n r = requests.head(image_url)\n logger.info(f'{image_url} has content type {r.headers[\"content-type\"]}')\n if r.headers[\"content-type\"] in image_formats:\n return True\n return False", "def issuer_image(self, obj):\n\n if obj.diploma.event.issuer.image_thumb:\n return mark_safe(\n '<img src=\"/media/{url}\" width=\"75\" height=\"auto\" >'.format(url = obj.diploma.event.issuer.image_thumb.url.split('/media/')[-1])\n )\n else:\n return mark_safe(\n '<img src=\"/static/not-available.png\" width=\"75\" height=\"75\" >'\n )", "def image():\n\n headers = get_headers()\n if \"accept\" not in headers:\n return image_png() # Default media type to png\n\n accept = headers[\"accept\"].lower()\n\n if \"image/webp\" in accept:\n return image_webp()\n elif \"image/svg+xml\" in accept:\n return image_svg()\n elif \"image/jpeg\" in accept:\n return image_jpeg()\n elif \"image/png\" in accept or \"image/*\" in accept or \"*/*\" in accept:\n return image_png()\n else:\n return status_code(406) # Unsupported media type", "def image_mimetype(render):\r\n render = parse_render(render)\r\n # All most web browsers don't support 'image/x-ms-bmp'.\r\n if render == 'bmp':\r\n return 'image/bmp'\r\n return guess_type('foo.%s' % render)[0]", "def isImage(imgref):\n if (imgref.endswith(\"JPG\")):\n return True\n if (imgref.endswith(\"jpg\")):\n return True\n if (imgref.endswith(\"gif\")):\n return True\n if (imgref.endswith(\"png\")):\n return True\n return False", "def image_items(self):\n return list(e for e in self.items if MediaType.is_image(e.v_media_type))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a media resource has a YouTube video ID, its type property should indicate it's a YouTube video.
def test_type_youtube():
    resource = models.MediaResource(youtube_id="dQw4w9WgXcQ")

    assert resource.type == models.MediaResource.TYPE_YOUTUBE
[ "def video_type(self):\n\t\treturn 'movie'", "def play_youtube(self, media_id):\n pass", "def isYouTube(self):\n if 'youtube' in self.link.split('.'):\n return True\n return None", "def play_youtube(self, media_id):\n raise NotImplementedError()", "def is_video(media_node):\n return media_node['__typename'] == JinstaScrape.VIDEO_TYPENAME", "def video_type(self):\n\t\tpass", "def ismovie(self):\n return self.media_type == Photos.PHAssetMediaTypeVideo", "def video(youtube_id):\r\n display(YouTubeVideo(youtube_id, 720, 480, rel=0))", "def video_type(self):\n return self._video_type", "def video_type(self, video_type):\n if video_type is None:\n raise ValueError(\"Invalid value for `video_type`, must not be `None`\")\n allowed_values = [\"mp4\", \"hls\"]\n if video_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `video_type` ({0}), must be one of {1}\"\n .format(video_type, allowed_values)\n )\n\n self._video_type = video_type", "def is_youtube_video_url(url):\n res = re.match(\n '^(?:http://|https://|)(?:www\\.)?youtu(?:be\\.com/watch\\?v=|\\.be/)([\\w\\-\\_]*)(&(amp;)?‌​[\\w\\?‌​=]*)?$', url)\n if res is None:\n return False\n else:\n return YoutubeID(video=next(s for s in res.groups() if s), list=None)", "def get_youtube_id(url):\n r = is_youtube_video_url(url)\n if r:\n return r\n r = is_youtube_playlist_url(url)\n if r:\n return r\n else:\n return is_youtube_video_and_playlist_url(url)", "def youtube_video_id(self, link):\n #http://stackoverflow.com/questions/2964678/jquery-youtube-url-validation-with-regex/10315969#10315969\n yrx = r\"^(?:https?:\\/\\/)?(?:www\\.)?(?:youtu\\.be\\/|youtube\\.com\\/(?:embed\\/|v\\/|watch\\?v=|watch\\?.+&v=))((\\w|-){11})(?:\\S+)?$\"\n import re\n match = re.search(yrx, link)\n if match:\n yid = match.group(1)\n else:\n yid = None\n\n return yid", "def is_youtube(self):\r\n if not self.source_url:\r\n return False\r\n parsed = urlparse(self.source_url.lower())\r\n return 'youtube' in parsed.netloc or 'youtu.be' in parsed.netloc", "def test_get_oembed_response_youtube_short(self):\n url = 'http://youtu.be/KpichyyCutw'\n response_data = ExternalAsset.get_oembed_response(url)\n self.assertIn('html', response_data)", "def testVideoTrackType(self):\n\n trackLine = _buildTrackLine(0, 'video', {'hello': 'goodbye'})\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n 'video',\n trackType,\n )", "def video_type(self, video_type):\n self._video_type = video_type", "def testVideoTrackType(self):\n\n trackLine = _buildTrackLine(967, 'subtitles', {'hello': 'goodbye'})\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n 'subtitles',\n trackType,\n )", "def is_youtube_video_and_playlist_url(url):\n res = re.match(\n '^(?:http://|https://|)(?:www\\.)?youtube\\.com/watch\\?(list=[a-zA-Z0-9_-]+|v=[a-zA-Z0-9_-]+)'\n '(&list=[a-zA-Z0-9_-]+|&v=[a-zA-Z0-9_-]+)$', url)\n if res is None:\n return False\n else:\n return YoutubeID(\n video=next(s.replace('&v=', '').replace('v=', '')\n for s in res.groups() if s and str(s).lower().replace('&', '').startswith('v=')),\n list=next(s.replace('&list=', '').replace('list=', '')\n for s in res.groups() if s and str(s).lower().replace('&', '').startswith('list=')))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract text and other things from the raw_html for this document.
def extract(self, doc, raw_html):
        super(KenyaTodayCrawler, self).extract(doc, raw_html)

        soup = BeautifulSoup(raw_html)

        # gather title
        doc.title = soup.find(attrs={"property":"og:title"})['content']

        #gather publish date
        date = self.extract_plaintext(soup.select("main.content .entry-meta .entry-time"))
        doc.published_at = self.parse_timestamp(date)

        nodes = soup.select(".content .entry-content p")
        self.log.info(nodes)
        if len(nodes) > 1:
            doc.summary = self.extract_plaintext(nodes[0:1])
            doc.text = "\n\n".join(p.text.strip() for p in nodes[2:])

        doc.author = Author.unknown()
[ "def extract(self, doc, raw_html):\n super(ImzansiCrawler, self).extract(doc, raw_html)\n\n soup = BeautifulSoup(raw_html)\n\n # gather title\n doc.title = self.extract_plaintext(soup.select(\".content .post h1.post-title\"))\n\n #gather publish date\n date = self.extract_plaintext(soup.select(\".content .post .post-byline .published\"))\n doc.published_at = self.parse_timestamp(date)\n \n #gather text and summary\n nodes = soup.select(\".content .post .entry-inner p\")\n if len(nodes) > 1:\n doc.summary = self.extract_plaintext(nodes[0:1])\n doc.text = \"\\n\\n\".join(p.text.strip() for p in nodes[2:])\n\n # gather author \n author = self.extract_plaintext(soup.select(\".content .post .post-byline .author .fn a\"))\n if author:\n doc.author = Author.get_or_create(author.strip(), AuthorType.journalist())\n else:\n doc.author = Author.unknown()", "def extract_text(self, data):", "def _extract_text(self, force_reload=False):\n text_url = '/'.join(self._sources['url'].split('/')[:-1]) + '/text?format=txt'\n text_html = self.ROOT_DIR + 'web/' + text_url.split('://')[-1].replace('/', '_')\n\n html = download_file(text_url, text_html, force_reload)\n\n soup = BeautifulSoup(html, 'html.parser')\n container = soup.find('pre', attrs={'id': 'billTextContainer'})\n try:\n self._text = container.text.strip()\n except AttributeError:\n self._text = ''", "def process_html(raw_html_text):\n\tbounds_list = pre_proc.get_page_bounds(raw_html_text)\n\n\tprocessed_text_html = ( pre_proc.split_spans(raw_html_text) \t| p(pre_proc.delete_non_textual_elements)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.delete_headers, bounds_list)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.delete_vertical_text)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.sort_html)\n\t\t)\n\treturn processed_text_html", "def parse_html(src):\n with open(src, 'r', encoding='utf-8') as f:\n raw_data_ = f.read()\n soup = bs(raw_data_)\n try:\n text = ' '.join(soup.find('article').get_text().split())\n except AttributeError:\n text = None\n return text", "def extract_page_text(html):\n soup = bs4.BeautifulSoup(html)\n\n # Remove <script/> and <style/> content\n for script in soup([\"script\", \"style\"]):\n script.extract()\n\n text = soup.get_text()\n\n # Strip leading and trailing whitespace from each line, then join all the\n # non-empty lines together.\n lines = (line.strip() for line in text.splitlines())\n text = '\\n'.join(line for line in lines if line)\n\n return text", "def _process_html(self):\n pass", "def process_html(html: str, current_url):\n soup = BeautifulSoup(html, 'html.parser')\n urls = process_urls(soup, current_url)\n title = make_title(soup)\n if not title:\n return '', '', ''\n text = process_text(soup)\n return urls, title, text", "def extract_page_text(self, bs_object):\n\n # kill all script and style elements\n for script in bs_object([\"script\", \"style\", \"head\"]):\n script.extract() # rip it out\n\n # get text\n text = bs_object.get_text()\n\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text_list_gen = (chunk for chunk in chunks if chunk)\n text_list = list(text_list_gen)\n # print \"TEXT LIST >>>\\n\", text_list\n \n return text_list", "def parseHtmlText(text):\n # text processing\n raw = BeautifulSoup(text.text, 'html.parser').get_text()\n nltk.data.path.append('./nltk_data/') # set 
the path\n tokens = nltk.word_tokenize(raw)\n text = nltk.Text(tokens)\n # remove punctuation, count raw words\n nonPunct = re.compile('.*[A-Za-z].*')\n raw_words = [w for w in text if nonPunct.match(w)]\n raw_word_count = Counter(raw_words)\n # stop words\n no_stop_words = [w for w in raw_words if w.lower() not in stops]\n no_stop_words_count = Counter(no_stop_words)\n return raw_word_count, no_stop_words_count", "def _get_recipe_html_text(self,):\n self.html = urllib2.urlopen(self.url)\n self.html = \"\".join(self.html.readlines())", "def parse_html(html_txt):\n\n\tsoup = BeautifulSoup(html_txt, 'html.parser')\n\treturn '\\n'.join([line for line in soup.stripped_strings])", "def parse(html):\n\n return BeautifulSoup(html, 'html.parser')", "def _parseMainText(self):\n try:\n self.mainText = re.search('<p class=\"mainText mb-0\">(.+?)</p>', self.reviewHTML).group(1)\n except:\n self.mainText = str()", "def _extract_description(html: str) -> str:\n\n soup = BeautifulSoup(html, features='html.parser')\n div = soup.html.body.findAll('div', {'id': 'mw-content-text'})[0]\n return div.findAll('p', {'class': None})[0].get_text()", "def process_doc_html(self, doc_in):\n self.feed(doc_in) #SGMLParser call\n self.close() #SGMLParser call\n self.hand_off_temp_pieces('to_doc_pieces')\n self.all_pieces = self.all_pieces[:-16] # drop </body></html>\n return self.all_pieces", "def parse_unspliced(html):\n soup = bs.BeautifulSoup(html)\n content = soup.find(id=\"content\")\n return content.string.replace('\\n', '')", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def extractText(html_code):\n html_tree = html.fromstring(html_code)\n chapter_list = html_tree.find_class(\"chapter\")\n chapter_text = chapter_list[0].text_content()\n return chapter_text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
phase5 requires a 4-edge combo where none of the edges are in the z-plane. phase4 will put a 4-edge combo into that state. There are 12!/(4!8!) = 495 different 4-edge combinations. Try them all and see which one has the lowest phase4 cost.
def find_first_four_edges_to_pair(self):
        original_state = self.state[:]
        original_solution = self.solution[:]
        original_solution_len = len(self.solution)
        results = []

        for wing_str_index, wing_str_combo in enumerate(itertools.combinations(wing_strs_all, 4)):
            wing_str_combo = sorted(wing_str_combo)
            self.state = original_state[:]
            self.solution = original_solution[:]
            self.lt_phase4.wing_strs = wing_str_combo

            if self.lt_phase4.solve():
                phase4_solution = self.solution[original_solution_len:]
                phase4_solution_len = len(phase4_solution)
                results.append((phase4_solution_len, wing_str_combo))
                logger.debug(
                    f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is {phase4_solution_len}"
                )
            else:
                logger.debug(f"{wing_str_index+1}/495 {wing_str_combo} phase-4 solution length is >= 4 ")

            self.lt_phase4.fh_txt_cache = {}

        self.state = original_state[:]
        self.solution = original_solution[:]
        results.sort()
        return results
[ "def test_fk5_galactic():\n\n fk5 = FK5(ra=1*u.deg, dec=2*u.deg)\n\n direct = fk5.transform_to(Galactic)\n indirect = fk5.transform_to(FK4).transform_to(Galactic)\n\n assert direct.separation(indirect).degree < 1.e-10\n\n direct = fk5.transform_to(Galactic)\n indirect = fk5.transform_to(FK4NoETerms).transform_to(Galactic)\n\n assert direct.separation(indirect).degree < 1.e-10", "def step4(self) :\r\n self.global_solution = self.solutions[0]\r\n for k in range(self.nb_ants) : \r\n U_k = self.compute_U_k(self.solutions[k])\r\n L = self.compute_ATT(U_k)\r\n if L < self.L_gb : \r\n self.L_gb = L\r\n self.global_solution = self.solutions[k]\r\n for l in range(self.nb_buslines):\r\n #Diminution du niveau de phéromone global\r\n for i in range(1,self.nb_buses+1) : \r\n for j in range(1,self.nb_buses+1) :\r\n self.update_pheromone_4 (l, i, j)\r\n #Augmentation du niveau de phéromones des arrêts appartenant à la solution globale. \r\n for i in range(len(self.global_solution)) :\r\n for j in range(len(self.global_solution[i])-1):\r\n self.pheromone_level[self.global_solution[i][j],self.global_solution[i][j+1],l] += self.alpha / self.L_gb \r\n self.pheromone_level[self.global_solution[i][j+1],self.global_solution[i][j],l] += self.alpha / self.L_gb", "def step5(my_cube):\n\ti = 0\n\twhile True:\n\t\ti += 1\n\t\tif i > 5000:\n\t\t\traise ValueError(\"不正确的魔方\")\n\t\ta = judge_finished(my_cube)\n\t\tif a:\n\t\t\tbreak\n\t\t# a=judge_bottom_side()\n\t\ta = judge_bottom_side(my_cube)\n\t\tif a:\n\t\t\tif a == 3:\n\t\t\t\tmy_cube.rotation(my_cube.sides['yellow'], 'l', PRINT_OUT)\n\t\t\telif a == 2:\n\t\t\t\tmy_cube.rotation(my_cube.sides['yellow'], 'l', PRINT_OUT)\n\t\t\t\tmy_cube.rotation(my_cube.sides['yellow'], 'l', PRINT_OUT)\n\t\t\telif a == 1:\n\t\t\t\tmy_cube.rotation(my_cube.sides['yellow'], 'r', PRINT_OUT)\n\t\ta = judge_bottom_conner(my_cube)\n\t\tif a:\n\t\t\tif type(a) is str:\n\t\t\t\talgorithm5_2(my_cube, a)\n\t\t\telif type(a) is tuple:\n\t\t\t\talgorithm5_1(my_cube, a[0], a[1])\n\t\telse:\n\t\t\talgorithm5_2(my_cube, 'red')\n\t\t\tcontinue\n\n\tpass", "def test_4_2_5D_cube_splits(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 1, 1, 1, 0),\n (1, 1, 1, 0, 1), (1, 1, 0, 1, 0), (1, 1, 0, 1, 1),\n (1, 1, 0, 0, 1), (1, 0, 1, 0, 0), (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1), (1, 0, 0, 0, 1), (0, 1, 0, 0, 0),\n (0, 1, 1, 0, 0), (0, 1, 1, 1, 0), (0, 1, 1, 1, 1),\n (0, 1, 1, 0, 1), (0, 1, 0, 1, 0), (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1), (0, 0, 1, 0, 1), (0, 0, 0, 1, 0),\n (0, 0, 0, 1, 1), (0, 0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.0), (0.0, 0.0, 0.5, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5, 0.5), (0.0, 0.5, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.0, 0.0, 0.0), (0.0, 0.5, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.5, 0.0, 0.5), (0.0, 0.5, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0, 0.0), (0.5, 0.0, 0.0, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.0, 0.5), (0.5, 0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.0, 0.0), (0.5, 0.5, 0.5, 
0.5, 0.0),\n (0.25, 0.25, 0.25, 0.25, 0.25), (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5), (1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 0.5), (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 1.0, 0.5, 0.5, 1.0), (1.0, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0), (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5), (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0), (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 1.0), (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5), (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0), (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5), (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5, 0.5), (1.0, 0.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.0, 0.5, 0.0),\n (1.0, 0.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0, 0.0),\n (1.0, 0.0, 0.5, 0.5, 0.0), (1.0, 0.5, 0.0, 0.5, 0.5),\n (1.0, 0.5, 0.0, 0.0, 0.5), (1.0, 0.5, 0.0, 0.0, 0.0),\n (1.0, 0.5, 0.0, 0.5, 0.0), (1.0, 0.5, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25, 0.25), (1.0, 1.0, 0.0, 0.5, 0.5),\n (1.0, 1.0, 0.0, 0.0, 0.5), (1.0, 1.0, 0.0, 0.5, 0.0),\n (1.0, 1.0, 0.5, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0, 0.0),\n (1.0, 1.0, 0.5, 0.5, 0.0), (0.5, 1.0, 0.0, 0.5, 0.5),\n (0.5, 1.0, 0.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.0, 0.0),\n (0.5, 1.0, 0.0, 0.5, 0.0), (0.5, 1.0, 0.5, 0.0, 0.5),\n (0.5, 1.0, 0.5, 0.0, 0.0), (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25, 0.25), (1.0, 1.0, 1.0, 0.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.0), (1.0, 0.5, 1.0, 0.0, 0.5),\n (1.0, 0.5, 1.0, 0.0, 0.0), (1.0, 0.5, 1.0, 0.5, 0.0),\n (0.5, 1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 1.0, 0.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0), (0.5, 0.5, 1.0, 0.0, 0.5),\n (0.5, 0.5, 1.0, 0.0, 0.0), (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.75, 0.25, 0.25), (1.0, 1.0, 0.5, 1.0, 0.0),\n (1.0, 0.5, 1.0, 1.0, 0.0), (1.0, 0.5, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 1.0, 0.0), (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 0.5, 1.0, 1.0, 0.0), (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.75, 0.25), (1.0, 1.0, 0.5, 0.0, 1.0),\n (1.0, 0.5, 1.0, 0.0, 1.0), (1.0, 0.5, 0.5, 0.0, 1.0),\n (0.5, 1.0, 1.0, 0.0, 1.0), (0.5, 1.0, 0.5, 0.0, 1.0),\n (0.5, 0.5, 1.0, 0.0, 1.0), (0.5, 0.5, 0.5, 0.0, 1.0),\n (0.75, 0.75, 0.75, 0.25, 0.75), (1.0, 1.0, 0.0, 1.0, 0.5),\n (1.0, 0.5, 0.0, 1.0, 0.5), (1.0, 0.5, 0.0, 1.0, 0.0),\n (0.5, 1.0, 0.0, 1.0, 0.5), (0.5, 1.0, 0.0, 1.0, 0.0),\n (0.5, 0.5, 0.0, 1.0, 0.5), (0.5, 0.5, 0.0, 1.0, 0.0),\n (0.75, 0.75, 0.25, 0.75, 0.25), (1.0, 1.0, 0.0, 0.5, 1.0),\n (1.0, 0.5, 0.0, 1.0, 1.0), (1.0, 0.5, 0.0, 0.5, 1.0),\n (0.5, 1.0, 0.0, 1.0, 1.0), (0.5, 1.0, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0, 1.0), (0.5, 0.5, 0.0, 0.5, 1.0),\n (0.75, 0.75, 0.25, 0.75, 0.75), (1.0, 0.5, 0.0, 0.0, 1.0),\n (0.5, 1.0, 0.0, 0.0, 1.0), (0.5, 0.5, 0.0, 0.0, 1.0),\n (0.75, 0.75, 0.25, 0.25, 0.75), (1.0, 0.0, 1.0, 0.5, 0.5),\n (1.0, 0.0, 1.0, 0.0, 0.5), (1.0, 0.0, 1.0, 0.5, 0.0),\n (0.5, 0.0, 1.0, 0.5, 0.5), (0.5, 0.0, 1.0, 0.0, 0.5),\n (0.5, 0.0, 1.0, 0.0, 0.0), (0.5, 0.0, 1.0, 0.5, 0.0),\n (0.75, 0.25, 0.75, 0.25, 0.25), (1.0, 0.0, 1.0, 1.0, 0.5),\n (1.0, 0.0, 0.5, 1.0, 0.5), (1.0, 0.0, 0.5, 1.0, 0.0),\n (0.5, 0.0, 1.0, 1.0, 0.5), (0.5, 0.0, 1.0, 1.0, 0.0),\n (0.5, 0.0, 0.5, 1.0, 0.5), (0.5, 0.0, 0.5, 1.0, 0.0),\n (0.75, 0.25, 
0.75, 0.75, 0.25), (1.0, 0.0, 1.0, 0.5, 1.0),\n (1.0, 0.0, 0.5, 1.0, 1.0), (1.0, 0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 1.0, 1.0, 1.0), (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0, 1.0), (0.5, 0.0, 0.5, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75, 0.75), (1.0, 0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 1.0, 0.0, 1.0), (0.5, 0.0, 0.5, 0.0, 1.0),\n (0.75, 0.25, 0.75, 0.25, 0.75), (1.0, 0.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 0.0, 1.0, 0.5), (0.5, 0.0, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.25, 0.75, 0.25), (1.0, 0.0, 0.0, 0.5, 1.0),\n (0.5, 0.0, 0.0, 1.0, 1.0), (0.5, 0.0, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.0, 0.0, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.0), (0.0, 1.0, 0.5, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0, 0.0), (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.25, 0.75, 0.25, 0.25, 0.25), (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.0, 0.5), (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5), (0.0, 0.5, 1.0, 0.0, 0.5),\n (0.0, 0.5, 1.0, 0.0, 0.0), (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.25, 0.75, 0.75, 0.25, 0.25), (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5), (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5), (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.5), (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.75, 0.25), (0.0, 1.0, 1.0, 0.5, 1.0),\n (0.0, 1.0, 0.5, 1.0, 1.0), (0.0, 1.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 1.0, 1.0, 1.0), (0.0, 0.5, 1.0, 0.5, 1.0),\n (0.0, 0.5, 0.5, 1.0, 1.0), (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75, 0.75), (0.0, 1.0, 0.5, 0.0, 1.0),\n (0.0, 0.5, 1.0, 0.0, 1.0), (0.0, 0.5, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.75, 0.25, 0.75), (0.0, 1.0, 0.0, 1.0, 0.5),\n (0.0, 0.5, 0.0, 1.0, 0.5), (0.0, 0.5, 0.0, 1.0, 0.0),\n (0.25, 0.75, 0.25, 0.75, 0.25), (0.0, 1.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0, 1.0), (0.0, 0.5, 0.0, 0.5, 1.0),\n (0.25, 0.75, 0.25, 0.75, 0.75), (0.0, 0.5, 0.0, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5, 0.0),\n (0.25, 0.25, 0.75, 0.25, 0.25), (0.0, 0.0, 1.0, 1.0, 0.5),\n (0.0, 0.0, 0.5, 1.0, 0.5), (0.0, 0.0, 0.5, 1.0, 0.0),\n (0.25, 0.25, 0.75, 0.75, 0.25), (0.0, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.0, 0.5, 1.0, 1.0), (0.0, 0.0, 0.5, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75, 0.75), (0.0, 0.0, 0.5, 0.0, 1.0),\n (0.25, 0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(1, 1, 1, 1, 1): [(1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0),\n (1.0, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0.75, 0.75, 0.75, 0.75, 0.75),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0),\n (0.5, 1.0, 0.5, 1.0, 1.0)],\n (0.25, 0.75, 0.75, 0.75, 0.25): [(0.5, 1.0, 1.0, 1.0, 
0.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0, 1, 1, 1, 0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (\n 0.5, 0.5, 1.0, 0.5, 0.5)],\n (0.0, 0.0, 1.0, 0.5, 1.0): [(0.5, 0.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.0, 0.0, 0.5, 0.5, 1.0),\n (0, 0, 1, 1, 1),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.5, 1.0, 0.5, 1.0),\n (0, 0, 1, 0, 1),\n (0.5, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.25, 0.25, 0.75, 0.75, 0.75),\n (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (\n 0.25, 0.25, 0.75, 0.25, 0.75)]}\n\n init_triangulation(5, 1, check, nn_checks)", "def _step4(state):\n # We convert to int as numpy operations are faster on int\n C = (state.C == 0).astype(int)\n covered_C = C * state.row_uncovered[:, np.newaxis]\n covered_C *= np.asarray(state.col_uncovered, dtype=int)\n covered_C *= (state.available).astype(int)\n n = state.C.shape[0]\n m = state.C.shape[1]\n\n while True:\n # Find an uncovered, available zero\n row, col = np.unravel_index(np.argmax(covered_C), (n, m))\n if covered_C[row, col] == 0:\n return _step6\n else:\n state._prime(row, col)\n # Find the first starred element in the row\n star_col = np.argmax(state.marked[row] == 1)\n if state.marked[row, star_col] != 1:\n # Could not find one\n state.Z0_r = row\n state.Z0_c = col\n return _step5\n else:\n col = star_col\n state.row_uncovered[row] = False\n state.col_uncovered[col] = True\n covered_C[:, col] = C[:, col] * (\n np.asarray(state.row_uncovered, dtype=int)) * (\n state.available[:, col])\n covered_C[row] = 0", "def rule5(pag,i,j,k,l):\n for path in pag.findUncoveredCirclePaths(i,j):\n edge = pag.has_o(i,j,j) and pag.has_o(i,j,j)\n on_path = False\n if l in path and k in path:\n on_path = path.index(k) == 1 and path.index(l) == (len(path) - 2)\n nonadj = not pag.has_edge(i,l) and not pag.has_edge(k,j)\n if edge and on_path and nonadj:\n pag.undirect_edge(i,j)\n print('Orienting edge {},{} with rule 5'.format(i,j))\n for x in range(len(path)-1):\n pag.undirect_edge(path[x], path[x+1])\n print('Orienting edge {},{} with rule 5'.format(path[x], path[x+1]))", "def fkine_ur5(q):\n \n \n T1 = dh(0.08916, +q[0], 0.0, +pi/2)\n T2 = dh( 0.0, +q[1], -0.425, 0.0)\n T3 = dh( 0.0, +q[2], -0.392, 0.0)\n T4 = dh(0.10915, +q[3], 0.0, +pi/2)\n T5 = dh(0.09465, +pi+q[4], 0.0, +pi/2)\n T6 = dh( 0.0823, +pi+q[5], 0.0, 0.0)\n \n # Efector final con respecto a la base\n T = np.dot(np.dot(np.dot(np.dot(np.dot(T1,T2),T3),T4),T5),T6)\n return T", "def bdf4_step(self, f, jac, y0, t0, t1, f_params, jac_params):\n # compute the step size\n h = (t1 - t0)\n \n # use rk5 to compute the starting values\n if 
len(self.past_values['y']) < 4:\n y1, t1 = rk5_step(self, f, y0, t0, t1, f_params)\n \n # remember theses values\n self.past_values['y'].append(y1)\n self.past_values['t'].append(t1)\n \n return [y1, t1, True, None]\n \n else:\n # recall previous values\n y0, y1, y2, y3 = self.past_values['y']\n t0, t1, t2, t3 = self.past_values['t']\n \n # compute next time step\n t4 = t3 + h\n \n # implicit method requires solving a non-linear equation in y4\n F = lambda y4: (y4 - (48 / 25) * y3 + (36 / 25) * y2 - (16 / 25) * y1 + \n (3 / 25) * y0 - (12 / 25) * h * f(t4, y4, *f_params))\n \n # create jacobian for F using jac\n F_jac = lambda y4: 1 - (12 / 25) * h * jac(t4, y4, *jac_params)\n \n # use 4th order Adams-Bashford to predict y4\n guess_y4 = y3 + h * ((55 / 24) * f(t3, y3, *f_params) - \n (59 / 24) * f(t2, y2, *f_params) + \n (37 / 24) * f(t1, y1, *f_params) - \n (3 / 8) * f(t0, y0, *f_params)) \n \n # use root finding to solve the non-linear equation for y4\n res = optimize.root(F, guess_y4, method=self.method, jac=F_jac)\n \n # unpack the Result object\n y4 = res.x\n success = res.success\n mesg = res.message\n \n # erase memory\n self.past_values['y'].pop(0)\n self.past_values['t'].pop(0)\n \n # remember current values\n self.past_values['y'].append(y4) \n self.past_values['t'].append(t4)\n \n return [y4, t4, success, mesg]", "def phosphorene_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [-s/2, -ay/2, h], 0),\n ('B', [ s/2, -ay/2, 0], 0),\n ('C', [-s/2 + ax/2, 0, 0], 0),\n ('D', [ s/2 + ax/2, 0, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5')\n )\n\n return lat", "def bdf5_step(self, f, jac, y0, t0, t1, f_params, jac_params):\n # compute the step size\n h = (t1 - t0)\n \n # use rk6 to compute the starting values\n if len(self.past_values['y']) < 5:\n y1, t1 = rk6_step(self, f, y0, t0, t1, f_params)\n \n # remember theses values\n self.past_values['y'].append(y1)\n self.past_values['t'].append(t1)\n \n return [y1, t1, True, None]\n \n else:\n # recall previous values\n y0, y1, y2, y3, y4 = self.past_values['y']\n t0, t1, t2, t3, t4 = self.past_values['t']\n \n # compute next time step\n t5 = t4 + h\n \n # implicit method requires solving a non-linear equation in y5\n F = lambda y5: (y5 - (300 / 137) * y4 + (300 / 137) * y3 - \n (200 / 137) * y2 + (75 / 137) * y1 - \n (12 / 137) * y0 - (60 / 137) * h * f(t5, y5, *f_params))\n \n # create jacobian for F using jac\n F_jac = lambda y5: 1 - (60 / 137) * h * jac(t5, y5, *jac_params)\n \n # use 5th order Adams-Bashford to predict y5\n guess_y5 = y4 + h * ((1901 / 720) * f(t4, y4, *f_params) - \n (1387 / 360) * f(t3, y3, *f_params) + \n (109 / 
30) * f(t2, y2, *f_params) - \n (637 / 360) * f(t1, y1, *f_params) + \n (251 / 720) * f(t0, y0, *f_params)) \n \n # use root finding to solve the non-linear equation for y5\n res = optimize.root(F, guess_y5, method=self.method, jac=F_jac)\n \n # unpack the Result object\n y5 = res.x\n success = res.success\n mesg = res.message\n \n # erase memory\n self.past_values['y'].pop(0)\n self.past_values['t'].pop(0)\n \n # remember current values\n self.past_values['y'].append(y5) \n self.past_values['t'].append(t5)\n \n return [y5, t5, success, mesg]", "def test_4_1_5D_cube_init(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0),\n (1, 1, 1, 0, 0), (1, 1, 1, 1, 0), (1, 1, 1, 0, 1),\n (1, 1, 0, 1, 0),\n (1, 1, 0, 1, 1), (1, 1, 0, 0, 1), (1, 0, 1, 0, 0),\n (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1),\n (1, 0, 0, 0, 1), (0, 1, 0, 0, 0), (0, 1, 1, 0, 0),\n (0, 1, 1, 1, 0),\n (0, 1, 1, 1, 1), (0, 1, 1, 0, 1), (0, 1, 0, 1, 0),\n (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1),\n (0, 0, 1, 0, 1), (0, 0, 0, 1, 0), (0, 0, 0, 1, 1),\n (0, 0, 0, 0, 1),\n (0.5, 0.5, 0.5, 0.5, 0.5)]\n\n nn_checks = {(0, 1, 0, 1, 1): [(0, 0, 0, 0, 0), (\n 0.5, 0.5, 0.5, 0.5, 0.5), (0, 0, 0, 1, 1), (1, 1, 0, 1, 1),\n (0, 1, 0, 0, 0),\n (0, 1, 0, 0, 1),\n (0, 1, 0, 1, 0),\n (0, 0, 0, 0, 1),\n (1, 1, 1, 1, 1),\n (0, 1, 1, 1, 1),\n (0, 0, 0, 1, 0)]}\n\n init_triangulation(5, 0, check, nn_checks)", "def test_circuit_mod_5_4(self):\n operations = [\n qml.PauliX(wires=4),\n qml.Hadamard(wires=4),\n qml.CNOT(wires=[3, 4]),\n qml.CNOT(wires=[0, 4]),\n qml.T(wires=4),\n qml.CNOT(wires=[3, 4]),\n qml.adjoint(qml.T)(wires=4),\n qml.CNOT(wires=[0, 4]),\n qml.CNOT(wires=[0, 3]),\n qml.adjoint(qml.T)(wires=3),\n qml.CNOT(wires=[0, 3]),\n qml.CNOT(wires=[3, 4]),\n qml.CNOT(wires=[2, 4]),\n qml.adjoint(qml.T)(wires=4),\n qml.CNOT(wires=[3, 4]),\n qml.T(wires=4),\n qml.CNOT(wires=[2, 4]),\n qml.CNOT(wires=[2, 3]),\n qml.T(wires=3),\n qml.CNOT(wires=[2, 3]),\n qml.Hadamard(wires=4),\n qml.CNOT(wires=[3, 4]),\n qml.Hadamard(wires=4),\n qml.CNOT(wires=[2, 4]),\n qml.adjoint(qml.T)(wires=4),\n qml.CNOT(wires=[1, 4]),\n qml.T(wires=4),\n qml.CNOT(wires=[2, 4]),\n qml.adjoint(qml.T)(wires=4),\n qml.CNOT(wires=[1, 4]),\n qml.T(wires=4),\n qml.CNOT(wires=[1, 2]),\n qml.adjoint(qml.T)(wires=2),\n qml.CNOT(wires=[1, 2]),\n qml.Hadamard(wires=4),\n qml.CNOT(wires=[2, 4]),\n qml.Hadamard(wires=4),\n qml.CNOT(wires=[1, 4]),\n qml.T(wires=4),\n qml.CNOT(wires=[0, 4]),\n qml.adjoint(qml.T)(wires=4),\n qml.CNOT(wires=[1, 4]),\n qml.T(wires=4),\n qml.CNOT(wires=[0, 4]),\n qml.adjoint(qml.T)(wires=4),\n qml.CNOT(wires=[0, 1]),\n qml.T(wires=1),\n qml.CNOT(wires=[0, 1]),\n qml.Hadamard(wires=4),\n qml.CNOT(wires=[1, 4]),\n qml.CNOT(wires=[0, 4]),\n ]\n\n qs = QuantumScript(operations, [], [])\n zx_g = qml.transforms.to_zx(qs)\n\n assert isinstance(zx_g, pyzx.graph.graph_s.GraphS)\n\n matrix_qscript = qml.matrix(qs)\n matrix_zx = zx_g.to_matrix()\n # Check whether the two matrices are each others conjugate transposes\n mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_zx.T))\n # Remove global phase\n mat_product /= mat_product[0, 0]\n I = qml.math.eye(2**5)\n assert qml.math.allclose(mat_product, I)\n\n qscript_back = qml.transforms.from_zx(zx_g)\n assert isinstance(qscript_back, qml.tape.QuantumScript)\n\n matrix_qscript_back = qml.matrix(qscript_back, wire_order=list(range(len(qs.wires))))\n\n # Check whether the two matrices are 
each others conjugate transposes\n mat_product = qml.math.dot(matrix_qscript, qml.math.conj(matrix_qscript_back.T))\n # Remove global phase\n mat_product /= mat_product[0, 0]\n assert qml.math.allclose(mat_product, I)", "def step5(self):\n\t\tself.j = self.k\n\t\tif self.b[self.k] == 'e':\n\t\t\ta = self.m()\n\t\t\tif a > 1 or (a == 1 and not self.cvc(self.k-1)):\n\t\t\t\tself.k = self.k - 1\n\t\tif self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:\n\t\t\tself.k = self.k -1", "def test_dphase(self):\n model = BDF(debug=False)\n node1, c1, t1 = 100, 3, 0.3\n node2, c2, t2 = 101, 4, 0.4\n sid = 42\n card_lines = ['DPHASE', sid, node1, c1, t1, node2, c2, t2]\n model.add_card(card_lines, card_lines[0], comment='', is_list=True,\n has_none=True)\n model.add_grid(100, [0., 0., 0.])\n model.add_grid(101, [0., 0., 0.])\n model.validate()\n model.cross_reference()\n #print(model.dphases[42])\n save_load_deck(model)", "def step5(self):\n self.j = self.k\n if self.b[self.k] == 'e':\n a = self.m()\n if a > 1 or (a == 1 and not self.cvc(self.k-1)):\n self.k = self.k - 1\n if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:\n self.k = self.k -1", "def am4_step(self, f, jac, y0, t0, t1, f_params, jac_params):\n # compute the step size\n h = (t1 - t0)\n \n # use rk5 to compute the starting values\n if len(self.past_values['y']) < 4:\n y1, t1 = rk5_step(self, f, y0, t0, t1, f_params)\n \n # remember theses values\n self.past_values['y'].append(y1)\n self.past_values['t'].append(t1)\n \n return [y1, t1, True, None]\n \n else:\n # recall previous values\n y0, y1, y2, y3 = self.past_values['y']\n t0, t1, t2, t3 = self.past_values['t']\n \n # compute next time step\n t4 = t3 + h\n \n # implicit method requires solving a non-linear equation in y4\n F = lambda y4: y4 - (y3 + h * ((251 / 720) * f(t4, y4, *f_params) + \n (646 / 720) * f(t3, y3, *f_params) - \n (264 / 720) * f(t2, y2, *f_params) + \n (106 / 720) * f(t1, y1, *f_params) -\n (19 / 720) * f(t0, y0, *f_params))) \n \n # create jacobian for F using jac\n F_jac = lambda y4: 1 - h * (251 / 720) * jac(t4, y4, *jac_params)\n \n # use 4th order Adams-Bashford to predict y4\n guess_y4 = y3 + h * ((55 / 24) * f(t3, y3, *f_params) - \n (59 / 24) * f(t2, y2, *f_params) + \n (37 / 24) * f(t1, y1, *f_params) - \n (3 / 8) * f(t0, y0, *f_params)) \n \n # use root finding to solve the non-linear equation for y1\n res = optimize.root(F, guess_y4, method=self.method, jac=F_jac)\n \n # unpack the Result object\n y4 = res.x\n success = res.success\n mesg = res.message\n \n # erase memory\n self.past_values['y'].pop(0)\n self.past_values['t'].pop(0)\n \n # remember current values\n self.past_values['y'].append(y4) \n self.past_values['t'].append(t4)\n \n return [y4, t4, success, mesg]", "def smooth5(size: int) -> int:\n if size < 6:\n return size\n if not size % 2:\n return size\n\n new = np.inf\n power5 = 1\n while power5 < size:\n power35 = power5\n while power35 < size:\n power2 = 2 ** ((-int(-size // power35) - 1).bit_length())\n n = power2 * power35\n if n == size:\n return new\n elif n < new:\n new = n\n power35 *= 3\n if power35 == size:\n return new\n if power35 < new:\n new = power35\n power5 *= 5\n if power5 == size:\n return new\n if power5 < new:\n new = power5\n return new", "def makeTAPE5(self):\n\n wn1, wn2 = self.wnLims\n\n # loop through each HITRAN molecule and create an associated TAPE5\n allT5 = []\n for iMol, mol in enumerate(self.mols):\n base = os.path.basename(mol)\n print(base)\n tape5 = 'TAPE5_%s' % base\n\n # LNFL 
TAPE5 records \n # (see lnfl_instructions document in LNFL release)\n rec1 = '$ %s' % base\n rec2 = '%10.3f%10.3f' % (wn1-25, wn2+25)\n\n # start off with all molecules off, then turn iMol on, then \n # generate a single string instead of a list of characters\n # and append \n rec3 = ['0'] * self.nMols\n rec3[iMol] = '1'\n rec3 = ''.join(rec3) + ' NBLK1 NOCPL LNOUT '\n end = '%%%%%'\n\n outDat = [rec1, rec2]\n\n # line coupling molecules\n if base in ['02_CO2', '06_CH4', '07_O2']:\n rec3 = rec3.replace('NOCPL', 'MRG2')\n rec4 = [' '] * self.nMols\n rec4[iMol] = '1'\n rec4 = ''.join(rec4)\n outDat += [rec3, rec4]\n else:\n outDat.append(rec3)\n # endif coupling\n\n outDat.append(end)\n\n # now write TAPE5\n outFP = open(tape5, 'w')\n for line in outDat: outFP.write('%s\\n' % line)\n outFP.close()\n\n # copy TAPE5 to subdirectory for molecule in buildDir\n target = '%s/%s' % (self.dirT5, tape5)\n if os.path.exists(target):\n print('WARNING: overwriting %s' % target)\n # endif target check\n os.rename(tape5, target)\n\n allT5.append(target)\n # end molecule loop\n\n self.allT5 = list(allT5)\n return self", "def processPhaseHeight(self, phasesInRing1, phasesInRing2):\n P11, P12, P21, P22 = ([] for i in range(4))\n phaseHeightDictionary = {}\n\n [P11.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index < 2]\n [P12.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 2 and index < 4]\n [P21.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 4 and index < 6]\n [P22.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 6 and index < 8]\n\n if (len(P11) == len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) < len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 20\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) > len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 20\n\n if (len(P12) == len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) < len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 20\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) > len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 20\n\n for phase in phasesInRing1:\n for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing1.append(value)\n\n for phase in phasesInRing2:\n for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing2.append(value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
phase1 stages the centers on sides L and R
phase2 stages the centers on sides F and B and put the LR centers in one of 495 states that can be solved without L L' R R'...this is prep work for phase 3

TODO this needs more work
BLBFRUFRDDFBUULBRLBRRLDLDLFURFLUBUDRRRDDFDFBBLUFRUFFBBFBLLLDBDFBDBLFDUUFRFBLDUDDURFDRBBDFUUFUBFBDLULDLRRUDFDFULLLUUBUDRLURLBBDURFRBULBRFRBRDRRULDFLFLR
results in "5x5x5 edge swaps are odd, cannot pair edges"
def group_centers_phase1_and_2(self) -> None:
    self.rotate_U_to_U()
    self.rotate_F_to_F()

    if self.centers_staged():
        return

    original_state = self.state[:]
    original_solution = self.solution[:]
    tmp_solution_len = len(self.solution)

    # find multiple phase1 solutions
    phase1_solutions = self.lt_LR_centers_stage.solutions_via_c(solution_count=100)
    pt_state_indexes = []
    pt_state_indexes_LR_centers_special = []
    phase2_pt_state_indexes_to_phase1_solution = {}
    logger.info(f"found {len(phase1_solutions)} phase1 solutions")

    # find the phase2 solution for each phase1 solution
    for phase1_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) in phase1_solutions:
        self.state = original_state[:]
        self.solution = original_solution[:]

        for step in phase1_solution:
            self.rotate(step)

        # stage the LR centers
        phase2_pt_state_indexes = tuple([pt.state_index() for pt in self.lt_FB_centers_stage.prune_tables])
        pt_state_indexes.append(phase2_pt_state_indexes)
        phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution

        # stage the LR centers and put them into one of 495 states solveable with L L' R R'
        phase2_pt_state_indexes = tuple(
            [pt.state_index() for pt in self.lt_FB_centers_stage_LR_centers_special.prune_tables]
        )
        pt_state_indexes_LR_centers_special.append(phase2_pt_state_indexes)
        phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution

    self.state = original_state[:]
    self.solution = original_solution[:]

    # stage the FB centers
    phase2_solutions = self.lt_FB_centers_stage.solutions_via_c(pt_states=pt_state_indexes, solution_count=1)
    phase2_solution = phase2_solutions[0][0]

    # stage the FB centers and put LR centers into one of 495 states solveable with L L' R R'
    phase2_solutions_lr_centers_special = self.lt_FB_centers_stage_LR_centers_special.solutions_via_c(
        pt_states=pt_state_indexes_LR_centers_special, solution_count=1
    )
    phase2_solution_lr_centers_special = phase2_solutions_lr_centers_special[0][0]

    # if we can put the LR centers into one of 495 states without adding to the move count, make it so
    if len(phase2_solution_lr_centers_special) <= len(phase2_solution):
        min_phase2_solution, (
            pt0_state,
            pt1_state,
            pt2_state,
            pt3_state,
            pt4_state,
        ) = phase2_solutions_lr_centers_special[0]
        min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state, pt2_state]
    else:
        min_phase2_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) = phase2_solutions[0]
        min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state]

    logger.info(
        f"phase2 solution length {len(phase2_solution)}, phase2_lr_centers_special solution length {len(phase2_solution_lr_centers_special)}"
    )

    for step in min_phase1_solution:
        self.rotate(step)
    self.print_cube_add_comment("LR centers staged", tmp_solution_len)

    tmp_solution_len = len(self.solution)
    for step in min_phase2_solution:
        self.rotate(step)
    self.print_cube_add_comment("UD FB centers staged", tmp_solution_len)
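
The bookkeeping above works by keying each candidate phase1 solution on the prune-table state tuple it produces; whichever start state the single batched phase2 search picks can then be traced back to the phase1 move sequence that created it. A toy sketch of that lookup, with invented state tuples and move sequences (none of these values come from the solver):

# invented example data -- the real keys are tuples of prune-table state indexes
phase2_start_to_phase1 = {
    (11, 42): ["Uw", "R", "Uw'"],
    (11, 57): ["F", "Rw2", "F'"],
}
chosen_start = (11, 57)  # pretend the batched phase2 search chose to start from this state
phase1_moves = phase2_start_to_phase1[chosen_start]
print(phase1_moves)  # ['F', 'Rw2', "F'"]
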
[ "def chain_corrections():\n \n #read the files\n sample_4m=read_sample(map_files('sample_4m'))\n empty_cell_4m=read_sample(map_files('empty_cell_4m'))\n empty_4m=read_sample(map_files('empty_4m'))\n transmission_sample_cell_4m=read_sample(map_files('trans_sample_4m'))\n transmission_empty_cell_4m=read_sample(map_files('trans_empty_cell_4m'))\n blocked_beam_4m=read_sample(map_files('blocked_4m'))\n sensitivity=read_div(map_files('div'))\n #mask=read_sample(map_files('mask'))\n \n #normalize the monitors\n \n sample_4m_norm=monitor_normalize(sample_4m)\n empty_cell_4m_norm=monitor_normalize(empty_cell_4m)\n transmission_sample_cell_4m_norm=monitor_normalize(transmission_sample_cell_4m)\n transmission_empty_cell_4m_norm=monitor_normalize(transmission_empty_cell_4m)\n empty_4m_norm=monitor_normalize(empty_4m)\n blocked_beam_4m_norm=monitor_normalize(blocked_beam_4m)\n \n #calculate q\n sample_4m_norm_q=convert_q(sample_4m_norm)\n empty_cell_4m_norm_q=convert_q(empty_cell_4m)\n blocked_beam_4m_norm_q=convert_q(blocked_beam_4m_norm)\n transmission_sample_cell_4m_norm_q=convert_q(transmission_sample_cell_4m_norm)\n transmission_empty_cell_4m_norm_q=convert_q(transmission_empty_cell_4m_norm)\n empty_4m_norm_q=convert_q(empty_4m_norm)\n \n \n print 'converted'\n #convert flatness\n sample_4m_solid=correct_solid_angle(sample_4m_norm_q)\n empty_cell_4m_solid=correct_solid_angle(empty_cell_4m_norm_q)\n blocked_beam_4m_solid=correct_solid_angle(blocked_beam_4m_norm_q)\n transmission_sample_cell_4m_solid=correct_solid_angle(transmission_sample_cell_4m_norm_q)\n transmission_empty_cell_4m_solid=correct_solid_angle(transmission_empty_cell_4m_norm_q)\n empty_4m_solid=correct_solid_angle(empty_4m_norm_q)\n \n \n #calculate transmission\n coord_left=(60,60)\n coord_right=(70,70)\n transmission_sample_cell_4m_rat=generate_transmission(transmission_sample_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n transmission_empty_cell_4m_rat=generate_transmission(transmission_empty_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n print 'Sample transmission= {} (IGOR Value = 0.724)'.format(transmission_sample_cell_4m_rat)\n print 'Empty Cell transmission= {} (IGOR Value = 0.929)'.format(transmission_empty_cell_4m_rat)\n print 'hi'\n \n #Initial Correction -- Not with the sub/mult tools,\n #SAM = sample_4m_solid.data\n #print SAM.x\n #EMP = empty_4m_solid.data\n #print \"EMP: \"\n #print EMP.x\n #BGD = blocked_beam_4m_solid.data\n #print \"BGD\"\n #print BGD.x\n #Tsam = transmission_sample_cell_4m_rat\n #Temp = transmission_empty_cell_4m_rat\n #COR1 = SAM.__sub__(BGD)\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n \n SAM = sample_4m_solid\n print SAM.data.x\n EMP = empty_4m_solid\n print \"EMP: \"\n print EMP.data.x\n BGD = blocked_beam_4m_solid\n print \"BGD:\"\n print BGD.data.x\n Tsam = transmission_sample_cell_4m_rat\n Temp = transmission_empty_cell_4m_rat\n print \"COR1:\"\n COR1 = SAM.__sub1__(BGD)\n print COR1.data.x #check=works\n #-----Problems Here-------\n print \"COR2:\"\n COR2 = (EMP.__sub1__(BGD)) #check=works\n print COR2.data.x\n print \"COR3:\"\n #AJJ - __mul__ not working because Tsam and Temp are Measurement instances and not simply floats. 
See above.\n COR3 = COR2.__mul__(Tsam/Temp) #mul not working\n print COR3.data.x\n #COR = COR1.__sub1__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.data.x", "def part_4():\n l = cv2.imread(os.path.join(input_dir, 'pair2-L.png'), 0) / 255.\n r = cv2.imread(os.path.join(input_dir, 'pair2-R.png'), 0) / 255.\n \n ls = cv2.GaussianBlur(l, (21,21), 0)\n rs = cv2.GaussianBlur(r, (21,21), 0)\n \n kernel_sharpen_1 = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])\n lsp = cv2.filter2D(l, -1, kernel_sharpen_1)\n rsp = cv2.filter2D(r, -1, kernel_sharpen_1)\n \n w_size = (5,5)\n dmax = 100 \n \n d_l = disparity_ssd(lsp, rsp, 0, w_size, dmax)\n d_r = disparity_ssd(lsp, rsp, 1, w_size, dmax)\n\n d_l = normalize_and_scale(d_l)\n d_r = normalize_and_scale(d_r)\n\n cv2.imwrite(os.path.join(output_dir, 'ps3-4-a-1.png'), d_l)\n cv2.imwrite(os.path.join(output_dir, 'ps3-4-a-2.png'), d_r)\n\n return image_l, image_r # These will be used in 3b", "def step4(self) :\r\n self.global_solution = self.solutions[0]\r\n for k in range(self.nb_ants) : \r\n U_k = self.compute_U_k(self.solutions[k])\r\n L = self.compute_ATT(U_k)\r\n if L < self.L_gb : \r\n self.L_gb = L\r\n self.global_solution = self.solutions[k]\r\n for l in range(self.nb_buslines):\r\n #Diminution du niveau de phéromone global\r\n for i in range(1,self.nb_buses+1) : \r\n for j in range(1,self.nb_buses+1) :\r\n self.update_pheromone_4 (l, i, j)\r\n #Augmentation du niveau de phéromones des arrêts appartenant à la solution globale. \r\n for i in range(len(self.global_solution)) :\r\n for j in range(len(self.global_solution[i])-1):\r\n self.pheromone_level[self.global_solution[i][j],self.global_solution[i][j+1],l] += self.alpha / self.L_gb \r\n self.pheromone_level[self.global_solution[i][j+1],self.global_solution[i][j],l] += self.alpha / self.L_gb", "def _phase1(self):\r\n\t\t\r\n\t\t# Compute the connected synapse mask\r\n\t\tself.syn_c = self.p >= self.syn_th\r\n\t\t\r\n\t\t# Compute the overlaps\r\n\t\tself.overlap[:, 1:] = self.overlap[:, :-1] # Shift\r\n\t\tself.overlap[:, 0] = bn.nansum(self.x[self.syn_map] * self.syn_c, 1)\r\n\t\tself.overlap[:, 0][self.overlap[:, 0] < self.seg_th] = 0\r\n\t\tself.overlap[:, 0] = self.overlap[:, 0] * self.boost", "def hierarchical_lk(img_a, img_b, levels, k_size, k_type, sigma, interpolation,\n border_mode):\n \n \n \n #img_a = cv2.blur(img_a,(2,2))\n #img_b = cv2.blur(img_b,(2,2))\n gp_imga = gaussian_pyramid(img_a, levels)\n gp_imgb = gaussian_pyramid(img_b, levels)\n red_img = reduce_image(gp_imga[levels-1])\n m,n = red_img.shape\n uk1 = np.zeros((m,n))\n vk1 = np.zeros((m,n))\n curr_imga = gp_imga[levels-1]\n \n for i in range(levels-1,-1,-1):\n uk1 = expand_image(uk1)\n vk1 = expand_image(vk1)\n uk1 = uk1 * 2\n vk1 = vk1 * 2\n uk1 = uk1[0:gp_imgb[i].shape[0],0:gp_imgb[i].shape[1]]\n vk1 = vk1[0:gp_imgb[i].shape[0],0:gp_imgb[i].shape[1]]\n warp_b = warp(gp_imgb[i], uk1, vk1, interpolation, border_mode)\n (u_corr, v_corr) = optic_flow_lk(gp_imga[i], warp_b, k_size, k_type, sigma=1)\n uk1 = uk1 + u_corr\n vk1 = vk1 + v_corr\n \n \n# =============================================================================\n# for i in range(levels-1,0,-1):\n# print 'iteration =' + str(i)\n# curr_imgb = gp_imgb[i]\n# (uk, vk) = optic_flow_lk(curr_imga, curr_imgb, k_size, k_type, sigma=1)\n# #print('uk1 shape =' + str(uk1.shape))\n# #print('uk shape =' + str(uk.shape))\n# uk1 = uk + 
uk1\n# vk1 = vk + vk1\n# uk1 = expand_image(uk1) * 2\n# vk1 = expand_image(vk1) * 2\n# warped_imga = warp(gp_imgb[i-1], uk1, vk1, interpolation, border_mode)\n# curr_imga = warped_imga\n# =============================================================================\n# uk1 = cv2.blur(uk1,(5,5))\n# vk1 = cv2.blur(vk1,(5,5))\n return (uk1,vk1)", "def frame3dlin_Kg(E,A1,A2,L,Te1,Te2,R=None):\n Kge1= np.array([\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A1*E)/(10*L)],\n [0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , (A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , (A2*E)/(10*L) , 0 , -((A2+3*A1)*E)/30 , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((A2+A1)*E)/60 , 0],\n [0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((A2+3*A1)*E)/30 , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((A2+A1)*E)/60],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A1*E)/(10*L)],\n [0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , (A1*E)/(10*L) , 0 , ((A2+A1)*E)/60 , 0 , 0 , 0 , -(A1*E)/(10*L) , 0 , -((3*A2+A1)*E)/30 , 0],\n [0 , -(A1*E)/(10*L) , 0 , 0 , 0 , ((A2+A1)*E)/60 , 0 , (A1*E)/(10*L) , 0 , 0 , 0 , -((3*A2+A1)*E)/30]\n ])\n Kge2= np.array([\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , (A1*E)/(10*L)],\n [0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , -(A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , -(A2*E)/(10*L) , 0 , ((A2+3*A1)*E)/30 , 0 , 0 , 0 , (A2*E)/(10*L) , 0 , -((A2+A1)*E)/60 , 0],\n [0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((A2+3*A1)*E)/30 , 0 , -(A2*E)/(10*L) , 0 , 0 , 0 , -((A2+A1)*E)/60],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A2*E)/(10*L) , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , 0 , 0 , -(A1*E)/(10*L)],\n [0 , 0 , -((3*A2+3*A1)*E)/(5*L**2) , 0 , (A2*E)/(10*L) , 0 , 0 , 0 , ((3*A2+3*A1)*E)/(5*L**2) , 0 , (A1*E)/(10*L) , 0],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0],\n [0 , 0 , -(A1*E)/(10*L) , 0 , -((A2+A1)*E)/60 , 0 , 0 , 0 , (A1*E)/(10*L) , 0 , ((3*A2+A1)*E)/30 , 0],\n [0 , (A1*E)/(10*L) , 0 , 0 , 0 , -((A2+A1)*E)/60 , 0 , -(A1*E)/(10*L) , 0 , 0 , 0 , ((3*A2+A1)*E)/30]])\n\n Kg = Kge1*Te1 + Kge2*Te2\n\n if (R is not None):\n RR = scipy.linalg.block_diag(R,R,R,R)\n Kg = np.transpose(RR).dot(Kg.dot(RR))\n\n return Kg", "def linear_LS_triangulation(u1, P1, u2, P2):\n A = np.zeros((4, 3))\n b = np.zeros((4, 1))\n\n # Create array of triangulated points\n x = np.zeros((3, len(u1)))\n\n # Initialize C matrices\n C1 = np.array(linear_LS_triangulation_C)\n C2 = np.array(linear_LS_triangulation_C)\n\n for i in range(len(u1)):\n # Derivation of matrices A and b:\n # for each camera following equations hold in case of perfect point matches:\n # u.x * (P[2,:] * x) = P[0,:] * x\n # u.y * (P[2,:] * x) = P[1,:] * x\n # and imposing the constraint:\n # x = [x.x, x.y, x.z, 1]^T\n # yields:\n # (u.x * P[2, 0:3] - P[0, 0:3]) * [x.x, x.y, x.z]^T + (u.x * P[2, 3] - P[0, 3]) * 1 = 0\n # (u.y * P[2, 0:3] - P[1, 0:3]) * [x.x, x.y, x.z]^T + (u.y * P[2, 3] - P[1, 3]) 
* 1 = 0\n # and since we have to do this for 2 cameras, and since we imposed the constraint,\n # we have to solve 4 equations in 3 unknowns (in LS sense).\n #\n # Build C matrices, to construct A and b in a concise way\n C1[:, 2] = u1[i, :]\n C2[:, 2] = u2[i, :]\n\n # Build A matrix:\n # [\n # [ u1.x * P1[2,0] - P1[0,0], u1.x * P1[2,1] - P1[0,1], u1.x * P1[2,2] - P1[0,2] ],\n # [ u1.y * P1[2,0] - P1[1,0], u1.y * P1[2,1] - P1[1,1], u1.y * P1[2,2] - P1[1,2] ],\n # [ u2.x * P2[2,0] - P2[0,0], u2.x * P2[2,1] - P2[0,1], u2.x * P2[2,2] - P2[0,2] ],\n # [ u2.y * P2[2,0] - P2[1,0], u2.y * P2[2,1] - P2[1,1], u2.y * P2[2,2] - P2[1,2] ]\n # ]\n A[0:2, :] = C1.dot(P1[0:3, 0:3]) # C1 * R1\n A[2:4, :] = C2.dot(P2[0:3, 0:3]) # C2 * R2\n\n # Build b vector:\n # [\n # [ -(u1.x * P1[2,3] - P1[0,3]) ],\n # [ -(u1.y * P1[2,3] - P1[1,3]) ],\n # [ -(u2.x * P2[2,3] - P2[0,3]) ],\n # [ -(u2.y * P2[2,3] - P2[1,3]) ]\n # ]\n b[0:2, :] = C1.dot(P1[0:3, 3:4]) # C1 * t1\n b[2:4, :] = C2.dot(P2[0:3, 3:4]) # C2 * t2\n b *= -1\n\n # Solve for x vector\n cv2.solve(A, b, x[:, i:i + 1], cv2.DECOMP_SVD)\n\n return np.transpose(x), np.ones(len(u1), dtype=bool)", "def combine_phase(laz, raz, grf_lf_ind, grf_rf_ind, hz, acc_hip_z, acc_hip_x, total_accel):\n # reshape for faster computation\n laz = laz.values.reshape(-1, )\n raz = raz.values.reshape(-1, )\n\n # Check and mark rows with missing data\n length = len(laz)\n missing_data = False\n nan_row = []\n if np.isnan(laz).any() or np.isnan(raz).any():\n missing_data = True\n if missing_data:\n nan_row = np.where(np.isnan(laz) | np.isnan(raz))[0]\n finite_row = np.array(list(set(range(length)) - set(nan_row)))\n laz = np.delete(laz, nan_row, )\n raz = np.delete(raz, nan_row, )\n\n # Filter through low-pass filter\n la_magn = filter_data(laz, filt='low', highcut=ct.cutoff_magn, fs=hz)\n ra_magn = filter_data(raz, filt='low', highcut=ct.cutoff_magn, fs=hz)\n\n acc_hip_z = filter_data(acc_hip_z, filt='low', highcut=6)\n acc_hip_x = filter_data(acc_hip_x, filt='low', highcut=40)\n acc_hip = filter_data(total_accel, filt='low', highcut=15)\n\n # Get balance/movement phase and start and end of movement phase for both\n # right and left foot\n lf_ph, lf_sm, lf_em = _body_phase(la_magn, hz)\n rf_ph, rf_sm, rf_em = _body_phase(ra_magn, hz)\n\n _impact_detect(phase=lf_ph,\n start_move=lf_sm,\n end_move=lf_em,\n grf=grf_lf_ind,\n acc_hip_z=acc_hip_z,\n acc_hip_x=acc_hip_x,\n acc_hip=acc_hip) # detect and add impacts\n del lf_sm, lf_em # no use in further computations\n\n _impact_detect(phase=rf_ph,\n start_move=rf_sm,\n end_move=rf_em,\n grf=grf_rf_ind,\n acc_hip_z=acc_hip_z,\n acc_hip_x=acc_hip_x,\n acc_hip=acc_hip) # detect and add impacts\n del rf_sm, rf_em, raz # no use in further computations\n\n # Insert previous value for phase where data needed to predict was missing\n if missing_data:\n phase_lf = np.ones(length).astype(int)\n phase_lf[finite_row] = lf_ph\n phase_rf = np.ones(length).astype(int)\n phase_rf[finite_row] = rf_ph\n for i in nan_row:\n phase_lf[i] = phase_lf[i - 1]\n phase_rf[i] = phase_rf[i - 1]\n else:\n phase_lf, phase_rf = lf_ph, rf_ph\n\n return phase_lf, phase_rf", "def processPhaseHeight(self, phasesInRing1, phasesInRing2):\n P11, P12, P21, P22 = ([] for i in range(4))\n phaseHeightDictionary = {}\n\n [P11.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index < 2]\n [P12.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 2 and index < 4]\n [P21.append(index+1)for index, 
value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 4 and index < 6]\n [P22.append(index+1)for index, value in enumerate(self.phaseDurationList)\n if value > 0.0 and index >= 6 and index < 8]\n\n if (len(P11) == len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) < len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 20\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 10\n\n elif (len(P11) > len(P21)):\n for index in range(len(P11)):\n if len(P11) > 0:\n phaseHeightDictionary[str(P11[index])] = 10\n\n for index in range(len(P21)):\n if len(P21) > 0:\n phaseHeightDictionary[str(P21[index])] = 20\n\n if (len(P12) == len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) < len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 20\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 10\n\n elif (len(P12) > len(P22)):\n for index in range(len(P12)):\n if len(P12) > 0:\n phaseHeightDictionary[str(P12[index])] = 10\n for index in range(len(P22)):\n if len(P22) > 0:\n phaseHeightDictionary[str(P22[index])] = 20\n\n for phase in phasesInRing1:\n for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing1.append(value)\n\n for phase in phasesInRing2:\n for key, value in phaseHeightDictionary.items():\n if int(key) == phase:\n self.phaseHeightInRing2.append(value)", "def match_l2(self):\n\n fstem = self.path.split('.')[0]# Split('.')[0] takes off the extension\n P = self.P.copy()\n # print(P.Q_SKS)\n # print(P.Q_SKKS)\n # Now apply the test to find the discrepant pairs, by definition the remainder must by the matches\n # First find pairs that are split or not\n uID = P[((P.Q_SKS > -0.7) & (P.Q_SKS < 0.5)) | ((P.Q_SKKS > -0.7) & (P.Q_SKKS < 0.5))]\n P.drop(uID.index)\n null_pairs = P[((P.Q_SKS <= -0.7) & (P.Q_SKKS <= -0.7))] # Pairs where both phases are nulls (according to Q), auto classify as matching\n null_split_pair = P[(((P.Q_SKS <= -0.7) & (P.Q_SKKS >= 0.5)) | ((P.Q_SKS >= 0.5) & (P.Q_SKKS <= -0.7)))] # Test for pairs with 1 null 1 split, discrepant by definition\n splits = P[((P.Q_SKS > 0.5) & (P.Q_SKKS > 0.5 ))] # Test for pairs whjere both phases are split\n t_l2_splits = splits.LAM2_SUM # Lam2_SUm is now the sum of the two 95% confidence levels\n t_l2_ns = null_split_pair.LAM2_SUM\n t_dSI = 0.4 # Threshold of 0.4 taken from Deng et al (2017)\n diff= splits[(splits.LAM2_BAR > t_l2_splits) & (splits.D_SI_Pr > t_dSI)] #| (splits.D_SI > t_dSI))] # Apply tests for discrepant splitting\n match = splits[(splits.LAM2_BAR <= t_l2_splits) | (splits.D_SI_Pr <= t_dSI)] # If the pair fails either lam2 or dSI test then we call it matching\n diff_dsi = splits[(splits.D_SI_Pr > 0.4)]\n match_dsi = splits[(splits.D_SI_Pr <= 0.4)]\n ns_diff = null_split_pair[(null_split_pair.LAM2_BAR > t_l2_ns) & (null_split_pair.D_SI_Pr > t_dSI)]\n ns_match = null_split_pair[(null_split_pair.LAM2_BAR <= t_l2_ns) | (null_split_pair.D_SI_Pr <= t_dSI)]\n\n print(len(self.P))\n print('There are {} split pairs. 
{} are matches and {} are discrepant!'.format(len(splits),len(match),len(diff)))\n print('There are {} null-split pairs. {} are matches and {} are discrepant!'.format(len(null_split_pair),len(ns_match),len(ns_diff)))\n\n test = len(uID) + len(null_pairs) + len(null_split_pair) + len(diff) + len(match)\n print(test)\n # Now combined matching and discrepant pairs together\n null_pairs.to_csv('{}/{}_{:02d}_nulls.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n ns_match.to_csv('{}/{}_{:02d}_matches_null_split.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n ns_diff.to_csv('{}/{}_{:02d}_diffs_null_split.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n match.to_csv('{}/{}_{:02d}_matches_l2.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n diff.to_csv('{}/{}_{:02d}_diffs_l2.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n match_dsi.to_csv('{}/{}_{:02d}_matches_dsi.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n diff_dsi.to_csv('{}/{}_{:02d}_diffs_dsi.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n uID.to_csv('{}/{}_{:02d}_uID_l2.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')\n # Open up mspp files\n print('Writing to {}/{}_{:02d}'.format(self.path,self.sdb_stem,int(self.snr)))\n mspp_match = open('{}/{}_{:02d}_matches_l2.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n mspp_diff = open('{}/{}_{:02d}_diffs_l2.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n # mspp_match_dsi = open('{}/{}_{:02d}_matches_dsi.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n # mspp_diff_dsi = open('{}/{}_{:02d}_diffs_dsi.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n mspp_uID = open('{}/{}_{:02d}_uID_l2.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n mspp_null_pairs = open('{}/{}_{:02d}_nulls.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n mspp_null_split_match = open('{}/{}_{:02d}_matches_null_split.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n mspp_null_split_diff = open('{}/{}_{:02d}_diffs_null_split.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')\n\n\n for i,index in enumerate(diff.index):\n SKS_pp_lat = self.pp.lat_SKS.values[index]\n SKS_pp_lon = self.pp.lon_SKS.values[index]\n SKKS_pp_lat = self.pp.lat_SKKS.values[index]\n SKKS_pp_lon = self.pp.lon_SKKS.values[index]\n #print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)\n mspp_diff.write('> \\n {} {} \\n {} {} \\n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))\n\n for i,index in enumerate(null_pairs.index):\n SKS_pp_lat = self.pp.lat_SKS.values[index]\n SKS_pp_lon = self.pp.lon_SKS.values[index]\n SKKS_pp_lat = self.pp.lat_SKKS.values[index]\n SKKS_pp_lon = self.pp.lon_SKKS.values[index]\n #print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)\n mspp_null_pairs.write('> \\n {} {} \\n {} {} \\n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))\n\n for i,index in enumerate( ns_match.index):\n SKS_pp_lat = self.pp.lat_SKS.values[index]\n SKS_pp_lon = self.pp.lon_SKS.values[index]\n SKKS_pp_lat = self.pp.lat_SKKS.values[index]\n SKKS_pp_lon = self.pp.lon_SKKS.values[index]\n #print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)\n mspp_null_split_match.write('> \\n {} {} \\n {} {} \\n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))\n\n for i,index in enumerate( ns_diff.index):\n SKS_pp_lat = 
self.pp.lat_SKS.values[index]\n SKS_pp_lon = self.pp.lon_SKS.values[index]\n SKKS_pp_lat = self.pp.lat_SKKS.values[index]\n SKKS_pp_lon = self.pp.lon_SKKS.values[index]\n #print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)\n mspp_null_split_diff.write('> \\n {} {} \\n {} {} \\n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))\n\n for i,index in enumerate(match.index):\n SKS_pp_lat = self.pp.lat_SKS.values[index]\n SKS_pp_lon = self.pp.lon_SKS.values[index]\n SKKS_pp_lat = self.pp.lat_SKKS.values[index]\n SKKS_pp_lon = self.pp.lon_SKKS.values[index]\n #print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)\n mspp_match.write('> \\n {} {} \\n {} {} \\n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))\n\n for i,index in enumerate(uID.index):\n SKS_pp_lat = self.pp.lat_SKS.values[index]\n SKS_pp_lon = self.pp.lon_SKS.values[index]\n SKKS_pp_lat = self.pp.lat_SKKS.values[index]\n SKKS_pp_lon = self.pp.lon_SKKS.values[index]\n #print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)\n mspp_uID.write('> \\n {} {} \\n {} {} \\n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))\n\n mspp_uID.close()\n mspp_diff.close()\n mspp_match.close()\n mspp_null_pairs.close()\n mspp_null_split_match.close()\n mspp_null_split_diff.close()", "def setInitialFromFinalValues(previous_phase, next_phase):\n next_phase.c_init = previous_phase.c_final\n next_phase.dc_init = previous_phase.dc_final\n next_phase.ddc_init = previous_phase.ddc_final\n next_phase.L_init = previous_phase.L_final\n next_phase.dL_init = previous_phase.dL_final\n next_phase.q_init = previous_phase.q_final", "def monolayer_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [0, 0, h], 0),\n ('B', [s, 0, 0], 0),\n ('C', [ax/2, ay/2, 0], 0),\n ('D', [ax/2 + s, ay/2, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([ 0, 1], 'A', 'B', 't5'),\n ([ 0, -1], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5'),\n ([ 0, 1], 'C', 'D', 't5'),\n ([ 0, -1], 'C', 'D', 't5'),\n )\n\n return lat", "def SLC_phase_shift(SLC_1, SLC_par1, SLC_2, SLC_par2, ph_shift, logpath=None):\n process(['/cluster/GAMMA_SOFTWARE-20161207/ISP/bin/SLC_phase_shift', SLC_1, SLC_par1, SLC_2, SLC_par2, ph_shift], logpath=logpath)", "def update_chains(self):\r\n _, black_positions, white_positions = self.get_positions()\r\n\r\n self.bfs(black_positions, 1)\r\n self.bfs(white_positions, 2)", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... 
and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def _init_rhs(self): \n # prepare interpolated wave function\n \n wfinter=np.empty((self.nqpoints+1,self.nx),dtype=np.double)\n for iq in range(self.nqpoints+1):\n for ix in range(self.nx): \n wfinter[iq,ix]=np.sum(self.wfd*self.splpip[0:self.npoints,iq,self.nqpoints,ix])\n \n # prepare CG coefficient and Ylam0 factor \n \n cgfakt=np.empty((self.nalpha),dtype=np.double)\n for qnsetp in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnsetp[\"alpha\"]\n lp=qnsetp[\"l\"]\n lamp=qnsetp[\"lam\"]\n cgfakt[alphap]=float(CG(lp,0,lamp,0,self.bl,0).doit())*np.sqrt((2*lamp+1)/(4*m.pi))\n \n # then also perform interpolation of tmatrix a priori\n tinter=np.empty((self.lmax//2+1,self.npoints,self.nqpoints+1,self.nx),dtype=np.cdouble) \n for l in range(0,self.lmax+1,2):\n for ip in range(self.npoints): \n for iq in range(self.nqpoints+1):\n for ix in range(self.nx): \n tinter[l//2,ip,iq,ix]=np.sum(self.tmat[l//2,iq,ip,0:self.npoints]*self.splpi[0:self.npoints,iq,self.nqpoints,ix])\n \n \n \n # the vector depends on the combined index \n # indx_h_rhs=self.npoints*(self.nqpoints+1)*alpha+self.npoints*iq+ip\n # dimensionality: self.npoints*(self.nqpoints+1)*self.nalpha\n \n self.h_rhs=np.zeros((self.npoints*(self.nqpoints+1)*self.nalpha),dtype=np.cdouble)\n for qnset in self.qnalpha: # go through allowed l,lam combinations\n alpha=qnset[\"alpha\"]\n l=qnset[\"l\"]\n for qnsetp in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnsetp[\"alpha\"]\n lp=qnsetp[\"l\"]\n if lp==0: # bound state only in s-wave \n for iq in range(self.nqpoints+1):\n for ip in range(self.npoints): \n indx_h_rhs=self.npoints*(self.nqpoints+1)*alpha+self.npoints*iq+ip \n for jp in range(self.npoints):\n self.h_rhs[indx_h_rhs]+=np.sum(self.xw*tinter[l//2,ip,iq,:]\n *2*self.gfunc[alpha,alphap,iq,self.nqpoints,:]\n *cgfakt[alphap]*wfinter[iq,:])", "def _phase2(self):\r\n\t\t\r\n\t\t# Shift the outputs\r\n\t\tself.y[:, 1:] = self.y[:, :-1]\r\n\t\tself.y[:, 0] = 0\r\n\t\t\r\n\t\t# Calculate k\r\n\t\t# - For a column to be active its overlap must be at least as large\r\n\t\t# as the overlap of the k-th largest column in its neighborhood.\r\n\t\tk = self._get_num_cols()\r\n\t\t\r\n\t\tif self.global_inhibition:\r\n\t\t\t# The neighborhood is all columns, thus the set of active columns\r\n\t\t\t# is simply columns that have an overlap >= the k-th largest in the\r\n\t\t\t# entire region\r\n\t\t\t\r\n\t\t\t# Compute the winning column indexes\r\n\t\t\tif self.learn:\t\t\t\t\r\n\t\t\t\t# Randomly break ties\r\n\t\t\t\tix = np.argpartition(-self.overlap[:, 0] -\r\n\t\t\t\t\tself.prng.uniform(.1, .2, self.ncolumns), k - 
1)[:k]\r\n\t\t\telse:\r\n\t\t\t\t# Choose the same set of columns each time\r\n\t\t\t\tix = np.argpartition(-self.overlap[:, 0], k - 1)[:k]\r\n\t\t\t\r\n\t\t\t# Set the active columns\r\n\t\t\tself.y[ix, 0] = self.overlap[ix, 0] > 0\r\n\t\telse:\r\n\t\t\t# The neighborhood is bounded by the inhibition radius, therefore\r\n\t\t\t# each column's neighborhood must be considered\r\n\t\t\t\r\n\t\t\tfor i in xrange(self.ncolumns):\r\n\t\t\t\t# Get the neighbors\r\n\t\t\t\tix = np.where(self.neighbors[i])[0]\r\n\t\t\t\t\r\n\t\t\t\t# Compute the minimum top overlap\r\n\t\t\t\tif ix.shape[0] <= k:\r\n\t\t\t\t\t# Desired number of candidates is at or below the desired\r\n\t\t\t\t\t# activity level, so find the overall min\r\n\t\t\t\t\tm = max(bn.nanmin(self.overlap[ix, 0]), 1)\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Desired number of candidates is above the desired\r\n\t\t\t\t\t# activity level, so find the k-th largest\r\n\t\t\t\t\tm = max(-np.partition(-self.overlap[ix, 0], k - 1)[k - 1],\r\n\t\t\t\t\t\t1)\r\n\t\t\t\t\r\n\t\t\t\t# Set the column activity\r\n\t\t\t\tif self.overlap[i, 0] >= m: self.y[i, 0] = True", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def set_up_orbit_correctors(ps_beg, delay, id_slice1, ds_slice, zplot, id_slices, U_core, lambdaref):\n SXSS = Chicane(3.2716, 0.362, 0.830399, delay[0])\n HXSS = Chicane(3.2, 0.3636, 0.5828, delay[1])\n\n OC2 = [CORR08, D1_SXSS, SXSS, D2_SXSS, QUAD09, CORR09]\n OC3 = [CORR15, D1_HXSS, HXSS, D2_HXSS, QUAD16, CORR16]\n\n ps_end1 = beam_transportation(ps_beg, U_core[0])\n\n # ps_end1 is a 4-by-N array. N is the number of macro-particles. 
It is the full\n # 4D phase space distribution at the end of the first undulator section.\n\n # The id of the slice on the axis in the second undulator section\n on_axis_id_U2 = int(id_slice1+delay[0]/ds_slice+ (8*110)*lambdaref/ds_slice) # The last part is slippage\n\n print(on_axis_id_U2)\n\n ps_end_slice1 = beam_property_along_s(ps_end1, id_slices)[0:4, :]\n ps_on_axis_2 = np.ravel(ps_end_slice1[:, on_axis_id_U2])\n\n # print(ps_on_axis_2)\n\n OC2_optimized = analyze_orbit_corrector(OC2[0], OC2[-1], OC2[1:-1], ps_on_axis_2)\n print(OC2_optimized)\n CORR08_new = Orbit_Corrector(OC2[0].length, OC2_optimized[0], OC2_optimized[2])\n CORR09_new = Orbit_Corrector(OC2[-1].length, OC2_optimized[1], OC2_optimized[3])\n\n # The whole U2 with optimized orbit correctors\n U2_new = [CORR08_new] + OC2[1:-1] + [CORR09_new] + U_core[1]\n ps_end2 = beam_transportation(ps_end1, U2_new)\n\n # ps_end2 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the second undulator section.\n\n # The id of the slice on the axis in the third undulator section\n on_axis_id_U3 = int(id_slice1+(delay[0]+delay[1])/ds_slice +(14*110*lambdaref)/ds_slice) # The last term is the slipage\n\n print(on_axis_id_U3)\n\n ps_end_slice2 = beam_property_along_s(ps_end2, id_slices)[0:4, :]\n ps_on_axis_3 = np.ravel(ps_end_slice2[ :, on_axis_id_U3])\n\n # print(ps_on_axis_3)\n\n OC3_optimized = analyze_orbit_corrector(OC3[0], OC3[-1], OC3[1:-1], ps_on_axis_3)\n print(OC3_optimized)\n CORR15_new = Orbit_Corrector(OC3[0].length, OC3_optimized[0], OC3_optimized[2])\n CORR16_new = Orbit_Corrector(OC3[-1].length, OC3_optimized[1], OC3_optimized[3])\n\n U3_new = [CORR15_new] + OC3[1:-1] + [CORR16_new] + U_core[2]\n\n Undulator_Beamline = U_core[0]+U2_new+U3_new\n\n return Undulator_Beamline" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a column number into a column letter (3 -> 'C'). Repeatedly divide the column col_idx by 26 (a base-26 "right shift") to find column letters in reverse order. These numbers are 1-based, and can be converted to ASCII ordinals by adding 64.
def _get_column_letter(col_idx): # these indicies corrospond to A -> ZZZ and include all allowed # columns if not 1 <= col_idx <= 18278: raise ValueError("Invalid column index {0}".format(col_idx)) letters = [] while col_idx > 0: col_idx, remainder = divmod(col_idx, 26) # check for exact division and borrow if needed if remainder == 0: remainder = 26 col_idx -= 1 letters.append(chr(remainder+64)) return ''.join(reversed(letters))
[ "def get_column_alphabetical_index_from_zero_indexed_num(col_idx: int) -> str:\n num_letters_alphabet = 26\n\n def get_letter_from_zero_indexed_idx(idx: int):\n ascii_start = 65\n return chr(ascii_start + idx)\n\n prefix_str = ''\n if col_idx < num_letters_alphabet:\n return get_letter_from_zero_indexed_idx(col_idx)\n last_char = get_letter_from_zero_indexed_idx(col_idx % num_letters_alphabet)\n prefix_str = get_column_alphabetical_index_from_zero_indexed_num(col_idx // num_letters_alphabet)\n return prefix_str + last_char", "def int_to_column(num):\n # type: (int) -> str\n col = \"\"\n while num >= 702:\n col += string.ascii_uppercase[(num // (26 ** 2)) - 1]\n num %= (26 ** 2)\n while num >= 26:\n col += string.ascii_uppercase[(num // 26) - 1]\n num = num % 26\n col += string.ascii_uppercase[num % 26]\n return col", "def _col_name(index):\n for exp in itertools.count(1):\n limit = 26 ** exp\n if index < limit:\n return ''.join(chr(ord('A') + index // (26 ** i) % 26) for i in range(exp-1, -1, -1))\n index -= limit", "def int_to_excel_col(self, col_num: int) -> str:\n def conv_num_to_letter(n: int) -> str:\n return chr(n + ord('A') - 1)\n ans = \"\"\n if col_num <= 702: # 702 = 26*27 or ZZ\n second_num = col_num\n if col_num > 26:\n first_num = int((col_num - 1) / 26)\n ans = conv_num_to_letter(first_num)\n second_num = col_num - first_num * 26\n return ans + conv_num_to_letter(second_num)\n logger.warning(f'Columns only go up to ZZ (or 702), but {col_num} was requested. Returning \"BAD\"')\n return 'BAD'", "def get_alpha_column(column):\n letters = []\n while column >= 0:\n letters.append(string.ascii_uppercase[column % 26])\n column = column // 26 - 1\n return ''.join(reversed(letters))", "def reverseCol(input):\n try:\n parsed = chr(input + ord('A'))\n except TypeError:\n raise PositionException, \"Bad input for col; %s\" % input\n if not 0 <= input < CHESS_COLS:\n raise PositionException, \"Col out of range; %d parsed as %s.\" \\\n % (input, parsed)\n return parsed", "def excel_col_letter_to_index(x):\n return reduce(lambda s,a:s*26+ord(a)-ord('A')+1, x, 0)", "def index_from_col(col_name):\n return ord(col_name.upper()) - 65", "def alphabet_col_id(col_num):\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n len_alphabet = len(alphabet)\n col_id = \"\"\n while col_num > 0:\n q = (col_num - 1) // len_alphabet\n r = (col_num - 1) % len_alphabet\n col_id = alphabet[r] + col_id\n col_num = q\n return col_id", "def convert_number_to_excel_colname(n):\n\n assert 0 < n <= 256\n\n alphabet = [chr(x) for x in xrange(65, 91)]\n\n if n > 26:\n return '{0}{1}'.format(alphabet[(n/26) - 1], alphabet[(n%26) - 1])\n else:\n return alphabet[(n%26) - 1]", "def colToLetter(self,aNumber):\r\n letter =\"\"\r\n if aNumber == 1: letter = 'A'\r\n elif aNumber == 2: letter = 'B'\r\n elif aNumber == 3: letter = 'C'\r\n elif aNumber == 4: letter = 'D'\r\n elif aNumber == 5: letter = 'E'\r\n elif aNumber == 6: letter = 'F'\r\n elif aNumber == 7: letter = 'G'\r\n elif aNumber == 8: letter = 'H'\r\n return letter", "def GetExcelStyleColumnLabel(ColNum):\n Letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n ColLabelList = []\n while ColNum:\n ColNum, SubColNum = divmod(ColNum - 1, 26)\n ColLabelList[:0] = Letters[SubColNum]\n \n return ''.join(ColLabelList)", "def getColIdx(self, col):\n try: \n return int(col)\n except:\n return ord(col)-ord('a')", "def getColIdx(self, col):\n try:\n return int(col)\n except:\n return ord(col)-ord('a')", "def _index_to_char(self, ind):\n return chr(ord('A') + ind % 26)", "def 
col_to_num(col_str):\n expn = 0\n col_num = 0\n for char in reversed(col_str):\n col_num += (ord(char) - ord('A') + 1) * (26 ** expn)\n expn += 1\n col_index=col_num-1\n\n return col_index", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def col_to_num(col_str):\n expn = 0\n col_num = 0\n for char in reversed(col_str):\n col_num += (ord(char) - ord('A') + 1) * (26 ** expn)\n expn += 1\n\n return col_num", "def _get_alphanum_cell_index(row, col):\n\n # Convert the column number to the corresponding alphabetic index.\n # Taken from https://stackoverflow.com/questions/181596/how-to-convert-a-column-number-e-g-127-into-an-excel-column-e-g-aa\n dividend = col\n column_name = \"\"\n modulo = 0\n while (dividend > 0):\n modulo = (dividend - 1) % 26\n column_name = chr(65 + modulo) + column_name\n dividend = int((dividend - modulo) / 26)\n\n # Return the alphanumeric cell index.\n return column_name + str(row)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the head of a LinkedList, repeatedly skip i nodes and then delete the next j nodes, using an iterative approach.
def skip_i_delete_j(head, i, j): if i == 0: return None if head is None or j < 0 or i < 0: return head current = head previous = None while current: # skip (i - 1) nodes for _ in range(i - 1): if current is None: return head current = current.next previous = current current = current.next # delete next j nodes for _ in range(j): if current is None: break next_node = current.next current = next_node previous.next = current return head
[ "def deleteAtIndex(self, index):\n cur = self.head\n if cur == None:\n return\n elif index == 0:\n self.head = cur.next\n\n cur, i = self.head, 1\n while cur and i != index:\n cur = cur.next\n i += 1\n if cur.next == None:\n cur = None\n else:\n cur.next = cur.next.next", "def remove(head: Optional[ListNode], index: int) -> Optional[ListNode]:", "def erase(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n last_node = current_node\r\n current_node = current_node.next\r\n if current_index == index:\r\n last_node.next = current_node.next\r\n return\r\n current_index += 1", "def delete_index(self, index):\r\n if index > self.length():\r\n print(\"The index you put in to delete a node is out of range.\")\r\n return\r\n current_node = self.head\r\n for _ in range(index):\r\n prev_node = current_node\r\n current_node = current_node.next\r\n prev_node.next = current_node.next\r\n del current_node", "def deleteAtIndex(self, index: int) -> None:\n prev, curt, count, dummy = self.find_index_node(index)\n \n # 1. index not find\n # 2. index == len(linkedlist) -> no node need to be delete\n # valide index range: [0, len(linkedlist)]\n if count != index or not curt:\n return\n \n prev.next = curt.next\n self.head = dummy.next", "def delete_list(self): \n temp_node = self.head\n while temp_node is not None:\n prev_node = temp_node\n temp_node = temp_node.next\n # prev_node.val += \": deleted\" # for sanity check\n # reset data\n prev_node.val = None\n prev_node.next = None", "def remove(self, index):\n counter = 0\n n = self\n prev = None\n while n.next:\n if counter >= index:\n n.value = n.next.value\n prev = n\n n = n.next\n counter += 1\n prev.next = None", "def delete(self, ele):\n prev = current = self.head\n element_in_head = False\n if self.head:\n while True:\n\tif current.data == ele:\n\t if current == self.head:\n\t element_in_head = True\n\t else:\n\t prev.next = current.next\n\t break\n\tprev = current\n\tcurrent = current.next\n\tif current == self.head:\n\t break\n if element_in_head:\n\tif self.head.next == self.head:\n\t self.head = None\n\telse:\n\t prev.next = self.head.next\n\t self.head = self.head.next", "def delete(self,pos):\n temp=self.head\n for i in range(self.size):\n while(i==pos and temp.nxt!=None):\n a=temp.nxt\n temp.nxt=a.nxt\n self.size-=1\n if(temp.nxt=None):\n break\n temp=temp.nxt\n\n pass", "def remove_nth_element(self, position):\n if not self.head or position > self.length() -1:\n raise LinkedListException\n if position == 0 and self.head == self.head.next:\n self.head = None\n else:\n current = self.head\n prev = self.head\n counter = 0\n while counter < position or position == 0:\n counter += 1\n prev = current\n current = current.next\n\tif current == self.head:\n\t break\n if position == 0:\n self.head = current.next\n prev.next = current.next", "def removeNthFromEnd(self, head: ListNode, n: int) :\n dummy_node = ListNode(0)\n dummy_node.next = head\n\n slow,fast = dummy_node,dummy_node\n \n for i in range(n):\n fast = fast.next\n\n while fast and fast.next:\n slow = slow.next\n fast = fast.next\n \n slow.next = slow.next.next\n \n return dummy_node.next", "def delete(self, data):\r\n current_node = self.head\r\n current_index = 0\r\n index = self.get_index(data)\r\n while current_node.next != None:\r\n last_node = current_node\r\n current_node = current_node.next\r\n if current_index == index:\r\n last_node.next = current_node.next\r\n return\r\n 
current_index += 1", "def remove_index(self, index):\n current = self.head\n position = index\n if index > (self.size() - 1):\n return None\n elif index == 0:\n self.head = current.next_node\n else: \n while position >= 1:\n previous = current\n current = current.next_node\n position -= 1 \n previous.next_node = current.next_node\n\n return current", "def delI(current,i):\r\n j=1\r\n while(current.next):\r\n if j<i:\r\n j+=1\r\n current=current.next\r\n elif j==i:\r\n delNode=current.next\r\n nextNode=delNode.next\r\n current.next=nextNode\r\n print('del num is ',delNode)\r\n return nextNode\r\n return False", "def delete_from_head(self):\n if self.head:\n if self.head == self.tail: # If there is only one node in the list\n self.head = None\n self.tail = None\n else:\n self.head = self.head.next\n self.head.prev = None\n else:\n print(\"The playlist is empty!\")", "def remove(self , element):\n current = self.head \n previous = None\n\n while current and current.data != element:\n previous = current\n current = current.next\n\n if previous == None :\n self.head = current.next\n elif current :\n previous.next = current.next\n current.next = None", "def deleteDuplicates(head):\n p, q = head, ListNode(0.0)\n res = q\n while p:\n if p.next == None:\n q.next = p\n q = q.next\n break\n if p.val == p.next.val:\n while p.next and p.val == p.next.val:\n p = p.next\n else:\n q.next = p\n q = q.next\n p = p.next\n q.next = None\n return res.next", "def delete_node_at_start(self):\n if not self.head:\n print('List already empty.')\n return\n self.head = self.head.next", "def deleteAtIndex(self, index: int) -> None:\n #Case 1: If index is greater then length, no node will be deleted\n if self.num_node < index:\n pass\n #Case 2: If there is only one node in the list\n elif self.num_node == 1:\n del self.head\n self.num_node -= 1\n \n #Case 3: Delete the node at the index\n else:\n \n curr = self.head\n pev = None\n \n #Traverse the list\n for i in range(index):\n pev = curr\n curr = curr.nxt\n \n # Delete Node is at the end of the list\n if curr.nxt is None:\n pev = None\n # The Node is inbetween two nodes\n else:\n pev.nxt = curr.nxt\n curr.nxt = None\n \n self.num_node -= 1\n del curr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform a context visibility test. Creates a (fake) image with the specified owner and is_public attributes, then creates a context with the given keyword arguments and expects exp_res as the result of an is_image_visible() call on the context.
def do_visible(self, exp_res, img_owner, img_public, **kwargs): img = FakeImage(img_owner, img_public) ctx = context.RequestContext(**kwargs) self.assertEqual(ctx.is_image_visible(img), exp_res)
[ "def test_public_image_visibility(self, images_steps):\n images_steps.check_public_image_visible(config.HORIZON_TEST_IMAGE)", "def test_image_privacy(self, glance_steps, images_steps,\n auth_steps):\n image = glance_steps.create_images(\n utils.get_file_path(config.CIRROS_QCOW2_URL),\n image_names=utils.generate_ids(length=20))[0]\n images_steps.check_non_public_image_not_visible(image.name)", "def view(self, user, image, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return False\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return Image.objects.filter(pk=image.pk).accessible_by(user).exists()\n\n return self.admin_permission(user, image, *args)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def create_image_from_visibility(vis, **kwargs) -> Image:\n assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \\\n \"vis is not a Visibility or a BlockVisibility: %r\" % (vis)\n \n log.debug(\"create_image_from_visibility: Parsing parameters to get definition of WCS\")\n \n imagecentre = get_parameter(kwargs, \"imagecentre\", vis.phasecentre)\n phasecentre = get_parameter(kwargs, \"phasecentre\", vis.phasecentre)\n \n # Spectral processing options\n ufrequency = numpy.unique(vis.frequency)\n vnchan = len(ufrequency)\n \n frequency = get_parameter(kwargs, \"frequency\", vis.frequency)\n inchan = get_parameter(kwargs, \"nchan\", vnchan)\n reffrequency = frequency[0] * units.Hz\n channel_bandwidth = get_parameter(kwargs, \"channel_bandwidth\", 0.99999999999 * vis.channel_bandwidth[0]) * units.Hz\n \n if (inchan == vnchan) and vnchan > 1:\n log.debug(\n \"create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s\"\n % (inchan, imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif inchan > 1 and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and (vnchan == 1):\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n else:\n raise ValueError(\"create_image_from_visibility: unknown spectral mode \")\n \n # Image sampling options\n npixel = get_parameter(kwargs, \"npixel\", 512)\n uvmax = numpy.max((numpy.abs(vis.data['uvw'][:, 0:1])))\n if isinstance(vis, BlockVisibility):\n uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value\n log.debug(\"create_image_from_visibility: uvmax = %f wavelengths\" % uvmax)\n criticalcellsize = 1.0 / (uvmax * 2.0)\n log.debug(\"create_image_from_visibility: 
Critical cellsize = %f radians, %f degrees\" % (\n criticalcellsize, criticalcellsize * 180.0 / numpy.pi))\n cellsize = get_parameter(kwargs, \"cellsize\", 0.5 * criticalcellsize)\n log.debug(\"create_image_from_visibility: Cellsize = %g radians, %g degrees\" % (cellsize,\n cellsize * 180.0 / numpy.pi))\n override_cellsize = get_parameter(kwargs, \"override_cellsize\", True)\n if override_cellsize and cellsize > criticalcellsize:\n log.debug(\"create_image_from_visibility: Resetting cellsize %g radians to criticalcellsize %g radians\" % (\n cellsize, criticalcellsize))\n cellsize = criticalcellsize\n pol_frame = get_parameter(kwargs, \"polarisation_frame\", PolarisationFrame(\"stokesI\"))\n inpol = pol_frame.npol\n \n # Now we can define the WCS, which is a convenient place to hold the info above\n # Beware of python indexing order! wcs and the array have opposite ordering\n shape = [inchan, inpol, npixel, npixel]\n log.debug(\"create_image_from_visibility: image shape is %s\" % str(shape))\n w = wcs.WCS(naxis=4)\n # The negation in the longitude is needed by definition of RA, DEC\n w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth.to(units.Hz).value]\n # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for\n # the reference pixel. We have to use 0 rel everywhere.\n w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]\n w.wcs.ctype = [\"RA---SIN\", \"DEC--SIN\", 'STOKES', 'FREQ']\n w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, reffrequency.to(units.Hz).value]\n w.naxis = 4\n \n # TODO: Why is this check being done?\n # direction_centre = pixel_to_skycoord(npixel // 2 + 1, npixel // 2 + 1, wcs=w, origin=1)\n # assert direction_centre.separation(imagecentre).value < 1e-7, \\\n # \"Image phase centre [npixel//2, npixel//2] should be %s, actually is %s\" % \\\n # (str(imagecentre), str(direction_centre))\n \n w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')\n w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)\n \n return create_image_from_array(numpy.zeros(shape), wcs=w, polarisation_frame=pol_frame)", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def create(self, user, image, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager or user.is_advisor:\n if image.group.pk == user.group.pk:\n return True", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def test_bfs_priviledged_exists(self):\n\n print(\"Test: Testing an image with a 'priviledged' flag...\")\n\n topology = {\"outside\" : [\"container1\"],\n \"container1\": [\"container2\", \"outside\", \"docker host\"],\n \"container2\": [\"container1\", \"docker host\"],\n \"container3\": [\"docker host\"],\n \"docker host\": [\"container1\", \"container2\", \"container3\"]}\n\n exploitable_vuls = {\"container1\": {\"precond\" : {\"CVE-2015-0000\" : 0},\n \"postcond\" : {\"CVE-2015-0000\" : 3}},\n \"container2\" : {\"precond\" : {\"CVE-2015-0001\" : 3},\n \"postcond\" : {\"CVE-2015-0001\" : 4}},\n \"container3\": {\"precond\" : {\"CVE-2015-0002\" : 3},\n \"postcond\" : {\"CVE-2015-0002\" : 4}}}\n\n 
privileged_access = {\"container1\" : False, \"container2\" : True, \"container3\" : False}\n\n nodes, edges, _ = breadth_first_search(topology,\n exploitable_vuls,\n privileged_access)\n\n # Checking that container3 has been attacked and the edges that lead to it.\n self.assertTrue('container2(ADMIN)|docker host(ADMIN)' in edges)\n self.assertTrue('docker host(ADMIN)|container3(ADMIN)' in edges)\n self.assertTrue('container3(ADMIN)' in nodes)", "def test_group_public_hidden(self):\n group = Group.objects.create(name='test-group', visible=False)\n\n self.assertFalse(group.visible)\n self.assertTrue(group.is_accessible_by(self.user))\n self.assertTrue(\n group in Group.objects.accessible(self.user, visible_only=False))\n self.assertFalse(\n group in Group.objects.accessible(self.user, visible_only=True))", "def test_is_profile_visible_with_public(self):\n user1 = User.objects.get(username='admin')\n user2 = User.objects.get(username='doc')\n\n self.assertTrue(user1.is_profile_visible(user2))", "def exp_image_filter(*args, **kwargs):\n import itk\n instance = itk.ExpImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def imaging_contexts():\n contexts = {'2d': {'predict': predict_2d,\n 'invert': invert_2d,\n 'vis_iterator': vis_null_iter,\n 'inner': 'image'},\n 'facets': {'predict': predict_2d,\n 'invert': invert_2d,\n 'vis_iterator': vis_null_iter,\n 'inner': 'image'},\n 'facets_timeslice': {'predict': predict_timeslice_single,\n 'invert': invert_timeslice_single,\n 'vis_iterator': vis_timeslice_iter,\n 'inner': 'image'},\n 'facets_wstack': {'predict': predict_wstack_single,\n 'invert': invert_wstack_single,\n 'vis_iterator': vis_wslice_iter,\n 'inner': 'image'},\n 'timeslice': {'predict': predict_timeslice_single,\n 'invert': invert_timeslice_single,\n 'vis_iterator': vis_timeslice_iter,\n 'inner': 'image'},\n 'wstack': {'predict': predict_wstack_single,\n 'invert': invert_wstack_single,\n 'vis_iterator': vis_wslice_iter,\n 'inner': 'image'}}\n \n return contexts", "def fake_willow_image(self, create, extracted, **kwargs): # pylint: disable=unused-argument\n image_dir = tempfile.mkdtemp()\n origin_image_path = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), \"test_resources\", \"stata_center.jpg\",\n )\n shutil.copy(origin_image_path, image_dir)\n fake_image_path = os.path.join(image_dir, \"stata_center.jpg\")\n fake_image = WillowImage.open(open(fake_image_path, \"rb\"))\n\n @contextmanager\n def get_fake_willow(): # pylint: disable=missing-docstring\n yield fake_image\n\n self.get_willow_image = get_fake_willow # pylint: disable=attribute-defined-outside-init\n return self", "def examine_image(request):\n\n if 'examiner' in CONFIG:\n examiner = CONFIG['examiner']\n retcode = subprocess.call([examiner, request['expandedpath'],\n request['id']])\n if retcode != 0:\n return False\n\n return True", "def test_non_visible_images():\n layer = Image(\n data_dask_2D, visible=False, multiscale=False, contrast_limits=[0, 1],\n )\n assert layer.data.shape == data_dask_2D.shape", "def test_review_request_public(self):\n review_request = self.create_review_request(publish=True)\n\n self.assertTrue(review_request.is_accessible_by(self.user))\n self.assertTrue(review_request.is_accessible_by(self.anonymous))", "def builder_should_create_target_image(self, builder, target, image_id, template, parameters):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform a context sharability test. Creates a (fake) image with the specified owner and is_public attributes, then creates a context with the given keyword arguments and expects exp_res as the result of an is_image_sharable() call on the context. If membership is not None, its value will be passed in as the 'membership' keyword argument of is_image_sharable().
def do_sharable(self, exp_res, img_owner, membership=None, **kwargs): img = FakeImage(img_owner, True) ctx = context.RequestContext(**kwargs) sharable_args = {} if membership is not None: sharable_args['membership'] = membership self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)
[ "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def fake_willow_image(self, create, extracted, **kwargs): # pylint: disable=unused-argument\n image_dir = tempfile.mkdtemp()\n origin_image_path = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), \"test_resources\", \"stata_center.jpg\",\n )\n shutil.copy(origin_image_path, image_dir)\n fake_image_path = os.path.join(image_dir, \"stata_center.jpg\")\n fake_image = WillowImage.open(open(fake_image_path, \"rb\"))\n\n @contextmanager\n def get_fake_willow(): # pylint: disable=missing-docstring\n yield fake_image\n\n self.get_willow_image = get_fake_willow # pylint: disable=attribute-defined-outside-init\n return self", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def create(self, user, image, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager or user.is_advisor:\n if image.group.pk == user.group.pk:\n return True", "def builder_should_create_target_image(self, builder, target, image_id, template, parameters):", "def create_image_from_visibility(vis, **kwargs) -> Image:\n assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \\\n \"vis is not a Visibility or a BlockVisibility: %r\" % (vis)\n \n log.debug(\"create_image_from_visibility: Parsing parameters to get definition of WCS\")\n \n imagecentre = get_parameter(kwargs, \"imagecentre\", vis.phasecentre)\n phasecentre = get_parameter(kwargs, \"phasecentre\", vis.phasecentre)\n \n # Spectral processing options\n ufrequency = numpy.unique(vis.frequency)\n vnchan = len(ufrequency)\n \n frequency = get_parameter(kwargs, \"frequency\", vis.frequency)\n inchan = get_parameter(kwargs, \"nchan\", vnchan)\n reffrequency = frequency[0] * units.Hz\n channel_bandwidth = get_parameter(kwargs, \"channel_bandwidth\", 0.99999999999 * vis.channel_bandwidth[0]) * units.Hz\n \n if (inchan == vnchan) and vnchan > 1:\n log.debug(\n \"create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s\"\n % (inchan, imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif inchan > 1 and vnchan > 1:\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n elif (inchan == 1) and (vnchan == 1):\n assert numpy.abs(channel_bandwidth.value) > 0.0, \"Channel width must be non-zero for mfs mode\"\n log.debug(\"create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, \"\n \"and bandwidth %s\"\n % (imagecentre, reffrequency, channel_bandwidth))\n else:\n raise ValueError(\"create_image_from_visibility: unknown spectral mode \")\n \n # Image sampling options\n npixel = get_parameter(kwargs, \"npixel\", 512)\n uvmax = numpy.max((numpy.abs(vis.data['uvw'][:, 0:1])))\n if 
isinstance(vis, BlockVisibility):\n uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value\n log.debug(\"create_image_from_visibility: uvmax = %f wavelengths\" % uvmax)\n criticalcellsize = 1.0 / (uvmax * 2.0)\n log.debug(\"create_image_from_visibility: Critical cellsize = %f radians, %f degrees\" % (\n criticalcellsize, criticalcellsize * 180.0 / numpy.pi))\n cellsize = get_parameter(kwargs, \"cellsize\", 0.5 * criticalcellsize)\n log.debug(\"create_image_from_visibility: Cellsize = %g radians, %g degrees\" % (cellsize,\n cellsize * 180.0 / numpy.pi))\n override_cellsize = get_parameter(kwargs, \"override_cellsize\", True)\n if override_cellsize and cellsize > criticalcellsize:\n log.debug(\"create_image_from_visibility: Resetting cellsize %g radians to criticalcellsize %g radians\" % (\n cellsize, criticalcellsize))\n cellsize = criticalcellsize\n pol_frame = get_parameter(kwargs, \"polarisation_frame\", PolarisationFrame(\"stokesI\"))\n inpol = pol_frame.npol\n \n # Now we can define the WCS, which is a convenient place to hold the info above\n # Beware of python indexing order! wcs and the array have opposite ordering\n shape = [inchan, inpol, npixel, npixel]\n log.debug(\"create_image_from_visibility: image shape is %s\" % str(shape))\n w = wcs.WCS(naxis=4)\n # The negation in the longitude is needed by definition of RA, DEC\n w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth.to(units.Hz).value]\n # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for\n # the reference pixel. We have to use 0 rel everywhere.\n w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]\n w.wcs.ctype = [\"RA---SIN\", \"DEC--SIN\", 'STOKES', 'FREQ']\n w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, reffrequency.to(units.Hz).value]\n w.naxis = 4\n \n # TODO: Why is this check being done?\n # direction_centre = pixel_to_skycoord(npixel // 2 + 1, npixel // 2 + 1, wcs=w, origin=1)\n # assert direction_centre.separation(imagecentre).value < 1e-7, \\\n # \"Image phase centre [npixel//2, npixel//2] should be %s, actually is %s\" % \\\n # (str(imagecentre), str(direction_centre))\n \n w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')\n w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)\n \n return create_image_from_array(numpy.zeros(shape), wcs=w, polarisation_frame=pol_frame)", "def view(self, user, image, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return False\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return Image.objects.filter(pk=image.pk).accessible_by(user).exists()\n\n return self.admin_permission(user, image, *args)", "def maketestimage(self, *args, **kwargs):\n return _image.image_maketestimage(self, *args, **kwargs)", "def export_prepared_image(self, **kwargs):\n owner = kwargs.pop(\"owner\", None)\n indent = kwargs.pop(\"indent\", 2)\n key = _Texture(**kwargs)\n image = key.image\n\n if key not in self._pending:\n self._report.msg(\"Stashing '{}' for conversion as '{}'\", image.name, key, indent=indent)\n self._pending[key] = [owner.key,]\n else:\n self._report.msg(\"Found another user of '{}'\", key, indent=indent)\n self._pending[key].append(owner.key)", "def test_bfs_priviledged_exists(self):\n\n print(\"Test: Testing an image with a 'priviledged' flag...\")\n\n topology = {\"outside\" : [\"container1\"],\n \"container1\": [\"container2\", \"outside\", \"docker host\"],\n \"container2\": 
[\"container1\", \"docker host\"],\n \"container3\": [\"docker host\"],\n \"docker host\": [\"container1\", \"container2\", \"container3\"]}\n\n exploitable_vuls = {\"container1\": {\"precond\" : {\"CVE-2015-0000\" : 0},\n \"postcond\" : {\"CVE-2015-0000\" : 3}},\n \"container2\" : {\"precond\" : {\"CVE-2015-0001\" : 3},\n \"postcond\" : {\"CVE-2015-0001\" : 4}},\n \"container3\": {\"precond\" : {\"CVE-2015-0002\" : 3},\n \"postcond\" : {\"CVE-2015-0002\" : 4}}}\n\n privileged_access = {\"container1\" : False, \"container2\" : True, \"container3\" : False}\n\n nodes, edges, _ = breadth_first_search(topology,\n exploitable_vuls,\n privileged_access)\n\n # Checking that container3 has been attacked and the edges that lead to it.\n self.assertTrue('container2(ADMIN)|docker host(ADMIN)' in edges)\n self.assertTrue('docker host(ADMIN)|container3(ADMIN)' in edges)\n self.assertTrue('container3(ADMIN)' in nodes)", "def test_image_privacy(self, glance_steps, images_steps,\n auth_steps):\n image = glance_steps.create_images(\n utils.get_file_path(config.CIRROS_QCOW2_URL),\n image_names=utils.generate_ids(length=20))[0]\n images_steps.check_non_public_image_not_visible(image.name)", "def test_api_thumbnail_retrieve_by_organization_student(self):\n organization_access = OrganizationAccessFactory(\n organization=self.some_organization,\n role=STUDENT,\n )\n\n self.assert_user_cannot_retrieve_thumbnail(\n organization_access.user, self.some_thumbnail\n )", "def create_image_allowed(self, create_image_allowed):\n self._create_image_allowed = create_image_allowed", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def test_create_image(self):\n pass", "async def cheek(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{} is trying to kiss his own cheek OwO\".format(ctx.author.mention))\n img = self.getreaction(\"cheek\", \"himself\")\n elif user.id == 343156375800643594:\n await ctx.send(\"N-No {}, your breath stinks...\".format(ctx.author.mention))\n img = self.getreaction(\"cheek\", \"rias\")\n else:\n await ctx.send(\"Aww, {} kissed {} on the cheek!\".format(ctx.author.mention, user.mention))\n img = random.choice(self.getreaction(\"cheek\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)", "def examine_image(request):\n\n if 'examiner' in CONFIG:\n examiner = CONFIG['examiner']\n retcode = subprocess.call([examiner, request['expandedpath'],\n request['id']])\n if retcode != 0:\n return False\n\n return True", "def make_star_thumbnails():\n os.chdir(unicorn.GRISM_HOME+'ANALYSIS/SURVEY_PAPER')\n \n ######### Make full COSMOS catalog \n file=unicorn.GRISM_HOME+'COSMOS/PREP_FLT/COSMOS-F140W_drz.fits'\n ROOT_GRISM = os.path.basename(file).split('_drz.fits')[0]\n se = threedhst.sex.SExtractor()\n se.aXeParams()\n se.copyConvFile()\n se.overwrite = True\n se.options['CATALOG_NAME'] = ROOT_GRISM+'_drz.cat'\n se.options['CHECKIMAGE_NAME'] = ROOT_GRISM+'_seg.fits'\n se.options['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\n se.options['WEIGHT_TYPE'] = 'MAP_WEIGHT'\n se.options['WEIGHT_IMAGE'] = file+'[1]'\n se.options['FILTER'] = 'Y'\n se.options['DETECT_THRESH'] = '1.4'\n se.options['ANALYSIS_THRESH'] = '1.4'\n se.options['MAG_ZEROPOINT'] = '26.46'\n status = se.sextractImage(file+'[0]', mode='direct')\n \n cat = threedhst.sex.mySexCat('COSMOS-F140W_drz.cat')\n \n mag, radius = np.cast[float](cat.MAG_AUTO), np.cast[float](cat.FLUX_RADIUS)\n xpix, ypix = np.cast[float](cat.X_IMAGE), 
np.cast[float](cat.Y_IMAGE)\n ra, dec = np.cast[float](cat.X_WORLD), np.cast[float](cat.Y_WORLD)\n \n #### Find isolated point sources\n points = (mag > 17) & (mag < 22) & (radius < 2.7)\n # plt.plot(mag, radius, marker='o', linestyle='None', alpha=0.5, color='blue')\n # plt.plot(mag[points], radius[points], marker='o', linestyle='None', alpha=0.8, color='red')\n # plt.ylim(0,20)\n # plt.xlim(14,26)\n \n idx = np.arange(len(points))\n isolated = mag > 1.e10\n \n buff = 3 ## buffer, in arcsec\n dmag = 2.5\n scale = 0.06\n \n for i in idx[points]:\n dr = np.sqrt((xpix[i]-xpix)**2+(ypix[i]-ypix)**2)*scale\n near = (dr > 0) & (dr < buff) & (mag < (mag[i]+dmag))\n if len(near[near]) == 0:\n isolated[i] = True\n else:\n isolated[i] = False\n \n #### Make thumbnails\n img = pyfits.open(unicorn.GRISM_HOME+'COSMOS/PREP_FLT/COSMOS-F140W_drz.fits')\n img_data = img[1].data\n img_wht = img[2].data\n \n NPIX = int(np.ceil(buff/scale))\n\n prim = pyfits.PrimaryHDU()\n list_d = [prim]\n list_w = [prim]\n \n head = img[1].header\n head['CRPIX1'], head['CRPIX2'] = NPIX, NPIX\n \n for i in idx[points & isolated]:\n print unicorn.noNewLine+'%d' %(i)\n id = np.int(cat.NUMBER[i])\n xi, yi = int(np.round(xpix[i])), int(np.round(ypix[i]))\n sub_data = img_data[yi-NPIX:yi+NPIX, xi-NPIX: xi+NPIX]\n sub_wht = img_wht[yi-NPIX:yi+NPIX, xi-NPIX: xi+NPIX]\n #\n head['CRVAL1'], head['CRVAL2'] = ra[i], dec[i]\n head.update('MAG',mag[i])\n head.update('RADIUS',radius[i])\n head.update('XCENTER',xpix[i]-xi+NPIX)\n head.update('YCENTER',ypix[i]-yi+NPIX)\n #\n list_d.append(pyfits.ImageHDU(sub_data, header=head))\n list_w.append(pyfits.ImageHDU(sub_wht, header=head))\n \n pyfits.HDUList(list_d).writeto('stars_sci.fits', clobber=True)\n pyfits.HDUList(list_w).writeto('stars_wht.fits', clobber=True)", "def prepared_image_file(create_filesystem=True):\n # Create a 10 MB image file and a key file of 2048 bytes.\n execute('dd', 'if=/dev/zero', 'of=%s' % IMAGE_FILE, 'bs=1M', 'count=10')\n execute('dd', 'if=/dev/urandom', 'of=%s' % KEY_FILE, 'bs=512', 'count=4')\n # Encrypt and unlock the image file.\n execute('cryptsetup', '--batch-mode', 'luksFormat', IMAGE_FILE, KEY_FILE, sudo=True)\n # Create a filesystem on the encrypted image file?\n if create_filesystem:\n with unlocked_device(CRYPTO_NAME):\n execute('mkfs.ext4', FILESYSTEM_DEVICE, sudo=True)\n yield\n os.unlink(IMAGE_FILE)\n os.unlink(KEY_FILE)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to True.
def test_empty_public_owned(self): self.do_visible(True, 'pattieblack', True, is_admin=True)
[ "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_profile_view_get_context_has_view_photos_and_albums_for_non_owner(self):\n from imager_profile.views import ProfileView\n request = self.request.get('')\n request.user = AnonymousUser()\n view = ProfileView(request=request, object='')\n data = view.get_context_data(object=ImagerProfile.objects.first())\n self.assertIn('view', data)\n self.assertIn('photos', data)\n self.assertIn('albums', data)\n self.assertFalse(data['owner'])", "def view(self, user, image, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return False\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return Image.objects.filter(pk=image.pk).accessible_by(user).exists()\n\n return self.admin_permission(user, image, *args)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_user_own_gallery(self):\n\n self.login_user()\n resp = self.client.get(reverse('gallery-user'))\n self.assertEqual(resp.status_code, 200)\n # Check if all (public and private) images are shown\n self.assertEqual(len(resp.context['photo_list']), 2)", "def test_edit_image_without_permissions(self):\n u = User.objects.get(username='pcraciunoiu')\n img = image(creator=u)\n r = post(self.client, 'gallery.edit_media', {'description': 'arrr'},\n args=['image', img.id])\n\n eq_(403, r.status_code)", "def test_aws_service_api_private_images_get(self):\n pass", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_profile_view_get_context_has_view_photos_and_albums_for_owner(self):\n from imager_profile.views import ProfileView\n request = self.request.get('')\n request.user = self.bob\n view = ProfileView(request=request, object='')\n data = view.get_context_data(object=ImagerProfile.objects.first())\n self.assertIn('view', data)\n self.assertIn('photos', data)\n self.assertIn('albums', data)\n self.assertTrue(data['owner'])", "def test_public_image_visibility(self, 
images_steps):\n images_steps.check_public_image_visible(config.HORIZON_TEST_IMAGE)", "def test_image_privacy(self, glance_steps, images_steps,\n auth_steps):\n image = glance_steps.create_images(\n utils.get_file_path(config.CIRROS_QCOW2_URL),\n image_names=utils.generate_ids(length=20))[0]\n images_steps.check_non_public_image_not_visible(image.name)", "def test_aws_service_api_public_image_get(self):\n pass", "def test_get_returns_403_if_not_public(self):\n recipe = mixer.blend(models.Recipe, user=None, public=False)\n url = reverse('recipes-detail', kwargs={'pk': recipe.pk})\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_neighborhood_has_access_not_admin(self):\n self.api_get(\n '/rest/p/has_access?user=test-admin&perm=admin',\n user='test-user',\n status=403)", "def test_user_gallery(self):\n\n resp = self.client.get(\n reverse('gallery-user', kwargs={'username': 'admin' }))\n self.assertEqual(resp.status_code, 200)\n # Check if only public images are shown\n self.assertEqual(len(resp.context['photo_list']), 1)", "def test_admin_public(self):\n with self.login(self.user_admin):\n self.assertTrue(current_user.is_authenticated)\n self.assertEqual(current_user, self.user_admin)\n\n rv = self.client.get('/')\n self.assertEqual(b'public', rv.data)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to False.
def test_empty_private_owned(self): self.do_visible(True, 'pattieblack', False, is_admin=True)
[ "def test_empty_public_owned(self):\n self.do_visible(True, 'pattieblack', True, is_admin=True)", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_profile_view_get_context_has_view_photos_and_albums_for_non_owner(self):\n from imager_profile.views import ProfileView\n request = self.request.get('')\n request.user = AnonymousUser()\n view = ProfileView(request=request, object='')\n data = view.get_context_data(object=ImagerProfile.objects.first())\n self.assertIn('view', data)\n self.assertIn('photos', data)\n self.assertIn('albums', data)\n self.assertFalse(data['owner'])", "def view(self, user, image, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return False\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return Image.objects.filter(pk=image.pk).accessible_by(user).exists()\n\n return self.admin_permission(user, image, *args)", "def test_edit_image_without_permissions(self):\n u = User.objects.get(username='pcraciunoiu')\n img = image(creator=u)\n r = post(self.client, 'gallery.edit_media', {'description': 'arrr'},\n args=['image', img.id])\n\n eq_(403, r.status_code)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_own_gallery(self):\n\n self.login_user()\n resp = self.client.get(reverse('gallery-user'))\n self.assertEqual(resp.status_code, 200)\n # Check if all (public and private) images are shown\n self.assertEqual(len(resp.context['photo_list']), 2)", "def test_neighborhood_has_access_not_admin(self):\n self.api_get(\n '/rest/p/has_access?user=test-admin&perm=admin',\n user='test-user',\n status=403)", "def test_image_privacy(self, glance_steps, images_steps,\n auth_steps):\n image = glance_steps.create_images(\n utils.get_file_path(config.CIRROS_QCOW2_URL),\n image_names=utils.generate_ids(length=20))[0]\n images_steps.check_non_public_image_not_visible(image.name)", "def test_get_returns_403_if_not_public(self):\n 
recipe = mixer.blend(models.Recipe, user=None, public=False)\n url = reverse('recipes-detail', kwargs={'pk': recipe.pk})\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_profile_view_get_context_has_view_photos_and_albums_for_owner(self):\n from imager_profile.views import ProfileView\n request = self.request.get('')\n request.user = self.bob\n view = ProfileView(request=request, object='')\n data = view.get_context_data(object=ImagerProfile.objects.first())\n self.assertIn('view', data)\n self.assertIn('photos', data)\n self.assertIn('albums', data)\n self.assertTrue(data['owner'])", "def test_public_image_visibility(self, images_steps):\n images_steps.check_public_image_visible(config.HORIZON_TEST_IMAGE)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_user_gallery(self):\n\n resp = self.client.get(\n reverse('gallery-user', kwargs={'username': 'admin' }))\n self.assertEqual(resp.status_code, 200)\n # Check if only public images are shown\n self.assertEqual(len(resp.context['photo_list']), 1)", "def test_aws_service_api_public_image_get(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that an authenticated context (with is_admin set to False) cannot share an image it does not own, even when the image has been shared with it but with can_share = False.
def test_auth_sharable_cannot_share(self): self.do_sharable(False, 'pattieblack', FakeMembership(False), tenant='froggy')
[ "def cant_share_photo(request, ttl=None,*args, **kwargs):\n\tif ttl:\n\t\ttry:\n\t\t\tttl = int(ttl)\n\t\texcept ValueError:\n\t\t\tttl = None\n\tphoto_id = request.session.get(\"personal_group_shared_photo_id\",None)\n\torigin = request.session.get(\"personal_group_shared_photo_origin\",None)\n\tphoto_url = request.session.get(\"personal_group_shared_photo_url\",None)\n\tphoto_caption = request.session.get(\"personal_group_shared_photo_caption\",None)\n\tphoto_owner_username = request.session.get(\"personal_group_shared_photo_owner_username\",None)\n\treturn render(request,\"personal_group/sharing/photo_not_shared.html\",{'photo_caption':photo_caption,'photo_id':photo_id,'photo_url':photo_url,\\\n\t\t'photo_owner_username':photo_owner_username,'origin':origin,'ttl':ttl})", "def canShare(self):\n return False", "def deny_access(self, context, share, access, share_server=None):\n self._get_helper(share).deny_access('/', share, access)", "def deny_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Deny access.\")\r\n self.helper._deny_access(share['name'], access, share['share_proto'])", "def test_un_logged_in_user_can_not_upload_picture(self):\n tmp_file = generate_image_for_testing()\n response = self.client.post(self.user_passport_url,\n data={'passport': tmp_file})\n\n self.assertEqual(403, response.status_code)", "def allow_access(self, context, share, access, share_server=None):\r\n LOG.debug(\"Allow access.\")\r\n self.helper._allow_access(share['name'], access, share['share_proto'])", "def test_no_shares(self):\n\n calendar = yield self.calendarUnderTest(home=\"user01\", name=\"calendar\")\n invites = yield calendar.sharingInvites()\n self.assertEqual(len(invites), 0)\n self.assertFalse(calendar.isSharedByOwner())", "def test_image_privacy(self, glance_steps, images_steps,\n auth_steps):\n image = glance_steps.create_images(\n utils.get_file_path(config.CIRROS_QCOW2_URL),\n image_names=utils.generate_ids(length=20))[0]\n images_steps.check_non_public_image_not_visible(image.name)", "def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):\n\n img = FakeImage(img_owner, True)\n ctx = context.RequestContext(**kwargs)\n\n sharable_args = {}\n if membership is not None:\n sharable_args['membership'] = membership\n\n self.assertEqual(ctx.is_image_sharable(img, **sharable_args), exp_res)", "def allow_access(self, context, share, access, share_server=None):\n self._get_helper(share).allow_access('/', share, access)", "def deny_access(self, context, share, access, share_server):\n share_proto = share['share_proto'].upper()\n\n self._validate_share_protocol(share_proto)\n self._validate_share_access_type(share, access)\n\n if share_proto == 'CIFS':\n self._cifs_deny_access(share, access)\n elif share_proto == 'NFS':\n self._nfs_deny_access(share, access)", "def test_is_shared_non_existing_path(self):\n with self.assertRaises(owncloud.ResponseError) as e:\n self.client.is_shared(self.test_root + 'does_not_exist')\n self.assertEqual(e.exception.status_code, 404)", "def test_edit_image_without_permissions(self):\n u = User.objects.get(username='pcraciunoiu')\n img = image(creator=u)\n r = post(self.client, 'gallery.edit_media', {'description': 'arrr'},\n args=['image', img.id])\n\n eq_(403, r.status_code)", "def _deny_access(self, share, access, share_server=None):\r\n pool, share_name, proto = self._get_share_instance_pnp(share)\r\n share_path = self._generate_share_path(share_name)\r\n\r\n if proto == 'NFS':\r\n self._deny_nfs_access(share_path, share_name, 
access)\r\n else:\r\n self._deny_cifs_access(share_path, share_name, access)", "def test_kyc_post_legal_share_holder(self):\n pass", "def test_wrong_config_shares0(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share='dfdf'\n ),\n status=400\n )", "def test_dashboards_v2_share(self):\n pass", "def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
loads file FILTER, returns filter matrix
def load_filter(): if not os.path.isfile(FILTER): print('no filter found, creating square grid') return [] with open(FILTER, 'r') as ff: reader = csv.reader(ff) l = list(reader) ar = numpy.asarray(l) # ar = numpy.transpose(ar, (0, 1)) # ar = numpy.flip(ar, 1) # ar = numpy.rot90(ar, k=3, axes=(0, 1)) # ar = numpy.swapaxes(ar, 0, 1) f = list(map(list, ar)) return f
[ "def load_filter_file(filter_file):\n df = pd.DataFrame()\n # if\n if filter_file:\n with open(filter_file,'rb') as csvfile:\n df = pd.read_csv(csvfile)\n return df", "def load_filter(self, dataset, args):\n attitude_filter_path = osp.join(args.root_dir, dataset, \"attitude.txt\")\n attitudes = np.loadtxt(attitude_filter_path, delimiter=\",\", skiprows=3)\n self.filter_ts = attitudes[:, 0] * 1e-6\n filter_r = Rotation.from_quat(\n np.concatenate(\n [attitudes[:, 2:5], np.expand_dims(attitudes[:, 1], axis=1)], axis=1\n )\n )\n R_filter = filter_r.as_matrix()\n R_wf = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n R_filter = np.matmul(R_wf, R_filter)\n filter_r = Rotation.from_matrix(R_filter)\n self.filter_eul = filter_r.as_euler(\"xyz\", degrees=True)", "def _read_filter_data(filename):\n gains = []\n freqs = []\n freq_scale = 0\n with open(filename) as f:\n for line in f:\n words = line.split()\n if line.startswith('Freq'):\n _, scale = words[0].split(\"(\")\n scale = scale.rstrip(\")\")\n if scale==\"Hz\":\n freq_scale = 1\n elif scale==\"kHz\":\n freq_scale = 1e3\n elif scale==\"MHz\":\n freq_scale = 1e6\n elif scale==\"GHz\":\n freq_scale = 1e9\n else:\n raise ValueError(\"Cannot parse line: '\"+line+\"'\")\n elif len(words)==3 and words[0]!=\"Total\":\n f, g, p = line.split(\",\")\n freq = float(f) * freq_scale\n gain = float(g)\n phase = float(p)\n freqs.append(freq)\n gains.append(gain * np.exp(1j*phase))\n\n return np.array(gains), np.array(freqs)", "def read_filter(filter_file):\n\n fd = open(filter_file, \"r\")\n lines = fd.readlines()\n fd.close()\n\n wavelengths = []\n weights = []\n for line in lines:\n line = line.strip()\n words = line.split()\n wavelengths.append(float(words[0]))\n weights.append(float(words[1]))\n\n return (wavelengths, weights)", "def parseFilter(filterList):\n filter_mat = None\n for line in filterList:\n try:\n line = np.array([float(x) for x in line.split()])\n if line.shape[0] != len(filterList):\n raise Exception(\"Filter must be square, pad with zeroes if you need a non-square filter\")\n\n if filter_mat is None:\n filter_mat = line\n else:\n filter_mat = np.vstack((filter_mat,line))\n except ValueError:\n logging.fatal(\"Invalid configuration: filter must contain only numbers\"); exit()\n except Exception as e:\n logging.fatal(e); exit()\n return filter_mat", "def unpack(self, filter_file_type=\".dat\", verbose=False):\n\n if hasattr(self, \"phot\"):\n filter_names = np.unique(self.phot[\"filter\"])\n\n self.phot.add_index('filter', unique = True)\n\n\n for filter_name in filter_names:\n\n phot_table = self.phot.loc[\"filter\", filter_name]\n filter_filename = filter_name + filter_file_type\n if verbose: print(filter_filename)\n if verbose: print(phot_table)\n if verbose: print(type(filter_name), type(filter_file_type))\n\n # phot_table.meta = {\"filter_filename\": filter_filename}\n phot_table.meta[\"filter_filename\"] = filter_filename\n if not isinstance(phot_table, Row):\n # if len(np.unique(self.phot.loc[\"filter\", filter_name][\"MJD\"])) > 1:\n indices = phot_table.argsort(\"MJD\")\n # for column_name in phot_table.colnames:\n # phot_table[column_name] = phot_table[column_name][indices]\n sorted_phot_table = Table([phot_table[column_name][indices] for column_name in phot_table.colnames])\n else:\n sorted_phot_table = phot_table\n\n filter_key = np.unique(phot_table[\"filter\"])[0]\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != filter_name:\n raise errors.FilterMismatchError(\"There is a more than one filterdata in here! 
or there is a mismatch with filename\")\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n\n # def load_filter(path, cmap = False, verbose = False):\n #\n if utils.check_file_path(os.path.abspath(path_to_filter)):\n filter_object = FilterClass()\n filter_object.read_filter_file(os.path.abspath(path_to_filter), verbose = verbose)\n filter_object.calculate_AB_zp()\n else:\n warnings.warn(\"Couldn't load the filter\")\n\n self.data_filters[filter_key] = filter_object\n\n self.data[filter_name] = sorted_phot_table\n\n self.filter_names = filter_names\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n\n pass", "def read_filt(self, filt):\n d = np.genfromtxt(os.path.join(self.filterdir, filt+'.dat'))\n wave = d[:,0]\n flux = d[:,1]\n\n norm = integrate.simps(flux)\n flux = flux / norm\n\n # interpolate transmission curve to a finer wavelength resolution \n transmission = self.interp_spectrum(wave, flux) \n\n return transmission", "def load_filter(self):\n for f in parameters.FILTERS:\n if f['label'] == self.filter:\n parameters.LAMBDA_MIN = f['min']\n parameters.LAMBDA_MAX = f['max']\n self.my_logger.info('\\n\\tLoad filter %s: lambda between %.1f and %.1f' % (\n f['label'], parameters.LAMBDA_MIN, parameters.LAMBDA_MAX))\n break", "def load_filters(self):\n buffer_dict = dict(self.named_buffers())\n n = 0\n\n for k in self.phi_f.keys():\n if type(k) != str:\n self.phi_f[k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi1_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi2_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1", "def load_data():\n raw = read_data(os.path.join(DATA_DIR, DATA_FILE))\n data = process_data(raw)\n \n #Returned filtered data\n return filter_data(data)", "def read_flt(input_file):\n\n if input_file.endswith('.flt') or input_file.endswith('.hdr'):\n input_file = input_file[:-4]\n else:\n print 'Incorrect filename'\n return 0,0 #exits module gracefully\n\n headers = read_headers(input_file)\n\n #read the data as a 1D array and reshape it to the dimensions in the header\n raster_array = read_bin(input_file).reshape(int(headers[1]), int(headers[0]))\n raster_array = raster_array.reshape(int(headers[1]), int(headers[0])) #rows, columns\n\n return raster_array, headers", "def _read_filters(self, path):\n blob = utils.read_blob_file_contents(path)\n try:\n rules = json.loads(blob)\n except ValueError as e:\n msg = _(\n \"An error occurred when reading filters from file \"\n \"%(path)s: %(error)s\"\n ) % {\"path\": path, \"error\": e}\n raise exceptions.CommandError(msg)\n else:\n return rules", "def read(self, filename):\n CPX_PROC.readcopysolnpoolfilters(self._env._e, self._cplex._lp,\n filename, enc=self._env._apienc)", "def load_BL62_text_files(file_name_filter):\n files = os.listdir('.')\n\n spectra = {}\n\n for spec_file in files:\n m = re.search('('+file_name_filter+'.*)\\.dat',spec_file)\n if (m):\n name = spec_file\n spectra[name] = Spectrum()\n spectra[name].name = name\n data = np.genfromtxt(name,skip_header=37)\n #print data\n spectra[name].EE = data[:,1]\n spectra[name].data = data[:,1:]\n\n return spectra", "def load_BL7_XES_files(file_name_filter):\n files = os.listdir('.')\n\n spectra = {}\n\n for spec_file in files:\n m = re.search('('+file_name_filter+'.*)\\_spec.txt',spec_file)\n if (m):\n filename = spec_file\n name = 
filename\n spectra[name] = Spectrum()\n spectra[name].name = name\n data = np.genfromtxt(filename,skip_header=0)\n #print data\n spectra[name].EE = data[:-1,0]\n spectra[name].data = data[:-1,1]\n\n return spectra", "def getFileAsFiltFloatMatrix(dirPath, filt, columns, delim=\",\"):\n\tmat = list()\n\tfor rec in fileFiltSelFieldsRecGen(dirPath, filt, columns, delim):\n\t\tmat.append(asFloatList(rec))\n\treturn mat", "def read_filters(self, filepath):\n\n try:\n with open(filepath, \"r\") as infile:\n filters = json.load(infile)\n return filters\n except IOError as e:\n print \"Error when reading filters from file: %s\", e.message\n return None", "def load_filter(filename):\n # parse config file\n if not os.path.isfile(filename):\n raise IOError('File \"%s\" does not exist' % filename)\n try:\n f = open(filename)\n except IOError:\n raise IOError('Could not open file \"%s\"' % filename)\n\n cfg_items = []\n for (i, line) in enumerate(f):\n try:\n # remove all comments and unnecessary whitespace\n normalizer = shlex.shlex(line)\n normalizer.wordchars += '.-'\n normal_line = ' '.join([t for t in normalizer])\n if normal_line:\n # split up normalized line and build dictionary\n cfg_item = {}\n for part in normal_line.split(','):\n cfg_split = shlex.split(part)\n key = cfg_split.pop(0)\n value = cfg_split\n cfg_item[key] = value\n cfg_items.append(cfg_item)\n except (IndexError, ValueError):\n raise RuntimeError( \\\n 'Could not parse line %i of file \"%s\"' % (i, filename))\n\n # look for global bit settings\n bits_global = None\n factor_bits_global = None\n norm_bits_global = None\n for cfg_item in cfg_items:\n if 'bits_global' in cfg_item:\n if bits_global is None:\n [bits_global] = cfg_item.pop('bits_global')\n bits_global = int(bits_global)\n else:\n raise RuntimeError( \\\n 'bits_global must not be specified more than once')\n if 'factor_bits_global' in cfg_item:\n if factor_bits_global is None:\n [factor_bits_global] = cfg_item.pop('factor_bits_global')\n factor_bits_global = int(factor_bits_global)\n else:\n raise RuntimeError( \\\n 'factor_bits_global must not be specified more than once')\n if 'norm_bits_global' in cfg_item:\n if norm_bits_global is None:\n [norm_bits_global] = cfg_item.pop('norm_bits_global')\n norm_bits_global = int(norm_bits_global)\n else:\n raise RuntimeError( \\\n 'norm_bits_global must not be specified more than once')\n\n # remove empty items from cfg_items, only node definitions should be left\n cfg_items = filter(None, cfg_items)\n\n # look for filter nodes\n filter_nodes = {}\n adjacency = {}\n input_node = None\n output_node = None\n for cfg_item in cfg_items:\n # mandatory settings\n try:\n [node] = cfg_item['node']\n except KeyError:\n raise RuntimeError('Node type not specified')\n try:\n [name] = cfg_item['name']\n except KeyError:\n raise RuntimeError('Name not specified')\n # optional settings\n if 'bits' in cfg_item:\n [bits] = map(int, cfg_item['bits'])\n else:\n bits = bits_global\n if 'connect' in cfg_item:\n connect = cfg_item['connect']\n else:\n connect = []\n if 'input' in cfg_item:\n if input_node is None:\n input_node = name\n else:\n raise RuntimeError('More than one input node specified')\n if 'output' in cfg_item:\n if output_node is None:\n output_node = name\n else:\n raise RuntimeError('More than one output node specified')\n\n # make filter node\n if name not in filter_nodes:\n if bits is not None:\n if node == 'Const':\n filter_nodes[name] = Const(bits)\n elif node == 'Add':\n filter_nodes[name] = Add(bits)\n elif node == 'Delay':\n 
filter_nodes[name] = Delay(bits)\n elif node == 'Multiply':\n if 'factor_bits' in cfg_item:\n [factor_bits] = cfg_item['factor_bits']\n factor_bits = int(factor_bits)\n else:\n factor_bits = factor_bits_global\n if 'norm_bits' in cfg_item:\n [norm_bits] = cfg_item['norm_bits']\n norm_bits = int(norm_bits)\n else:\n norm_bits = norm_bits_global\n if (factor_bits is not None and norm_bits is not None):\n filter_nodes[name] = Multiply(\n bits, factor_bits, norm_bits)\n if 'factor' in cfg_item:\n [factor] = cfg_item['factor']\n factor = float(factor)\n filter_nodes[name].set_factor(factor, norm=True)\n else:\n raise ValueError('Unknown node type: %s' % node)\n else:\n raise RuntimeError('Number of bits for node \"%s\" not specified' \\\n % name)\n adjacency[name] = connect\n else:\n raise RuntimeError('Node \"%s\" already present' % name)\n\n # make filter\n if input_node is None:\n raise RuntimeError('No input node specified')\n elif output_node is None:\n raise RuntimeError('No output node specified')\n else:\n return Filter(filter_nodes, adjacency, input_node, output_node)", "def loadFile(filterExt):\n basicFilter = \"*.\" + filterExt\n filePath = fileDialog2(fileFilter=basicFilter, dialogStyle=2, fm=1)\n if(filePath != None):\n #openfile = open('/Users/camtton/Desktop/drawing.svg', 'r')\n tokens = getSVGpath(filePath[0])\n return tokens\n else:\n print 'Please select a %s file'%(filterExt)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a boolean indicating whether xy is occupied in the filter matrix.
def filtered(filter, xy):
    try:
        x, y = xy
        return bool(filter[x][y])
    except IndexError:
        return False
[ "def xy_occupied(xy, board):\n return True if board[xy[0]][xy[1]] else False", "def in_map(self, x, y):\n return 0 <= x < self._width and 0 <= y < self._height", "def __collides_w_used(self, x, y, wi, he):\n for j in range(y, y + he):\n for i in range(x, x + wi):\n if not (i, j) in self.empty:\n return True\n return False", "def check_indices(f, x, y):\n h = f.shape[0]\n w = f.shape[1]\n return (0 <= y < h) and (0 <= x < w)", "def inworld(self, x, y):\n return 0 <= x < self.size[0] and 0 <= y < self.size[1]", "def contains(self, x):\n\n return np.all(point > 0.) and np.all(point < 1.)", "def __check_neighbour(self, mat, x, y, background):\r\n if x < mat.shape[0]-1 and mat[x+1][y] in background:\r\n return True\r\n elif x > 1 and mat[x-1][y] in background:\r\n return True\r\n elif y < mat.shape[1]-1 and mat[x][y+1] in background:\r\n return True\r\n elif y > 1 and mat[x][y-1] in background:\r\n return True\r\n elif x < mat.shape[0]-1 and y < mat.shape[1]-1 and mat[x+1][y+1] in background:\r\n return True\r\n elif x > 1 and y > 1 and mat[x-1][y-1] in background:\r\n return True\r\n elif x < mat.shape[0]-1 and y > 1 and mat[x+1][y-1] in background:\r\n return True\r\n elif y < mat.shape[1]-1 and x > 1 and mat[x-1][y+1] in background:\r\n return True\r\n else:\r\n return False", "def is_filled(self, point):\n point = np.asanyarray(point)\n indices = self.points_to_indices(point)\n in_range = np.logical_and(\n np.all(indices < np.array(self.shape), axis=-1),\n np.all(indices >= 0, axis=-1))\n\n is_filled = np.zeros_like(in_range)\n is_filled[in_range] = self.encoding.gather_nd(indices[in_range])\n return is_filled", "def is_at_intersection(self):\n directions = 0\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n if self.internal_map[self.tile[0] - 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0] + 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] - 1] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] + 1] not in ('x', ):\n directions += 1\n return True if directions > 2 else False", "def contains(self, coord):\n # print(coord, self.position, self.size)\n return (0 <= coord[0] - self.position[0] < self.size[0] and\n 0 <= coord[1] - self.position[1] < self.size[1])", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def __contains__(self, x_y_tuple):\n x, y = x_y_tuple\n return self.is_chip_at(x, y)", "def _is_occupied(\n grid: List[List[str]], row: int, col: int, dx: int, dy: int) -> bool:\n while 0 <= (row + dy) < len(grid) and 0 <= (col + dx) < len(grid[0]):\n row += dy\n col += dx\n if grid[row][col] == 'L':\n return False\n if grid[row][col] == '#':\n return True\n return False", "def __cell_is_occupied(self, x, y) -> bool:\n return self.occupancy_map.data[self.__get_cell_index(x, y)] != 0", "def insideMatrix(x, y, m, n):\n if x < 0 or x >= m or y < 0 or y >= n:\n return False\n return True", "def __isolated_cell(self, x, y):\n if ((x+1 >= self.n_col) or not (x+1, y) in self.empty) and \\\n ((y+1 >= self.n_row) or not (x, y+1) in self.empty):\n return True\n return False", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height", "def coordinateExist(coordX, coordY):\n\n return (\n 
coordX >= 0 and coordX < world_size and \n coordY >= 0 and coordY < world_size and\n visited[coordX][coordY] == False and\n world[coordX][coordY] \n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the matrix to a csv table
def write_out(matrix, filename):
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile)
        for r in matrix:
            writer.writerow(r)
    print(filename + ' writen!')
[ "def write_matrix_into_csv(file_address, matrix):\n num_of_elems = len(matrix)\n with open(file_address, \"w\") as file:\n for i in range(num_of_elems):\n for j in range(num_of_elems):\n if j == num_of_elems - 1:\n file.write(str(round(matrix[i][j], 6)))\n else:\n file.write(str(round(matrix[i][j], 6)) + \",\")\n file.write(\"\\n\")", "def get_matrix_csv( self ):\n nb_trees = self.trees.count()\n nb_taxa = self.taxa.count()\n stat = self.get_statistics()\n matrix = self.get_matrix()\n # Sorting matrix\n sort_info = {'trees':{}, 'taxa':{}}\n for taxa, trees in matrix.iteritems():\n sort_info[\"taxa\"][taxa] = len( [i for i in trees.values() if i] )\n for tree, presence in trees.iteritems():\n if presence:\n if not tree in sort_info[\"trees\"]:\n sort_info[\"trees\"][tree] = 0 \n sort_info[\"trees\"][tree] += 1 \n #\n taxa_list = [i[1] for i in sorted([(i,v) for v,i in sort_info['taxa'].items()])]\n tree_list = [i[1] for i in sorted([(i,v) for v,i in sort_info['trees'].items()])]\n d_tree_id_name = dict( Tree.objects.filter( id__in = tree_list).values_list( 'id','name' ) )\n csv = [\"taxa,\"+\",\".join([d_tree_id_name[tree] for tree in tree_list])]\n for taxa in taxa_list:\n tmp = matrix[taxa]\n line = [list(stat[taxa]['scientific_taxon_list'])[0]]\n for tree in tree_list:\n line.append( str(tmp[tree]) )\n csv.append( \",\".join( line ) )\n return \"\\n\".join( csv )", "def write_csv(self, outfile=sys.stdout):\n csv_out =csv.writer(outfile)\n csv_out.writerow(self.column_names)\n for d in self.data:\n csv_out.writerow(d)", "def save_matrix_tab(input_matrix, output_filename):\n with open (output_filename, 'w') as f:\n for i in xrange(len(input_matrix)):\n row = [str(j) for j in input_matrix[i]]\n if i != len(input_matrix) - 1:\n f.write('\\t'.join(row) + '\\n')\n else:\n f.write('\\t'.join(row))", "def similarity_to_csv(name, matrix, labels):\n\tif not os.path.exists(os.path.join(os.getcwd(),DATA_DIR)):\n\t\tos.makedirs(os.path.join(os.getcwd(),DATA_DIR))\n\n\tfile_dir = os.path.join(os.getcwd(), DATA_DIR, name + SIMILARITY + '.csv')\n\n\t# write csv -- \n\n\twith open(file_dir, 'w') as f:\n\t\twriter = csv.writer(f)\n\t\twriter.writerow(labels)\n\t\tfor i in range(0,len(labels)):\n\t\t\twriter.writerow([matrix[i,j] for j in range(len(labels))])", "def write_csv(header, table_data, output_file):\r\n with open(output_file, \"a+\", newline=\"\") as file:\r\n writer = csv.writer(file)\r\n if not os.path.exists(output_file) or os.path.getsize(output_file) == 0:\r\n writer.writerow(header)\r\n writer.writerows(table_data)\r\n print()\r\n print(f\"File {output_file} was created.\")", "def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return", "def write_file(matrix: list, path='matrix.csv'):\n matrix = [list(map(str, i)) for i in matrix]\n matrix = [','.join(i) + '\\n' for i in matrix]\n with open(path, 'w') as matrix_file:\n matrix_file.writelines(matrix)", "def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n 
result_writer.writerows(rows)", "def write_matrix(df, output_path):\n # 2do: Handle possible trailing \"/\" in output_path.\n file_path = output_path + \"/CodePresenceMatrix.csv\"\n with open(file_path, 'w') as file_obj:\n df.to_csv(path_or_buf=file_obj, sep='\\t')", "def write(self): \n # Open csv file\n with open(self.file_name, 'w', newline='') as file:\n self._writer = csv.writer(file)\n \n # Write header rows\n# self.write_sim_header_data(self.trace.sim.get_data())\n \n # Write trace table\n self._writer.writerow(['Record #', 'Rep', 'Time',\n 'Priority', 'Record Type', 'Name'])\n for trace_record in self.trace._record_list:\n self._writer.writerow(trace_record.get_row())\n file.close()", "def generate_csv_table(table_values):\n\n with open('ayasdi_assignment.csv', 'wb') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',')\n filewriter.writerows(table_values)", "def mat_to_csv(\n self,\n input_matrix,\n output_csv,\n fields=None,\n n_tab=1,\n debug=False,\n i='origin',\n j='destination'\n ):\n script_text = r\"\"\"\n RUN PGM=MATRIX PRNFILE=\"format_env\\mat_to_csv.prn\" MSG='mat_to_csv'\n\n FILEI MATI[1] = filei_mati\n FILEO PRINTO[1] = fileo_printo\n\n print_headers\n JLOOP\n print_in_jloop\n ENDJLOOP\n\n ENDRUN\n \"\"\"\n if fields is None:\n tabs = ['tab_%i' % (i + 1) for i in range(n_tab)]\n fields = tabs\n else:\n n_tab = len(fields)\n field_names = ', '.join(fields)\n\n filei_mati = '\"%s\"' % input_matrix\n fileo_printo = '\"%s\"' % output_csv\n\n print_headers = 'IF (I = 1) \\n PRINT LIST =\"' + '\" ,\";\" ,\"'.join([i, j] + fields) + '\" PRINTO = 1 \\n ENDIF'\n print_assignation = ' '.join(['%s = MI.1.%s \\n' % (fields[i].replace(' ', '_'), i + 1) for i in range(n_tab)])\n print_statement = 'PRINT LIST = I, \";\", J, \";\", ' + ',\";\",'.join([f.replace(' ', '_') for f in fields]) + ' PRINTO = 1'\n print_in_jloop = print_assignation + ' \\n' + print_statement\n\n # creating a cube script\n script = open(self.environment + r'\\mat_to_csv.s', 'w', encoding='latin')\n script.write(script_text.replace(\n 'format_env', self.environment).replace(\n 'filei_mati', filei_mati).replace(\n 'fileo_printo', fileo_printo).replace(\n 'field_names', field_names).replace(\n 'print_in_jloop', print_in_jloop).replace('print_headers', print_headers))\n script.close()\n\n # runs the script with voyager.exe\n options = \"\"\"/Start /CloseWhenDone /Minimize /NoSplash\"\"\" if not debug else \"\"\n os.system('voyager.exe \"' + self.environment + r'\\mat_to_csv.s\" ' + options)", "def write_matrix(matrix, filename, gzip):\n\n matrix.rename(columns={'gene' : '#gene'}, inplace=True)\n \n matrix.to_csv(filename, sep='\\t', index=False, na_rep='NA')\n\n if gzip:\n subprocess.run(['gzip', '-f', filename])", "def write_tabular(self, filename, metric='auroc'):\n header = '\\n\\nEvaluation results ({}):\\n-----------------------\\n'.format(metric)\n f = open(filename, 'a')\n f.write(header)\n df = self.get_pandas_df(metric)\n df.to_csv(f, sep='\\t', na_rep='NA')\n f.close()", "def writeMatrix(self):\n\t\tpass", "def write_csv(self):\n self.query_dataframe.to_csv(self.csv_filename, index=False)", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def save_as_csv(self, path):\r\n 
print(\"Saving the performance table as a csv file at {}\".format(path))\r\n fp = open(path, 'w')\r\n fp.write(\"FOLD,EPOCH,{}\".format(PerformanceTableEntry.get_csv_columns()))\r\n\r\n for k in range(self.k):\r\n for epoch in range(self.epochs):\r\n entry = self.table[k][epoch]\r\n if entry:\r\n fold_epoch = \"{},{},\".format(k, epoch)\r\n fp.write(fold_epoch + entry.to_string())\r\n fp.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a DCEL from the output of matplotlib.delaunay.delaunay.
def from_delaunay_triangulation(cls, xl, yl, triangles, circumcentres):
    def add_containing_face_to_dcel():
        containing_face_edges = [edge for edge in dcel.edges if not edge.nxt]
        edge = containing_face_edges.pop()
        face = Face(outer_component=None, inner_components=[edge])
        dcel.faces.append(face)
        first_edge = edge
        previous_edge = [
            e for e in containing_face_edges
            if e.get_destination() == edge.origin
        ]
        edge.prev = previous_edge[0]
        while len(containing_face_edges) > 1:
            edge.incident_face = face
            next_edge = [
                e for e in containing_face_edges
                if e.origin == edge.get_destination()
            ]
            edge.nxt = next_edge[0]
            next_edge[0].prev = edge
            edge = next_edge[0]
            containing_face_edges.remove(next_edge[0])
        edge_2 = containing_face_edges.pop()
        edge.incident_face = face
        edge_2.incident_face = face
        edge_2.prev = edge
        edge_2.nxt = first_edge
        edge.nxt = edge_2

    def add_triangle_edges(circumcentre):
        triangles_edges = []
        for vertex_idx, origin in enumerate(triangle_vertices):
            # Destination of the edge in this triangle that has vertex as origin
            destination = triangle_vertices[(vertex_idx + 1) % 3]
            edge_1 = HalfEdge(origin)
            edge_2 = HalfEdge(destination, twin=edge_1)
            edge_1.twin = edge_2
            edge_1 = dcel.add_edge(edge_1)
            edge_2.twin = edge_1
            edge_2 = dcel.add_edge(edge_2)
            edge_1.twin = edge_2
            triangles_edges.append(edge_1)
        triangle_face = Face(triangles_edges[0], circumcentre=list(circumcentre))
        dcel.faces.append(triangle_face)
        # Set previous and next of the edges
        for edge_idx, edge in enumerate(triangles_edges):
            edge.nxt = triangles_edges[(edge_idx + 1) % 3]
            edge.prev = triangles_edges[(edge_idx + 3 - 1) % 3]
            edge.incident_face = triangle_face
            triangle_vertices[edge_idx].incident_edge = edge

    dcel = cls()
    for t_idx, t in enumerate(triangles):
        triangle_vertices = [
            dcel.add_vertex(Vertex(x))
            for x in du.get_triangle_vertices(xl, yl, t)
        ]
        add_triangle_edges(circumcentres[t_idx])
    add_containing_face_to_dcel()
    return dcel
[ "def delaunay_triples( # pylint: disable=too-many-arguments,too-many-locals\n data=None,\n x=None,\n y=None,\n z=None,\n output_type=\"pandas\",\n outfile=None,\n projection=None,\n verbose=None,\n binary=None,\n nodata=None,\n find=None,\n coltypes=None,\n header=None,\n incols=None,\n skiprows=None,\n wrap=None,\n **kwargs,\n ):\n # Return a pandas.DataFrame if ``outfile`` is not set\n if output_type not in [\"numpy\", \"pandas\", \"file\"]:\n raise GMTInvalidInput(\n \"Must specify 'output_type' either as 'numpy', 'pandas' or 'file'.\"\n )\n\n if isinstance(outfile, str) and output_type != \"file\":\n msg = (\n f\"Changing 'output_type' from '{output_type}' to 'file' \"\n \"since 'outfile' parameter is set. Please use output_type='file' \"\n \"to silence this warning.\"\n )\n warnings.warn(message=msg, category=RuntimeWarning, stacklevel=2)\n output_type = \"file\"\n\n # Return a pandas.DataFrame if ``outfile`` is not set\n with GMTTempFile(suffix=\".txt\") as tmpfile:\n if output_type != \"file\":\n outfile = tmpfile.name\n return triangulate._triangulate(\n data=data,\n x=x,\n y=y,\n z=z,\n output_type=output_type,\n outfile=outfile,\n projection=projection,\n verbose=verbose,\n binary=binary,\n nodata=nodata,\n find=find,\n coltypes=coltypes,\n header=header,\n incols=incols,\n skiprows=skiprows,\n wrap=wrap,\n **kwargs,\n )", "def __plot_delaunay(self, ax=None) -> None:\n for simplex in self.hull.simplices:\n ax.plot(self.points[simplex, 0], self.points[simplex, 1], \"r-\")\n\n tri = Delaunay(self.points)\n ax.triplot(self.points[:, 0], self.points[:, 1], tri.simplices.copy(), lw=1)", "def _get_diff_dc(self):\n self._diff_dc = tuple(encode_differential(self.data[:, 0, 0]))", "def __init__(self, points_x=None, points_y=None, boundary_mask=None, verbose=None, filename=None):\n\n from scipy.spatial import Delaunay as __Delaunay\n import time\n\n if filename:\n try:\n meshdata = np.load(filename)\n self.x = meshdata['x']\n self.y = meshdata['y']\n self.bmask = meshdata['bmask']\n\n except:\n print \"Invalid mesh file - \", filename\n\n else:\n self.x = np.array(points_x)\n self.y = np.array(points_y)\n self.bmask = np.array(boundary_mask)\n\n self.verbose = verbose\n\n # multiple possible implementations of the vector operators\n # Note the need to store the correct matrices / arrays\n\n self.delaunay_dx = self._matrix_delaunay_dx\n self.delaunay_dy = self._matrix_delaunay_dy\n self.delaunay_grad = self._matrix_delaunay_grad\n self.delaunay_div = self._matrix_delaunay_div\n self.delaunay_del2 = self._matrix_delaunay_del2\n self._store_delaunay_grad_matrix = self._matrix_store_delaunay_grad_matrix\n\n\n walltime = time.clock()\n points = np.column_stack((self.x, self.y))\n self.tri = __Delaunay(points)\n if self.verbose:\n print \" - Calculating Delaunay Triangulation \", time.clock() - walltime,\"s\"\n\n\n ## Construct the neighbour list which is absent from the Voronoi data structure\n\n walltime = time.clock()\n self._store_neighbour_information()\n if self.verbose:\n print \" - Triangulation Neighbour Lists \", time.clock() - walltime,\"s\"\n\n ## Summation weights and local areas\n\n walltime = time.clock()\n self._store_weights_and_measures()\n if self.verbose:\n print \" - Triangulation Local Areas and Weights \", time.clock() - walltime,\"s\"\n\n ## Matrix of gradient coefficients\n\n walltime = time.clock()\n self._store_delaunay_grad_matrix()\n if self.verbose:\n print \" - Triangulation Vector Operators \", time.clock() - walltime,\"s\"\n\n walltime = time.clock()\n 
self._matrix_build_local_area_smoothing_matrix()\n if self.verbose:\n print \" - Local Smoothing Operator \", time.clock() - walltime,\"s\"\n\n return", "def extract_CDL(self, \n plots = True,\n ):\n\n if all([os.path.isfile('{}/{}landuse.csv'.format(self.landusedata, y))\n for y in self.years]):\n print('CDL data for {} exist\\n'.format(self.HUC8))\n return\n\n # make an instance of the CDLExtractor to use to get the data\n\n cdlextractor = CDLExtractor(self.landusedata)\n\n # download the CDL data for the watershed for each year\n\n for year in self.years:\n \n p = '{}/{}landuse.tif'.format(self.landusedata, year)\n e = '{}/NASSerror{}.html'.format(self.landusedata, year)\n\n # if the file has not been downloaded (or attempted), try to get it\n\n if os.path.isfile(e):\n\n print('land use data for {} are unavailable'.format(year))\n\n elif not os.path.isfile(p):\n\n try:\n\n cdlextractor.download_shapefile(self.subbasinfile, year)\n\n except:\n\n print('warning: data for {} are not available'.format(year))\n\n else:\n\n print('land use data for {} exist'.format(year))\n \n print('')\n\n # check to see if CDL data are available for each year\n\n l = self.landusedata\n self.years = [year\n for year in self.years\n if os.path.isfile('{}/{}landuse.tif'.format(l, year))]\n\n for year in self.years:\n\n # landuse in each subbasin for each year\n\n extracted = '{}/{}landuse.tif'.format(self.landusedata, year)\n\n # field code for the subbasin shapefile to match CDL data\n\n attribute = 'ComID'\n\n # csv file of the output\n\n csvfile = '{}/{}landuse.csv'.format(self.landusedata, year)\n\n if not os.path.isfile(csvfile):\n\n try:\n\n cdlextractor.calculate_landuse(extracted, \n self.subbasinfile,\n self.cdlaggregate, \n attribute,\n csvfile = csvfile)\n\n if plots:\n\n # raw landuse plot\n\n raw = '{}/{}raw_landuse'.format(self.landusedata, year)\n if not os.path.isfile(raw + '.png'):\n cdlextractor.plot_landuse(extracted, \n self.subbasinfile, \n attribute, \n self.landuse, \n output = raw, \n lw = 2.,\n datatype = 'raw')\n\n # aggregated land use plot\n\n its = self.landusedata, year\n results = '{}/{}aggregated_landuse'.format(*its)\n if not os.path.isfile(results + '.png'):\n cdlextractor.plot_landuse(extracted, \n self.subbasinfile, \n attribute, \n self.landuse,\n output = results, \n datatype = 'results')\n\n except Exception as error:\n print('warning: unable to calculate land use for year ' +\n '{} due to the follow error:'.format(year))\n print(error)", "def _compute_gt_volume_delaunay(self):\n gt_volume = self.get_gt_volume(labels=[l.CONTOUR, l.INSIDE])\n # gt_volume = get_mask_by_label(self.gt_volume, l.CONTOUR)\n if gt_volume is None or gt_volume.any() == False:\n return\n gt_volume = viewer.delaunay(gt_volume)\n self.gt_delaunay = gt_volume", "def get_dem(myhuc, sources):\n logging.info(\"\")\n logging.info(\"Preprocessing DEM\")\n logging.info(\"==========================\")\n logging.info(\"downloading DEM\")\n\n # load shapefiles for the HUC of interest\n logging.info(\"loading HUC %s\"%myhuc)\n profile, huc = sources['HUC'].load_huc(myhuc)\n assert(profile['crs']['init'] == 'epsg:4269') # latlong\n\n dem_profile, dem = workflow.clip.clip_dem(huc, sources['DEM'])\n dem = dem[0,:,:] # only the first band\n return dem_profile, dem", "def make_dtde(self, dtde_file):\n if not os.path.isfile(dtde_file):\n raise IOError(f'Invalid dtde file: {dtde_file}')\n\n colName, colData, colForm, colUnit = {}, {}, {}, {}\n\n # read in dtde data from text file\n with open(dtde_file) as fin:\n for 
line in fin:\n # skip comments\n if line.startswith('#'):\n continue\n\n row = line.split()\n\n # column names\n if row[0] == 'DTDE':\n colRange = range(len(row))\n for i in colRange:\n colName[i] = row[i]\n colData[i] = []\n # data\n else:\n for i in colRange:\n colData[i].append(row[i])\n\n # convert data to numpy arrays\n colData[0] = np.array(colData[0], dtype=np.float32)\n colForm[0] = 'E'\n colUnit[0] = ''\n\n colData[1] = np.array(colData[1], dtype=np.int32)\n colForm[1] = 'J'\n colUnit[1] = 'DN/S'\n\n c0 = fits.Column(name=colName[0], format=colForm[0], array=colData[0])\n c1 = fits.Column(name=colName[1], format=colForm[1], unit=colUnit[1],\n array=colData[1])\n\n self.dtde = fits.BinTableHDU.from_columns(fits.ColDefs([c0, c1]))\n self.dtde.header['EXTNAME'] = 'DTDE'\n self.dtde.header['DATAFILE'] = (os.path.basename(dtde_file),\n 'data source file')", "def DX(df, time_period=14):\n high = df['high']\n low = df['low']\n close = df['close']\n return talib.DX(high, low, close, timeperiod=time_period)", "def detrend(xyz_csv, in_dem, aoi_shp):\n\n print('Detrending DEM...')\n detrended_dem = detrend_that_raster(xyz_csv=xyz_csv, in_dem=in_dem, aoi_shp=aoi_shp)\n print('Done')\n print('Detrended DEM @ %s' % detrended_dem)", "def plotb0( self, ir=45, db=0,eps=0 ):\n if (self.qlde_hdl == -1):\n print (\"qlde File not found\")\n return\n if (eps>0):\n db=2*eps/(1.-eps)\n\n dqlpsi=self.qlde_hdl.variables['Psi'].data\n dqltemp=self.qlde_hdl.variables['Tem'].data\n dql_LD=self.qlde_hdl.variables['Qldce_LD'].data\n nuperp=self.qlde_hdl.dimensions['VelPrpDim']\n nupar=self.qlde_hdl.dimensions['VelDim']\n\n umax=(self.qlde_hdl.variables['Umax'].data)[0]\n umin=(self.qlde_hdl.variables['Umin'].data)[0]\n upar=np.arange(nupar)/float(nupar-1)*(umax-umin)+umin\n uperp=np.arange(nuperp)/float(nuperp-1)*umax\n vx,vz=np.meshgrid(uperp,upar)\n\n fig=plt.figure(figsize=(2.*8.3,2.*3.7))\n plt.axes().set_aspect(1, 'box')\n #plot passing trapped boundary\n roa = dqlpsi[ir]\n if (eps<0):\n db=2.*np.abs(eps)*roa/(1.-np.abs(eps)*roa)\n\n if (db>0):\n vpar=np.sqrt(db)*umax\n plt.plot([0,vpar],[0,umax],'k',[0,-vpar],[0,umax],'k',linewidth=2)\n\n dq=np.transpose(np.log((dql_LD[ir,:,:])+1.)/np.log(10)) #np.abs\n mxdq=int(dq.max())\n ll=range(mxdq-10,mxdq)\n cd=plt.contourf(vz,vx,dq,levels=ll)\n#,10)\n plt.gca().set_ylim(0,umax)\n cbar=plt.colorbar(cd)\n\n plt.title(r'log10 $\\lambda$<B> at r/a='+str(roa)[0:4],size=30)\n plt.ylabel(r'$u_{\\bot0}/u_{n}$',size=20)\n plt.xlabel(r'$u_{||0}/u_{n}$',size=20)\n plt.draw() #make sure ylimits are updated in displayed plot\n return", "def build_xy_plot_python():\n\n layout = XYPlotPythonLayout(table_select=None,\n x_axis=None,\n y_axis=None,\n plot=None,\n data=None)\n\n return layout.render_components()", "def create_dull_pd_matrix(dullvalue=0.0, dullname=\"A\", startdate=pd.datetime(1970,1,1).date(), enddate=datetime.datetime.now().date(), index=None):\n if index is None:\n index=pd.date_range(startdate, enddate)\n \n dullvalue=np.array([dullvalue]*len(index))\n \n ans=pd.DataFrame(dullvalue, index, columns=[dullname])\n \n return ans", "def BundledDEMWrapper(hed_unet):\n input_bundled = Input([None, None, 4])\n inputs = [input_bundled]\n x, dem = ExtractBundledDEM()(input_bundled)\n out = hed_unet([x, dem])\n return Model(inputs=[input_bundled], outputs=[out])", "def make_xy(self, **kwarg):\n\n # 从沙盒中读取测试数据\n ABuEnv.enable_example_env_ipython()\n tsla = ABuSymbolPd.make_kl_df('usTSLA')\n ABuEnv.disable_example_env_ipython()\n\n # 留五个做为测试,其它的都做训练\n train_df = tsla[:-5]\n # 
make_xy中需要确定self.df\n self.df = train_df.filter(['close', 'open', 'high', 'low', 'pre_close', 'date_week'])\n tsla_matrix = self.df.as_matrix()\n # close列做为y,make_xy中需要确定self.y\n self.y = tsla_matrix[:, 0]\n # 'open', 'high', 'low', 'pre_close', 'date_week'做为x\n self.x = tsla_matrix[:, 1:]\n\n # 最后5个交易日做为测试数据, 只做为AbuMLPd使用示例\n test_df = tsla[-5:]\n tsla_matrix = test_df.filter(['close', 'open', 'high', 'low', 'pre_close', 'date_week']).as_matrix()\n self.y_test = tsla_matrix[:, 0]\n self.x_test = tsla_matrix[:, 1:]", "def delaunay_3d(self, alpha=0.0, tol=0.001, offset=2.5, progress_bar=False):\n alg = _vtk.vtkDelaunay3D()\n alg.SetInputData(self)\n alg.SetAlpha(alpha)\n alg.SetTolerance(tol)\n alg.SetOffset(offset)\n _update_alg(alg, progress_bar, 'Computing 3D Triangulation')\n return _get_output(alg)", "def __repr__(self):\n return (\n '<DCEL ('\n 'vertices:\\n {obj.vertices},\\n'\n 'edges:\\n {obj.edges},\\n'\n 'faces:\\n {obj.faces}>'.format(obj=self)\n )", "def dem_generation(lastoolsdir, lidardir, ground_poly, cores, units_code, keep_orig_pts, coarse_step,\n coarse_bulge, coarse_spike, coarse_down_spike,\n coarse_offset, fine_step, fine_bulge, fine_spike,\n fine_down_spike, fine_offset, aoi_shp,\n dem_resolution, dem_method, tri_meth, void_meth):\n\n # We carry input spatial ref over from the above process, but we should still convert from shp to ref object\n print('Processing LiDAR to remove vegetation points...')\n las_folder = lidardir + '\\\\las_files\\\\'\n process_lidar(lastoolsdir + '\\\\', las_folder, ground_poly, cores, units_code, keep_orig_pts,\n coarse_step,\n coarse_bulge, coarse_spike, coarse_down_spike,\n coarse_offset, fine_step, fine_bulge, fine_spike,\n fine_down_spike, fine_offset)\n print('Done')\n\n print('Generating a %sm resolution DEM...' % dem_resolution)\n dem = lidar_to_raster(lidardir, ground_poly, aoi_shp, dem_method, tri_meth, void_meth,\n m_cell_size=float(dem_resolution))\n print('Done')\n\n print('Generating hillshade raster for the DEM...')\n hill_out = lidardir + '\\\\hillshade.tif'\n arcpy.HillShade_3d(dem, hill_out)\n print('Done')", "def myderiv(d):\n s = arbplf_deriv(json.dumps(d))\n df = pd.read_json(StringIO(s), orient='split', precise_float=True)\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an edge to the DCEL if it doesn't already exist, otherwise return the existing edge.
def add_edge(self, edge):
    try:
        edge_idx = self.edges.index(edge)
        return self.edges[edge_idx]
    except Exception:
        self.edges.append(edge)
        return edge
[ "def add_edge(self, e):\n\n # Just add egde.\n self.Edges.append(e)\n\n return e", "def add_edge(self, edge: e.Edge) -> None:\n if edge not in self.edges:\n self.edges.append(edge)\n self.num_edges = self.num_edges + 1", "def add_edge(self, edge):\n chromosome = edge.chromosome\n start = edge.start\n end = edge.end\n edge_id = str(edge.identifier)\n\n if chromosome not in self.chromosomes:\n self.add_chromosome(chromosome)\n if edge.start == edge.end:\n print(\"Ignoring edge with ID \" + edge_id + \" because its length is zero.\")\n return\n\n # Check for collisions. There are two ways a collision can happen.\n # By far the most common case is an edge that is in multiple \n # transcripts. For this case, merge the edge transcript sets. \n # IntervalTrees allow redundancy for intervals with the exact same\n # positions, so it is OK if two exons from different genes occupy \n # identical intervals\n if edge_id in self.edges:\n oldEdge = self.edges[edge_id]\n edge.transcript_ids = oldEdge.transcript_ids | edge.transcript_ids\n else: \n self.chromosomes[chromosome][start:end] = edge_id\n \n self.edges[edge_id] = edge\n return", "def add_edge(self, edge):\r\n edge = set(edge)\r\n (vertex1, vertex2) = tuple(edge)\r\n \r\n if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():\r\n if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]:\r\n return\r\n self.__graph_dict[vertex1].add(vertex2)\r\n self.__graph_dict[vertex2].add(vertex1)\r\n elif vertex1 not in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys():\r\n self.__graph_dict[vertex1] = {vertex2}\r\n self.__graph_dict[vertex2].add(vertex1)\r\n elif vertex1 in self.__graph_dict.keys() and vertex2 not in self.__graph_dict.keys():\r\n self.__graph_dict[vertex2] = {vertex1}\r\n self.__graph_dict[vertex1].add(vertex2)\r\n else:\r\n self.__graph_dict[vertex1] = {vertex2}\r\n self.__graph_dict[vertex2] = {vertex1}", "def add_edge(self,e):\r\n\r\n self.edge_set.add(e)\r\n\r\n self.update_vertices()", "def add_edge(self, edge):\n src = edge.get_source()\n dest = edge.get_destination()\n #weightEdge = WeightedEdge(src, dest, edge.get_total_distance(), edge.get_outdoor_distance())\n if not (src in self.edges and dest in self.edges):\n raise ValueError('Node not in graph')\n self.edges[src].append(dest)\n #self.edges[src].append(weightEdge)", "def add_edge(self, source, target):\n source, target = self.__resolve_node_tool(source, target)\n source_node = None\n try:\n source_node = self._nodes[source]\n except LookupError:\n return None\n target_node = self._nodes[target]\n edge = Edge(source_node, target_node)\n if edge in self._edges:\n for known in self._edges:\n if edge == known:\n return known\n\n log.debug(\"Add edge: %s->%s\", source_node, target_node)\n self._edges.add(edge)\n if not edge in source_node._edges:\n source_node._edges.append(edge)\n if not edge in target_node._edges:\n target_node._edges.append(edge)\n return edge", "def add_edge(self, edge):\n if self.graph.has_edge(edge):\n self.edges.insert(0, edge)\n self.path_length += edge.get_weight()", "def addEdge(self, e):\n v = e.either()\n w = e.other(v)\n self._validateVertex(v)\n self._validateVertex(w)\n self._adj[v].add(e)\n self._adj[w].add(e)\n self._E += 1", "def _add_edge(edges, src, dest):\n edge = (src, dest)\n if edge not in edges:\n edges.add(edge)", "def add_edge(self, ed):\n self.edge.append(ed)\n self.update_node2edge()", "def addEdge(self, edge):\r\n src = edge.getSource()\r\n dest = 
edge.getDestination()\r\n if not(src in self.nodes and dest in self.nodes):\r\n raise ValueError('Node not in graph')\r\n\r\n if src in self.edges.keys():\r\n if dest not in self.edges[src].keys():\r\n destdict = self.edges[src]\r\n destdict[dest] = edge.gettime()", "def add_edge(self, e):\n\n if not self.is_vertex(e[0]) or not self.is_vertex(e[1]):\n raise ValueError(\"An endpoint is not in graph\")\n self.alist[e[0]].append(e[1])", "def add_edge(self, e):\n a, b = e\n self[a][b] = e\n self[b][a] = e", "def add_edge(graph, edge):\n source_node_id, destiny_node_id, graph_edge_attr = edge\n graph.edge(source_node_id, destiny_node_id, **graph_edge_attr)\n return graph", "def appendEdge(self,edge):\n\t\tself.way.append(edge)\n\t\treturn True", "def add_edge(e, R):\n\tR.add_edges_from([tuple(e)])\n\ta=R.get_edge_data(e[0], e[1])\n\ta['c']=cf.routingCost(R,e,storeData=True)\n\t#update_after_mod(e,R)\n\tassert a['c']>=0 #who knows..", "def attach(self, edge):\n raise NotImplementedError", "def test_add_edge_on_existing_verts_with_edges():\n g = graph_filled()\n assert g.graph['A'] == {'B': 10}\n g.add_edge('A', 'E', 7)\n assert g.graph['A'] == {'B': 10, 'E': 7}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a vertex to the DCEL if it doesn't already exist, otherwise return the existing vertex.
def add_vertex(self, vertex):
    try:
        vertex_idx = self.vertices.index(vertex)
        # print "{} already in {}".format(vertex, self.vertices)
        return self.vertices[vertex_idx]
    except Exception:
        self.vertices.append(vertex)
        # print "adding {} to {}".format(vertex, self.vertices)
        return vertex
[ "def add_vertex(self, key):\n\n if key in self.vert_dict:\n print(f'Vertex {key} already exists')\n return\n\n # create a new vertex\n new_vertex = Vertex(key)\n self.vert_dict[key] = new_vertex\n self.num_vertices += 1\n\n return new_vertex", "def _add_vertex(self, x, y):\n v = Vertex2(x, y)\n i = bisect(self.vertices, v)\n \n # if vertex at these coordinates exists just return it\n if len(self.vertices) > i and self.vertices[i] == v:\n return self.vertices[i]\n \n # otherwise add new vertex in sorted position and return it\n self.vertices.insert(i, v)\n return v", "def add_vertex(self, v):\n v = {'x': v[0], 'y': v[1]}\n if v not in self:\n self.append(v)\n return len(self)-1\n return self.index(v)", "def add_vertex(self, name, raise_exception=False):\n if self.__vertices.has_key(name):\n if raise_exception:\n raise GraphException(\"Error adding vertex '%s': Vertex already exists.\" % name)\n else:\n return self.__vertices[name]\n else:\n self.__g.add_vertex(1)\n n = self.__g.vcount() - 1\n self.__vertices[name] = n\n self.__g.vs[n]['name'] = name\n return n", "def add_vertex(self, vertex):\n pass", "def add_vertex(self, vertex):\n raise NotImplementedError", "def add_vertex(self, key):\n #increments the number of vertices\n #creates a new vertex\n #adds the new vertex to the vertex list\n #returns the new vertex\n if key != None:\n self.num_vertices += 1\n new_vertex = Vertex(key)\n self.vert_list[key] = new_vertex\n return new_vertex\n raise KeyError(\"There's no key here\")", "def add_vertex(self, key):\n self.num_vertices += 1\n new_vertex = Vertex(key)\n self.vertices[key] = new_vertex\n\n return new_vertex", "def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []", "def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices += 1\n self.graph[key] = vertex\n\n return vertex", "def add_vertex(self, vertex):\n # pass # TODO\n self.vertices[vertex] = set()", "def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []", "def add_vertex(self, vertex):\r\n if self.is_vertex_in_graph(vertex):\r\n raise GraphException(\"The vertex already exists.\")\r\n self.__neighbours[vertex] = []", "def add_vertex(self, vertex):\n self.vertices.add(vertex)\n \"\"\"Add vertex to the graph.\"\"\"\n if vertex not in self.adjacent:\n self.adjacent[vertex] = []", "def __add__(self, vertex):\n\n if isinstance(vertex, Vertex):\n vName = vertex.name\n self._vertices[vName] = vertex", "def add_vertex(self, vertex, edges=()):\n if vertex in self.vertices:\n raise Exception('Error: adding vertex that already exists')\n if not set(edges).issubset(self.vertices):\n raise Exception('Error: cannot have edge to nonexistent vertices')\n self.vertices[vertex] = set(edges)", "def add_vertex(self, key):\n\t\tvertex = Vertex(key)\n\t\tself.vertices[key] = vertex", "def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices[key] = vertex", "def add_vertex(self, vertex_name: n):\n new_vertex = Vertex(vertex_name)\n self._graph[new_vertex.name] = new_vertex" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a face to the DCEL if it doesn't already exist, otherwise return the existing face.
def add_face(self, face):
    try:
        face_idx = self.faces.index(face)
        return self.faces[face_idx]
    except Exception:
        self.faces.append(face)
        return face
[ "def add_face(self, face):\n\n if face.uuid is None:\n face.uuid = self._generate_uuid()\n\n if face.uuid in self._faces:\n error_str = \"Trying to add an already existing face with uuid: \"\\\n + str(face.uuid)\n raise KeyError(error_str)\n\n self._faces[face.uuid] = Face.from_face(face)\n\n return face.uuid", "def add_face(self, f):\n\n # If face is already link to some zome,\n # we have to unlink it first.\n if f.Zone is not None:\n f.unlink_from_zone()\n\n # Just add and set link to the zone.\n f.Zone = self\n self.Faces.append(f)\n\n return f", "def update_face(self, face):\n\n if face.uuid not in self._faces:\n error_str = \"Trying to update a non-existing face with uuid: \"\\\n + str(face.uuid)\n raise KeyError(error_str)\n\n if not isinstance(face, Face):\n error_str = \"Trying to update an object with the wrong type. \"\\\n + \"Face expected.\"\n raise TypeError(error_str)\n\n face_to_update = self._faces[face.uuid]\n\n face_to_update.data = face.data\n face_to_update.points = face.points", "def addFace(self, vertices, bypassCheck=False):\n try:\n if bypassCheck:\n raise ValueError\n return self.getFace(vertices)\n except ValueError:\n if any(vertices.count(v) > 1 for v in vertices):\n raise ValueError('The face given is invalid: '\n 'two or more vertices are identical')\n newF = face(vertices)\n self.faces.append(newF)\n for i in range(len(vertices)):\n try:\n e = self.getEdge(vertices[i],\n vertices[(i + 1) % len(vertices)])\n except ValueError:\n e = self.addEdge(vertices[i],\n vertices[(i + 1) % len(vertices)])\n e.linkFace(newF)\n return newF", "def _add_facet(self, ea, eb, ec):\n f = Facet2(ea, eb, ec)\n i = bisect(self.edges, f)\n if len(self.facets) > i and self.facets[i] == f:\n return self.facets[i]\n \n self.facets.insert(i, f)\n return f", "def add_vertex_to_face(mesh, fkey, vkey, added_vkey):\n\n if vkey not in mesh.face_vertices(fkey):\n return None\n\n face_vertices = mesh.face_vertices(fkey)[:]\n idx = face_vertices.index(vkey) + 1 - len(face_vertices)\n face_vertices.insert(idx, added_vkey)\n mesh.delete_face(fkey)\n mesh.add_face(face_vertices, fkey = fkey)\n\n return face_vertices", "def change_face(self, face):\n if self.face is not None:\n self.face.remove_point(self)\n\n self.face = face\n self.face.add_point(self)", "def add_face(image, face_list_id, user_data=None, target_face=None):\n url = 'facelists/{}/persistedFaces'.format(face_list_id)\n headers, data, json = util.parse_image(image)\n params = {\n 'userData': user_data,\n 'targetFace': target_face,\n }\n\n return util.request('POST', url, headers=headers, params=params, json=json,\n data=data)", "def get_face(self, uuid):\n\n try:\n return Face.from_face(self._faces[uuid])\n except KeyError:\n error_str = \"Trying to get an non-existing face with uuid: {}\"\n raise ValueError(error_str.format(uuid))", "def add_facet(self, facet):\n\n self.facets.append(facet)\n return len(self.facets) - 1", "def insertFace(bm, v):\n a = []\n for k in range(len(v)):\n a.append(bm.verts[v[k]])\n f = bm.faces.new(a)\n bm.faces.ensure_lookup_table()\n return f", "def add_face(self):\n # Indicating to Agent Pi that the method has begun\n self.__client.sendall(\"OK\".encode())\n\n # Receive username and image from Agent Pi\n message = self.receive_image_from_client()\n username = message['username']\n image = message['image']\n\n # Saving and encoding images\n self.__fru.add_face(username, image)\n self.__fru.encode_faces()\n\n # Letting Agent Pi know that the process has completed\n self.__client.send(\"OK\".encode())", 
"def face(self):\n return self._face", "def add_face(self,verts):\n face = []\n for v in verts[:-1]: # omit last vertex (same as first)\n face.append(self.vertex(v))\n if len(face) > 0:\n self.faceList.append(face)", "def add_face_descriptor(self, face_descriptor):\n self.face_list.append(face_descriptor)\n self.mean = self.compute_mean()", "def getFace(self, vertices):\n for f in self.faces:\n if f.vertices == vertices:\n return f\n raise ValueError('No face found')", "def add_face(self, vertices: Iterable[\"Vertex\"]) -> None:\n self.faces.append(self.add_vertices(vertices))", "def selectmeshface(self):#Not used yet\n go = Rhino.Input.Custom.GetObject()\n go.GeometryFilter=Rhino.DocObjects.ObjectType.MeshFace\n go.SetCommandPrompt(\"Get mesh Face\")\n go.Get()\n objref=go.Object(0)\n face_guid = objref.ObjectId\n go.Dispose()\n \n return face_guid", "def addPhoto(fileName, personName):\n\n #Check if image is a jpg\n if (fileName[-4:] != \".jpg\"):\n print(\"\\n[!] File extenstion must be .jpg!\\n\")\n return\n\n #Check image exists\n if (not os.path.isfile(fileName)):\n print(\"\\n[!] File does not exist!\\n\")\n return\n\n #Check no illegal characters in file name\n for c in ILLEGAL_FILE_NAMES:\n if (c in personName):\n print(\"\\n[!] Provided name contains an illegal argument\\n\")\n return\n\n #Load image\n image = face_recognition.load_image_file(fileName)\n\n #Use the name in the filename as the identity key\n identity = os.path.splitext(os.path.basename(fileName))[0]\n\n #Get the face location\n locationsHog = hogDetectFaceLocations(image)\n\n locationsHaar = haarDetectFaceLocations(image)\n\n #Get the face encoding\n encodingsHaar = face_recognition.face_encodings(image, locationsHaar)\n encodingsHog = face_recognition.face_encodings(image, locationsHog)\n\n #check if exactly one face is in the photo\n if ((len(encodingsHaar) == 0) or (len(encodingsHog) == 0)):\n print(\"\\n[!] No face detected in the provided photo\\n\")\n return\n\n elif ((len(encodingsHaar) > 1) or (len(encodingsHog) > 1)):\n print(\"\\n[!] More than one face detected in the provided photo\\n\")\n return\n\n #Set path to respective dataset\n directoryToAddTo = DATABASE_PATH + personName\n\n #Look for directory\n exists = False\n for subdir, dirs, files in os.walk(DATABASE_PATH):\n if (subdir == directoryToAddTo):\n exists = True\n\n #If directory doesnt exist, make it\n if (not exists):\n os.mkdir(directoryToAddTo)\n\n #Save data to file\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Haar.txt\"),\n encodingsHaar[0])\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Hog.txt\"),\n encodingsHog[0])\n\n print(\"\\n[*] Face successfully added!\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of vertices that form the outer boundary of finite faces of the DCEL.
def get_outer_boundary_of_voronoi(self):
    edge = [edge for edge in self.edges if not edge.nxt][0]
    # next(obj for obj in objs if obj.val==5)
    first_vertex = edge.origin
    outer_boundary = []
    while (not edge.get_destination() == first_vertex):
        if(edge.get_destination().is_infinity()):
            edge = edge.twin.nxt
        else:
            outer_boundary.append(edge)
            edge = edge.nxt
    outer_boundary.append(edge)
    return outer_boundary
[ "def boundary_vertices():\n\n from searchspace.geometry import Point\n\n boundary_vertices = list()\n boundary_vertices.append(Point(18.1, -37.1))\n boundary_vertices.append(Point(18.1, -37.0))\n boundary_vertices.append(Point(18.0, -37.0))\n boundary_vertices.append(Point(18.0, -37.1))\n\n return boundary_vertices", "def vertices_on_boundary(self):\n boundaries = self.vertices_on_boundaries()\n return boundaries[0] if boundaries else []", "def faces_on_boundaries(self):\n vertexgroups = self.vertices_on_boundaries()\n facegroups = []\n for vertices in vertexgroups:\n temp = [self.halfedge_face((v, u)) for u, v in pairwise(vertices)]\n faces = []\n for face in temp:\n if face is None:\n continue\n if face not in faces and all(face not in group for group in facegroups):\n faces.append(face)\n if faces:\n facegroups.append(faces)\n return facegroups", "def faces_on_boundary(self):\n boundaries = self.faces_on_boundaries()\n return boundaries[0] if boundaries else []", "def GetInteriorEdgesQuad(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesQuad()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesQuad()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices", "def get_abs_vertices(self):\n\n return self.polygon_points", "def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]", "def bounded_edges(self):\n obj = self.Vrepresentation()\n edges = []\n for i in range(len(obj)):\n if not obj[i].is_vertex(): continue\n for j in range(i+1,len(obj)):\n if not obj[j].is_vertex(): continue\n if self.vertex_adjacency_matrix()[i,j] == 0: continue\n yield (obj[i], obj[j])", "def GetInteriorEdgesPent(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesPent()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesPent()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = 
self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def _get_edge_boundary(self, interior_vertex_ids: List[Node]) -> List[Edge]:\n interior = set(interior_vertex_ids)\n exterior = set(self.get_nodes()) - interior\n _is_boundary = partial(self._is_boundary, interior=interior, exterior=exterior)\n return [edge.tuple for edge in self.G.es() if _is_boundary(edge.tuple)]", "def faces_as_vertices(self) -> Iterable[List[Vec3]]:\n v = self.vertices\n for face in self.faces:\n yield [v[index] for index in face]", "def get_vertices(self) -> []:\n res = []\n for v in range(self.v_count) :\n res.append(v)\n return res", "def getFaces(self):\n\t\tfaces = []\n\t\tfor solid in self.getSolids():\n\t\t\ttry:\n\t\t\t\tfor face in solid.Faces:\n\t\t\t\t\tfaces.append(face)\n\t\t\texcept:\n\t\t\t\tpass\n\t\treturn faces", "def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d", "def GetInteriorEdgesTri(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesTri()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesTri()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags", "def inner_volume_vertices(self) -> agx.Vec3Vector:\n return self._inner_volume_vertices", "def GetBoundaryEdgesHex(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"quad\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesQuad()", "def edges_on_boundaries(self):\n vertexgroups = self.vertices_on_boundaries()\n edgegroups = []\n for vertices in vertexgroups:\n edgegroups.append(list(pairwise(vertices)))\n return edgegroups" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the dual of the current DCEL.
def dual(self): def set_twins(): for edge_idx in range(0, len(dual_dcel.edges), 2): dual_dcel.edges[edge_idx].twin = dual_dcel.edges[edge_idx + 1] dual_dcel.edges[edge_idx + 1].twin = dual_dcel.edges[edge_idx] def set_next_and_previous(): for face in dual_dcel.faces: face_edges = [edge for edge in dual_dcel.edges if edge.incident_face == face] for edge in face_edges: if(not edge.get_destination().is_infinity()): edge.nxt = [e for e in face_edges if e.origin == edge.get_destination()][0] if(not edge.origin.is_infinity()): edge.prev = [e for e in face_edges if edge.origin == e.get_destination()][0] dual_dcel = DCEL() for edge in self.edges: incident_face = dual_dcel.add_face(Face(circumcentre=edge.twin.origin.as_points())) origin = dual_dcel.add_vertex(Vertex(coordinates=edge.incident_face.circumcentre)) dual_edge = HalfEdge( origin=origin, incident_face=incident_face ) incident_face.outer_component = dual_edge origin.incident_edge = dual_edge dual_dcel.edges.append(dual_edge) set_twins() set_next_and_previous() return dual_dcel
[ "def dual(self):\n return self.getdual()", "def get_dual(self, offset=-1):\n return self.primary_axis.get_dual(self.dual_level + offset)", "def dual(self):\n return dual_array(self)", "def get_dual(self, offset=-1):\n if offset == 0:\n return self\n dual = self.__duals.get(offset, None)\n if dual is None:\n dual = DualAxis(self, offset)\n self.__duals[offset] = dual\n return dual", "def getdualobj(self,whichsol_):\n dualobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getdualobj(self.__nativep,whichsol_,ctypes.byref(dualobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n dualobj_ = dualobj_.value\n _dualobj_return_value = dualobj_\n return (_dualobj_return_value)", "def dual_multiplier(self):\n m = copy(self)\n m._is_dual = int(not self._is_dual)\n o = self._character.order()\n m._character = self._character**(o - 1)\n m._weight = QQ(2) - QQ(self.weight())\n return m", "def dual_multiplier(self):\n weight = QQ(2) - QQ(self._weight)\n dual = int(not self._is_dual)\n m = WeilRepMultiplier(self._weil_module, weight, self._use_symmetry, self._group, dual=dual, **self._kwargs)\n return m", "def dual_to_range(self):\n return self._dual_to_range", "def dual_space(self):\n return self._dual_space", "def dual_objective(self, dual_coeffs):\n primal = self.model._sdca_primal_dual_relation(self.l_l2sq,\n dual_coeffs)\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(primal) ** 2\n return self.model.dual_loss(dual_coeffs) - prox_l2_value", "def Double(self):\n return self.Xdouble(1)", "def getDoseRateConst(self):\n return self.sheet[2][4]", "def getdual(cls):\n ops = cls.algebra.operations\n if issubclass(cls, ops.OR):\n return ops.AND\n elif issubclass(cls, ops.AND):\n return ops.OR\n else:\n raise AttributeError(\"Class must be in algebra.operations.\")", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def gen_dual_func(self):\n if 0 in self.sig:\n # We are degenerate, use the right complement\n return self.right_complement_func\n else:\n Iinv = self.pseudoScalar.inv().value\n gmt_func = self.gmt_func\n @numba.njit\n def dual_func(Xval):\n return gmt_func(Xval, Iinv)\n return dual_func", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def dual(self):\n domain = self.algebra.domain\n if self is domain.TRUE:\n return domain.FALSE\n elif self is domain.FALSE:\n return domain.TRUE\n else:\n raise AttributeError(\"Class should be TRUE or FALSE but is %s.\"\n % self.cls.__name__)", "def l2(self):\n return math.sqrt(self.quadrance)", "def getVoltage(self):\n return self.getVoltageResistance()[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print-friendly representation of the DCEL object.
def __repr__(self): return ( '<DCEL (' 'vertices:\n {obj.vertices},\n' 'edges:\n {obj.edges},\n' 'faces:\n {obj.faces}>'.format(obj=self) )
[ "def toString(self):\n return PyVDF.formatData(self.__data)", "def __repr__( self ) :\n\n s = \"\"\n for v in self.data : s = s + endl1dmathmisc.endl1d_repr_xFormat % v + \"\\n\"\n return s", "def __repr__(self):\n values = ', '.join(f'{k}={v}' for k, v in self.variables.items())\n return f'D({values})'", "def __repr__(self):\n str_repr = str(([self.coordinates(),\n self.__damaged_cells,\n # ship_helper.direction_repr_str(Direction, self.__direction),\n self.__direction,\n self.__board_size]))\n return str_repr", "def __str__(self):\n\n outstr = 'gear wheel data:\\n'\n # output gear data\n for date in self.data:\n outstr += date.ljust(10) + ':\\t' + str(self.data.get(date)) + '\\n'\n\n # output modification data\n if self.modifications:\n outstr += '\\nflank modifications:\\n'\n for date in self.modifications:\n outstr += date.ljust(10) + ':\\t' + str(self.modifications.get(date)) + '\\n'\n\n # output tooth form coordinates\n if self.formcoords:\n # upper and lower index of point-array\n outstr += '\\ntooth form coordinates:\\n'\n for coord in self.formcoords:\n outstr += str(coord[0]) + '\\t' + str(coord[1]) + '\\n'\n\n return outstr", "def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self._fiscal_year}, {self._fiscal_day})\"", "def __repr__(self):\n\n return f\"<PersonalDose drug_id: {self.drug.generic_name}, creator: {self.creator.fname}, species: {self.individual_species.species_name}, group: {self.species_group.species_group} >\"", "def __dxf__(self):\n return tags2str(self)", "def __str__(self):\n TAB = \" \"\n textlines = []\n\n def print_tree(obj, indent):\n \"\"\"Recursively serialize sub-elements.\n\n This closes over textlines and modifies it in-place.\n \"\"\"\n if isinstance(obj, (Tree, Clade)):\n # Avoid infinite recursion or special formatting from str()\n objstr = repr(obj)\n else:\n objstr = as_string(obj)\n textlines.append(TAB * indent + objstr)\n indent += 1\n for attr in obj.__dict__:\n child = getattr(obj, attr)\n if isinstance(child, TreeElement):\n print_tree(child, indent)\n elif isinstance(child, list):\n for elem in child:\n if isinstance(elem, TreeElement):\n print_tree(elem, indent)\n\n print_tree(self, 0)\n return \"\\n\".join(textlines)", "def __repr__(self):\n return '{}({})'.format(self.__class__.__name__,\n ', '.join('{}={!r}'.format(k, getattr(self, k))\n for k in self._fields))", "def __str__(self):\r\n\r\n data = [self.seq_f,\r\n self.seq_r,\r\n self.tf,\r\n self.df,\r\n len(self.taxons),\r\n len(self.families),\r\n ]\r\n\r\n return \"%s\\n\" % \"\\t\".join([str(x) for x in data])", "def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self._fiscal_year}, {self._fiscal_quarter})\"", "def __str__(self):\n\n num_decimal_places = 3\n\n def write_coefficient(coefficient, is_initial_term=False):\n coefficient = round(coefficient, num_decimal_places)\n\n if coefficient % 1 == 0:\n coefficient = int(coefficient)\n\n output = ''\n\n if coefficient < 0:\n output += '-'\n if coefficient > 0 and not is_initial_term:\n output += '+'\n if abs(coefficient) != 1:\n output += '{}'.format(abs(coefficient))\n\n return output\n\n n = self.normal_vector\n\n try:\n coefs = n.coordinates\n initial_index = Line.first_nonzero_index(coefs)\n terms = [write_coefficient(coefs[i], is_initial_term=(i == initial_index)) + 'x_{}'.format(i + 1)\n for i in range(self.dimension) if round(coefs[i], num_decimal_places) != 0]\n output = ' '.join(terms)\n except Exception as e:\n if str(e) == Line.NO_NONZERO_ELTS_FOUND_MSG:\n output = '0'\n else:\n raise 
e\n\n constant = round(self.constant_term, num_decimal_places)\n if constant % 1 == 0:\n constant = int(constant)\n output += ' ={}'.format(constant)\n\n return output", "def prettyPrint(self):\n\t\tprint(\"name: {:s}\".format(self.name))\n\t\tfor ctup in self.__coordList:\n\t\t\tprint(\"x: {:f} y: {:f}\".format(ctup[0], ctup[1]))", "def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self._fiscal_year}, {self._fiscal_month})\"", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Edge Weight: \" + str(self.weight) + \"\\n\" + \\\n \"Edge Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"First Incident Node: \\n\" + str(self.first_incident_node.get_name()) + \"\\n\" + \\\n \"Second Incident Node: \\n\" + str(self.second_incident_node.get_name()) + \"\\n\"", "def __repr__(self):\n s = ''\n for attr in self.__dict__:\n val = pformat(self.__dict__[attr])[:1500]\n if len(val)>70:\n s+=f'\\n{attr}:\\n{val}\\n\\n'\n else:\n s += f'{attr}: {val}\\n'\n return s", "def __str__(self):\n struct_repr = \", \".join([\n \"voltage_v: \" + str(self.voltage_v),\n \"remaining_percent: \" + str(self.remaining_percent)\n ])\n\n return f\"Battery: [{struct_repr}]\"", "def __str__(self):\n printString = 'Tile: Center: ' + str(self.center) + ' Dirt Amount: ' \\\n + str(self.dirtAmount) + 'Tile Cleaned: ' + str(self.tileCleaned)\n return printString" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store the camera intrinsics. We need this for the calibration matrices from the Tango.
def new_camera_intrinsics_callback(self, new_camera_info): self.camera_intrinsics = new_camera_info self.k_mat = np.matrix( np.array(self.camera_intrinsics.K).reshape((3, 3)) ) self.k_inv = self.k_mat.I
[ "def save_intrinsics(self, save_dir):\n if not osp.isfile(\n osp.join(save_dir, 'intrinsics', 'intrinsics.npy')):\n np.save(osp.join(\n save_dir, 'intrinsics', 'intrinsics'), self.camera_model.K)", "def _extract_extrinsics(self, kwargs, cloud_idx) ->Tuple[torch.Tensor, torch.Tensor]:\n cameras = self.rasterizer.cameras\n R = kwargs.get('R', cameras.R)[cloud_idx]\n T = kwargs.get('T', cameras.T)[cloud_idx]\n tmp_cams = PerspectiveCameras(R=R.unsqueeze(0), T=T.unsqueeze(0), device=R.device)\n size_tensor = torch.tensor([[self.renderer._renderer.height, self.renderer._renderer.width]])\n pulsar_cam = _pulsar_from_cameras_projection(tmp_cams, size_tensor)\n cam_pos = pulsar_cam[0, :3]\n cam_rot = pulsar_cam[0, 3:9]\n return cam_pos, cam_rot", "def calculate_default_intrinsics_matrix(self) -> np.ndarray:\n intrinsics_matrix = np.identity(3)\n intrinsics_matrix[0, 2] = self.image_size_x / 2.0\n intrinsics_matrix[1, 2] = self.image_size_y / 2.0\n intrinsics_matrix[0, 0] = self.image_size_x / (\n 2.0 * np.tan(self.fov * np.pi / 360.0)\n )\n intrinsics_matrix[1, 1] = self.image_size_y / (\n 2.0 * np.tan(self.fov * np.pi / 360.0)\n )\n self.intrinsics_matrix = intrinsics_matrix\n return intrinsics_matrix", "def load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the calibration file\n calib_filepath = os.path.join(self.sequence_path, 'calib.txt')\n filedata = utils.read_calib_file(calib_filepath)\n\n # Create 3x4 projection matrices\n P_rect_00 = np.reshape(filedata['P0'], (3, 4))\n P_rect_10 = np.reshape(filedata['P1'], (3, 4))\n P_rect_20 = np.reshape(filedata['P2'], (3, 4))\n P_rect_30 = np.reshape(filedata['P3'], (3, 4))\n\n # Compute the rectified extrinsics from cam0 to camN\n T1 = np.eye(4)\n T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]\n T2 = np.eye(4)\n T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]\n T3 = np.eye(4)\n T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]\n\n # Compute the velodyne to rectified camera coordinate transforms\n data['T_cam0_velo'] = np.reshape(filedata['Tr'], (3, 4))\n data['T_cam0_velo'] = np.vstack([data['T_cam0_velo'], [0, 0, 0, 1]])\n data['T_cam1_velo'] = T1.dot(data['T_cam0_velo'])\n data['T_cam2_velo'] = T2.dot(data['T_cam0_velo'])\n data['T_cam3_velo'] = T3.dot(data['T_cam0_velo'])\n\n # Compute the camera intrinsics\n data['K_cam0'] = P_rect_00[0:3, 0:3]\n data['K_cam1'] = P_rect_10[0:3, 0:3]\n data['K_cam2'] = P_rect_20[0:3, 0:3]\n data['K_cam3'] = P_rect_30[0:3, 0:3]\n\n # Compute the stereo baselines in meters by projecting the origin of\n # each camera frame into the velodyne frame and computing the distances\n # between them\n p_cam = np.array([0, 0, 0, 1])\n p_velo0 = np.linalg.inv(data['T_cam0_velo']).dot(p_cam)\n p_velo1 = np.linalg.inv(data['T_cam1_velo']).dot(p_cam)\n p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)\n p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)\n\n data['b_gray'] = np.linalg.norm(p_velo1 - p_velo0) # gray baseline\n data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline\n\n self.calib = namedtuple('CalibData', data.keys())(*data.values())", "def test_get_camera_intrinsics(self):\n dummy_camera = Camera(None, None, None, None, None)\n self.assertIsNone(dummy_camera._getCameraIntrinsics())\n\n for camera_id, camera_obj in CameraTest.robot.camera_dict.items():\n CameraTest.robot.subscribeCamera(camera_id)\n self.assertIsInstance(camera_obj._getCameraIntrinsics(), list)\n 
CameraTest.robot.unsubscribeCamera(camera_id)", "def intrinsics_json(json_path):\n with open(json_path) as json_file:\n # Camera Intrinsic Matrix\n k_mat = np.eye(4, dtype=np.float32) #Idk why size 4? (To match translation?)\n json_data = json.load(json_file)\n k_mat[0, 0] = json_data[\"intrinsic\"][\"fx\"]\n k_mat[1, 1] = json_data[\"intrinsic\"][\"fy\"]\n k_mat[0, 2] = json_data[\"intrinsic\"][\"u0\"]\n k_mat[1, 2] = json_data[\"intrinsic\"][\"v0\"]\n\n # Transformation Mat between cameras\n stereo_t = np.eye(4, dtype=np.float32)\n stereo_t[0, 3] = json_data[\"extrinsic\"][\"baseline\"]\n\n return {\"K\":k_mat, \"inv_K\":np.linalg.pinv(k_mat), \"baseline_T\":stereo_t}", "def _assemble_intrinsics(focal, cx, cy, distortion):\n intrinsics = np.eye(3)\n intrinsics[0,0] = float(focal)\n intrinsics[1,1] = float(focal)\n intrinsics[0,2] = float(cx)\n intrinsics[1,2] = float(cy)\n distortion_coefficients = np.array([float(distortion), 0.0, 0.0, 0.0])\n return intrinsics, distortion_coefficients", "def save(self, filename):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() != INTR_EXTENSION:\n raise ValueError('Extension %s not supported for OrhtographicIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION))\n\n camera_intr_dict = copy.deepcopy(self.__dict__)\n f = open(filename, 'w')\n json.dump(camera_intr_dict, f)\n f.close()", "def camera_matrix_from_pbobject(intrinsics):\n K = np.eye(3)\n K[0, 0] = intrinsics.fx\n K[1, 1] = intrinsics.fy\n K[0, 2] = intrinsics.cx\n K[1, 2] = intrinsics.cy\n K[0, 1] = intrinsics.skew\n return K", "def __init__(self, extrinsics: CameraExtrinsics, intrinsics: CameraIntrinsics):\n assert len(extrinsics) == len(intrinsics)\n assert extrinsics.device == intrinsics.device\n self.extrinsics: CameraExtrinsics = extrinsics\n self.intrinsics: CameraIntrinsics = intrinsics", "def get_intrinsics(self):\n if self._K is None:\n K = self.original_intrinsics.clone()\n if self.crop is not None:\n K[:2,2] -= torch.tensor(self.crop[:,0], device=K.device, dtype=K.dtype)\n K[:2] *= self.reup_sample / self.down_sample\n self._K = K\n return self._K", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def load_camera_data(file_name):\n assert os.path.isfile(file_name), \"Invalid file {}\".format(file_name)\n import sintel_io\n\n intrinsic, extrinsic = sintel_io.cam_read(file_name)\n return intrinsic, extrinsic", "def hack_camera(camera):\n #should the dist_coefs be zero or matched to the pre-recorded calibrations?\n #since the square_marker_cache is already undistorted you do not want pass any distortion coefficients.\n camera['dist_coefs'] = np.array([[.0,.0,.0,.0,.0]])\n\n #camera['dist_coefs'] = np.array([\n # [-0.1804359422372346],\n # [0.042312699050507684],\n # [-0.048304496525298606],\n # [0.022210236517363622]])\n camera['camera_matrix'] = np.array([\n [843.364676204713, 0.0, 983.8920955744197],\n [0.0, 819.1042187528645, 537.1633514857654],\n [0.0, 0.0, 1.0]])\n camera['resolution'] = np.array([1920, 1080])\n\n return camera", "def load(filename):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() != INTR_EXTENSION:\n raise ValueError('Extension %s not supported for CameraIntrinsics. 
Must be stored with extension %s' %(file_ext, INTR_EXTENSION))\n\n f = open(filename, 'r')\n ci = json.load(f)\n f.close()\n return OrthographicIntrinsics(frame=ci['_frame'],\n vol_height=ci['_vol_height'],\n vol_width=ci['_vol_width'],\n vol_depth=ci['_vol_depth'],\n plane_height=ci['_plane_height'],\n plane_width=ci['_plane_width'],\n depth_scale=ci['_depth_scale'])", "def persistent_image_features(images, toStoreFile):\n image_features = extract_features(images)\n\n np.save(toStoreFile, image_features)", "def get_calibration_data(self):\n\n try:\n self.cam_matrix = np.load('./calibration_parameters/Cameramatrix.npy')\n self.dist_coefs = np.load('./calibration_parameters/DistortionCoeffs.npy')\n except:\n print(\"Couldn't load calibration data. Starting calibration...\")\n try:\n self.cam_matrix, self.dist_coefs = Calibration.Calibration('./camera_cal/', save=True).run()\n print('...done!')\n except:\n print('Calibration failed. Aborting...')", "def extract_calibration(self):\n #TODO add function to check if the folder exists because opencv points to other error rather than saying it doesnt exist\n cv_file = cv2.FileStorage(\"calib_images/calibration.yaml\", cv2.FILE_STORAGE_READ)\n camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"[INFO]: Extracted camera parameters.\")\n cv_file.release()\n return camera_matrix, dist_matrix", "def get_cam_vectors( self ):\n if _DEBUG: print( \"Pos:\" , self.get_camera_position() , \"Cen:\" , self.center , \"Up:\" , self.up )\n return self.get_camera_position() , self.center , self.up" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add padding for a U-Net of the given depth
def _pad(x, depth=4): divisor = np.power(2, depth) remainder = x.shape[0] % divisor # no padding because already of even shape if remainder == 0: return x # add zero rows after 1D feature elif len(x.shape) == 2: return np.pad(x, [(0, divisor - remainder), (0, 0)], "constant") # add zero columns and rows after 2D feature elif len(x.shape) == 3: return np.pad(x, [(0, divisor - remainder), (0, divisor - remainder), (0, 0)], "constant")
[ "def padding_depth(self):\n\t\treturn self.paddings_shape_param('D')", "def _print_padded(string, depth):\n padding = \" \" * depth if depth else \"\"\n print(f\"{padding}{string}\")", "def zero_pad_features(features, depth):\n\n n = int(features.get_shape().dims[-1])\n extra_feature_count = depth - n\n assert n >= 0\n if n > 0:\n padding = tf.tile(features[:, :, :, :1] * 0,\n [1, 1, 1, extra_feature_count])\n features = tf.concat([features, padding], 3)\n return features", "def zero_pad_features(features, depth):\n\n n = int(features.get_shape().dims[-1])\n extra_feature_count = depth - n\n assert n >= 0\n if n > 0:\n padding = tf.tile(features[:, :, :1] * 0,\n [1, 1, extra_feature_count])\n features = tf.concat([features, padding], 2)\n return features", "def pad_upper(self, data, options, padding):\n # data, options = nrrd.read(input_file_name)\n rows, columns, depths = data.shape\n\n # numpy.fill\n for i in range(padding):\n padding_layer = [[self.AIR] * columns for j in range(rows)]\n data = self.concatenate_layers(data, padding_layer)\n\n options['sizes'][2] += padding # update depths\n return (data, options)", "def apply_pad_inner_list(layout, depth, lateral_context, **kwargs):\n n = lateral_context[\"n\"]\n # We want to be above at least one dimension (list)\n if depth == 2:\n return add_outer_dimensions(layout, n)\n else:\n return None", "def pad_to_preserve_shape(volume, layer_spec, balanced=True):\n s = layer_spec.get('stride', 1)\n k = layer_spec['kernel_size']\n spatial_shape = np.array(volume.shape[2:4]).astype('i')\n required_shape = (np.ceil((spatial_shape / s).astype('i')) - 1) * s + k\n padding = required_shape - spatial_shape\n\n if balanced:\n pre = np.floor(padding / 2).astype('i')\n post = np.ceil(padding / 2).astype('i')\n padded = F.pad(volume, (pre[1], post[1], pre[0], post[0],))\n else:\n padded = F.pad(volume, (0, padding[1], 0, padding[0]))\n\n return padded", "def zero_padding(input, paddings):\n pad_mat = np.array([[0,0], [paddings, paddings], [paddings, paddings], [0, 0]])\n return tf.pad(input, paddings=pad_mat)", "def _padding(X, n, m):\n padding = tf.zeros((n, m), dtype=X.dtype)\n return tf.concat([padding, X],0)", "def _prepare_onnx_paddings__tensorrt(g, input, pad):\n ctx = FUNCTION_REWRITER.get_context()\n torch_version = version_parse(torch.__version__)\n if torch_version.major == 1 and torch_version.minor < 10:\n return ctx.origin_func(g, input, pad)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with\n # zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(\n g, pad, g.op('Constant', value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op('Size', g.op('Shape', input))\n else:\n rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\n 'Sub',\n g.op('Mul', rank,\n g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),\n pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end,\n # dim_n-1_begin, dim_n-1_end, 0, 0, ... 
]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n paddings = g.op(\n 'Concat',\n pad,\n g.op(\n 'ConstantOfShape',\n extension,\n value_t=torch.tensor([0], dtype=torch.int64)),\n axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,\n # ..., 0, dim_n - 1_end, dim_n_end]\n\n # replace original Constant-Transpose-Constant with Slices and Concat.\n paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])\n begins = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])\n ends = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])\n paddings = g.op('Concat', begins, ends, axis_i=0)\n padding_c = g.op(\n 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n return padding_c", "def fix_padding(inputs, kernel_size, mode='CONSTANT'):\n pad_total = kernel_size - 1\n pad_start = pad_total // 2\n pad_end = pad_total - pad_start\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_start, pad_end], [pad_start, pad_end], [0, 0]], mode=mode)\n return padded_inputs", "def zero_padding( pattern, template ): \n #Calculate pad size \n pad = [ 0 ] * ( len( pattern ) - 1 )\n #Pad begining and end of temple -1 for first element\n template_padded = pad + list(template) + pad\n\n return template_padded", "def restore_padding(token):\n # Re-inflate the padding\n mod_returned = len(token) % 4\n if mod_returned:\n missing_padding = 4 - mod_returned\n token += '=' * missing_padding\n return token", "def pad_bbox(bbox, padding=0.3):\n dx = float(bbox[1]) - float(bbox[0])\n dy = float(bbox[3]) - float(bbox[2])\n\n # calculate the padding\n padding_x = padding * dx\n padding_y = padding * dy\n\n bbox[0] = float(bbox[0]) - padding_x\n bbox[1] = float(bbox[1]) + padding_x\n\n bbox[2] = float(bbox[2]) - padding_y\n bbox[3] = float(bbox[3]) + padding_y\n\n dx = float(bbox[1]) - float(bbox[0])\n dy = float(bbox[3]) - float(bbox[2])\n #print \"Layer ratio after padding is: %s\" % (dx/dy)\n\n\n return bbox", "def zero_padding(x, p, skip_dims=True):\n skip = [[0,0]]\n padding = [[p,p] for it in range(tf.rank(x)-2)] # skip first and last dims\n if skip_dims:\n padding = tf.concat([skip,padding,skip], axis=0)\n xp = tf.pad(x, padding, 'CONSTANT')\n return xp", "def pad(input, pad_size):\n if not pad_size:\n return input\n return tf.pad(input, [[0,0],[pad_size, pad_size],[pad_size, pad_size],[0,0]], 'REFLECT')", "def spatial_padding(self, lrs):\n n, t, c, h, w = lrs.size()\n\n pad_h = (4 - h % 4) % 4\n pad_w = (4 - w % 4) % 4\n\n # padding\n lrs = lrs.view(-1, c, h, w)\n lrs = F.pad(lrs, [0, pad_w, 0, pad_h], mode='reflect')\n\n return lrs.view(n, t, c, h + pad_h, w + pad_w)", "def pad(array, size = 1):\n\n return np.pad(array, size, 'edge')", "def _pad(payload):\n\t\tlength = AES.block_size - (len(payload) % AES.block_size)\n\t\tif length == AES.block_size:\n\t\t\treturn payload #no padding required\n\t\tpadding = chr(length)*length\n\t\treturn payload + padding" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a normalized URL for a path, relative to root
def relative_url(path, root): try: url = os.path.relpath(path, root) except: error('Unable to make a relative url:', url, root) url = url.replace('\\', '/') if os.sep == '\\' else url return urllib.parse.quote(url)
[ "def full_url(self, path):\n if path[0] == '/':\n path = path[1:]\n return urljoin(self.absolute_root, path)", "def normalize_url(path: str, page: Page | None = None, base: str = '') -> str:\n path, relative_level = _get_norm_url(path)\n if relative_level == -1:\n return path\n if page is not None:\n result = get_relative_url(path, page.url)\n if relative_level > 0:\n result = '../' * relative_level + result\n return result\n\n return posixpath.join(base, path)", "def _url(path: str) -> str:\n return urlparse.urljoin(API_ROOT, path)", "def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url", "def absolute_url(self, path):\n folder = self.folder and \"/\" + self.folder\n if not path.startswith(\"/\"):\n path = \"/\" + path\n return \"//\" + self.domain + folder + path", "def full_url(self):\r\n\r\n url = '/' + '/'.join(p.slug for p in list(self.get_ancestors()) + [self] if p.slug)\r\n\r\n # Make sure the URL ends with a slash, as god intended.\r\n # This little endswith dance is done to handle the root url ('/') correctly.\r\n if not url.endswith('/'):\r\n url = url + '/'\r\n\r\n return url", "def _absurl(fragment):\r\n root = settings.MEDIA_URL\r\n root += root[-1:] != '/' and '/' or ''\r\n return urlparse.urljoin(root, fragment)", "def make_url_relative(url_or_path):\n result = urlparse(url_or_path).path\n if not result:\n result = '/'\n return result", "def normalize_cdmi_url(self, path):\n # Turn URL path into OS path for manipulation\n mypath = url2pathname(path)\n if not os.path.isabs(mypath):\n mypath = os.path.join(url2pathname(self.pwd()), mypath)\n # normalize path\n mypath = os.path.normpath(mypath)\n if path.endswith(\"/\") and not mypath.endswith(\"/\"):\n mypath += \"/\"\n url = self.cdmi_url + pathname2url(mypath)\n return url", "def xrd_get_rooturl(self):\n if self.queryargs:\n return \"{0}/?{1}\".format(self.root_url, urlencode(self.queryargs))\n else:\n return self.root_url", "def relative_uri(base, to):\n if to.startswith(SEP):\n return to\n b2 = base.split(SEP)\n t2 = to.split(SEP)\n # remove common segments (except the last segment)\n for x, y in zip(b2[:-1], t2[:-1]):\n if x != y:\n break\n b2.pop(0)\n t2.pop(0)\n if b2 == t2:\n # Special case: relative_uri('f/index.html','f/index.html')\n # returns '', not 'index.html'\n return ''\n if len(b2) == 1 and t2 == ['']:\n # Special case: relative_uri('f/index.html','f/') should \n # return './', not ''\n return '.' + SEP\n return ('..' 
+ SEP) * (len(b2)-1) + SEP.join(t2)", "def getBaseUrl(self, url):\n try:\n # A normal url will have two slashes in the protocol and a third after the base url.\n # http://www.google.com/foobar\n # Remove everything after the third slash.\n # http://www.google.com/\n third_slash = url.index(\n '/',\n url.index(\n '/',\n url.index('/') +\n 1) +\n 1)\n return url[:third_slash + 1]\n except ValueError:\n return url", "def relative_uri(base, to):\n if to.startswith(SEP):\n return to\n b2 = base.split(SEP)\n t2 = to.split(SEP)\n # remove common segments (except the last segment)\n for x, y in zip(b2[:-1], t2[:-1]):\n if x != y:\n break\n b2.pop(0)\n t2.pop(0)\n if b2 == t2:\n # Special case: relative_uri('f/index.html','f/index.html')\n # returns '', not 'index.html'\n return ''\n if len(b2) == 1 and t2 == ['']:\n # Special case: relative_uri('f/index.html','f/') should\n # return './', not ''\n return '.' + SEP\n return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)", "def _reconstruct_relative_url(self, environ):\n url = urllib.quote(environ.get('SCRIPT_NAME', ''))\n url += urllib.quote(environ.get('PATH_INFO', ''))\n if environ.get('QUERY_STRING'):\n url += '?' + environ['QUERY_STRING']\n return url", "def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path", "def get_relative_url(current, target):\n rel = os.path.relpath(target, current)\n\n if rel[-1] != \"/\":\n if \".\" not in rel.split(\"/\")[-1]:\n rel += \"/\"\n\n if not rel.startswith(\"../\") and rel != \"./\":\n rel = f\"./{rel}\"\n\n return rel", "def buildpath(self):\n basepath = urlutil.href_settings.root + (self.relpath if self.relpath else cherrypy.request.path_info)\n if basepath.find('~') < 0:\n basepath += ('' if basepath.endswith('/') else '/') + '~'\n if cherrypy.request.query_string:\n basepath += ('&' if basepath.find('?') >= 0 else '?') + cherrypy.request.query_string\n return basepath", "def normalize_url(self, url):\r\n return url", "def get_path(self, normalize = False):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n path = split[0]\r\n if not normalize: return path\r\n if not path.startswith((\"http://\", \"https://\")): return path\r\n return netius.legacy.urlparse(path).path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate Post objects from markdown. Date must be present in each post and posts must be ordered by date.
def parse_markdown(filename): if not os.path.exists(filename): error('File not found', filename) posts = list() with open(filename, encoding='utf-8') as f: line = next(f) if line.startswith('# '): title = line[2:].strip() record = [] next(f) else: title = None record = [line] for line in f: if not line.startswith('___'): record.append(line) else: posts.append(Post.from_markdown(record)) record = [] # set rank of posts in date daterank = defaultdict(int) for post in posts: daterank[post.date] += 1 post.daterank = daterank[post.date] # check post order for post1, post2 in zip(posts[:-1], posts[1:]): if post1.date > post2.date: error('Posts are not ordered', f'{post1.date} > {post2.date}') return title, posts
[ "def markdown_post(post):\n post['entry'] = markdown(post['entry'].replace(\"\\n\",\" \\n\"), output=\"html5\")\n return post", "def parse_joint_markdown(md_str):\n list_posts = []\n # each element does not contain \\n at the end\n lines = md_str.split('\\n')\n # line index\n prev_end_sec_ind = -1\n sec_content_start = -1\n sec_title = None\n for i, l in enumerate(lines):\n if l.startswith('========'):\n prev_end_sec_ind = i-1\n if sec_title is not None:\n # add to the list \n assert sec_content_start >= 0\n sec_content = '\\n'.join(lines[sec_content_start:prev_end_sec_ind])\n list_posts.append((sec_title, sec_content))\n\n # beginning of a section\n sec_title = lines[i-1]\n sec_content_start = i+1\n # add the last post \n sec_content = '\\n'.join(lines[sec_content_start:])\n list_posts.append((sec_title, sec_content))\n return list_posts", "def parse_post_text(formatted_content):\n post = {}\n # Parse Mod comments and remove them from the text.\n potential_comments = re.finditer(\"\\[.+?\\]\", formatted_content, re.DOTALL)\n comments = []\n for comment_match in potential_comments:\n comment = comment_match.group()\n mod = re.search(r\"\\-\\s?Mod\\.\\s?(?P<mod>\\w+\\b)\", comment)\n if mod:\n comments.append({\n \"comment\" : comment,\n \"mod\" : mod.group(\"mod\")\n })\n post[\"modComments\"] = comments\n \n # Comments are removed from the post test so that\n # links, reports, etc. mentioned by mods are not extracted.\n no_comment_txt = formatted_content\n for comment in comments:\n no_comment_txt = no_comment_txt.replace(comment[\"comment\"], \"\")\n \n metadata, header_end = parse_post_metadata(no_comment_txt)\n post.update(metadata)\n \n sections = re.split(r\"^[\\*#]{3,}\\s*$\", no_comment_txt[header_end:], flags=re.M)\n articles = []\n \n # Some posts have articles which are parsed into multiple sections:\n # Ex: http://www.promedmail.org/direct.php?id=2194235\n # The section parsing code tries to recombine these by concatenating\n # unrecognized sections onto the previous sections if they form an article.\n # article_start_idx keeps track of the first section in the article.\n article_start_idx = None\n \n for idx, section in enumerate(sections):\n section = section.strip()\n article = parse_article_text(section, post_date=post['promedDate'])\n # Check if the section contains an actual article by seeing which\n # properties could be parsed.\n if article.get('source') or article.get('date'):\n articles.append(article)\n article_start_idx = idx\n else:\n # When a section cannot be parsed as an article the following code\n # tries to determine what it is. If the type cannot be determined\n # an error or warning is thrown.\n # These warnings can be used to find sections which are not being\n # correctly parsed.\n # Posts with known issues:\n # http://www.promedmail.org/direct.php?id=19990512.0773\n if re.search(r\"Visit ProMED-mail\\'s web site at|\"\n r\"Please support (the \\d{4}\\s)?ProMED\\-mail|\"\n r\"Donate to ProMED\\-mail. 
Details available at|\"\n r\"ProMED\\-mail makes every effort to verify the reports|\"\n r\"PROMED\\-MAIL FREQUENTLY ASKED QUESTIONS|\"\n r\"Become a ProMED\\-mail Premium Subscriber|\"\n r\"A ProMED\\-mail post\",\n section, re.I):\n # boilerplate promed notice section\n pass\n elif re.search(r\"In this (update|post(ing)?)\", section):\n # table of contents section\n pass\n elif re.search(r\"Cases in various countries\", section):\n # This type of post typically has links to several articles\n # with single sentence summaries.\n # Ex: http://www.promedmail.org/direct.php?id=20131125.2073661\n pass\n elif section == \"\":\n # empty section\n pass\n elif idx == 0 and section.count(\"\\n\") < 2:\n # probably the article title\n pass\n else:\n if article_start_idx != None:\n article = parse_article_text(\n \"\\n#####\\n\".join(\n sections[article_start_idx:idx]).strip(),\n post_date=post['promedDate'])\n assert article.get('source') or article.get('date')\n articles[-1] = article\n continue\n else:\n print \"Unexpected Section (%s):\" % post['archiveNumber'], [section[0:50] + \"...\"]\n article_start_idx = None\n post['articles'] = articles\n return post", "def read_all_posts():\n posts = OrderedDict()\n filenames = glob('%s/*.md' % settings.TINYBLOG_ROOT_DIR)\n for filename in sorted(filenames, reverse=True):\n # [:-3] chops off the '.md' suffix\n slug = os.path.relpath(filename, settings.TINYBLOG_ROOT_DIR)[:-3]\n with open(filename) as f:\n markdown_content = f.read()\n posts[slug] = Post(slug, markdown_content)\n\n return posts", "def from_file(cls, filename):\n\t\twith open(os.path.join(POST_PATH, filename), 'r', encoding='utf-8') as f:\n\t\t\ttext = ''.join(f.readlines())\n\t\t\tpost = frontmatter.loads(text)\n\t\t\tpost_dict = post.to_dict()\n\t\t\t# Stuff before the \"<!--more-->\" is the preview\n\t\t\tpreview_cutoff = post.content.find('<!--more-->')\n\t\t\tif preview_cutoff != -1:\n\t\t\t\tpost_dict['preview'] = post.content[:preview_cutoff]\n\t\t\telse:\n\t\t\t\t# if the post is short enough, it won't have a preview\n\t\t\t\t# so just paste the entire post there.\n\t\t\t\tpost_dict['preview'] = post.content\n\t\t\t\n\t\t\t# Find the first image on the page\n\t\t\tm = re.search('!\\[.*?\\]\\((.*?)\\)', post.content)\n\t\t\tif m is not None:\n\t\t\t\tfeatured_image_url = 'https://sha.wn.zone' + m.group(1)\n\t\t\telse:\n\t\t\t\tfeatured_image_url = ''\n\t\t\t\n\t\t\t# We'll use the first few sentences of the first <p> tag\n\t\t\t# in the rendered markdown as the description for OG and JSON-LD\n\t\t\tmarkup = markdown.markdown(post_dict['preview'], extensions=[\n\t\t\t\t'markdown.extensions.tables',\n\t\t\t\t'markdown.extensions.codehilite',\n\t\t\t\t'markdown.extensions.footnotes',\n\t\t\t])\n\n\t\t\tp = pq(markup)\n\t\t\t# Images are put in <p>s, so if one has an image in it, ignore it\n\t\t\teligible_paras = p('p').filter(lambda i: len(pq(this).find('img')) == 0)\n\t\t\tdescription = eligible_paras.eq(0).text()\n\n\t\t\treturn cls(\n\t\t\t\tdate=post_dict['date'],\n\t\t\t\tfilename=filename,\n\t\t\t\ttitle=post_dict['title'],\n\t\t\t\tpreview=post_dict['preview'],\n\t\t\t\tcontent=post_dict['content'],\n\t\t\t\turl_name='-'.join(os.path.splitext(filename)[0].split('-')[3:]),\n\t\t\t\tfeatured_image=featured_image_url,\n\t\t\t\tdescription=description,\n\t\t\t)", "def parse_post_text(self):\n\n text_converter = self.get_text_converter()\n text = text_converter(self.text)\n\n #todo, add markdown parser call conditional on\n #self.use_markdown flag\n post_html = text\n 
mentioned_authors = list()\n removed_mentions = list()\n if '@' in text:\n op = self.get_origin_post()\n anticipated_authors = op.get_author_list(\n include_comments = True,\n recursive = True\n )\n\n extra_name_seeds = markup.extract_mentioned_name_seeds(text)\n\n extra_authors = set()\n for name_seed in extra_name_seeds:\n extra_authors.update(\n User.objects.filter(username__istartswith = name_seed)\n )\n\n #it is important to preserve order here so that authors of post\n #get mentioned first\n anticipated_authors += list(extra_authors)\n\n mentioned_authors, post_html = markup.mentionize_text(\n text,\n anticipated_authors\n )\n\n #find mentions that were removed and identify any previously\n #entered mentions so that we can send alerts on only new ones\n from askbot.models.user import Activity\n if self.pk is not None:\n #only look for previous mentions if post was already saved before\n prev_mention_qs = Activity.objects.get_mentions(\n mentioned_in = self\n )\n new_set = set(mentioned_authors)\n for prev_mention in prev_mention_qs:\n\n user = prev_mention.get_mentioned_user()\n if user is None:\n continue\n if user in new_set:\n #don't report mention twice\n new_set.remove(user)\n else:\n removed_mentions.append(prev_mention)\n mentioned_authors = list(new_set)\n\n data = {\n 'html': post_html,\n 'newly_mentioned_users': mentioned_authors,\n 'removed_mentions': removed_mentions,\n }\n return data", "def make_post_md(label):\n\n # Get website database object\n wdb = WDB()\n\n # Create the markdown file with yml front matter\n with open(os.path.join(wdb.post_path, '2017-03-15-' + label + '.md'), 'w') as post_file:\n post_file.write('---\\n')\n post_file.write('title: \\\"' + label + '\\\"\\n')\n post_file.write('date: 2017/03/15\\n')\n post_file.write('layout: erp\\n')\n post_file.write('---')", "def post_markdown(self, md):\n logger.debug(\"마크다운 파일을 포스팅합니다.\")\n\n post = self.markdown_to_post(md)\n post_id = post.get(\"id\") or post.get(\"postId\")\n founded = self.find_post(slogan=post.get(\"slogan\"))\n if post_id or founded:\n if founded:\n logger.info(\"동일한 포스팅 발견\")\n logger.info(\" \" * 2 + \"- id: %s\" % founded.get(\"id\"))\n logger.info(\" \" * 2 + \"- title: %s\" % founded.get(\"title\"))\n if not post_id:\n post_id = founded.get(\"id\")\n logger.info(\"포스팅 업데이트 중...\")\n return self.post_modify(post_id, post)\n return self.post_write(post)", "def parse_markdown(md_file):\n\n f = open(md_file)\n md = f.read()\n f.close()\n\n md_parser = mistune.Markdown()\n\n html = md_parser(md)\n\n img_pattern = re.compile(\"<img \")\n html = img_pattern.sub(\"<img class=\\\"img-fluid\\\"\",html)\n\n html = re.sub(\"<ul>\",\"<ul class=\\\"list-group\\\">\",html)\n html = re.sub(\"<li>\",\"<li class=\\\"list-group-item\\\">\",html)\n\n dash_pattern = re.compile(\"--\")\n html = dash_pattern.sub(\"&mdash;\",html)\n\n h1_pattern = re.compile(\"<h1>.*?</h1>\")\n h1_matches = h1_pattern.findall(html)\n if h1_matches:\n for m in h1_matches:\n inner = m.split(\">\")[1].split(\"<\")[0]\n inner = inner.lower()\n inner = re.sub(\" \",\"-\",inner)\n\n new_header = f\"<h1 id=\\\"{inner}\\\">{m[4:]}\"\n\n html = re.sub(m,new_header,html)\n\n hr_pattern = re.compile(\"\\<hr\\>\")\n\n out = []\n out.append(\"<br/><br/>\")\n for section in hr_pattern.split(html):\n out.append(\"<div class=\\\"container bg-light rounded mx-auto\\\">\")\n out.append(\"<div class=\\\"m-3 p-3\\\">\")\n out.append(section)\n out.append(\"</div></div><br/>\")\n\n\n return \"\".join(out)", "def markdown_parse(text):\r\n text = 
md_parser.reset().convert(text)\r\n return text", "def blog_add_posts(context):\n tag_expr = re.compile(\"<.*?>\")\n posts = []\n # posts from the file system\n if context[\"blog\"][\"posts_path\"]:\n posts_path = os.path.join(\n context[\"source_path\"], *context[\"blog\"][\"posts_path\"].split(\"/\")\n )\n for fname in os.listdir(posts_path):\n if fname.startswith(\"index.\"):\n continue\n link = (\n f\"/{context['blog']['posts_path']}\"\n f\"/{os.path.splitext(fname)[0]}.html\"\n )\n md = markdown.Markdown(\n extensions=context[\"main\"][\"markdown_extensions\"]\n )\n with open(os.path.join(posts_path, fname), encoding=\"utf-8\") as f:\n html = md.convert(f.read())\n title = md.Meta[\"title\"][0]\n summary = re.sub(tag_expr, \"\", html)\n try:\n body_position = summary.index(title) + len(title)\n except ValueError:\n raise ValueError(\n f'Blog post \"{fname}\" should have a markdown header '\n f'corresponding to its \"Title\" element \"{title}\"'\n )\n summary = \" \".join(summary[body_position:].split(\" \")[:30])\n posts.append(\n {\n \"title\": title,\n \"author\": context[\"blog\"][\"author\"],\n \"published\": datetime.datetime.strptime(\n md.Meta[\"date\"][0], \"%Y-%m-%d\"\n ),\n \"feed\": context[\"blog\"][\"feed_name\"],\n \"link\": link,\n \"description\": summary,\n \"summary\": summary,\n }\n )\n # posts from rss feeds\n for feed_url in context[\"blog\"][\"feed\"]:\n feed_data = feedparser.parse(feed_url)\n for entry in feed_data.entries:\n published = datetime.datetime.fromtimestamp(\n time.mktime(entry.published_parsed)\n )\n summary = re.sub(tag_expr, \"\", entry.summary)\n posts.append(\n {\n \"title\": entry.title,\n \"author\": entry.author,\n \"published\": published,\n \"feed\": feed_data[\"feed\"][\"title\"],\n \"link\": entry.link,\n \"description\": entry.description,\n \"summary\": summary,\n }\n )\n posts.sort(key=operator.itemgetter(\"published\"), reverse=True)\n context[\"blog\"][\"posts\"] = posts[: context[\"blog\"][\"num_posts\"]]\n return context", "def rebuild_from_yaml(args):\n\n git_checkout_branch('gh-pages')\n\n posts = []\n for fname in glob('_posts/*.html'):\n with codecs.open(fname, 'r', 'utf-8') as f:\n c = f.read()\n # we only want the yaml frontmatter\n start = c.index('---') + 3\n end = c.rindex('---')\n frontmatter = yaml.safe_load(c[start:end])\n\n posts.append(Post(**frontmatter['api_data']['post']))\n\n _write_out(posts, yaml=False, supporting=True)", "def parse_article_text(article_text, post_date=datetime.datetime.now()):\n result = {}\n\n metadata_start = 0\n main_content_start = 0\n main_content_end = len(article_text)\n \n article_date_match = re.search(r\"^Date:\\s(?P<date>[^\\(\\[\\n]+)\", article_text, re.M)\n if article_date_match:\n # There may be more than one source date in summary articles.\n # Example: http://promedmail.org/direct.php?id=1073176\n # Summary articles are not a focus so currently only the first date\n # is recorded.\n source_date = parse_datetime(\n article_date_match.group(\"date\")\n )\n\n if source_date:\n result[\"date\"] = datetime_to_utc(source_date)\n metadata_start = min(article_date_match.start(), metadata_start)\n main_content_start = max(article_date_match.end(), main_content_start)\n # The year is checked to avoid typos like 200_ that throw\n # the date off by a large factor.\n # Example: http://www.promedmail.org/direct.php?id=45850 (article 2)\n if result[\"date\"].year < 1900:\n result[\"date\"] = None\n # Some articles have timestamps that are incorrectly parsed.\n # Current examples:\n # 
http://www.promedmail.org/direct.php?id=43918\n # http://www.promedmail.org/direct.php?id=2200173\n # Some of these incorrect timestamps can be removed by verifying that\n # they preceed the time of the posting. A day of slop time is allowed\n # to account for variations due to incorrect timezones.\n elif result[\"date\"] > post_date + datetime.timedelta(1):\n result[\"date\"] = None\n else:\n result[\"date\"] = None\n \n source_match = re.search(r\"Source:\\s(?P<name>[^\\[\\n]+)\" +\\\n r\"(\\s(?P<edits>\\[.*))?\" +\\\n r\"\\n\" +\\\n r\"(?P<url>http.+)?\", article_text)\n if source_match:\n result[\"source\"] = source_match.groupdict()\n metadata_start = min(source_match.start(), metadata_start)\n main_content_start = max(source_match.end(), main_content_start)\n \n heading_match = re.search(r\"^(?P<idx>\\[\\d\\]\\s)?\" +\\\n r\"(?P<heading>\\S+.*)\\n\",\n article_text[0:metadata_start], re.M)\n if heading_match:\n result[\"heading\"] = heading_match.group(\"heading\")\n \n communicated_match = re.search(communicated_by_regex, article_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n main_content_end = min(communicated_match.start(), main_content_end)\n \n result[\"content\"] = article_text[main_content_start:main_content_end].strip()\n return result", "def parse_post_metadata(post_text):\n result = {}\n \n header_end = 0\n \n promed_date_match = re.search(\n r\"Published Date:\\s(?P<date>.*)\", post_text)\n result[\"promedDate\"] = parse_promed_pub_datetime(\n promed_date_match.group(\"date\"))\n \n archive_match = re.search(r\"Archive Number: (?P<num>.*)\", post_text)\n result[\"archiveNumber\"] = archive_match.group(\"num\")\n header_end = archive_match.end()\n \n subject = re.search(r\"Subject:\\s(?P<subject>.*)\", post_text).group(\"subject\")\n result[\"subject\"] = parse_subject_line(subject)\n result[\"subject\"][\"raw\"] = subject\n \n # This will not find all linked reports.\n # Some older posts refrence posts using different indexes I do not know\n # how to interpret.\n # Example: http://promedmail.org/direct.php?id=2194235\n result[\"linkedReports\"] = [\n report_id for report_id in re.findall(r\"\\d{8}\\.\\d+\", post_text)]\n \n # Most links will be article source urls or links to promed.\n result[\"links\"] = list(set(\n re.findall(r\"http\\S+[^(\\.\\])(\\.\\)>\\s]\", post_text)))\n result[\"links\"].sort()\n \n communicated_match = re.search(communicated_by_regex, post_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n return result, header_end", "def get_post_data(self, soup):\n texts_raw = soup.find_all('div', class_=\"post\")\n for t in texts_raw:\n quotes = t.find_all('div', class_=\"quote\")\n for q in quotes:\n q.decompose()\n q_header = t.find_all('div', class_=\"quoteheader\")\n for qh in q_header:\n qh.decompose()\n\n sigs = soup.find_all('div', class_='signature')\n for s in sigs:\n s.decompose()\n\n dates_raw = soup.find_all('div', class_=\"smalltext\")\n\n dates = []\n for date in dates_raw:\n date = date.get_text()\n if any(substring in date for substring in date_word_list) \\\n and len(date) < 32:\n date = convert_date_to_unix_time(date)\n dates.append(date)\n\n texts = []\n for text in texts_raw:\n text = text.get_text().encode('utf-8')\n if not text.isdigit():\n texts.append(text)\n\n return dates, texts", "def find_posts(self):\n\n self.clear()\n\n for path in self.app.jinja_env.list_templates(filter_func=lambda t: 
t.startswith('posts/') and t.endswith('.html')):\n template = self.app.jinja_env.get_template(path)\n\n filename = path[6:-5]\n slug = filename[7:]\n date_fragment = filename[0:6]\n published_on = datetime.strptime(date_fragment, '%y%m%d').date()\n\n self.append(Post(title=template.module.title, slug=slug, published_on=published_on, path=path))\n\n self.sort(key=lambda post: post.published_on, reverse=True)", "def _parse_markdown(self):\n renderer = MyRenderer()\n md = mistune.Markdown(renderer=renderer)\n md.render(self._markdown_text)\n self._bash_commands = renderer._bash_commands", "def _populate_posts(self, channel, url):\n import feedparser\n\n Post = get_model('articles', 'Post')\n Image = get_model('images', 'Image')\n\n parser = feedparser.parse(url)\n\n for entry in parser['entries']:\n # Some entries are incomplete and have only the title, need to\n # ignore these entries.\n if not entry.get('summary'):\n continue\n\n # The title may have only 140 characters\n title = self._truncate_string(entry['title'], 140)\n slug = slugify(title)\n headline = entry['summary']\n\n # Some entries do not have the 'content' field, in this case we\n # get the 'summary' field instead.\n if entry.get('content'):\n content = entry['content'][0]['value']\n else:\n content = entry['summary']\n\n # When we find a entry that already is registered we don't need\n # continue because the following registries already be registered.\n exists = Post.objects.filter(slug=slug).count()\n if exists:\n break\n\n # Check if has some image in the post content.\n # NOTE: For the best user experience we use only the posts that\n # have images.\n image_url = self._get_image_url_in_content(content)\n if image_url:\n main_image = Image.objects.create(\n title=title,\n slug=slug,\n archive_link=image_url,\n published=True,\n user=self._user\n )\n # Generate the 'short_title' based on 'content'\n short_title = re.sub('<[^<]+?>', '', content).encode('utf-8')\n short_title = self._truncate_string(short_title.strip(), 140)\n\n post = Post.objects.create(\n title=title,\n short_title=short_title,\n slug=slug,\n headline=headline,\n content=content,\n channel=channel,\n main_image=main_image,\n show_on_root_channel=True,\n published=True,\n hat='',\n user=self._user\n )", "def post_content(context, is_markdown=False, *args, **kwargs):\n obj = context['object']\n content = obj.parse_content()\n\n if not content:\n content = obj.content_rendered\n\n default_template = [\"djblog/includes/post_content.html\"]\n\n if obj.custom_template:\n tpl = Template(obj.custom_template)\n\n #elif obj.template_name:\n # default_template.append(obj.template_name)\n # tpl = loader.select_template(default_template)\n\n else:\n tpl = loader.select_template(default_template)\n\n custom_context = Context({\n 'content': mark_safe(markdown(content))\n })\n\n custom_context.update(context)\n\n return mark_safe(tpl.render(custom_context))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Purge irrelevant HTML files from the root dir
def purge_htmlfiles(args, posts): htmlist = list_of_htmlfiles(args, posts) html_to_remove = list() for fullname in glob.glob(os.path.join(args.root, '*.htm*')): if fullname not in htmlist: html_to_remove.append(fullname) if len(html_to_remove) > args.thumbnails.threshold_htmlfiles: inpt = 'x' while inpt not in 'yn': inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower() if inpt == 'n': return for name in html_to_remove: print('Removing html files', name) os.remove(name)
[ "def clean(self) -> None:\n dist = self.root / \"dist\"\n shutil.rmtree(dist, ignore_errors=True)", "def cleanup():\n assert root_dir.remove(False)\n assert not root_dir.exists()", "def html_clean(options):\r\n remake_directories(options.sphinx.doctrees, options.html.outdir)\r\n html(options)\r\n return", "def clean():\n local(\"rm -rf holbertonwebapp.tar.gz\")", "def cleanup():\n\n if os.path.isdir(GEN_DIR):\n shutil.rmtree(GEN_DIR)", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)", "def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))", "def purge_cache():\n for (dir_path, dir_names, file_names) in os.walk(CACHE, topdown=False):\n for file_name in file_names:\n if is_json_file(file_name):\n path = os.path.join(dir_path, file_name)\n print(\"Removing file “%s”\" % path)\n os.remove(path)\n for directory in dir_names:\n path = os.path.join(dir_path, directory)\n if not os.listdir(path):\n print(\"Removing directory “%s”\" % path)\n os.rmdir(path)", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def remove_ci_ga_files():\n os.rmdir(os.path.join(PROJECT_DIRECTORY, \".github\"))", "def clean_wiki_clone(dungeon_root):\n root = os.path.join(dungeon_root, 'dungeon.wiki')\n for filename in os.listdir(root):\n if os.path.isfile(os.path.join(root, filename)) and should_be_deleted(filename):\n os.remove(os.path.join(root, filename))", "def clean():\n print(\"=== Cleaning Sphinx Build ===\")\n _remove_dir(SASVIEW_DOC_TARGET)\n _remove_dir(SPHINX_BUILD)\n _remove_dir(SPHINX_SOURCE)", "def clean():\n print(\"\\nCleaning the site from {}\\n\".format(_site_dest))\n rm(_site_dest)", "def _clean_bins():\n rmtree(LIBS_DIR)\n rmtree(BINS_DIR)\n rmtree(HEADERS_DIR)", "def cleanup_files(self):\n\t\t#todo: this needs python 3.5 for the ** thing\n\t\t#todo: cleaned files may have had another expiry time set when created (probably acceptable though, it's just cache)\n\t\tfiles = iglob(join(self.file_dir, '**'), recursive=True)\n\t\tfor file in files:\n\t\t\tif isfile(file) and time() - getmtime(file) > self.file_expiration_time:\n\t\t\t\tremove(file)", "def _clean(light):\n if os.path.isdir('Trash'):\n print()\n shutil.rmtree('Trash')\n print('Removing Trash directory')\n removed = []\n\n trash_files = ['_doconce_debugging.log', '__tmp.do.txt', 'texput.log']\n # \"secret\" files (.trash$hash)\n trash_files += glob.glob('.trash[a-f0-9]*') + glob.glob('._.trash[a-f0-9]*')\n for trash_file in trash_files:\n if os.path.isfile(trash_file):\n removed.append(trash_file)\n\n doconce_files = glob.glob('*.do.txt')\n for dof in doconce_files:\n namestem = dof[:-7]\n generated_files = glob.glob(namestem + '.*')\n extensions_to_keep = ['.do.txt', '.sh', '.py', '*.pl']\n if light:\n extensions_to_keep += ['.pdf', '.html', '.txt', '.gwiki', '.mwiki', '.cwiki', '.ipynb', '.m']\n for ext in extensions_to_keep:\n filename = namestem + ext\n if os.path.isfile(filename):\n if filename in generated_files:\n generated_files.remove(filename)\n for f in generated_files:\n removed.append(f)\n if not light:\n ipynb_tarfile 
= 'ipynb-%s-src.tar.gz' % namestem\n if os.path.isfile(ipynb_tarfile):\n removed.append(ipynb_tarfile)\n\n removed.extend(\n glob.glob('*~') + glob.glob('.*~') + glob.glob('tmp*') +\n glob.glob('.*.exerinfo') +\n glob.glob('.*.quiz*') +\n glob.glob('.*_html_file_collection') +\n glob.glob('.*.copyright') +\n glob.glob('automake_sphinx.*')\n )\n if not light:\n removed.extend(\n glob.glob(_part_filename_wildcard + '.html') +\n glob.glob(_part_filename_wildcard + '.rst')\n )\n\n directories = ['html_images', 'latex_figs', 'standalone_exercises']\n directories += glob.glob('_minted-*')\n if not light:\n directories += glob.glob('sphinx-*') + glob.glob('sphinx_*')\n\n for d in directories:\n if os.path.isdir(d):\n removed.append(d)\n\n if removed:\n print('Remove: ' + ' '.join(removed) + ' (-> Trash)')\n os.mkdir('Trash')\n for f in removed:\n try:\n shutil.move(f, 'Trash')\n except shutil.Error as e:\n if 'already exists' in str(e):\n pass\n else:\n print('Move problems with %s %s' % (f, e))\n if os.path.isdir(f):\n shutil.rmtree(f)", "def clean_build(c):\n c.run(\"rm -fr build/\")\n c.run(\"rm -fr dist/\")\n c.run(\"rm -fr xmlstarlet/config.h \" \"xmlstarlet/Makefile \" \"xmlstarlet/config.status\")\n c.run(\"rm -fr .eggs/\")\n c.run(\"find . -name '*.egg-info' -exec rm -fr {} +\")\n c.run(\"find . -name '*.egg' -exec rm -f {} +\")", "def clear():\n \n global BUILDDIR\n\n for i in os.listdir(BUILDDIR):\n if i in [ \".svn\",\n \"build\",\n \"doc\",\n \"source\",\n \"tools\",\n \"CMakeLists.txt\",\n \"examples\",\n \"INSTALL\",\n \"resources\"]:\n continue\n name = os.path.join(BUILDDIR,i)\n if os.path.isdir(name):\n print \"Removing directory: \",name\n shutil.rmtree(name)\n if os.path.isfile(name):\n os.remove(name)", "def clean(self) -> None:\n # remove all *.py and *.pyi files in the folder\n for wc in [\"*.py\", \"*.pyi\", \"modules.json\"]:\n for f in (self.package_path).rglob(wc):\n f.unlink()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Purge the thumbnail directory of irrelevant thumbnails
def purge_thumbnails(args, thumbdir, posts, diary=False):
    thumblist = list_of_thumbnails(posts, diary)
    thumbs_to_remove = list()
    for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):
        if os.path.basename(fullname) not in thumblist:
            thumbs_to_remove.append(fullname)

    if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:
        inpt = 'x'
        while inpt not in 'yn':
            inpt = input(f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? ').lower()
        if inpt == 'n':
            return

    for name in thumbs_to_remove:
        print('Removing thumbnail', name)
        os.remove(name)
        info_fullname = os.path.splitext(name)[0] + '.info'
        if os.path.exists(info_fullname):
            os.remove(info_fullname)
[ "def clear_thumbnails(self):", "def delete_thumbs_tmp():\n all_nodes = nuke.allNodes(\"Read\")\n for read in all_nodes:\n file_path = read[\"file\"].value()\n last = os.path.basename(file_path)\n if last == \"Thumbs.db\" or last[-4:] == \".tmp\":\n nuke.delete(read)", "def clear_thumbnails() -> None:\n global thumbnails_list\n\n for thumbnail in thumbnails_list:\n thumbnail.destroy()\n \n thumbnails_list = []", "def delete_thumbnail(self, thumbnail_name):", "def delete_gallery(sender, instance, *args, **kwargs):\n if instance.thumbnail:\n instance.thumbnail.delete(save=False)", "def delete_thumbnails(relative_source_path, root=None, basedir=None,\r\n subdir=None, prefix=None):\r\n thumbs = thumbnails_for_file(relative_source_path, root, basedir, subdir,\r\n prefix)\r\n return _delete_using_thumbs_list(thumbs)", "def destroy_temp_file(self) -> None:\n self.thumbnail.destroy()", "def _clean_up_temporary_files(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.io.gfile.remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'flower_photos')\n tf.io.gfile.rmtree(tmp_dir)", "def cleanup():\n\n if os.path.isdir(GEN_DIR):\n shutil.rmtree(GEN_DIR)", "def clear_thumbnail(self):\n from anima.ui import utils\n utils.clear_thumbnail(self.thumbnail_graphics_view)", "def cleanup(uri):\n if os.path.isdir(uri):\n shutil.rmtree(uri)\n else:\n os.remove(uri)", "def delete_all_files_in_image():\n \n [os.remove(file) for file in glob.glob(os.path.join(os.getcwd(),\"src/static/images/\",\"*.png\"))]", "def delete_all_previous_result_images():\n for filename in os.listdir(image_result_folder):\n file_path = os.path.join(image_result_folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n # delete the file\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n # delete the folder\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete &s. Reason: %s' % (file_path, e))\n\n print('-------------- Empty the result folder --------------')\n print()", "def clearImageFolder():\n filelist = listImageFolder()\n for f in filelist:\n os.remove('{}/{}'.format(imageFolder, f))", "def delete_screenshots(self, model):\n try:\n os.remove(\"uploads/{}.png\".format(model.screenshot))\n os.remove(\"uploads/small_{}.png\".format(model.screenshot))\n except:\n pass\n\n self.delete_from_s3(\"{}.png\".format(model.screenshot))\n self.delete_from_s3(\"small_{}.png\".format(model.screenshot))", "def delete_tiff_files(self):\n for file in self.tiff_files:\n os.remove('../raw_data/' + self.uuid + '/' + file)", "def delete_pic_in_outer_folder(self):\n for file in os.listdir():\n if file.endswith(\".jpg\"):\n os.unlink(file)", "def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)", "def remove_tmp_dir(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of full paths for pictures and movies in the source directory, plus the subdirectories that contain media
def list_of_medias_ext(args, sourcedir):
    result = list()
    listdir = sorted_listdir(os.listdir(sourcedir))
    if '.nomedia' not in listdir:
        for basename in listdir:
            fullname = os.path.join(sourcedir, basename)
            if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):
                result.append(fullname)
            else:
                if is_media_within_dates(fullname, args.dates):
                    result.append(fullname)
    return result
[ "def get_file_paths(source):\n print(\"Gathering files please wait....\")\n file_endings = [\"mp4\", \"avi\", \"mpg\", \"wmv\", \"mov\", \"mkv\"]\n files = [file for file in source.glob(\"**/*\")\n if file.is_file() and any([file.name.endswith(ending) for ending in file_endings])]\n return files", "def get_media_file_paths(base_project_folder, ignored_folder_names=['BL_proxy']):\n files = []\n FILE_EXTENSIONS = list(VIDEO_EXT + IMG_EXT)\n for dirpath, dirnames, filenames in os.walk(project_folder):\n for name in ignored_folder_names:\n if name in dirpath:\n continue\n\n for f in filenames:\n file_path = os.path.join(dirpath, f)\n extension = os.path.splitext(file_path)[1]\n if extension.lower() in FILE_EXTENSIONS:\n files.append(os.path.join(dirpath, f))\n return files", "def __return_movie_file_list(self, movie_path):\n movie_dir = movie_path.rsplit(\"/\",1)[0]\n movie_file_list =[]\n movie_extentionds = self.__movie_file_extensions(self.__file_extentions)\n for x in os.listdir(movie_dir):\n if x.rsplit(\".\",1)[-1]in movie_extentionds:\n movie_file_list.append(movie_dir+\"/\"+x)\t\t\n\t#USUNAC URL Z NAPISY24\n return movie_file_list", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def get_source_paths(root_path, options):\n results = []\n for root, dirs, files in os.walk(root_path):\n for afile in files: # for files at this level\n if (isa_desired_file(afile)): # if a file is to be uploaded\n results.append(os.path.join(root, afile)) # local source path\n return results", "def _source_images(self):\n return self.source_dir.glob(f'*.{self.FROM_FORMAT}')", "def walk_source(source_dir):\n for path in (os.path.join(root, f) for root, _, files in os.walk(source_dir) for f in files):\n if os.path.splitext(path)[1].lower() == '.mp3':\n yield path", "def get_scenes_dirs(self):\n glob_expression = join(self.dir_data, '*')\n relative_paths = [relpath(p, self.dir_data) for p in sorted(glob.glob(glob_expression))]\n return relative_paths", "def find_photos(source_path, common_extensions=('JPG', 'CR2', 'ORF', 'ARW', 'TIFF', 'DNG'), ignore=[]):\n # combinedignored = re.compile('|'.join('(?:{0})'.format(x) for x in ignore))\n # use endswith , ignore must be a tuple then\n # if ignore and dirpath.endswith(ignore):\n # for duplication, at the end cll the same funciton\n\n source_files = list()\n\n for (dirpath, dirnames, filenames) in os.walk(source_path):\n for f in filenames:\n if f.upper().endswith(common_extensions):\n # source_files.append(os.path.join(dirpath, f))\n parent = os.path.basename(os.path.normpath(dirpath))\n source_files.append({'dir':dirpath,\n 'filename':f,\n 'parent_folder':parent})\n\n return source_files", "def get_movies(path):\n movies_list = []\n \n \n for f in os.listdir(path):\n \n full_file_path = join(path,f)\n if isdir(full_file_path):\n \n movies_list.extend( get_movies(full_file_path) )\n \n elif isfile(full_file_path) and full_file_path[-3:] in util.extension:\n m = Movie(f, full_file_path)\n movies_list.append(m)\n \n return movies_list", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out 
the 2D list to a 1D list \r\n return image_paths", "def get_source_paths(self):\n source_keys = ['s_sources', 'c_sources', 'cpp_sources', 'hex_files',\n 'objects', 'libraries']\n source_files = []\n for key in source_keys:\n source_files.extend(getattr(self.resources, key))\n return list(set([os.path.dirname(src) for src in source_files]))", "def list_media_dirs(media_root):\n media_root = os.path.abspath(media_root);\n all_dirs = [ dirs for dirs in os.listdir(media_root)\n if os.path.isdir(os.path.join(media_root, dirs)) ]\n return sorted(filter(media_filter, all_dirs))", "def _build_local_file_list(source):\n if isfile(source):\n return dirname(source), ['/'+basename(source)]\n else:\n f = []\n root = dirname(source)\n for (current_folder, sub_folders, sub_files) in walk(root):\n if len(sub_files) > 0:\n remote_path = current_folder.replace(root,'').replace('\\\\', '/') # We remove any '\\' (Windows env)\n f.extend(list(map(lambda sub_file: remote_path+'/'+sub_file, sub_files)))\n return root, f", "def get_video_file_paths(input_dir):\n\n # Get all files in folders and sub-folders\n # files = get_all_files_in_dir(input_dir)\n\n VIDEO_FILE_TYPES = (\n \".avi\",\n \".mp4\",\n \".mkv\",\n \".webm\",\n \".mpeg\",\n \".ogg\",\n \".m4v\",\n \".wmv\",\n \".mov\",\n \".flv\",\n )\n\n video_files = []\n\n entries = input_dir.iterdir()\n # entries = sorted(entries, key=lambda entry: entry.is_file())\n\n for f in entries:\n if f.suffix in VIDEO_FILE_TYPES:\n video_files.append(f)\n\n return video_files", "def get_all_recording_paths():\n file_list = glob.glob(DATA_FOLDER + \"**/**/*.csv\", recursive=True)\n return file_list", "def _get_movies(dir):\n movieList = []\n\n directories = os.listdir(dir)\n for d in directories:\n # We need to skip past directories without instruction sets\n if '__' not in d:\n continue\n files = os.listdir(\"{root}/{subdir}\".format(root=dir, subdir=d))\n for f in files:\n # Don't add .mkv's that are handbrake encodes.\n if '--converted' not in f and '.mkv' in f:\n movie = Movie(dir, d, f)\n movieList.append(movie)\n\n return movieList", "def listdir_fullpath(d):\r\n return [os.path.join(d, f) for f in os.listdir(d)]", "def images_in_paths(folder_path: str) -> List[str]:\n paths = []\n folder_path = os.path.join(os.getcwd(), folder_path)\n for root, dirs, files in os.walk(folder_path):\n for file in files:\n paths.append(os.path.join(root, file))\n return paths" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compose HTML with Blogger image URLs
def compose_blogger_html(args, title, posts, imgdata, online_videos):
    for post in posts:
        for media in post.medias:
            if type(media) is PostImage:
                if media.uri not in imgdata:
                    print('Image missing: ', media.uri)
                else:
                    img_url, resized_url = imgdata[media.uri]
                    media.uri = img_url
                    media.resized_url = resized_url
            elif type(media) is PostVideo:
                if not online_videos:
                    print('Video missing: ', media.uri)
                else:
                    media.iframe = online_videos[0]
                    del online_videos[0]
            else:
                assert False
    return print_html(args, posts, title, '', target='blogger')
[ "def doImg(bunch, text, env):\n if bunch.get(\"align\", None) is not None:\n align = \" align='%s'\" % bunch[\"align\"]\n else:\n align = \"\"\n if bunch.get(\"width\", None) is not None:\n width = \" width='%s'\" % bunch[\"width\"]\n else:\n width = \"\"\n return \"<img src='%s'%s%s>%s\" % (bunch[\"src\"], align, width,text)", "def process_url_images(text: str, width: int, height: int) -> str:\n\n\treturn re.sub(\n\t\tparsing.url,\n\t\tlambda m: '<p>' + from_image_url(m.group(0), width=width, height=height) + '<br></p>',\n\t\ttext)", "def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"Campaign badge\"/>' % self.image.url\r\n return mark_safe(h)", "def website_create_body(website_info):\r\n body = \"\"\r\n body += H2 + website_info.title + END_H2\r\n body += '\\n' + P + website_info.content + END_P\r\n for image in website_info.images:\r\n if isinstance(image, str):\r\n body += '<img src=\"' + image + CLASS_CENTER\r\n elif isinstance(image, Image):\r\n body += '<img src=\"' + image.name + '\" width=\"' + image.size + CLASS_CENTER\r\n else:\r\n pass\r\n return body", "def news_image_embedtype_handler(attrs, images=[]):\n attrs['alt'] = attrs.get('alt', '')\n\n if attrs.get('title'):\n title_html = '<figcaption>{}</figcaption>'.format(attrs['title'])\n else:\n title_html = ''\n\n image_html = image_embedtype_handler(attrs, images=images)\n\n if attrs.get('href'):\n image_html = '<a href=\"{}\">{}</a>'.format(attrs['href'], image_html)\n\n return image_html + title_html", "def image2html(imagefile, title='', alt=''):\n return \"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <style>\n h2 {text-align: center;}\n .center {\n display: block;\n margin-left: auto;\n margin-right: auto;\n width: 80%;\n }\n </style>\n </head>\n <body>\n <h2>\"\"\" + title + \"</h2>\" + f\"\"\"\n <img src=\"{imagefile}\" alt=\"{alt}\" class=\"center\"></img>\n\n </body>\n </html>\n \"\"\"", "def prepare_for_blogger(args):\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n\n html = compose_blogger_html(args, title, posts, online_images, online_videos)\n\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)", "def correct_img_links(body_main_content, schema_name, list_name_image):\n for name_image in list_name_image:\n body_main_content = body_main_content.replace(\n \"src=\\\"\" + name_image + \"\\\"\",\n \"src=\\\"{% static \\\"schema_viewer/oxygen/\" + schema_name + \"/\" + name_image + \"\\\" %}\\\"\"\n )\n return body_main_content", "def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=\"{url}\" alt=\"{alt}\">'.format(\n url=banner_url,\n alt='Banner'\n )", "def getLinkedImages(self, blog):\n imgs = []\n newBlog = blog[:]\n blog = blog.replace(\"<A \", \"<a \")\n for blogSeg in blog.split(\"<a \")[1:]:\n tag = \"<a \" + blogSeg[:blogSeg.find(\">\")+1]\n tag = tag.replace(' HREF=', ' href=')\n quoteChar = tag[tag.find(\"href=\")+5]\n # Make sure this is a tag that links\n if \" href=\" in tag:\n thisImg = tag.split('href='+quoteChar, 1)[1].split(quoteChar, 1)[0]\n imgs.append(thisImg)\n for img in imgs:\n # ditch the query string, some images won't download 
with it \n downloadUrl = img.split(\"?\", 1)[0]\n if downloadUrl[0:7] != \"http://\":\n downloadUrl = self.urlPrefix + downloadUrl\n downloadUrl = downloadUrl.replace(\"../\", \"\")\n filename = \"comics/blogimgs/{0}\".format(os.path.basename(downloadUrl))\n filename = filename.replace(\"%20\", \" \")\n pathname = \"{0}/{1}\".format(self.filePrefix, filename)\n if filename[-4:] not in [\".gif\", \".png\", \".jpg\"]:\n continue\n try:\n self.getImage(downloadUrl, pathname)\n except DownloadError as e:\n # there are likely many broken links, silently continue on error\n # unlike embedded images, broken links do not break layout\n continue\n newBlog = newBlog.replace(img, filename)\n return newBlog", "def image(self, src, title, text):\n src = escape_link(src)\n text = escape(text, quote=True)\n if title:\n title = escape(title, quote=True)\n html = '<img src=\"%s\" alt=\"%s\" title=\"%s\"' % (src, text, title)\n else:\n html = '<img src=\"%s\" alt=\"%s\"' % (src, text)\n if self.options.get('use_xhtml'):\n return '%s />' % html\n return '%s>' % html", "def explore(self): \n\n # Use Beautiful Soup to parse the HTML and get the href of a tags and src of img tags:\n page_data = BeautifulSoup(self.response.text, 'html.parser')\n links = [link.get('href') for link in page_data.find_all('a')]\n images = [image.get('src') for image in page_data.find_all('img')]\n\n for link in links:\n # Format the url:\n link = self.format_url(link)\n if link == '':\n continue\n\n # Append each valid link to self.links:\n self.links.append(link)\n\n for image in images:\n # Format the url:\n image = self.format_url(image)\n if image == '':\n continue\n\n # Append each valid image to self.images:\n self.images.append(image)", "def add_images(self):\n soup = self.get_soup()\n story = self.get_story()\n \n all_images = soup.find_all(\"img\")\n for imgtag in all_images:\n if imgtag.get(\"alt\") == \"cover\":\n is_cover = True\n else:\n is_cover = False\n img_url = imgtag[\"src\"]\n # fanficfare does not like 'ffdl-' present in the URL, replace it with a placeholder\n sub_img_url = img_url.replace(\"ffdl-\", FFDL_PLACEHOLDER)\n newsrc, imgurl = story.addImgUrl(\"file://{}/story.html\".format(self.path), sub_img_url, self.fetch_image, cover=is_cover)\n # rewrite image tag\n imgtag[\"src\"] = newsrc\n if not imgtag.has_attr(\"longdesc\"):\n imgtag[\"longdesc\"] = imgurl", "def cmd_redditimg(self, args, msg):\n if len(args) == 0:\n return \"Usage: !redditimg <subreddit>\"\n imgs = []\n for page in range(1, 11):\n for img in requests.get(\"http://imgur.com/r/%s/top/all/page/%s.json\" % (args[0], page)).json()['data']:\n resp = \"%s - http://i.imgur.com/%s%s\" % (img['title'], img['hash'], img['ext'])\n if img['nsfw']:\n resp = resp + \" :nsfw:\"\n imgs.append(resp)\n if len(imgs):\n return choice(imgs)", "def get_post_image(html):\n \n banned_strings = [\"doubleclick\", \"feedburner\", \"tweetmeme\", \"hulkshare\", \"tracker\", \"phobos\", \"apple\"] # ignore tracking pixels\n soup = BeautifulSoup(html)\n images = soup.findAll('img')\n \n # note, we proceed through the HTML from top to bottom, returning the first suitable image we find\n for img in images:\n src = img['src']\n contains_banned = False\n for substring in banned_strings:\n if substring in src:\n contains_banned = True\n if not contains_banned:\n return src\n return ''", "def thumbnail_generator():\n website_url = json.loads(request.data.decode())['url']\n try:\n webpage, message = url_preview.send_request(website_url)\n if webpage is not None:\n #Construct 
the soup object\n soup_object = url_preview.get_soup_object(webpage)\n #Get the title of the artcile\n title = url_preview.get_title(soup_object)\n #Get the website of the article\n website_name = url_preview.get_url(soup_object).rsplit(\".\", 1)[0]\n if website_name is None:\n website_name = website_url.split(\"//\", 1)[1].split(\"/\", 1)[0].rsplit(\".\", 1)[0]\n\n #Get the description of the article\n description = url_preview.get_description(soup_object)\n\n #Get the published date and time of the article\n date_time = url_preview.get_date_time(website_url)\n\n #Get the link to the preview image\n image_url = url_preview.get_preview_image(soup_object)['content']\n\n #Get the link to the favicon\n favicon_url = url_preview. get_favicon(soup_object)\n\n return render_template(\n \"success.html\",\n urlx=website_url,\n title=title,\n site_name=website_name,\n description=description,\n date_time=date_time,\n preview_image=image_url,\n favicon=favicon_url\n )\n except Exception as exp:\n return render_template('error.html', msg=str(exp))", "def format_post_background(post: dict) -> str:\n parsed = urllib.parse.urlparse(post['url'])\n logger.debug(f\"Sending post: {post['name']}\")\n if 'i.redd.it' in post['url']:\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <img class=\"\" style=\"width: 100%;\" src=\"{post['url']}\">\n </div>\n \"\"\"\n elif 'v.redd.it' in post['url']:\n if post['media'] is not None:\n return \"\"\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <video style=\"width: 100%;\" data-dashjs-player autoplay src=\"{post['media']['reddit_video']['dash_url']}\" controls></video>\n </div>\n \"\"\"\n else:\n logger.error(f\"Error no media for v.redd.it link: {post['url']}\")\n return \"\"\n elif 'imgur' in post['url'] and ('gif' in post['url'] or 'mp4' in post['url']):\n return \"\"\n imgur_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <video controls poster=\"//i.imgur.com/{imgur_id}.jpg\" preload=\"auto\" autoplay=\"autoplay\" muted=\"muted\" loop=\"loop\" webkit-playsinline=\"\" style=\"width: 100%; height: 100%;\">\n <source src=\"//i.imgur.com/{imgur_id}.mp4\" type=\"video/mp4\">\n </video>\n </div>\n \"\"\"\n elif 'imgur' in post['url']:\n imgur_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <img class=\"\" style=\"width: 100%;\" src=\"//i.imgur.com/{imgur_id}.jpg\">\n </div>\n \"\"\"\n elif 'redgif' in post['url']:\n return \"\"\n redgifs_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <div style='position:relative; padding-bottom:88.67%;'>\n <iframe src='https://redgifs.com/ifr/{redgifs_id}' frameborder='0' scrolling='no' width='100%' height='100%' style='position:absolute;top:0;left:0;' allowfullscreen></iframe>\n </div>\n </div>\n \"\"\"\n elif 'gfycat' in post['url']:\n return \"\"\n else:\n thumbnail = post.get('thumbnail', '/favicon.ico')\n thumbnail = thumbnail if thumbnail != '' else '/favicon.ico'\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <a href=\"{post['url']}\"><img class=\"\" style=\"width: 100%;\" src=\"{thumbnail}\">\n </div>\n \"\"\"\n return \"\"", "def html_content(self, base_url=''):\n tile_html = ''\n\n for tile in self.tiles.filter(active=True):\n tile_html += '''<div 
style=\"padding:8px; margin-top: 10px;\n background-color: #FFFFFF; border-radius:7px;\n box-shadow:2px 2px 4px #e2e2e2;\n border:1px #f8f8f8 solid;\">\n <a %(target)s href=\"%(tile_url)s\" style=\"text-decoration: none;\n color:#141933;\">\n <div style=\"width:40px; height: 40px;\n display: inline-block; margin-left: 10px;\n margin-right: 10px; vertical-align: middle;\">\n <img style=\"%(img_style)s\" src=\"%(tile_icon)s\" alt=\"\"/>\n </div>\n <div style=\"display: inline-block;\n font-size:13px; color:#141933;\n font-family: 'Roboto', sans-serif; \">\n %(tile_title)s\n </div></a></div>''' % {\n 'target': 'target=\"_blank\"' if tile.url else None,\n 'tile_url': tile.url,\n 'tile_icon': base_url + tile.image.url\n if tile.image else base_url + \"/static/images/default_tile_img-s.png\",\n 'tile_title': tile.title,\n 'img_style': 'max-width: 100%; max-height:100%; vertical-align: -webkit-baseline-middle'\n }\n\n return '''<!DOCTYPE html>\n <html>\n <meta name=\"viewport\"\n content=\"width=device-width,\n initial-scale=1, shrink-to-fit=no\">\n <head>\n <link\n href=\"https://fonts.googleapis.com/css?family=Roboto:400,400i,700,700i&display=swap\"\n rel=\"stylesheet\">\n <style>\n %(style_width)s\n </style>\n </head>\n <body style=\"font-family: 'Roboto',\n sans-serif; font-size:14px; line-height:24px; color: #212529;\">\n %(content)s%(tile_html)s</body></html>''' % {\n 'content': self.content,\n 'tile_html': tile_html,\n 'style_width': 'img{max-width:100%;}'}", "def html_content(self, base_url=''):\n tile_html = ''\n\n for tile in self.tiles.filter(active=True):\n tile_html += '''<div style=\"padding:8px; margin-top: 10px;\n background-color: #FFFFFF; border-radius:7px;\n box-shadow:2px 2px 4px #e2e2e2;\n border:1px #f8f8f8 solid;\">\n <a %(target)s href=\"%(tile_url)s\" style=\"text-decoration: none;\n color:#141933;\">\n <div style=\"width:40px; height: 40px;\n display: inline-block; margin-left: 10px;\n margin-right: 10px; vertical-align: middle;\">\n <img style=\"%(img_style)s\" src=\"%(tile_icon)s\" alt=\"\"/>\n </div>\n <div style=\"display: inline-block;\n font-size:13px; color:#141933;\n font-family: 'Roboto', sans-serif; \">\n %(tile_title)s\n </div></a></div>''' % {\n 'target': 'target=\"_blank\"' if tile.url else None,\n 'tile_url': tile.url,\n 'tile_icon': base_url + tile.image.url\n if tile.image else base_url + \"/static/images/default_tile_img-s.png\",\n 'tile_title': tile.title,\n 'img_style': 'max-width: 100%; max-height:100%; vertical-align: -webkit-baseline-middle'\n }\n\n return '''<!DOCTYPE html>\n <html>\n <meta name=\"viewport\"\n content=\"width=device-width,\n initial-scale=1, shrink-to-fit=no\">\n <head>\n <link\n href=\"https://fonts.googleapis.com/css?family=Roboto:400,400i,700,700i&display=swap\"\n rel=\"stylesheet\">\n <style>\n %(style_width)s\n </style>\n </head>\n <body style=\"font-family: 'Roboto',\n sans-serif; font-size:14px; line-height:24px; color: #212529;\">\n %(content)s%(tile_html)s</body></html>''' % {\n 'content': self.content,\n 'tile_html': tile_html,\n 'style_width': 'img{max-width:100%;}'}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export Blogger HTML to the clipboard. If full is set, export the complete HTML; otherwise export an HTML extract ready to paste into Blogger edit mode.
def prepare_for_blogger(args):
    title, posts = parse_markdown(os.path.join(args.root, 'index.md'))
    online_images, online_videos = online_images_url(args)

    if args.check_images and check_images(args, posts, online_images) is False:
        pass

    html = compose_blogger_html(args, title, posts, online_images, online_videos)

    if args.full is False:
        html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)
        html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)
        html = STYLE.replace('%%', '%') + html

    if args.dest:
        with open(args.dest, 'wt', encoding='utf-8') as f:
            f.write(html)
    else:
        clipboard.copy(html)
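A minimal sketch of the body-extraction step taken when full is not set; the page string here is made up for illustration:

import re

page = '<html><head><title>t</title></head><body><p>Hello</p><script>var x = 1;</script></body></html>'
# keep only what is inside <body>...</body>, then drop inline scripts
body = re.search('<body>(.*)?</body>', page, flags=re.DOTALL).group(1)
body = re.sub('<script>.*?</script>', '', body, flags=re.DOTALL)
print(body)  # -> '<p>Hello</p>'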
[ "def copy_content_of_cleandump_to_downloadedthread(self):\n \n shutil.copy(tm.CLEAN_DUMP_PATH, tm.ECLIPSE_DOWNLOADED_PAGE_FILE)\n \n print \"Dumped to {0}\".format( tm.ECLIPSE_DOWNLOADED_PAGE_FILE )", "def pastebin():\r\n\r\n fdirpb = \"/media/preto/DESCARGAS/pastebins/\" # Directorio donde se guardaran los pastes\r\n urlpbd = \"http://pastebin.com/download.php?i=\" # URL de descarga\r\n contents = urllib.urlopen(\"http://pastebin.com/archive\")\r\n bs = BeautifulSoup(contents, \"lxml\")\r\n link = bs.find_all('table', {'class': 'maintable'})\r\n\r\n for l in link:\r\n href = l.find_all('a')\r\n for h in href:\r\n if 'href' in h.attrs:\r\n if 'archive' not in h['href']:\r\n filepb = h['href'][1:] # URL de cada enlace encontrado que contiene pastes\r\n if filterurl(urlpbd+filepb) is True:\r\n download(urlpbd+filepb, filepb, fdirpb)", "def _copyToClipboardButton(self, *args):\n textToClipBoard = cmds.scrollField(self.macroScrollField, q=True, text=True)\n QtWidgets.QApplication.clipboard().setText(textToClipBoard)", "def depreciated_get_paste(paste_tup):\n href, name = paste_tup\n\n # Form the url from the href and perform GET request\n paste_url = 'http://pastebin.com' + href\n paste_page = requests.get(paste_url)\n\n # Collect the paste details from paste page\n if paste_page.status_code == 200:\n text = paste_page.text\n soup = BeautifulSoup(text, 'html.parser')\n # soup.textarea.get_text() return the paste content\n paste = Paste(url=\"http://www.pastebin.com\"+href, name=name, content=soup.textarea.get_text(), datetime=datetime.now())\n return paste\n\n # Return False if the scrape failed\n return False", "def get_content(paste):\n return paste.content", "def clipboard(self, text = None):\n if text == None:\n response = self._fetch_json('/api/clipboard')\n return response['content']\n else:\n postdata = codecs.encode(json.dumps({ 'content': text }), 'utf-8')\n self._urlopen('/api/clipboard', postdata).read()", "def paste():\r\n print('Paste selected')", "def get_paste(url) :\n info = get_info(url)\n return get_paste_by_id(info[0], info[1])", "def pastegrabber(arg_file):\n host = \"pastebin.com\"\n connect = httplib.HTTPConnection(host)\n raw_url = \"/raw.php?i=\" + arg_file\n connect.request(\"GET\", raw_url)\n response = connect.getresponse()\n if response.status != 200:\n sys.exit('Error!', 'status code:', response.status)\n raw_post = response.read()\n write_paste(raw_post)", "def publish_html(self, readyhtml):\n with open(self.outfile,'w') as f_out:\n f_out.writelines(readyhtml)", "def exportBookmarksHtml(self, filePath):\n exports.ExportDialog.exportWhat = exports.ExportDialog.selectBranch\n localControl = globalref.mainControl.activeControl\n exportControl = exports.ExportControl(localControl.model.root,\n localControl.currentSelectionModel().selectedNodes(),\n globalref.mainControl.defaultFilePath())\n try:\n return exportControl.exportBookmarksHtml(filePath)\n except IOError:\n return False", "def get_clipboard():\n return pyperclip.paste()", "def copy(to_end=False):\n # Find a way to generalize this for different systems\n if to_end:\n with open('/Users/john/Terminal Saved Output', 'r') as f:\n output = f.read().replace('bpython', 'Python')\n code = output.split('\\nPython')[-1]\n else:\n code = pyperclip.paste()\n pyperclip.copy(parse_code(code))\n return None", "def save_clipboard(dist):\n tmpimg = ImageGrab.grabclipboard()\n if tmpimg:\n tmpimg.save(dist, 'PNG', compress_level=9)\n return dist\n return ''", "def ipython_paste(self,e):\n if self.enable_win32_clipboard:\n 
txt=clipboard.get_clipboard_text_and_convert(\n self.enable_ipython_paste_list_of_lists)\n if self.enable_ipython_paste_for_paths:\n if len(txt)<300 and (\"\\t\" not in txt) and (\"\\n\" not in txt):\n txt=txt.replace(\"\\\\\",\"/\").replace(\" \",r\"\\ \")\n self.insert_text(txt)\n self.finalize()", "def call_paste(self):\n from ..clipboard import ClipboardWindow\n clipboard = None\n for widget in self.uistate.gwm.window_tree:\n if isinstance(widget, ClipboardWindow):\n clipboard = widget\n if clipboard is None:\n clipboard = ClipboardWindow(self.dbstate, self.uistate)\n return True\n return False", "def OnPopUpCopy(self, event):\n\n wx.TheClipboard.UsePrimarySelection(False)\n if not wx.TheClipboard.Open():\n return\n data = wx.TextDataObject(self._URL)\n wx.TheClipboard.SetData(data)\n wx.TheClipboard.Close()", "def scrape_paste(self,paste_id):\n parameter = {'i': paste_id}\n r = requests.get('https://scrape.pastebin.com/api_scrape_item.php',params=parameter)\n return r.text", "def createClipboard(self):\n\n # Create dock widget\n clipboard_dock = QDockWidget()\n clipboard_dock.setWindowTitle(\"Display Clipboard Contents\")\n clipboard_dock.setAllowedAreas(Qt.TopDockWidgetArea)\n\n dock_frame = QFrame()\n self.cb_text = QTextEdit()\n paste_button = QPushButton(\"Paste\")\n paste_button.clicked.connect(self.pasteText)\n\n dock_v_box = QVBoxLayout()\n dock_v_box.addWidget(self.cb_text)\n dock_v_box.addWidget(paste_button)\n\n # Set the main layout for the dock widget,\n # then set the main widget of the dock widget\n dock_frame.setLayout(dock_v_box)\n clipboard_dock.setWidget(dock_frame)\n\n # Set initial location of dock widget\n self.addDockWidget(Qt.TopDockWidgetArea, clipboard_dock)\n\n # Create instance of the clipboard\n self.clipboard = QApplication.clipboard()\n self.clipboard.dataChanged.connect(self.copyFromClipboard)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run after reading the config file. Check for ffmpeg in the path. Create the .thumbnails directory if necessary and create a .nomedia file in it. Copy the photobox files to the destination directory. Handle priority between the command line and the config file.
def setup_part2(args):
    if args.update:
        args.sourcedir = args.source.sourcedir
        args.bydir = args.source.bydir
        args.bydate = args.source.bydate
        args.diary = args.source.diary
        args.recursive = args.source.recursive
        args.dates = args.source.dates
        args.github_pages = args.source.github_pages
    elif args.gallery:
        args.source.sourcedir = args.sourcedir
        args.source.bydir = args.bydir
        args.source.bydate = args.bydate
        args.source.diary = args.diary
        args.source.recursive = args.recursive
        args.source.dates = args.dates
        args.source.github_pages = args.github_pages
        update_config(args)

    if args.github_pages:
        args.html_suffix = '.html'
    else:
        args.html_suffix = '.htm'

    rootext = os.path.splitext(args.rootarg)[1]
    if rootext:
        args.rootname = os.path.basename(args.rootarg)
    else:
        args.rootname = 'index' + args.html_suffix

    if args.sourcedir:
        args.sourcedir = os.path.abspath(args.sourcedir)
        if os.path.splitdrive(args.sourcedir)[0]:
            drive, rest = os.path.splitdrive(args.sourcedir)
            args.sourcedir = drive.upper() + rest
        if not os.path.isdir(args.sourcedir):
            error('Directory not found', args.sourcedir)
    else:
        if args.gallery and args.diary is False and args.update is None:
            error('Directory not found', 'Use --sourcedir')

    if args.dest:
        args.dest = os.path.abspath(args.dest)

    if args.dest is None:
        args.dest = args.root

    if args.blogger and args.urlblogger is None:
        error('No blogger url (--url)')

    if args.gallery or args.update:
        # check for ffmpeg and ffprobe in path
        for exe in ('ffmpeg', 'ffprobe'):
            try:
                check_output([exe, '-version'])
            except FileNotFoundError:
                error('File not found', exe)

    if args.github_pages:
        args.thumbrep = 'thumbnails'
    else:
        args.thumbrep = '.thumbnails'

    args.thumbdir = os.path.join(args.dest, args.thumbrep)
    if not os.path.exists(args.thumbdir):
        os.mkdir(args.thumbdir)
        open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()

    favicondst = os.path.join(args.dest, 'favicon.ico')
    if not os.path.isfile(favicondst):
        faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')
        shutil.copyfile(faviconsrc, favicondst)

    photoboxdir = os.path.join(args.dest, 'photobox')
    if not os.path.exists(photoboxdir):
        photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')
        shutil.copytree(photoboxsrc, photoboxdir)

    if args.dates:
        if not(args.gallery or args.create):
            # silently ignored for the moment, otherwise all other commands will
            # launch a warning or an error on the default --dates value
            pass
        if args.dates == 'source':
            pass
        elif args.dates == 'diary':
            if args.create:
                error('Incorrect date format', args.dates)
        elif re.match(r'\d+-\d+', args.dates):
            date1, date2 = args.dates.split('-')
            if validate_date(date1) and validate_date(date2):
                args.dates = date1, date2
            else:
                error('Incorrect date format', args.dates)
        else:
            error('Incorrect date format', args.dates)
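A small, runnable sketch of the date-range branch above; validate_date here is a hypothetical stand-in for the project helper, and the sample value is invented:

import re
from datetime import datetime

def validate_date(s):
    # hypothetical stand-in: accept YYYYMMDD strings only
    try:
        datetime.strptime(s, '%Y%m%d')
        return True
    except ValueError:
        return False

dates = '20200101-20201231'
if re.match(r'\d+-\d+', dates):
    date1, date2 = dates.split('-')
    if validate_date(date1) and validate_date(date2):
        dates = date1, date2
print(dates)  # -> ('20200101', '20201231')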
[ "def create_thumbnails(fsrc):\n job_update(JobStatus.RUNNING, 'Generating recording thumbnails.')\n THUMBNAIL_COMMAND = 'ffmpegthumbnailer'\n EXTENSION = 'png'\n task = MythTV.System(path=THUMBNAIL_COMMAND)\n task.append('-q9') # quality level 0-10\n task.append('-t10') # seek percentage or time (hh:mm:ss)\n task.append('-i{}'.format(fsrc)) # source mp4 recording\n logging.debug(task.path)\n task.command('-s320', '-o{}.{}'.format(fsrc, EXTENSION), NULL_OUTPUT_OPT)\n # mythweb large\n task.command('-s320', '-o{}.-1.320x180.{}'.format(fsrc, EXTENSION), NULL_OUTPUT_OPT)\n # mythweb small\n task.command('-s100', '-o{}.-1.100x56.{}'.format(fsrc, EXTENSION), NULL_OUTPUT_OPT)", "def getPic(media_asset, file, directories):\n\n # use combination of media_asset[] values as a key into our directories dictionary from the .ini file\n if directories.has_key(media_asset[5]+\" \"+media_asset[6]):\n # try to traverse the directory tree recusively to find the media file\n try:\n src_file = os.path.exists(file_dict[file])\n #print ':',src_file,':', file_dict[file]\n ################################\n #\n # altered 5/24/05 CDP\n # forced dest_file flag so that the media grabber does not check for the existing presence\n # of the binary, it just copies everything all the time now:(\n #\n ################################\n #dest_file = os.path.exists(directories[media_asset[5]+\" \"+media_asset[6]]['destdir']+\"/\"+file)\n dest_file = 0\n #print directories[media_asset[5]+\" \"+media_asset[6]]['destdir']+\"/\"+file\n\n # check to see if the media file even exists where we think it should\n if src_file == 0:\n #print \"no such source file \"+file+ \", please check error log\"\n global status_msg1\n status_msg1 = 1\n error_nosrcfile.append(\"Tab file has the asset id \"+file+\" but this file doesn't exist on disk to fetch\")\n print \"Tab file has the asset id \"+file+\" but this file doesn't exist on disk to fetch\";\n return\n \n # it exists where we think it should now check if go2 has it already\n # go2 already has is, skip and go on to the next asset\n elif dest_file == 1:\n print 'we already have a go2 media asset named '+file+' not copying'\n return\n # go2 doesn't have it, go get it\n elif dest_file == 0:\n #src_file = src_file[0]\n print \"copying file :\" +file_dict[file]+\" \"+directories[media_asset[5]+\" \"+media_asset[6]]['destdir']\n try:\n os.system(\"cp \"+file_dict[file]+\" \"+directories[media_asset[5]+\" \"+media_asset[6]]['destdir'])\n except (IOError), diag:\n retval = str(diag)\n print retval\n return\n \n \n # no key in .ini file for this asset, excepting a index, type, and key errors, append to errorids the asset id\n except (KeyError, TypeError, IndexError), diag:\n retval = str(diag)\n # append asset id to errorids\n errorids.append('No file '+media_asset[0]+'.'+media_asset[4]+' in directory structure to fetch, if its a new go2 asset it will need to be created and tab file updated for asset :'+file)\n print 'No file '+media_asset[0]+'.'+media_asset[4]+' in directory structure to fetch, new go2 asset will need to be created and tab file updated for asset :'+file;\n return\n\n # we don't have a key in the directories dict that matches the delivered tab file, print error\n else:\n #print \"don't have ini file info for\", media_asset[5], media_asset[6], \"on assets id \", media_asset[0]\n global status_msg2\n status_msg2 = 1\n print \"don't have ini file info for \"+media_asset[5]+' '+media_asset[6]+\" on assets id \"+media_asset[0];\n error_noiniinfo.append(\"don't have ini file info for 
\"+media_asset[5]+' '+media_asset[6]+\" on assets id \"+media_asset[0])\n return", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def main():\n files = glob(\"*.jpg\")\n for file_name in files:\n date = get_exif(file_name)['DateTime']\n date = date[:10].replace(':', '')\n try:\n os.stat(date)\n except:\n os.mkdir(date)\n copyfile(file_name, os.path.join(date, file_name))", "def take_snapshot_for(instance):\n flv = instance.flv_file\n path, ext = os.path.splitext(flv.name)\n upload_to = time.strftime(instance.splash_image.field.upload_to)\n image_name = os.path.join(upload_to, os.path.basename(path) + '.jpg')\n \n inp = os.path.join(settings.VIDEOS_TEMP_DIR, flv.name)\n outp = os.path.join(settings.VIDEOS_TEMP_DIR, image_name)\n try:\n os.makedirs(os.path.dirname(outp))\n except:\n pass\n\n try:\n position = random.randint(1, instance.duration.seconds)\n except:\n position = 0\n instance.auto_position = '%s' % position\n\n tmpout = tempfile.NamedTemporaryFile(mode='rw+');\n tmperr = tempfile.NamedTemporaryFile(mode='rw+');\n process = subprocess.Popen(['ffmpeg',\n '-i', inp,\n '-an',\n '-ss', instance.auto_position,\n '-r', '1',\n '-vframes', '1',\n '-f', 'image2',\n '-y', outp,\n ], stdout=tmpout, stderr=tmperr)\n stdoutdata, stderrdata = process.communicate()\n # there was a problem with the video, (not supported format)\n if process.returncode == 1:\n wimage_name = None\n tmpout.seek(0)\n tmperr.seek(0)\n mail_video_errors(tmpout.read(), tmperr.read())\n raise WrongFfmpegFormat('invalid video format')\n return image_name", "def plugin(args):\n print(\"Creating {}\".format(args.dir))\n recursive_copy(\"extension\", args.dir)\n print(\"Done!\")", "def main():\n\n config = Config()\n\n if sys.argv[1] == \"--config\" or sys.argv[1] == \"-c\":\n config.load_config(sys.argv[2])\n elif isfile(DEFAULT_CONF_PATH):\n config.load_config(DEFAULT_CONF_PATH)\n\n if not config.check():\n print(\"Bad config given.\")\n return 0\n\n wallpaper = None\n\n with Image.open(config.wallpaper) as current:\n wallpaper = Image.new('RGBA', current.size)\n wallpaper.paste(current)\n\n config.max_width = wallpaper.width\n config.max_height = wallpaper.height\n\n maker = WallpaperMaker(config)\n wallpaper = maker.paste_covers(wallpaper)\n\n wallpaper.save(config.output)\n wallpaper.close()\n\n return 1", "def _configure(self):\n #set thumbnails path\n if self._options['tmbPath']:\n path = self._join_path(self._root, self._options['tmbPath'])\n \n self._attributes.append({\n 'pattern' : '^%s$' % re.escape('%s%s' % (self._separator, self._relpath(path))),\n 'locked' : True,\n 'hidden' : True\n })\n \n try:\n stat = self.stat(path)\n except os.error:\n try:\n self._mkdir(path=path)\n stat = self.stat(path)\n except os.error:\n stat = None\n\n if stat and stat['mime'] == 'directory' and stat['read']:\n self._options['tmbPath'] = path\n self._tmb_path_writable = stat['write'] \n else:\n self._options['tmbPath'] = ''", "def make_video(self,namein,nameout):\n import os\n os.system(f'ffmpeg -framerate 24 -pattern_type glob -i \"{namein}*.png\" {self.respath}/{self.date}/{nameout}.mp4')", "def main():\n conn = get_connection_or_die(Config.server, Config.database)\n make_table(conn)\n clear_table(conn)\n folder = Config.photo_dir\n photo_list = [t for t in folder_file_tuples(folder) if is_jpeg(t[1])]\n write_photos(conn, photo_list)", "def copy_file_to_server():\r\n utils.system_output('mv /home/chronos/user/Downloads/* 
/usr/local/autotest/results/default/',ignore_status=True)\r\n logging.info(\"Video Copied to Log location\")", "def prep(path,date,image):\n \n # run bash code with 'Popen'\n P = Popen('cp '+path+date+'/final/'+image+' ./', shell=True)\n P.wait()\n P = Popen('mv '+image+' '+image+'.fz', shell=True)\n P.wait()\n P = Popen('funpack *.fz', shell=True)\n P.wait()\n P = Popen('rm -rf *.fz', shell=True)\n P.wait()", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... 
but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def generate_thumbnails(self):\n success = False\n func = f\"{__package__}.{self.name}.{__class__.__name__}.generate_thumbnails\"\n post_event(\"log_debug\", f\"{func}\", f\"Generating thumbnail images...\")\n\n thumb_path = (\n f\"{self.trick_play_path}/{self.thumb_prefix}-%03d{self.thumb_extension}\"\n )\n\n # ffmpeg -i $INFILE -vf fps=1/$INTERVAL -s $RESOLUTION $OUTPREFIX-%03d.jpg\n args = [\n \"-i\",\n f\"{self.source_index.get('uri')}\",\n \"-vf\",\n f\"fps=1/{self.thumb_interval}\",\n \"-s\",\n f\"{self.thumb_resolution}\",\n f\"{thumb_path}\",\n ]\n\n ffmpeg = FFMPEG()\n image_creation = ffmpeg.run(arguments=args)\n\n success = image_creation.success\n\n return success", "def main():\n parser = OptionParser()\n\n common_group = OptionGroup(parser, \"Relocates photos and images by EXIF/creation date into a date-based hierarchy\")\n common_group.add_option(\"-s\", \"--source\", dest=\"source\", help=\"the source folder to process\")\n common_group.add_option(\"-t\", \"--target\", dest=\"target\", help=\"the target folder for new files\")\n common_group.add_option(\"-d\", \"--delete\", dest=\"delete\", action=\"store_true\",\n help=\"delete source files when processed\")\n common_group.add_option(\"--dry-run\", dest=\"dry_run\", action=\"store_true\",\n help=\"Just do a test-run. No actual changes will be made\")\n common_group.add_option(\"-p\", \"--path-prefix\", dest=\"path_prefix\",\n help=\"a prefix to prepend all files when they are processed\")\n common_group.add_option(\"-x\", \"--skip-existing\", dest=\"skip_existing\", action=\"store_true\",\n help=\"skip moving existing files when processing\")\n common_group.add_option(\"--configuration-folder\", dest=\"configuration_folder\",\n help=\"folder containing mediaphile.ini to use\")\n common_group.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"list all files in source folder with photo specific data\")\n common_group.add_option(\"-a\", \"--auto-tag\", dest=\"auto_tag\", action=\"store_true\",\n help=\"use prepending folders as tags instead of day part from date\")\n common_group.add_option(\"--tag\", dest=\"tag\", help=\"tag to use instead of day part from date\")\n parser.add_option_group(common_group)\n\n add_common_options(parser)\n (options, args) = parser.parse_args()\n check_common_options(options, args)\n\n if options.list:\n if not options.source:\n print(\"You must provide a source when using the -l option.\")\n sys.exit(1)\n\n list_photos(options.source)\n\n elif not options.source and not options.target:\n print(\"ERROR: You must supply both source- and target-folders.\\n\")\n print_help(parser)\n sys.exit(1)\n\n config = get_user_config(options.configuration_folder or None)\n\n relocate_photos(\n source_dir=options.source,\n target_dir=options.target,\n append_timestamp=config.getboolean('options', 'append timestamp') or False,\n remove_source=options.delete,\n tag=options.tag,\n dry_run=options.dry_run,\n photo_extensions_to_include=[ext.strip() for ext in config.get('options', 'photo extensions').split(',')],\n timestamp_format=config.get('options', 
'timestamp format'),\n duplicate_filename_format=config.get('options', 'duplicate filename format'),\n new_filename_format=config.get('options', 'new filename format'),\n path_prefix=options.path_prefix,\n skip_existing=options.skip_existing or config.getboolean('options', 'skip existing'),\n auto_tag=options.auto_tag or config.getboolean('options', 'auto tag'))", "def make_movie(imgname=None, imgdir=None, movname=None, indexsz='05', framerate=10, rm_images=False,\n save_into_subdir=False, start_number=0, framestep=1, ext='png', option='normal', overwrite=False,\n invert=False, add_commands=[]):\n # if movie name is not given, name it as same as the name of the img directory\n if movname is None:\n if os.path.isdir(imgname):\n if imgname[-1] == '/':\n movname = imgname[:-1]\n else:\n movname = imgname\n else:\n pdir, filename = os.path.split(imgname)\n movname = pdir\n\n\n if not option=='glob':\n command = [ffmpeg_path,\n '-framerate', str(int(framerate)),\n '-start_number', str(start_number),\n '-i', imgname + '%' + indexsz + 'd.' + ext,\n '-pix_fmt', 'yuv420p',\n '-vcodec', 'libx264', '-profile:v', 'main', '-crf', '12', '-threads', '0', '-r', '100']\n else:\n # If images are not numbered or not labeled in a sequence, you can use the glob feature.\n # On command line,\n # ffmpeg -r 1\n # -pattern_type glob\n # -i '/Users/stephane/Documents/git/takumi/library/image_processing/images2/*.png' ## It is CRITICAL to include '' on the command line!!!!!\n # -vcodec libx264 -crf 25 -pix_fmt yuv420p /Users/stephane/Documents/git/takumi/library/image_processing/images2/sample.mp4\n command = [ffmpeg_path,\n '-pattern_type', 'glob', # Use glob feature\n '-framerate', str(int(framerate)), # framerate\n '-i', imgname + '/*.' + ext, # images\n '-vcodec', 'libx264', # codec\n '-crf', '12', # quality\n '-pix_fmt', 'yuv420p']\n if overwrite:\n command.append('-y')\n if invert:\n command.append('-vf')\n command.append('negate')\n # check if image has dimensions divisibly by 2 (if not ffmpeg raises an error... why ffmpeg...)\n # ffmpeg raises an error if image has dimension indivisible by 2. Always make sure that this is not the case.\n # image_paths = glob.glob(imgname + '/*.' 
+ ext)\n # img = mpimg.imread(image_paths[0])\n # height, width = img.shape\n # if not (height % 2 == 0 and width % 2 == 0):\n command += ['-vf', ' pad=ceil(iw/2)*2:ceil(ih/2)*2']\n\n\n print(command)\n command += add_commands\n\n command.append(movname + '.mp4')\n subprocess.call(command)\n\n # Delete the original images\n if rm_images:\n print('Deleting the original images...')\n if not save_into_subdir and imgdir is None:\n imdir = os.path.split(imgname)\n print('Deleting folder ' + imgdir)\n subprocess.call(['rm', '-r', imgdir])", "def convert_dir(self, big_pic_dir, small_pic_dir):\n self.G_DEBUG_LOG.info('[BASE.convert_dir.start.....]')\n\n big_pic_dir = os.path.normpath(big_pic_dir)\n small_pic_dir = os.path.normpath(small_pic_dir)\n small_pic_str = ''\n \n if os.path.exists(big_pic_dir):\n if self.G_CHANNEL == '3': # 效果图插件提交\n small_width = '640'\n small_height = '640'\n else:\n small_width = '425'\n small_height = '260'\n small_pic_file_name_list = []\n\n for root, dirs, files in os.walk(big_pic_dir): # generator\n for big_pic_file_name in files:\n self.G_DEBUG_LOG.info('') # 分隔日志\n self.G_DEBUG_LOG.info('big_pic_file_name=' + big_pic_file_name)\n \n big_pic_file_path = os.path.join(root, big_pic_file_name)\n big_pic_file_name_tuple = os.path.splitext(big_pic_file_name) # ('demo', '.jpg')\n big_pic_file_name_root = big_pic_file_name_tuple[0] # 'demo'\n big_pic_file_name_ext = big_pic_file_name_tuple[1] # '.jpg'\n \n if big_pic_file_name_ext in ['.vrmap', '.vrlmap', '.vrimg']:\n continue\n \n big_pic_relative_file_path = big_pic_file_path[len(big_pic_dir) + 1:] # 相对路径,不以(反)斜杠开头\n \n small_pic_file_name = '{action_id}_{path_str}.jpg'.format(\n action_id=self.G_ACTION_ID,\n path_str=big_pic_relative_file_path.replace('\\\\', '[_]').replace('/', '[_]').replace('.', '[-]'),\n )\n small_pic_file_path = os.path.join(small_pic_dir, small_pic_file_name)\n\n small_pic_file_name_list.append(small_pic_file_name.replace('\\\\', '/'))\n \n small_pic_file_path_tmp = os.path.join(small_pic_dir, self.G_ACTION_ID + '_tmp.jpg')\n big_pic_file_path_tmp = os.path.join(root, 'tmp{}'.format(big_pic_file_name_ext))\n \n # 1.大图源文件 重命名为 大图临时文件\n # 如果重命名失败,保持原子性\n try:\n os.rename(big_pic_file_path, big_pic_file_path_tmp)\n except Exception as e:\n self.G_DEBUG_LOG.info('[warn]rename big_pic_file_path to big_pic_file_path_tmp failed-----')\n big_pic_file_path_tmp = big_pic_file_path\n small_pic_file_path_tmp = small_pic_file_path\n \n self.G_DEBUG_LOG.info('big_pic_file_path---{}'.format(big_pic_file_path))\n self.G_DEBUG_LOG.info('big_pic_file_path_tmp---{}'.format(big_pic_file_path_tmp))\n self.G_DEBUG_LOG.info('small_pic_file_path_tmp---{}'.format(small_pic_file_path_tmp))\n self.G_DEBUG_LOG.info('small_pic_file_path---{}'.format(small_pic_file_path))\n \n # 确定转换命令行\n # 不判断工具是否存在,不存在则会在执行时报错跳过;如果判断的话需要保持原子性则修改动作较大\n if big_pic_file_name_ext in ['.exr']:\n # 用oiiotool转换exr\n # 201809 oiiotool转换 包含通道的exr图 时会崩溃,可用nuke\n \n # oiio_path = r'c:\\oiio\\OpenImageIO-1.5.18-bin-vc9-x64\\oiiotool.exe'\n # convert_cmd = '{exe_path} \"{converted_file}\" -resize {width_x_height} -o \"{output_file}\"'.format(\n # exe_path=oiio_path,\n # output_file=small_pic_file_path_tmp,\n # converted_file=big_pic_file_path_tmp,\n # width_x_height=self.get_convert_r(big_pic_file_path_tmp, oiio_path, small_width, small_height),\n # )\n \n py_path = os.path.join(r'C:\\script\\new_py\\Util', \"nuke_convert_small_pic.py\")\n nuke_path = ''\n nuke_path1 = os.path.join(r'C:\\Program Files\\Nuke10.0v4', 'Nuke10.0.exe')\n nuke_path2 = 
os.path.join(r'C:\\Program Files\\Nuke9.0v9', 'Nuke9.0.exe')\n nuke_path3 = os.path.join(r'C:\\Program Files\\Nuke10.0v6', 'Nuke10.0.exe')\n nuke_path4 = os.path.join(r'C:\\Program Files\\Nuke9.0v6', 'Nuke9.0.exe')\n nuke_path5 = os.path.join(r'C:\\Program Files\\Nuke10.5v5', 'Nuke10.5.exe')\n nuke_path6 = os.path.join(r'C:\\Program Files\\Nuke8.0v3', 'Nuke8.0.exe')\n nuke_path7 = os.path.join(r'C:\\Program Files\\Nuke10.0v6', 'Nuke.exe')\n nuke_path8 = os.path.join(r'B:\\nuke\\Nuke10.5v5', 'Nuke10.5.exe')\n if os.path.isfile(nuke_path1):\n nuke_path = nuke_path1\n elif os.path.isfile(nuke_path2):\n nuke_path = nuke_path2\n elif os.path.isfile(nuke_path3):\n nuke_path = nuke_path3\n elif os.path.isfile(nuke_path4):\n nuke_path = nuke_path4\n elif os.path.isfile(nuke_path5):\n nuke_path = nuke_path5\n elif os.path.isfile(nuke_path6):\n nuke_path = nuke_path6\n elif os.path.isfile(nuke_path7):\n nuke_path = nuke_path7\n elif os.path.isfile(nuke_path8):\n nuke_path = nuke_path8\n else:\n self.G_DEBUG_LOG.info(\"[warn]nuke is not exists!\")\n \n if not os.path.isfile(py_path):\n self.G_DEBUG_LOG.info(\"[warn]nuke_convert_small_pic.py is not exists!\")\n \n try:\n os.environ['foundry_LICENSE'] = \"4101@127.0.0.1;4101@10.60.1.108;4101@10.60.5.248;4101@10.30.96.203\"\n except Exception as e:\n self.G_DEBUG_LOG.info(\"set environ for nuke error:\\n{}\".format(e))\n \n convert_cmd = r'\"{nuke_path}\" -t \"{py_path}\" \"{width}\" \"{height}\" \"{input_file}\" \"{output_file}\"'.format(\n nuke_path=nuke_path,\n py_path=py_path,\n width=small_width,\n height=small_height,\n input_file=big_pic_file_path_tmp,\n output_file=small_pic_file_path_tmp,\n ).replace(\"\\\\\", '/')\n else:\n # 用nconvert转换缩略图\n nconvert_path = r'c:/ImageMagick/nconvert.exe'\n # convert_cmd = '{exe_path} -out jpeg -ratio -resize {w} {h} -overwrite -o \"{output_file}\" \"{converted_file}\"'.format(\n convert_cmd = '{exe_path} -out jpeg -ratio -resize {w} {h} -overwrite -o \"{output_file}\" \"{converted_file}\"'.format(\n exe_path=nconvert_path,\n w=small_width,\n h=small_height,\n output_file=small_pic_file_path_tmp,\n converted_file=big_pic_file_path_tmp\n )\n \n # 2.将 大图临时文件 转缩略图为 缩略图临时文件\n CLASS_COMMON_UTIL.cmd(convert_cmd, my_log=self.G_DEBUG_LOG, continue_on_error=True)\n \n # 3.将 缩略图临时文件 重命名为 缩略图目标文件\n if big_pic_file_path_tmp != big_pic_file_path:\n os.rename(big_pic_file_path_tmp, big_pic_file_path)\n if os.path.exists(small_pic_file_path_tmp):\n try:\n os.rename(small_pic_file_path_tmp, small_pic_file_path)\n except Exception as e:\n pass\n \n self.G_DEBUG_LOG.info('') # 分隔日志\n\n small_pic_file_name_list = self.sort_pic_list(small_pic_file_name_list)\n\n small_pic_str = '|'.join(small_pic_file_name_list)\n self.G_FEE_PARSER.set('render', 'small_pic', small_pic_str)\n\n self.G_DEBUG_LOG.info('[BASE.convert_dir.end.....]')\n return small_pic_str", "def copy_images(in_path: str, out_path: str) -> None:\n file_list = glob.glob(join(in_path, \"**/*.jpg\"), recursive=True)\n in_path_parts = os.path.split(in_path)\n if len(in_path_parts) > 0 and len(in_path_parts[-1]) > 0:\n video_name = in_path_parts[-1]\n else:\n video_name = \"default\"\n out_dir = join(out_path, video_name)\n os.makedirs(out_dir)\n logger.info(\"Copying %s to %s\", in_path, out_dir)\n for image_path in tqdm(file_list):\n image_name = os.path.split(image_path)[-1]\n shutil.copyfile(image_path, join(out_dir, image_name))", "def write_thumbnails(self, appstruct):\n slugser = slugify(appstruct[\"serial\"])\n pdf_filename = \"thumbnails/%s/uploaded.pdf\" % 
slugser\n top_file = \"thumbnails/%s/top.png\" % slugser\n mos_file = \"thumbnails/%s/mosaic.png\" % slugser\n \n thumg = ThumbnailGenerator(pdf_filename)\n self.save_blob(thumg.top_thumbnail(), top_file)\n self.save_blob(thumg.mosaic_thumbnail(), mos_file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the q'th percentile of the distribution given in the argument 'data'. Uses the 'precision' parameter to control the noise level.
def Quantile(data, q, precision=1.0): N, bins = np.histogram(data, bins=precision*np.sqrt(len(data))) norm_cumul = 1.0*N.cumsum() / len(data) for i in range(0, len(norm_cumul)): if norm_cumul[i] > q: return bins[i]
[ "def Quartiles(data):\n q = np.percentile(data, [25, 50, 75])\n\n return q[0], q[1], q[2]", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def percentiles(data):\r\n mean = np.array(data).mean()\r\n std = np.array(data).std()\r\n np.array(data).sort() \r\n i_5 = int(0.05 * len(data))\r\n i_50 = int(0.50 * len(data))\r\n i_95 = int(0.95 * len(data))\r\n p_5 = data[i_5]\r\n p_50 = data[i_50]\r\n p_95 = data[i_95]\r\n return p_5, p_50,p_95", "def lower_quartile_sorted(data: Sequence[Real]) -> Real:\n return median(data[:len(data) // 2])", "def percentile(data, quantile, likelihood_ratio=None):\n\n if likelihood_ratio is None:\n return np.percentile(data, quantile, interpolation='nearest')\n else:\n sample_size = len(data)\n # likelihood ratio divided by N - 1\n lr = likelihood_ratio / (sample_size - 1)\n idx = data.argsort()\n\n if quantile > 50: # Right tail\n lr[np.argmax(data)] = 0\n lr_cumsum = np.flip(np.flip(lr[idx]).cumsum())\n tail_probability = 1.00 - quantile / 100.0\n else: # Left tail\n lr[np.argmin(data)] = 0\n lr_cumsum = lr[idx].cumsum()\n tail_probability = quantile / 100.0\n idx_nearest = np.argmin(abs(lr_cumsum - tail_probability))\n return data[idx[idx_nearest]]", "def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):\r\n ps = [p_vals] if np.isscalar(p_vals) else p_vals\r\n\r\n if not sorted_:\r\n data = sorted(data)\r\n n = len(data)\r\n d = []\r\n for p in ps:\r\n fi = p * n / 100 - 0.5\r\n if fi <= 0: # maybe extrapolate?\r\n d.append(data[0])\r\n elif fi >= n - 1:\r\n d.append(data[-1])\r\n else:\r\n i = int(fi)\r\n d.append((i+1 - fi) * data[i] + (fi - i) * data[i+1])\r\n return d[0] if np.isscalar(p_vals) else d", "def percentile(self, q):\n return self.quantile(q)", "def upper_quartile_sorted(data: Sequence[Real]) -> Real:\n return median(data[-len(data) // 2:])", "def quantile(x, p):\n sorted_x = sorted(x)\n # round p_index to base int\n p_index = int(p * len(x))\n return sorted_x[p_index]", "def quantile(self, data, alpha):\n \n data = np.sort(data)\n idx = int(len(data) * alpha)\n return data[idx]", "def quantile(x, p):\n p_index = int(p * len(x))\n return sorted(x)[p_index]", "def percentile(x, q):\n if (np.isnan(x.values).all() | np.isnan(q)):\n return np.nan\n else:\n return np.percentile(x.dropna(), q=q)", "def percentile(t: torch.tensor, q: float):\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result", "def quantile(X, q, dim=None):\n return X.kthvalue(int(q * len(X)), dim=dim)[0]", "def quart_standardize(x):\n q1 = np.percentile(x,15)\n q3 = np.percentile(x,85)\n iqr = q3-q1\n if iqr == 0:\n iqr = 1\n median = np.median(x)\n x = x-median\n x[np.where(x==0)[0]]=1e-5\n x = x/iqr\n return x", "def percentile(t: torch.tensor, q: float): # -> Union[int, float]\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! 
Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result", "def compute_95th_percentile(the_data):\n\n clut_series = pd.Series(the_data[~np.isnan(the_data)])\n to_return = clut_series.quantile(0.95)\n\n return to_return", "def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)", "def upper_quartile(data: Iterable[Real]) -> Real:\n ordered = sorted(data)\n\n return upper_quartile_sorted(ordered)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads a YouTube video by its unique id.
def youtube_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False): raw_video_info = get_content('http://www.youtube.com/get_video_info?video_id=%s' % id) video_info = parse.parse_qs(raw_video_info) if video_info['status'] == ['ok'] and ('use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']): title = parse.unquote_plus(video_info['title'][0]) stream_list = parse.parse_qs(raw_video_info)['url_encoded_fmt_stream_map'][0].split(',') else: # Parse video page when video_info is not usable. video_page = get_content('http://www.youtube.com/watch?v=%s' % id) ytplayer_config = json.loads(match1(video_page, r'ytplayer.config\s*=\s*([^\n]+);')) title = ytplayer_config['args']['title'] stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',') streams = { parse.parse_qs(stream)['itag'][0] : parse.parse_qs(stream) for stream in stream_list } for codec in yt_codecs: itag = str(codec['itag']) if itag in streams: download_stream = streams[itag] break url = download_stream['url'][0] if 'sig' in download_stream: sig = download_stream['sig'][0] else: sig = decrypt_signature(download_stream['s'][0]) url = '%s&signature=%s' % (url, sig) type, ext, size = url_info(url) print_info(site_info, title, type, size) if not info_only: download_urls([url], title, ext, size, output_dir, merge = merge)
[ "def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data", "def fetch_single_youtube_video(video_id):\n\n feed = 'http://' + YT_GDATAHOST + YT_FEEDBASE + 'videos/%s' % video_id\n \n entry = parsers.getxml(feed)\n user_root = entry.findtext('{%s}author/{%s}uri' % (ATOM_NS, ATOM_NS))\n user = parsers.getxml(user_root)\n \n _handle_video(\n author = user.findtext('{%s}id' % ATOM_NS).lstrip('http://'+ YT_GDATAHOST + YT_FEEDBASE + 'users/'),\n video_id = video_id,\n title = entry.findtext('{%s}title' % ATOM_NS),\n url = filter(lambda x: x.attrib['rel'] == 'alternate', entry.findall('{%s}link' % ATOM_NS))[0].attrib['href'],\n tags = '',\n date_uploaded = gtime2datetime(entry.findtext('{%s}published' % ATOM_NS)),\n date_received = datetime.datetime.now(),\n description = entry.findtext('{%s}group/{%s}description' % (MRSS_NS, MRSS_NS)) or '',\n )", "def download(videoid, outputpath):\n urlString = 'http://youtube.com/watch?v=' + videoid\n yt = YouTube(urlString)\n yt.streams.filter(progressive=True, file_extension='mp4').order_by(\n 'resolution')[-1].download(output_path=outputpath)\n print(yt.title + ' downlaoded to ' + outputpath)", "def download(videoId, uid = None, savepath = None):\n video_url = 'https://www.youtube.com/watch?v=' + videoId\n res = {}\n if savepath:\n res['savedpath'] = savepath\n else:\n res['savedpath'] = os.getcwd() + os.path.sep + \"downloads/\"\n\n ydl_opts = {}\n # TODO: a. Need to verify the download success/failure and other parameters.\n # b. Need to update the master database of the md5sum, file location of the download, date time, uid, retention period of the file\n # c. 
Need to update the user schema of the download information.\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n os.chdir(res['savedpath'])\n ydl.download([video_url])\n res['status'] = 'success'\n return res", "def get_video(self, video_id):\n uri = 'videos/' + video_id\n return self.make_request(uri)", "def video(youtube_id):\r\n display(YouTubeVideo(youtube_id, 720, 480, rel=0))", "def get_video(self, id):\n video_results = self.youtube.videos().list(\n \tid=id,\n \tpart=\"snippet\"\n )\n \n return self.execute_query(video_results)", "def get_yt_link(video_id):\n req = Request(\"https://youtube.com/get_video_info?video_id={}&hl=en\".format(video_id), headers=_HEADERS)\n\n with urllib.request.urlopen(req, timeout=2) as resp:\n data = urllib.request.unquote(gzip.decompress(resp.read()).decode(\"utf-8\")).split(\"&\")\n out = {k: v for k, sep, v in (str(d).partition(\"=\") for d in map(urllib.request.unquote, data))}\n player_resp = out.get(\"player_response\", None)\n\n if player_resp:\n try:\n resp = json.loads(player_resp)\n except JSONDecodeError as e:\n log(\"{}: Parsing player response error: {}\".format(__class__.__name__, e))\n else:\n det = resp.get(\"videoDetails\", None)\n title = det.get(\"title\", None) if det else None\n streaming_data = resp.get(\"streamingData\", None)\n fmts = streaming_data.get(\"formats\", None) if streaming_data else None\n\n if fmts:\n urls = {Quality[i[\"itag\"]]: i[\"url\"] for i in\n filter(lambda i: i.get(\"itag\", -1) in Quality, fmts)}\n\n if urls and title:\n return urls, title.replace(\"+\", \" \")\n\n stream_map = out.get(\"url_encoded_fmt_stream_map\", None)\n if stream_map:\n s_map = {k: v for k, sep, v in (str(d).partition(\"=\") for d in stream_map.split(\"&\"))}\n url, title = s_map.get(\"url\", None), out.get(\"title\", None)\n url, title = urllib.request.unquote(url) if url else \"\", title.replace(\"+\", \" \") if title else \"\"\n if url and title:\n return {Quality[0]: url}, title.replace(\"+\", \" \")\n\n rsn = out.get(\"reason\", None)\n rsn = rsn.replace(\"+\", \" \") if rsn else \"\"\n log(\"{}: Getting link to video with id {} filed! 
Cause: {}\".format(__class__.__name__, video_id, rsn))\n\n return None, rsn", "def download_video(url,path):\n ydl_opts = {'outtmpl': path, 'format': '22'}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n print('Video downloaded to ' + path)", "def download_from_youtube():\n linkinput = input(\"Enter the url you want to download: \")\n youtube_object = Youtube(linkinput)\n youtube_object.youtube()", "def get_video_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/video.webm')\n ec = {'ThreatGrid.Sample.Id': sample_id}\n demisto.results([\n {\n 'Type': entryTypes['note'],\n 'EntryContext': ec,\n 'HumanReadable': '### ThreatGrid Sample Run Video File -\\n'\n + 'Your sample run video file download request has been completed successfully for '\n + sample_id,\n 'Contents': ec,\n 'ContentsFormat': formats['json']\n },\n fileResult(sample_id + '.webm', r.content)\n ])", "def play_youtube(self, media_id):\n pass", "def movieid_first_video_url(self, movie_id):\n YOUTUBE_URL = \"https://www.youtube.com/watch?v=\"\n VIDEOS_URL = \"https://api.themoviedb.org/3/movie/%s/videos\"\n url_with_movieid = VIDEOS_URL % (movie_id)\n parm_dict = {\"api_key\": self.api_key, \"language\": self.language}\n url = url_with_movieid + \"?\" + urlencode(parm_dict, doseq=True)\n # print url\n\n response = requests.get(url)\n json_dict = json.loads(response.text)\n response.close()\n\n youtube_video_key = json_dict['results'][0]['key']\n return YOUTUBE_URL + youtube_video_key", "def download_video(self):\n track = self.f_name + self.file_type\n # youtube_cmd = [\n # \"youtube-dl\", self.link, \"-f\",\n # self.file_type, \"-o\", track\n # ]\n\n youtube_cmd = [\n \"youtube-dl\", self.link, \"-o\", track, \"-f\", \"webm\"\n ]\n cmd = ' '.join(youtube_cmd)\n for std_out in popen(cmd):\n self.set_status_label(std_out)\n self.status_label.update_idletasks()\n try:\n move(track, self.downloads)\n except Exception:\n self.set_status_label(\"ERROR DOWNLOADING\")", "def get_youtube_mp4():\n try:\n video = YouTube(full_link)\n stream = video.streams.filter(\n only_audio=True, audio_codec=\"mp4a.40.2\"\n ).first()\n stream.download(mp4_path)\n\n return get_youtube_mp3(stream)\n except Exception as error:\n print(error) # poor man's logging\n raise Exception", "def url(yt_id: str) -> str:\n return \"https://www.youtube.com/watch?v={}\".format(yt_id)", "async def youtube_extraction(self, video_id: str, url: str) -> dict:\n # noinspection PyBroadException\n try:\n start = time.time()\n if self.research_cache.get(video_id, None) is not None:\n return self.research_cache.get(video_id)\n ydl = await pytube.YouTube.create(url)\n yt_s, codec_name, abr = self.get_format(ydl.streams)\n # preferring format 250: 78k bitrate\n # (discord default = 64, max = 96) + already opus formatted\n song = {\n \"link\": url,\n \"id\": ydl.video_id,\n \"title\": ydl.title,\n \"stream\": yt_s,\n \"codec\": codec_name,\n \"abr\": abr,\n \"duration\": ydl.length,\n \"thumbnail\": ydl.thumbnail_url,\n \"term\": \"\",\n }\n\n if \"manifest\" in song[\"stream\"]:\n # pytube doesn't handle manifest extraction, so I need to do it.\n song[\"stream\"] = self.extract_manifest(song[\"stream\"])\n async with aiohttp.request(\n \"HEAD\", song[\"stream\"], allow_redirects=False\n ) as async_request:\n song[\"stream\"] = async_request.headers.get(\n \"Location\", song[\"stream\"]\n )\n song[\"loadtime\"] = int(time.time() - start)\n self.research_cache[song[\"id\"]] = song\n del ydl\n return song\n except 
(NotAvailableException, RegexMatchError):\n pass\n except Exception:\n traceback.print_exc()\n raise NotAvailableException(Errors.no_results_found)", "def get_video(video_id):\n return Video.get_by_id(video_id)", "def play_youtube(self, media_id):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that user_data is a dict and that key is present in it
def has_user_data(self, key): return isinstance(self._user_data, dict) and key in self._user_data
[ "def check_for_dict(check):", "def can_insert(data):\n return isinstance(data, dict)", "def _is_key_value(data):\n if data is None:\n return False\n return all(x in data for x in ['key', 'value'])", "def test_is_dict(input):\n return isinstance(input, dict)", "def isDict(data):\n\ttry:\n\t\tfrom types import DictType\n\t\tif type(data) == DictType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type({}):\n\t\t\treturn True\n\treturn False", "def _is_dict(val):\n\n return isinstance(val, dict)", "def sanity_check(cls, data): # no version with ID, since PUT (update) isn't allowed\n data = _dict_sanity_check(data,\n mandatory_keys = [\n (\"user_id\", User.exists),\n (\"customer_id\", Customer.exists)\n ],\n optional_keys = [])\n return data, None", "def _assert_user_key(self):\n if 'user_key' not in self._json_request_data:\n return\n assert self.current_user is not None\n assert (self.current_user.key.urlsafe() ==\n self._json_request_data['user_key'])", "def is_dict_of_model(cls: Type[Signer], dictionary: Dict[str, Any]) -> bool:\n return (\n isinstance(dictionary, dict)\n and \"signer\" in dictionary\n and super().is_dict_of_model(dictionary[\"signer\"])\n )", "def _test_dict(dic):\n\n if not isinstance(dic, dict):\n return False\n if len(dic) == 0:\n return False\n return True", "def validate_user_request_dict(request_dict):\n if 'first_name' not in request_dict:\n return False\n if 'last_name' not in request_dict:\n return False\n if 'id' not in request_dict:\n return False\n if 'email' not in request_dict:\n return False\n return True", "def check_user_data_in_response(response_data):\n assert response_data[\"id\"] > 0\n assert response_data[\"name\"] == pytest.test_user.name\n assert response_data[\"email\"] == pytest.test_user.email\n assert response_data[\"gender\"] == pytest.test_user.gender\n assert response_data[\"status\"] == pytest.test_user.status", "def _is_dict(v):\n return isinstance(v, dict)", "def __is_valid_dict(self, GRFData):\n\n if type(GRFData) is not dict:\n raise ValueError(\"Expected GRFData to be of type '{}', but received type '{}'.\".format(type(dict), type(GRFData)))\n\n for component in self.comp_list:\n if component not in GRFData.keys():\n raise ValueError(\"Component '{}' not found in GRFData.\".format(component))", "def dict_support_required(self):\n\t\treturn self.typemanager.has_dicts", "def check_validity(self, field_name, value):\n if not isinstance(value, dict):\n raise ValidationError(f'{field_name}: value must be dict')", "def _is_ok_metadata(self, meta_data):\n if not isinstance(meta_data, dict):\n return False\n\n for k, v in meta_data.items():\n if not isinstance(v, str):\n return False\n\n return True", "def test_process_dict_true(self):\n\n self.assertIn('userA@domain', self.temp_set)", "def is_dict(value):\n return isinstance(value, dict)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the value stored under key in user_data, if user_data is a dict
def get_user_data(self, key, default=None): if not isinstance(self._user_data, dict): return default return self._user_data.get(key)
[ "def key(data, key_name):\n return data.get(key_name)", "def get(self, username):\n return self.keys[username].key", "def _get_string(self, data, key):\n return data.get(key)", "def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY", "def data_key(self):\n raise NotImplementedError", "def get_value(data, key):\n if key in data:\n return data[key]\n return None", "def DictionaryKey(self) -> str:", "def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n return user[\"id\"] if user and \"id\" in user else None", "def has_user_data(self, key):\n return isinstance(self._user_data, dict) and key in self._user_data", "def get_data_key(object_id):\n return", "def key(self, realm):\n self._reload()\n result = self.data.get(realm.upper())\n if result:\n result = result.get('key')\n return result", "def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n if user and \"id\" in user:\n return user[\"id\"]\n return None", "def user_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_data\")", "def extract_backing_type(value: dict) -> str:\n return next(iter(value.keys()))", "def get_key(self, item):\r\n return item[0]", "def meta_value(request_object, dictkey):\n \n try:\n val = request_object.META[dictkey]\n except: # Exception as ex:\n val = ''\n return val", "def key(record):\n return record['key']", "def firstKey(dataDict):\n return list(dataDict.keys())[0]", "def data_key(self):\n return self._data_key" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to test the add_furniture functionality.
def test_add_furniture(self): add_furniture('invoice.csv', 'Elisa Miles', 'LR04', 'Leather Sofa', 25) add_furniture('invoice.csv', 'Edward Data', 'KT78', 'Kitchen Table', 10) add_furniture('invoice.csv', 'Alex Gonzales', 'BR02', 'Queen Mattress', 17) # Generate list of rentals with open('invoice.csv', 'r') as csvfile: rentals = [] for row in csvfile: rentals.append(row) print(rentals) # Assert statements self.assertEqual(rentals[0], ('Elisa Miles,LR04,Leather Sofa,25\n')) self.assertEqual(rentals[1], ('Edward Data,KT78,Kitchen Table,10\n')) self.assertEqual(rentals[2], ('Alex Gonzales,BR02,Queen Mattress,17\n'))
[ "def test_add_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", 25.00)\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", 10.00)\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", 17.00)", "def setUp(self):\n self.item = Furniture('11', 'sofa', '4', '5', 'suede', 'xl')", "def test_add_stock_item(self):\n pass", "def test_furniture_init(self):\n e = Furniture(\"test product code\", \"test description\", \"test market price\", \"test rental price\",\n \"test material\", \"test size\")\n\n self.assertEqual(e.product_code, \"test product code\")\n self.assertEqual(e.description, \"test description\")\n self.assertEqual(e.market_price, \"test market price\")\n self.assertEqual(e.rental_price, \"test rental price\")\n self.assertEqual(e.material, \"test material\")\n self.assertEqual(e.size, \"test size\")", "def test_api_can_add_food_to_a_meal(self):\n response = self.client.post(f'/api/v1/meals/{self.breakfast.id}/foods/{self.oatmeal.id}')\n # import code; code.interact(local=dict(globals(), **locals()))\n\n self.assertEqual(response.data['message'], \"Successfully added oatmeal to breakfast\")", "def add_furniture():\n print(\"Attempting to seed the furniture collection.....\")\n print()\n\n chair_path = Path(\"chair.png\")\n\n couch = FurnitureItem(\n \"Comfy couch\",\n \"Well loved, but still in pretty good condition\",\n 60.00,\n 40,\n \"swiessle@stevens.edu\",\n \"Couch\",\n \"beige\",\n [50, 20, 10],\n )\n couch.set_image_filepath(chair_path)\n Database.add_item(couch)\n print(\"couch has been successfully added\")\n\n table = FurnitureItem(\n \"Dining room table\",\n \"Wooden dining room table. Has a few scuffs, but not bad!\",\n 30.00,\n 15,\n \"gracem730@gmail.com\",\n \"Table\",\n \"wood\",\n [40, 20, 40],\n )\n table.set_image_filepath(chair_path)\n Database.add_item(table)\n print(\"table has been successfully added\")\n\n bed = FurnitureItem(\n \"Bed Frame\",\n \"Just selling the bed frame, you'll have \\\n to get your own mattress\",\n 55.00,\n 50,\n \"erotside@stevens.edu\",\n \"Bed\",\n \"white\",\n [10, 20, 10],\n )\n bed.set_image_filepath(chair_path)\n Database.add_item(bed)\n print(\"bed has been successfully added\")\n\n desk = FurnitureItem(\n \"Ikea desk, no longer need it\",\n \"In great condition, this is truly a steal\",\n 60.00,\n 35,\n \"jlora@stevens.edu\",\n \"Ikea Desk\",\n \"navy\",\n [20, 20, 30],\n )\n desk.set_image_filepath(chair_path)\n Database.add_item(desk)\n print(\"desk has been successfully added\")\n\n shelf = FurnitureItem(\n \"Book shelf, never used\",\n \"Brand new\",\n 110.00,\n 25,\n \"dcarpent@stevens.edu\",\n \"Book Shelf\",\n \"black\",\n [10, 20, 100],\n )\n shelf.set_image_filepath(chair_path)\n Database.add_item(shelf)\n print(\"shelf has been successfully added\")\n\n print()\n print(\"Done seeding the furniture collection!\")\n print(\"----------------------------------------------\")", "def test_add(self):\n # Everything added will be deleted later in test_delete.\n first_name = 'Trevor'\n last_name = 'Harvey'\n entry_date = '04/19/2012'\n title = 'Test'\n minutes = 34\n notes = 'testing entries. 
and regex (555) 555-3425'\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)\n # second test add\n first_name = 'Nik'\n last_name = 'Silver'\n entry_date = '01/14/1827'\n title = 'random@mail.com'\n minutes = 34\n notes = 'This is an email test.'\n\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)", "def test_add(self):\n self.sample_list.add(\"Apple Pie\", \"As American as...\", 6000)\n self.sample_list.add(\"Ice Cream\", \"Lieutenant Dan!!\", 1)\n\n self.assertEqual(self.sample_list.desserts[0].name, \"Apple Pie\")\n self.assertEqual(self.sample_list.desserts[0].calories, 6000)\n self.assertEqual(self.sample_list.desserts[1].description,\n \"Lieutenant Dan!!\")\n self.assertEqual(self.sample_list.next_id, 3)", "def test_add(self):\n self.assertEqual(Supply.objects.get(pk=1).quantity, float('10.8'))\n resp = self.client.post('/api/v1/supply/1/add/?quantity=5', format='json')\n self.assertEqual(Supply.objects.get(pk=1).quantity, float('15.8'))", "def add_furniture(itemcode, description, marketprice, rentalprice):\n\n material = input(\"Enter item material: \")\n size = input(\"Enter item size (S,M,L,XL): \")\n newitem = Furniture(itemcode, description,\n marketprice, rentalprice\n , material, size)\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def test_add_recipe(self):\n\n count = Recipe.objects.count()\n self.client.force_login(self.users[0])\n self.client.post('/meals/add/', {\n 'title': 'title',\n 'ingredients': 10,\n 'tags': 'stuff nonsesne',\n 'cook_time': 20,\n 'instructions': 'words here',\n 'add_new_recipe': ''\n })\n self.assertFalse(count == Recipe.objects.count())", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_order_refund_add(self):\n pass", "def test_substitute_creates_expense(self):\n pass", "def test_update_financial_support(self):\n pass", "def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')", "def test_order_add(self):\n pass", "def test_search_meal_pricing(self):\n pass", "def test_add_item_adds_single_entry():\n sc.menu = sc.default_menu\n sc.current.add_item('Coffee', 1)\n assert sc.current.receipt == {'subtotal': 1.59, 'Coffee': 1}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs an automatic check to see if any transcriptions need to be started or are already finished and need to be re-uploaded.\n\n Needs dbConnection & an integer representing the max concurrent transcriptions that can be run at a time.\n\n This function should not yet be used to parse and upload files from the 'transcripts' folder, because there is no reliable way to tell which files are still in progress; that part will be fixed later.
def runAutoCheck(dbConnection, maxConcurrent): # checks if any shows are pending. fileContent = DatabaseInteract.checkPre(dbConnection) if(len(fileContent) > 0 and Tools.numRunningProcesses() < maxConcurrent): cursor = dbConnection.cursor() cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';") dbConnection.commit() cursor.close() url = fileContent[0] indexID = str(fileContent[1]) # get the ID instead of the filename service = str(fileContent[3]) # podcastName = fileContent[2] Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done
[ "def check_transcripts(request):\r\n transcripts_presence = {\r\n 'html5_local': [],\r\n 'html5_equal': False,\r\n 'is_youtube_mode': False,\r\n 'youtube_local': False,\r\n 'youtube_server': False,\r\n 'youtube_diff': True,\r\n 'current_item_subs': None,\r\n 'status': 'Error',\r\n }\r\n try:\r\n __, videos, item = _validate_transcripts_data(request)\r\n except TranscriptsRequestValidationException as e:\r\n return error_response(transcripts_presence, e.message)\r\n\r\n transcripts_presence['status'] = 'Success'\r\n\r\n filename = 'subs_{0}.srt.sjson'.format(item.sub)\r\n content_location = StaticContent.compute_location(item.location.course_key, filename)\r\n try:\r\n local_transcripts = contentstore().find(content_location).data\r\n transcripts_presence['current_item_subs'] = item.sub\r\n except NotFoundError:\r\n pass\r\n\r\n # Check for youtube transcripts presence\r\n youtube_id = videos.get('youtube', None)\r\n if youtube_id:\r\n transcripts_presence['is_youtube_mode'] = True\r\n\r\n # youtube local\r\n filename = 'subs_{0}.srt.sjson'.format(youtube_id)\r\n content_location = StaticContent.compute_location(item.location.course_key, filename)\r\n try:\r\n local_transcripts = contentstore().find(content_location).data\r\n transcripts_presence['youtube_local'] = True\r\n except NotFoundError:\r\n log.debug(\"Can't find transcripts in storage for youtube id: %s\", youtube_id)\r\n\r\n # youtube server\r\n youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])\r\n youtube_text_api['params']['v'] = youtube_id\r\n youtube_response = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])\r\n\r\n if youtube_response.status_code == 200 and youtube_response.text:\r\n transcripts_presence['youtube_server'] = True\r\n #check youtube local and server transcripts for equality\r\n if transcripts_presence['youtube_server'] and transcripts_presence['youtube_local']:\r\n try:\r\n youtube_server_subs = get_transcripts_from_youtube(\r\n youtube_id,\r\n settings,\r\n item.runtime.service(item, \"i18n\")\r\n )\r\n if json.loads(local_transcripts) == youtube_server_subs: # check transcripts for equality\r\n transcripts_presence['youtube_diff'] = False\r\n except GetTranscriptsFromYouTubeException:\r\n pass\r\n\r\n # Check for html5 local transcripts presence\r\n html5_subs = []\r\n for html5_id in videos['html5']:\r\n filename = 'subs_{0}.srt.sjson'.format(html5_id)\r\n content_location = StaticContent.compute_location(item.location.course_key, filename)\r\n try:\r\n html5_subs.append(contentstore().find(content_location).data)\r\n transcripts_presence['html5_local'].append(html5_id)\r\n except NotFoundError:\r\n log.debug(\"Can't find transcripts in storage for non-youtube video_id: %s\", html5_id)\r\n if len(html5_subs) == 2: # check html5 transcripts for equality\r\n transcripts_presence['html5_equal'] = json.loads(html5_subs[0]) == json.loads(html5_subs[1])\r\n\r\n command, subs_to_use = _transcripts_logic(transcripts_presence, videos)\r\n transcripts_presence.update({\r\n 'command': command,\r\n 'subs': subs_to_use,\r\n })\r\n return JsonResponse(transcripts_presence)", "def transcript_sequence(species,aceVersion,log=0):\n \n os.chdir(os.environ['PYDATA']+'/%s/log'%species)\n logFile=open('%s_ace_transcripts.txt'%species,'w')\n t1=time.time()\n #create ace transcript_sequence\n path=os.environ['PYDATA']+\"/\"+species+\"/aceview/\"+species+\"_transcript_sequence.bkdb\"\n if os.path.exists(path):\n os.remove(path)\n transcriptDB=bsddb.btopen(path,'w')\n \n #test if 
mRNAs sequences are in one file or in several chromosome files\n try:\n sequenceFile = open('%s/%s_%s/AceView.ncbi_37.all_mrnas_dna.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower()),'r')\n chrFlag=0 \n except: \n chrFlag=1 \n \n if chrFlag: \n #open database for relation between chromosome and Ensembl region\n path=os.environ['PYDATA']+'/'+species+'/ensembl/'+species+'_region_by_chromosome.bkdb'\n chrDB=bsddb.btopen(path,'r')\n chromosomes=chrDB.keys()\n tscriptNb=0 \n for chromosome in chromosomes:\n print 'processing chromosome: '+chromosome\n try: \n sequenceFile = open('%s/%s_%s/x1.all_mrnas_fasta.%s.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower(),chromosome),'r') \n region=chrDB[chromosome] \n geneName='' \n transcriptName=''\n sequence='' \n for lines in sequenceFile:\n tscriptNb=tscriptNb+1 \n line = lines.split('\\n')[0]\n if not line:\n #save last transcript\n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,chromosome=chromosome,region=region,sequence=sequence),protocol=-1)\n break\n # get some informations \n if line[0]=='>': \n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,chromosome=chromosome,region=region,sequence=sequence),protocol=-1)\n transcriptName = line.split(':')[1] \n shortName=transcriptName.split(aceVersion)[0] \n transcriptLetter=shortName.split('.')[-1]\n geneName=shortName.split('.'+transcriptLetter)[0] \n sequence='' \n else:\n # Construct sequence\n sequence=sequence+line\n except:\n logFile.write('no AceView files %s/x1.all_mrnas_fasta.%s.fasta' %(os.environ['ACEDATA'],chromosome)) \n transcriptDB.close()\n chrDB.close()\n else: \n tscriptNb=0 \n sequenceFile = open('%s/%s_%s/AceView.ncbi_37.all_mrnas_dna.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower()),'r') \n geneName='' \n transcriptName=''\n sequence='' \n for lines in sequenceFile:\n tscriptNb=tscriptNb+1 \n line = lines.split('\\n')[0]\n if not line:\n #save last transcript\n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,sequence=sequence),protocol=-1)\n break\n # get some informations \n if line[0]=='>': \n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,sequence=sequence),protocol=-1)\n transcriptName = line.split(':')[1] \n shortName=transcriptName.split(aceVersion)[0] \n transcriptLetter=shortName.split('.')[-1]\n geneName=shortName.split('.'+transcriptLetter)[0] \n sequence='' \n else:\n # Construct sequence\n sequence=sequence+line \n transcriptDB.close()\n \n t2=time.time()\n if log!=0:\n log.write('\\t%u\\t%.2f\\n'%(tscriptNb,t2-t1))", "async def test_get_transcripts_from_gene(test_db):\n resp = await test_db.get_transcripts_from_gene(\"BRAF\", 2145, 2145)\n assert len(resp) == 32\n\n resp = await test_db.get_transcripts_from_gene(\"BRAF\", 140453136,\n 140453136)\n assert len(resp) == 0", "def run_tximport():\n eligible_experiments = (\n Experiment.objects.annotate(num_organisms=Count(\"organisms\"))\n .filter(num_organisms=1, technology=\"RNA-SEQ\", num_processed_samples=0)\n .prefetch_related(\"samples__results\")\n )\n\n paginator = Paginator(eligible_experiments, PAGE_SIZE)\n page = paginator.page()\n\n # Next is to figure out how many samples were processed for\n # 
each experiment. Should be able to reuse code from salmon\n # cause it does this stuff.\n tximport_pipeline = ProcessorPipeline.TXIMPORT\n\n while True:\n creation_count = 0\n\n for experiment in page.object_list:\n quant_results = get_quant_results_for_experiment(experiment)\n\n if should_run_tximport(experiment, quant_results, True):\n processor_job = ProcessorJob()\n processor_job.pipeline_applied = tximport_pipeline.value\n processor_job.ram_amount = 8192\n # This job doesn't need to run on a specific volume\n # but it uses the same Nomad job as Salmon jobs which\n # do require the volume index.\n processor_job.volume_index = random.choice(list(get_active_volumes()))\n processor_job.save()\n\n assoc = ProcessorJobOriginalFileAssociation()\n # Any original file linked to any sample of the\n # experiment will work. Tximport is somewhat special\n # in that it doesn't actuallhy use original files so\n # this is just used to point to the experiment.\n assoc.original_file = experiment.samples.all()[0].original_files.all()[0]\n assoc.processor_job = processor_job\n assoc.save()\n\n creation_count += 1\n\n try:\n send_job(tximport_pipeline, processor_job)\n except Exception:\n # If we cannot queue the job now the Foreman will do\n # it later.\n pass\n\n logger.info(\"Created %d tximport jobs for experiments past the thresholds.\", creation_count)\n\n if not page.has_next():\n break\n else:\n page = paginator.page(page.next_page_number())", "def start_transcribing():\n transcribe.main()", "def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")", "def file_already_processed(filespec):\n\n\ttry:\n\t\tconn = sqlite3.connect(cfg['dbfilespec'])\n\texcept Exception, e:\n\t\tmsg = \"Connect fail for db %s: %s\" % (cfg['dbfilespec'],str(e))\n\t\tlogger.error(msg)\n\t\treturn None\n\n\tc = conn.cursor()\n\n\t# get list of feeds and their properties\n\tquery_get_already_processed = \"SELECT * FROM files_successfully_processed WHERE filespec = '%s'\" \\\n\t % (filespec)\n\n\ttry:\n\t\tc.execute(query_get_already_processed)\n\texcept Exception, e:\n\t\tmsg = \"Execute fail on db %s for query %s: %s\" \\\n\t\t % (cfg['dbfilespec'],query_get_already_processed,str(e))\n\t\tlogger.error(msg)\n\t\tconn.close()\n\t\treturn None\n\n\t# fetchone returns a list of tuples\n\trows = []\n\ttry:\n\t\trows = c.fetchall()\n\texcept Exception, e:\n\t\tmsg = \"Fetchall fail on db %s for query %s: %s\" % (cfg['dbfilespec'],query_get_already_processed,str(e))\n\t\tlogger.error(msg)\n\t\tconn.close()\n\t\treturn None\n\n\tif len(rows) == 0:\n\t\tconn.close()\n\t\treturn False\n\n\tif len(rows) > 0:\n\t\tconn.close()\n\t\treturn True\n\n\t# wtf? len(rows) returned negative?\n\t# should this be an exception? 
don't know what's going on beyond this point\n\tmsg = \"len(rows) returned a negative value\"\n\tlogger.warning(msg)\n\t#warnings.warn(msg)\n\tconn.close()\n\treturn None", "def test_filechecker01(self) -> None:\n checkfile = tempfile.NamedTemporaryFile()\n checkfilename = checkfile.name\n ft = Taskmeister.FileChecker(self.msgq, self.logger,\n self.sec_interval, True, checkfilename)\n assert ft is not None, \"ft is None\"\n gevent.sleep(self.test_sleep_time)\n tn = self.msgq.num_messages()\n print(\"after sleep 1 {}\".format(tn))\n if tn != 1:\n raise RuntimeError(\"unexpected tn = {}\".format(tn))\n # now close the checkfile to remove it\n checkfile.close()\n gevent.sleep(self.test_sleep_time)\n tn = self.msgq.num_messages()\n print(\"after sleep 2 {}\".format(tn))\n if tn != 2:\n raise RuntimeError(\"unexpected tn = {}\".format(tn))", "def gbk_upload(self):\n t_count = 0\n os.chdir(self.path)\n print(os.getcwd())\n if os.path.isdir(self.path + '/Databases') is False:\n os.mkdir('Databases')\n for tier in os.listdir(os.getcwd()):\n if tier == 'Databases':\n continue\n db_name = str(tier) + '.db'\n if os.path.isfile(self.path + '/Databases/' + db_name) is False:\n print('Copying Template BioSQL Database... '\n 'This may take a few minutes...')\n shutil.copy2(where.Templates + '/Template_BioSQL_DB.db',\n self.path + '/Databases/%s' % db_name)\n else:\n os.remove(self.path + '/Databases/' + db_name)\n print('Copying Template BioSQL Database... '\n 'This may take a few minutes...')\n shutil.copy2(where.Templates + '/Template_BioSQL_DB.db',\n self.path + '/Databases/%s' % db_name)\n\n server = BioSeqDatabase.open_database(\n driver='sqlite3', db=(\n self.path + '/Databases/' + db_name))\n os.chdir(tier)\n for gene in os.listdir(os.getcwd()):\n os.chdir(gene)\n sub_db_name = gene\n for file in os.listdir(os.getcwd()):\n try:\n if sub_db_name not in server.keys():\n server.new_database(sub_db_name)\n db = server[sub_db_name]\n count = db.load(SeqIO.parse(file, 'genbank'))\n server.commit()\n print('Server Commited %s' % sub_db_name)\n print('%s database loaded with %s.' % (db.dbid, file))\n print(\n \"That file contains %s genbank records.\" %\n str(count))\n t_count = t_count + count\n print(\n 'The total number of files loaded so far is %i.' %\n t_count)\n except BaseException:\n server.rollback()\n try:\n del server[sub_db_name]\n server.commit()\n except BaseException:\n raise\n raise\n os.chdir('..')\n os.chdir('..')", "def check_active_requests():\n\n active_requests = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='waiting'\")\n for request in active_requests:\n\n\t# Check requested status \n\tif DownloaderSPAN512.check_request_done(request):\n\t dlm_cout.outs(\"Restore (GUID: %s) has succeeded. Will create file entries.\\n\" % request['guid'])\n\t create_file_entries(request)\n\n\telse:\n#\t dlm_cout.outs(\"Request (GUID: %s) has failed.\\n\" \\\n#\t \"\\tDatabase failed to report the data as restored.\" % request['guid'])\n#\t jobtracker.query(\"UPDATE requests SET status='failed', \" \\\n# \"details='Request failed. Why ?', \" \\\n# \"updated_at='%s' \" \\\n# \"WHERE guid='%s'\" % (jobtracker.nowstr(), request['guid']))\n\n query = \"SELECT (TO_SECONDS('%s')-TO_SECONDS(created_at)) \" \\\n \"AS deltaT_seconds \" \\\n \"FROM requests \" \\\n \"WHERE guid='%s'\" % \\\n (jobtracker.nowstr(), request['guid'])\n row = jobtracker.query(query, fetchone=True)\n #if row['deltaT_seconds']/3600. > config.download.request_timeout:\n if row/3600. 
> config.download.request_timeout:\n dlm_cout.outs(\"Restore (GUID: %s) is over %d hr old \" \\\n \"and still not ready. Marking \" \\\n \"it as failed.\" % \\\n (request['guid'], config.download.request_timeout))\n jobtracker.query(\"UPDATE requests \" \\\n \"SET status='failed', \" \\\n \"details='Request took too long (> %d hr)', \" \\\n \"updated_at='%s' \" \\\n \"WHERE guid='%s'\" % \\\n (config.download.request_timeout, jobtracker.nowstr(), \\\n request['guid']))", "def checkFiles(self): \r\n mdate_filenames_list = []\r\n mdate_filenames_tuple = {}\r\n last24 = []\r\n now = datetime.datetime.now() \r\n noise,ft = file_type.split('.')\r\n ## note can do an entry bg color stoplight thing >24 hrs = red, 12-24 hrs = yellow < 12 = green nice little if loop\r\n for f in filenames_list:\r\n if os.path.isfile(f):\r\n lastmod_date = datetime.datetime.fromtimestamp(os.path.getmtime(f))\r\n mdate_filenames_tuple = lastmod_date, f\r\n mdate_filenames_list.append(mdate_filenames_tuple)\r\n \r\n if now - lastmod_date < file_age:\r\n \r\n #print (\"{} was last modified on {:%a %b %d %Y, %H:%M:%S, %Z}. Moving to 'destinaiton' transfer folder.\".format(f, lastmod_date))\r\n last24.append(f)\r\n shutil.copy2(f, destination)\r\n xferTime=time.time()\r\n \r\n fa = str(file_age) \r\n with sqlite3.connect('fileTransfer.db') as connection:\r\n c = connection.cursor()\r\n c.execute(\"INSERT INTO tbl_lastRun(col_timestamp, col_source, col_destination, col_file_type, col_file_age) VALUES (?,?,?,?,?)\",(xferTime, source, destination, ft, hrs))\r\n connection.commit()\r\n connection.close \r\n\r\n clear(self)\r\n ask_quit(self)", "def createStructuredTranscript_Non_Core_Doc():\n\n #create a temporary folder that will hold the data transformed from doc to docx\n os.system('mkdir ' + INPUT_FOLDER+'temp')\n\n core_doc_asset = []\n missing_count = 0\n missing_files=[]\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.doc\"):\n\n # RG numbers for the core asset\n if (\"RG-50.030\" not in file and\n \"RG-50.106\" not in file and\n \"RG-50.549\" not in file):\n \n\n \n # convert file to docx, storing it in an untracked folder called temp\n file_docx = file + 'x'\n command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]\n call(command, shell=True)\n\n # append to the array\n core_doc_asset.append(file_docx)\n \n\n \n\n # get the units for each file, store them and update tracker\n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n \n not_processed=0\n processed_doc=0\n \n # get the units for each file, store them and update tracker \n for mongo_rg in core_doc_asset:\n # get text units for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n \n \n \n units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_doc\")\n\n not_processed=not_processed+1\n\n if False in processed:\n\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n not_processed=not_processed+1\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n else:\n # insert units on the output collection\n 
h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n \n\n #delete the temporary folder\n os.system('rm -r ' + INPUT_FOLDER+'temp')\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')\n file.write('\\n'.join(missing_files))\n\n \n # success\n pprint.pprint(\"Non-core doc files were successfully processed, but there are \" + str(missing_count) + \" missing\")", "def verify_files():\n toverify = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='unverified'\")\n\n numverified = 0\n for file in toverify:\n\n actualsize = pipeline_utils.get_file_size(file['filename'])\n\n expectedsize = file['size']\n\n last_attempt_id = jobtracker.query(\"SELECT id \" \\\n \"FROM download_attempts \" \\\n \"WHERE file_id=%s \" \\\n \"ORDER BY id DESC \" % file['id'], \\\n fetchone=True)\n \n queries = []\n if actualsize == expectedsize:\n dlm_cout.outs(\"Download of %s is complete and verified.\" % \\\n os.path.split(file['filename'])[-1])\n # Everything checks out!\n queries.append(\"UPDATE files \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n\n\t # Mark the beam as downloaded in the main database\n\t #mark_beam_downloaded(os.path.split(file['filename'])[-1]))\n\n numverified += 1\n else:\n dlm_cout.outs(\"Verification of %s failed. \\n\" \\\n \"\\tActual size (%d bytes) != Expected size (%d bytes)\" % \\\n (os.path.split(file['filename'])[-1], actualsize, expectedsize))\n \n # Boo... verification failed.\n queries.append(\"UPDATE files \" \\\n \"SET status='failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='verification_failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n jobtracker.query(queries)\n return numverified", "def resetScript(dbConnection, maxConcurrent):\n while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. 
Pings every 2 mins\n time.sleep(120)\n emptyPodcastFolder = Tools.cleanupFolder(\"podcasts\")\n DatabaseInteract.refreshDatabase(dbConnection)", "def process_files(file_list, mdb_database, mdb_user, mdb_pwd, mdb_server, mdb_auth):\n uri = \"mongodb://{0}:{1}@{2}/{3}\".format(mdb_user, mdb_pwd, mdb_server, mdb_auth)\n db_name = '{0}'.format(mdb_database)\n client = pymongo.MongoClient(uri)\n db = client[db_name]\n raw_msg = db['raw_messages']\n total_processed = 0\n total_queries = 0\n total_error = 0\n unique_queries_to_add = []\n queries_hash = set()\n\n for file_name in file_list:\n print('--> processing file: {0}'.format(file_name))\n with open(file_name) as f:\n lines = f.readlines()\n total_queries += len(lines)\n for i, line in enumerate(lines):\n try:\n jsonn = json.loads(line)\n jsonn['insertTime'] = get_timestamp()\n doc_hash = calculate_hash(jsonn)\n if doc_hash not in queries_hash:\n unique_queries_to_add.append(jsonn)\n queries_hash.add(doc_hash)\n total_processed += 1\n except Exception as e:\n print('ERROR: file {0} line {1} --- {2}'.format(file_name, i, e))\n total_error += 1\n\n total_unique_queries = len(unique_queries_to_add)\n print('- Total processed: {0} from {1}'.format(total_processed, total_queries))\n print('- Total unique queries: {0} '.format(total_unique_queries))\n print('- Total errors: {0}'.format(total_error))\n print('- Adding queries to MongoDB')\n for jsonn in tqdm(unique_queries_to_add):\n raw_msg.insert_one(jsonn)", "def ifAlreadyDone(self, cxRepo, schemaRepo, schema, tablename):\n logging.debug(f\"\"\"check if {schema}.{tablename} has been analyzed\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name from {schemaRepo}.tablediff where lower\n (table_name) = lower('{tablename}') and schema1 = '{schema}' and\n server1_status = 'ready' and server1_status = 'ready' and result in\n ('ready', 'init')\"\"\"\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n row = curs.fetchone()\n if row is None:\n return 1\n else:\n return 0", "def should_start_analysis(self):\n return len(self.task_queue) >= self.bulk_size", "def compile_audio_and_transcripts(trans_dict, n_segs, as_client, file_name):\n\n db = Postgres_Connect().connection\n # db = db_connect()\n\n for (ep_id, trans_id) in trans_dict.items():\n transcript = compile_episode_transcript(trans_id, db)\n\n if len(transcript) == 0:\n print('Unable to find transcript ID {}'\n ' in AS database'.format(trans_id))\n continue\n dl_audio_and_segment(transcript, ep_id, as_client, n_segs, file_name)", "def validate_and_submit(self, filename):\n\n matches = [p for p in self.process_list[self.name] if filename == p.source]\n if filename not in self.transfer_queue[self.name] and not matches:\n t = threading.Thread(target=self.is_stable, args=(filename,))\n t.setDaemon(True)\n t.start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Waits for the running transcription processes to end (2 min intervals). \n Then deletes everything in the 'podcasts' folder, parses all transcripts, and updates the databases
def resetScript(dbConnection, maxConcurrent): while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins time.sleep(120) emptyPodcastFolder = Tools.cleanupFolder("podcasts") DatabaseInteract.refreshDatabase(dbConnection)
[ "def transcribe_proc():\n while True:\n # Get result of transcription\n transcribe_result = transcriber.transcribe_stream(\n audio_stream(), sample_rate, sample_width, channels\n )\n\n _LOGGER.debug(\"Transcription result: %s\", transcribe_result)\n\n transcribe_result = transcribe_result or Transcription.empty()\n transcribe_dict = dataclasses.asdict(transcribe_result)\n transcribe_dict[\"timeout\"] = is_timeout\n\n print_json(transcribe_dict)\n transcription_printed.set()", "def cleanup(self):\n self.all_wav_to_mp3()\n self.past_songs_db.close()\n self.move_tracks_to_music_folder( )\n self.delete_leftovers()\n print \"Cleanup finished\"", "async def cleanup():", "def transcribeAll(service, url, fileName):\n if(service == \"omny.fm\"):\n url = url.replace(\".mp3\",\"\") + \".mp3\"\n subprocess.Popen(\"wget -c -O ./podcasts/\" + fileName + \".mp3 \" + url + \" && sleep 40 && ffmpeg -i ./podcasts/\"\n + fileName + \".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/\" + fileName + \".wav && sleep 10 && rm ./podcasts/\" \n + fileName + \".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false \"\n + \"--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 \"\n + \"--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id\" + fileName \n + \" utterance-id\" + fileName + \"|' 'scp:echo utterance-id\" + fileName + \" ./podcasts/\" + fileName + \".wav|' 'ark:/dev/null' &\", shell=True)", "def clearPandoraTailSongs(self):\n songs = list((yield self.mpd().playlistinfo()))\n status = (yield self.mpd().status())\n\n for s in reversed(songs):\n playingNow = (status['state'] == 'play' and\n int(s['pos']) == int(status.get('song', -1)))\n if not playingNow and isPandoraUrl(s['file']):\n self.mpd().deleteid(int(s['id']))\n else:\n break", "def clean_tasks(self):\n remove = []\n\n for task in self.tasks:\n if task['process'].poll() is not None:\n remove.append(task['id'])\n if task['type'] == 'streamlink':\n self.streamlink_ended(task)\n elif task['type'] == 'ffmpeg':\n self.ffmpeg_ended(task)\n else:\n if self.cycle % 2 == 0:\n size = os.path.getsize(task['file'])\n if size == task['size']:\n remove.append(task['id'])\n task['process'].terminate()\n log(\"Process stuck: \", self, 10, task['id'])\n self.streamlink_ended(task)\n else:\n task['size'] = size\n\n if len(remove) > 0:\n log(\"Remove tasks: \", self, 10, remove)\n temp = [item for item in self.tasks if item['id'] not in remove]\n self.tasks = temp", "def load_transcriptions(data_dir):\n\n def extract_transcriptions(talk_title, data_path):\n path1 = \"https://www.ted.com/talks/%s/transcript\" % talk_title\n r1 = urllib.urlopen(path1).read()\n soup1 = bs(r1, \"html.parser\")\n df1 = pd.DataFrame()\n for i in soup1.findAll('link'):\n if i.get('href') != None and i.attrs['href'].find('?language=') != -1:\n lang = i.attrs['hreflang']\n path2 = i.attrs['href']\n r2 = urllib.urlopen(path2).read()\n soup2 = bs(r2, \"html.parser\")\n time_frame = []\n text_talk = []\n\n for j in soup2.findAll('span', class_='talk-transcript__fragment'):\n time_frame.append(j.attrs['data-time'])\n text_talk.append(j.text.replace('\\n', ' '))\n\n df2 = pd.DataFrame()\n df2[lang] = text_talk\n df2[lang + '_time_frame'] = time_frame\n df1 = pd.concat([df1, df2], axis=1)\n df1.to_csv(os.path.join(data_path, 'orig', '%s.csv' % talk_title), sep='\\t', encoding='utf-8')\n\n if not os.path.exists(os.path.join(data_dir, 'orig')):\n 
os.mkdir(os.path.join(data_dir, 'orig'))\n\n # Load titles\n title_df = load_titles(data_dir)\n files = []\n print(\"\\tLoading ted talk transcriptions...\")\n for doc_id, row in title_df.iterrows():\n title = row['title']\n orig_path = os.path.join(data_dir, 'orig', '%s.csv' % title)\n if not os.path.exists(orig_path):\n extract_transcriptions(title, data_dir)\n orig_df = pd.read_csv(orig_path, sep='\\t', encoding='utf-8', index_col=0)\n orig_df['sent_id'] = orig_df.index\n orig_df['doc_id'] = pd.Series([doc_id] * len(orig_df), index=orig_df.index)\n\n files.append(orig_df)\n df = pd.concat(files, ignore_index=True)\n\n # languages list\n #languages = [c for c in df.columns.values if not c.endswith('_time_frame') and not in ['sent_id', 'doc_id']]\n languages = []\n\n # Save files by language\n for lang in df:\n if lang.endswith('_time_frame') or lang in ['doc_id', 'sent_id']:\n continue\n languages.append(lang)\n path = os.path.join(data_dir, 'raw', '%s.csv' % lang)\n if not os.path.exists(path):\n filtered = df[pd.notnull(df[lang])][lang]\n filtered.to_csv(path, sep='\\t', encoding='utf-8', index=False, header=False)\n\n print('\\t\\t%d languages extracted.' % len(languages))", "def cleaning_loop(clean_event):\n logging.info(\"Started export job cleaning thread\")\n while True:\n clean_event.wait(720)\n clean_event.clear()\n\n delete_old_export_jobs()", "def transcribe_from_folder(path2folder):\n for fname in os.listdir(path2folder):\n if fname[-3:].lower() == \"mp3\":\n print(\"\\nstart processing {}\".format(fname))\n transcribe_from_file(\n os.path.join(path2folder, fname),\n os.path.join(path2folder, fname[:-3] + \"txt\"),\n )\n print(\"finish processing {}\\n\".format(fname))", "def cleaner():\n session = Session()\n while True:\n _database_operations.purge_old_jobs(session)\n time.sleep(30)", "async def cleanup(self) -> None:\n self._logger.debug(\"running\")\n for task in self._tasks:\n task.cancel()\n await self._rs_dev_scanner.cleanup()\n await self._cam_scanner.cleanup()\n awaitables = list()\n for dev in self._devs.values():\n awaitables.append(create_task(dev.cleanup(True)))\n for awaitable in awaitables:\n await awaitable\n if self._saving_flag.is_set():\n await self._done_saving_flag.wait()\n self._logger.debug(\"done\")", "def process_transcript(transcript_label):\n transcript_key = f\"{transcript_label}.json\"\n\n # Load Transcribe output from S3.\n raw_transcript = get_transcribe_output(transcript_key)\n\n # Parse to assign speaker parts.\n speaker_parts = assign_speakers(raw_transcript)\n\n # Identify Karen and Georgia.\n assigned = karen_or_georgia(speaker_parts)\n\n # Update the full transcript.\n build_transcript(assigned)\n\n # Upload the latest transcript to S3.\n s3 = boto3.resource(\"s3\")\n s3.Bucket(os.getenv(\"S3_BUCKET\")).upload_file(\"main_transcript.txt\", \"main_transcript.txt\")", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. 
Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "async def clean_up():\n # Load Settings\n settings = await fetch_settings()\n\n try:\n if settings[\"previous version\"] == settings[\"version\"]:\n await upgrade()\n except KeyError:\n await upgrade()\n\n old_version = settings[\"previous version\"]\n new_version = settings[\"version\"]\n\n if float(new_version) <= 1.2:\n # Deleting repeats\n connection = await connect()\n repeats = await connection.fetch(f\"\"\"\n SELECT * FROM \"{settings[\"table\"]}\"\n WHERE \"UID\" IN (SELECT \"UID\" FROM \"{settings[\"table\"]}\" GROUP BY \"UID\" HAVING COUNT(*) > 1);\n \"\"\")\n\n uniques = {}\n removed = []\n\n for article in repeats:\n if article[\"UID\"] in uniques.keys():\n removed.append(uniques[article[\"UID\"]][\"ID\"])\n uniques[article[\"UID\"]] = article\n\n for article_id in removed:\n await connection.execute(f\"\"\"\n DELETE FROM \"{settings[\"table\"]}\"\n WHERE \"ID\" = {article_id};\n \"\"\")\n\n # Fixing IDs\n all_articles = await connection.fetch(f\"\"\"\n SELECT * FROM \"{settings[\"table\"]}\";\n \"\"\")\n\n transaction = connection.transaction()\n await transaction.start()\n\n try:\n # Empty Table\n await connection.execute(f\"\"\"\n DELETE FROM \"{settings[\"table\"]}\";\n \"\"\")\n\n # Reset ID Column\n await connection.execute(f\"\"\"\n ALTER SEQUENCE \"{settings[\"table\"]}_ID_seq\"\n RESTART WITH 1\n \"\"\")\n\n # Reinsert Articles\n for article in all_articles:\n text = unquote(article[\"Text\"].replace(\"'\", \"''\"))\n\n date_released = article[\"dateReleased\"]\n if date_released.year >= 3300:\n date_released = date_released.replace(year=(article[\"dateReleased\"].year - GAME_YEAR_OFFSET))\n\n title = article[\"Title\"].strip().replace(\"'\", \"''\")\n if title == \"\" or title is None:\n title = \"No Title Available\"\n\n await connection.execute(f\"\"\"\n INSERT INTO \"{settings[\"table\"]}\" (\"Title\", \"UID\", \"dateReleased\", \"dateAdded\", \"Text\")\n VALUES ($1, $2, $3, $4, $5);\n \"\"\", title, article[\"UID\"], date_released, article[\"dateAdded\"], text)\n except Exception as e:\n print(\"\\n\\nProcess failed due to exception. 
Reverting.\\n\\n\")\n await transaction.rollback()\n raise e\n\n else:\n await transaction.commit()\n\n await connection.close()\n\n settings = await fetch_settings()\n settings[\"previous version\"] = settings[\"version\"]\n\n with open(\"Settings.json\", \"w\") as file:\n json.dump(settings, file, indent=2)", "def runAutoCheck(dbConnection, maxConcurrent):\n # checks if any shows are pending.\n fileContent = DatabaseInteract.checkPre(dbConnection)\n if(len(fileContent) > 0 and Tools.numRunningProcesses() < maxConcurrent):\n cursor = dbConnection.cursor()\n cursor.execute(\"UPDATE transcriptions SET pending = TRUE WHERE id = '\" + str(fileContent[1]) + \"';\")\n dbConnection.commit()\n cursor.close()\n url = fileContent[0]\n indexID = str(fileContent[1]) # get the ID instead of the filename\n service = str(fileContent[3])\n # podcastName = fileContent[2]\n Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done", "def cleanup(self):\n deleted_mp3s = 0\n deleted_dirs = 0\n \n empty_dirs = []\n for root, dirs, files in os.walk(self.path):\n if not dirs and files in ([], [TAGS_XML]):\n # empty directory or contains only the toptags xml file\n empty_dirs.append(root)\n continue\n \n for filename in files:\n name, ext = os.path.splitext(filename)\n if ext != \".mp3\":\n continue\n \n mp3path = os.path.join(root, filename)\n mp3size = os.path.getsize(mp3path)\n if mp3size<=self.cfg.mp3_del_min_size:\n print \">>> File smaller than %sBytes:\" % self.cfg.mp3_del_min_size\n print \"path:\", root\n print \"file name:\", filename\n print \"file size: %iBytes\" % mp3size\n print \"delete mp3 file!\"\n os.remove(mp3path)\n deleted_mp3s += 1\n \n empty_artist = False\n for path in empty_dirs:\n if TAGS_XML in os.listdir(path):\n print \"delete toptags.xml file.\"\n os.remove(self.get_feed_fs_path())\n \n try:\n os.removedirs(path)\n except OSError, err:\n if self.cfg.debug:\n print \"debug: %s\" % err\n else:\n if path == self.path:\n print \">>> Empty artist directory removed:\"\n empty_artist = True\n else:\n print \">>> empty directory removed:\"\n print path\n deleted_dirs += 1\n \n \n return empty_artist, deleted_mp3s, deleted_dirs", "def __removing_loop(self) -> None:\r\n\r\n # repeat until stop flag is set\r\n while not self.__stopper.wait(self.CLEANUP_EXPIRED_INTERVAL):\r\n now = int(datetime.now(self.__tz).timestamp())\r\n log.debug('Removing...')\r\n\r\n # iterate through database and remove expired encounters\r\n for enc_id, despawn_time in self.__pokes_db.copy().items():\r\n if despawn_time - now < 5:\r\n del self.__pokes_db[enc_id]", "def delete_processed_emails(self, conn):\n for email_id in self.uids:\n # Move to Trash (deleted after 30 days). Alternatively, we could also flag trash as \\Deleted, and expunge.\n # However, the tester may want to view the email themselves, so it seems ok to leave it in Trash.\n status, data = conn.uid('store', email_id, '+X-GM-LABELS', '\\\\Trash')\n\n # Check whether the move was completed successfully, and log it if not\n if status != 'OK':\n message = 'Failed to delete email with IMAP UID of %d (in inbox of user %s)' % (email_id, self.email)\n self.log.warning(message)", "def __keep_update_tweets(interval: int) -> None:\n mzk.set_process_name(f'MocaTwitterUtils({core.VERSION}) -- keep-update-tweets')\n while True:\n mzk.call(\n f'nohup {mzk.executable} \"{core.TOP_DIR.joinpath(\"moca.py\")}\" update-tweets &> /dev/null &', shell=True\n )\n mzk.sleep(interval)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the content of a nohup output file. The file can be essentially unlimited in size,
    but it is read in chunks of at most 900,000 characters per readline call. This then returns the following...

        index 0  a list of all the occurrences of realTimeFactor
        index 1  a list of all the occurrences of transcriptions
        index 2  a list of all the occurrences of the transcription ID
        index 3  a list of all the occurrences of the total transcription time

        Example usage
        parsedContent = nohupTranscriptionContent("ok.txt")
def nohupTranscriptionContent(filePath):
    try:
        continu = True
        fileContent = ""
        f = open(filePath, 'r')
        while (continu):
            temp = f.readline(900000)
            if(len(temp) == 0):
                continu = False
            else:
                fileContent += temp
        results = []
        realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
        results.append(realTimeFactor)
        transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
        transcriptionList = []
        transcriptionIDList = []
        for item in transcription:
            if(len(item[1]) > 1000):
                transcriptionIDList.append(item[0])
                transcriptionList.append(item[1])
        results.append(transcriptionList)
        results.append(transcriptionIDList)
        transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
        results.append(transcriptionTime)
        return results
    except Exception as e:
        Tools.writeException("nohupTranscriptionContent", e)
        return False
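A minimal usage sketch for the function above, assuming it is importable as defined here; "nohup.out" is a hypothetical log path and the unpacking follows the index layout described in the query.

# Hypothetical usage sketch -- "nohup.out" is a placeholder path.
parsed = nohupTranscriptionContent("nohup.out")
if parsed:
    realTimeFactors, transcriptions, transcriptionIDs, transcriptionTimes = parsed
    for utt_id, text in zip(transcriptionIDs, transcriptions):
        print(utt_id, text[:80])  # first 80 characters of each transcription
else:
    print("parsing failed; see the exception log written by Tools.writeException")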
[ "def fileTranscriptionContent(filePath):\n try:\n continu = True\n f = open(filePath, 'r')\n fileContent = \"\"\n while (continu):\n temp = f.readline(300000)\n if(len(temp) == 0):\n continu = False\n else:\n fileContent += temp\n results = []\n f.close()\n url = re.findall(r'URL:(.*?)\\n', fileContent)\n results.append(url)\n realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)\n results.append(realTimeFactor)\n transcription = re.findall(r'utterance-id1 (.*?)\\n', fileContent)\n for item in transcription:\n if(len(item) > 500):\n results.append(item.replace(\"'\", \"''\"))\n if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):\n return results\n else:\n Tools.writeException(\"fileTranscriptionContent\", \"ERROR attempted to parse \" + filePath + \" but got \" + str(results))\n return False\n except Exception as e:\n Tools.writeException(\"fileTranscriptionContent\", e)", "def parse_chat_content(read_content):\n # Removes the encoding markup\n read_content = read_content.lstrip('\\ufeff')\n # Removes whitespace and tabs from the strings\n # adding them to the result variable\n parsed_content = []\n for line in read_content.splitlines():\n line = line.strip(' \\t')\n parsed_content.append(line)\n return parsed_content", "def readTotitle(fh):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def read_subtitles(self):\n\n # Group 1: index, Group 2: Start Time, Group 3: End Time, Group 4: Text\n\n patterns = [\n r\"(\\d+)\\n(\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d) --> (\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d)\\n((?:.+\\n)*.+)\",\n r\"(\\d+)\\r\\n(\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d) --> (\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d)\\r\\n((?:.+\\r\\n)*.+)\",\n # Reports pattern\n r\"(\\d+)\\r(\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d) --> (\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d)\\n((?:.+\\r)*.+)\"\n ]\n\n for pattern in patterns:\n re_subs = re.findall(pattern, self.subtitles, re.M | re.I)\n if(len(re_subs) > 1):\n self.re_subs = re_subs\n return\n\n raise Exception(\n f're_subs length is {len(re_subs)}. 
Maybe the regex pattern is falty?')", "def parse_transcript(self):\n\t\t\n\t\toutput_text = tempfile.NamedTemporaryFile(mode = 'r')\n\t\twith tempfile.NamedTemporaryFile(delete=False) as input_text:\n\t\t\tinput_text.write(self.transcript_string.encode('utf-8'))\n\t\t\t#to write to the file, convert to utf-8; to use for jinja, convert it back to unicode\n\n\t\tos.popen(\"python vocab_resources/splitta/sbd.py -m vocab_resources/splitta/model_nb -t \" + input_text.name +\" -o \" + output_text.name)\n\t\tos.remove(input_text.name)\n\n\t\twith open(output_text.name) as parsed_text:\n\t\t\tsentence_index = {}\n\t\t\tfor index, sentence in enumerate(parsed_text):\n\t\t\t\tsentence = sentence.rstrip()\n\t\t\t\tsentence_index[index] = sentence\n\n\t\tsentence_index[len(sentence_index)] = \"Unable_to_find_matching_sentence\" #avoid outliers\n\t\tself.sentence_index = sentence_index", "def parse_tidy_output() -> None:\n notification = None\n with open(\"clang_tidy_report.txt\", \"r\", encoding=\"utf-8\") as tidy_out:\n for line in tidy_out.readlines():\n match = re.match(NOTE_HEADER, line)\n if match is not None:\n notification = TidyNotification(match.groups())\n GlobalParser.tidy_notes.append(notification)\n elif notification is not None:\n notification.fixit_lines.append(line)", "def haikus_for_document(filename):\n text = get_text(filename)\n haikus = []\n # SpaCy has a maximum text size of 1,000,000 characters.\n # Let's use one fewer to be on the safe side.\n for chunk in chunks(text,999_999): # this underscore syntax was introduced in Python 3.6\n doc = nlp(chunk)\n for sent in doc.sents:\n haiku = check_haiku(sent)\n if haiku:\n haikus.append(haiku)\n return haikus", "def extract_captions(file_name : str) -> List[Dict]:\n\n res = parse(file_name)\n\n elements = res.getElementsByTagName('text')\n\n results = []\n\n for ele in elements:\n start = ele.getAttribute('start')\n duration = ele.getAttribute('dur')\n text = getText(ele.childNodes)\n text = text.replace('\\n', ' ')\n\n results.append( {'start' : start, \n 'duration' : duration,\n 'text' : text} )\n return results", "def read_transcription_file(file_path, audio_file_path):\n with open(file_path) as in_file:\n last_timestamp = 0\n res = []\n transcription = \"\"\n for line in in_file:\n time_stamp_match = re.match(\"\\[([0-9\\]+\\.[0-9]+)\\]\", line)\n #if this regex matched then the line is a timestamp\n if time_stamp_match:\n timestamp = float(time_stamp_match.group(1))\n if transcription and transcription.strip() not in ['(())', \"<no-speech>\"]:\n single_instance = {\"start_time\": last_timestamp, \n \"end_time\": timestamp,\n \"transcription\": transcription,\n \"audio_file\" : audio_file_path}\n res.append(single_instance)\n last_timestamp = timestamp\n else:\n last_timestamp = timestamp # this handles silence at beginning\n else:\n transcription = line.strip()\n \n return res", "def chunk_file(content):\n if len(content) < MAX_TEXT_LENGTH:\n return [content,]\n else:\n chunks = []\n while len(content) > MAX_TEXT_LENGTH:\n chunk = get_chunk(content, MAX_TEXT_LENGTH)\n chunks.append(chunk)\n content = content[len(chunk):]\n chunks.append(content)\n return chunks", "def parse_sambamba_output(self):\r\n exons = []\r\n with open (self.file_path, \"r\") as sambamba_output:\r\n for line in sambamba_output:\r\n if line.startswith('#'):\r\n fields = line.strip().split()\r\n else:\r\n description = list(line.strip().split())\r\n i = 0\r\n exon_dict = {}\r\n while i<len(fields):\r\n exon_dict[fields[i]] = description[i]\r\n i += 1\r\n 
exons.append(exon_dict)\r\n return exons", "def create_hn_text(self):\n text_list = [f\"Top {STORIES_NUMBER} from HackerNews:\"]\n sorted_stories = self.get_top_stories()\n # Format slack text\n for story in sorted_stories:\n text_list.append(\n \"*<{}|{}>* - <{}|{}>\".format(\n \"{}/item?id={}\".format(HN_URL, story[\"id\"]),\n story[\"score\"],\n # Ask HN type posts do not have 'url' key, so using get to return None\n story.get('url'),\n story[\"title\"],\n )\n )\n self.logger.debug(text_list)\n return \"\\n>\".join(text_list)", "def parseBlobIntoLines(txt):\n\t#the zeroth line is the line that starts the blob and ends immediatly before the\n\t#first instance of [HH:MM:SS [AP]M]\n\tthischat=Chat()\n\tzerothLine=re.match(r'(.*)(?=\\[[0-9])*',txt,re.M|re.I).group(1)\n\ttxt=txt.replace(zerothLine,\"\")\n\twhile True:\n\t\tif (txt[0]=='\\n'):\n\t\t\ttxt=txt.replace('\\n','',1)\n\t\telse:\n\t\t\tbreak\n\t##\n\t## What may happen is that the attachment may bugger things up, so this should deal with that\n\tif not (re.match(r'^\\[',txt,re.M|re.I)):\n\t\tnextbracket = txt.find('[')\n\t\ttxt=txt[nextbracket:]\n\n\tfirstlineTimestamp = re.match(r'.*(\\[[0-9][0-9]:[0-9][0-9]:[0-9][0-9] [AP]M\\]).*',txt,re.M|re.I).group(1)\n\t## I am keeping these re expressions because I don't understand RE and they\n\t##\tmay be helpful to figure another solution\n\t## a=re.match(r'.*([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([AP]M\\]).*',t1,re.M|re.I) ##\n\t## a=re.match(r'.*(\\[[0-9][0-9]:[0-9][0-9]:[0-9][0-9] [AP]M\\]).*',t1,re.M|re.I).group(1)\n\ttxt=txt.replace(firstlineTimestamp,'',1)\n\tnextTimeStamp=txt.find('\\n[')\n\tfirstline = txt[:nextTimeStamp]\n\ttxt=txt.replace(firstline, '', 1)\n\n\tthischat.lines.append(convertZerothLine(zerothLine,thischat))\n\tthischat.Agent=thischat.lines[0].whoSaidIt\n\tthischat.lines.append(convertFirstLine(thischat, txt[:14],firstline))\n\n\n\n\twhile(len(txt)>0):\n\t\t(messageTimeStamp, thisMessage, txt) = getNextLine(txt)\n\t\tconvertNormalLine(messageTimeStamp, thisMessage, thischat)\n\n\treturn thischat\n\t\t########################################################################\n\t\t##\tThis method breaks off the next interaction and returns two peices\n\t\t########################################################################", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. 
---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)", "def transcript_lines(transcript_text):\n lines = []\n for line in transcript_text.splitlines():\n if line.strip() and line.strip()[0] != '#':\n split = line.split(':')\n speaker = split[0][-1]\n utterance = ' '.join(split[1:]).strip()\n lines.append((speaker, utterance))\n return lines", "def ParseSeqFile(FilePath):\n SeqFile = rSeqFile(FilePath)\n TidyFile = TidyLines(SeqFile)\n \n result = []\n\n for line in TidyFile:\n t = ( ProcessLine(line) )\n result.append(t)\n return(result)", "def parse(self):\n\n with open(self.fasta_file) as file:\n content = file.readlines()\n\n sequences = []\n sequence_ids = []\n sequence = []\n for line in content:\n if line.startswith('>'):\n sequence_ids.append(line.strip())\n if len(sequence) != 0:\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n sequence = []\n elif line.startswith(\"A\") or line.startswith(\"T\") or \\\n line.startswith(\"C\") or line.startswith(\"G\"):\n sequence.append(line.strip())\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n\n if len(sequences) > MAX_SEQUENCES:\n print WARNING_MAX_SEQUENCES_EXCEEDED\n\n return sequences, sequence_ids", "def process_corpus(lines):\n count = 0\n documents = []\n\n while len(lines) != 0:\n if count % 1000 == 0:\n print('\\t processing document ' + str(count))\n\n document = []\n topic = get_topic(lines.pop(0))\n date = get_date(lines.pop(0))\n title = get_title(lines.pop(0))\n actual_contents = get_contents(lines.pop(0))\n\n contents = clean_contents(actual_contents)\n\n document.append(topic)\n document.append(date)\n document.append(title)\n document.append(actual_contents)\n document.append(contents)\n\n documents.append(document)\n count += 1\n\n return documents", "def file_to_subtitles(filename):\n times_texts = []\n current_times = None\n current_text = \"\"\n with open(filename,'r', encoding = 'UTF-8') as f:\n for line in f:\n times = re.findall(\"([0-9]*:[0-9]*:[0-9]*,[0-9]*)\", line)\n if times:\n current_times = [cvsecs(t) for t in times]\n elif line.strip() == '':\n times_texts.append((current_times, current_text.strip('\\n')))\n current_times, current_text = None, \"\"\n elif current_times:\n current_text += 
line\n return times_texts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the content of a transcription file. The file can be essentially unlimited in size,
    but it is read in chunks of at most 300,000 characters per readline call. This then returns the following...

        index 0  url
        index 1  realTimeFactor
        index 2  transcription
def fileTranscriptionContent(filePath):
    try:
        continu = True
        f = open(filePath, 'r')
        fileContent = ""
        while (continu):
            temp = f.readline(300000)
            if(len(temp) == 0):
                continu = False
            else:
                fileContent += temp
        results = []
        f.close()
        url = re.findall(r'URL:(.*?)\n', fileContent)
        results.append(url)
        realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
        results.append(realTimeFactor)
        transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
        for item in transcription:
            if(len(item) > 500):
                results.append(item.replace("'", "''"))
        if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
            return results
        else:
            Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
            return False
    except Exception as e:
        Tools.writeException("fileTranscriptionContent", e)
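A minimal usage sketch for the function above, assuming it is importable as defined here; the file name is a placeholder. Note that indices 0 and 1 are findall result lists, while index 2 is the transcription string itself.

# Hypothetical usage sketch -- "transcription1.txt" is a placeholder path.
content = fileTranscriptionContent("transcription1.txt")
if content:
    url, realTimeFactor, transcription = content[0], content[1], content[2]
    print("URL:", url[0])                          # findall returns a list; take the first match
    print("real-time factor:", realTimeFactor[0])
    print("transcript length:", len(transcription))
else:
    print("file could not be parsed")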
[ "def nohupTranscriptionContent(filePath):\n try:\n continu = True\n fileContent = \"\"\n f = open(filePath, 'r')\n while (continu):\n temp = f.readline(900000)\n if(len(temp) == 0):\n continu = False\n else:\n fileContent += temp\n results = []\n realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)\n results.append(realTimeFactor)\n transcription = re.findall(r'utterance-id(.*?) (.*?)\\n', fileContent)\n transcriptionList = []\n transcriptionIDList = []\n for item in transcription:\n if(len(item[1]) > 1000):\n transcriptionIDList.append(item[0])\n transcriptionList.append(item[1])\n results.append(transcriptionList)\n results.append(transcriptionIDList)\n transcriptionTime = re.findall(r'seconds / (.*?) seconds\\.', fileContent)\n results.append(transcriptionTime)\n return results\n except Exception as e:\n Tools.writeException(\"nohupTranscriptionContent\", e)\n return False", "def read_transcription_file(file_path, audio_file_path):\n with open(file_path) as in_file:\n last_timestamp = 0\n res = []\n transcription = \"\"\n for line in in_file:\n time_stamp_match = re.match(\"\\[([0-9\\]+\\.[0-9]+)\\]\", line)\n #if this regex matched then the line is a timestamp\n if time_stamp_match:\n timestamp = float(time_stamp_match.group(1))\n if transcription and transcription.strip() not in ['(())', \"<no-speech>\"]:\n single_instance = {\"start_time\": last_timestamp, \n \"end_time\": timestamp,\n \"transcription\": transcription,\n \"audio_file\" : audio_file_path}\n res.append(single_instance)\n last_timestamp = timestamp\n else:\n last_timestamp = timestamp # this handles silence at beginning\n else:\n transcription = line.strip()\n \n return res", "def parse_transcript(self):\n\t\t\n\t\toutput_text = tempfile.NamedTemporaryFile(mode = 'r')\n\t\twith tempfile.NamedTemporaryFile(delete=False) as input_text:\n\t\t\tinput_text.write(self.transcript_string.encode('utf-8'))\n\t\t\t#to write to the file, convert to utf-8; to use for jinja, convert it back to unicode\n\n\t\tos.popen(\"python vocab_resources/splitta/sbd.py -m vocab_resources/splitta/model_nb -t \" + input_text.name +\" -o \" + output_text.name)\n\t\tos.remove(input_text.name)\n\n\t\twith open(output_text.name) as parsed_text:\n\t\t\tsentence_index = {}\n\t\t\tfor index, sentence in enumerate(parsed_text):\n\t\t\t\tsentence = sentence.rstrip()\n\t\t\t\tsentence_index[index] = sentence\n\n\t\tsentence_index[len(sentence_index)] = \"Unable_to_find_matching_sentence\" #avoid outliers\n\t\tself.sentence_index = sentence_index", "def ParseSeqFile(FilePath):\n SeqFile = rSeqFile(FilePath)\n TidyFile = TidyLines(SeqFile)\n \n result = []\n\n for line in TidyFile:\n t = ( ProcessLine(line) )\n result.append(t)\n return(result)", "def get_transcript(url):\n path = urlparse(url)\n url_path = re.search(r\"[a-z].*[full-transcript]\", str(path.path)).string.split('/')[1:-1]\n date = '-'.join(url_path[:3])\n title = '-'.join(url_path[3].split('-')[2:])\n name = '-'.join(url_path[3].split('-')[:2])\n fname = f\"{name}/{'_'.join([name, date, title])}.txt\"\n\n print(name, title)\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n raw_html = soup.find('div', attrs = {'class':'elementor-widget-theme-post-content'})\n\n # TODO: this is some test text\n # text = \"content\"\n\n text = \"\"\n for p in raw_html.find_all('p'):\n text += f\"{p.get_text()}\\n\"\n \n return [name, date, title, fname, text]", "def parse_plain_text_export(text_file):\n\n text_file.seek(0)\n for line 
in text_file.readlines():\n urls = re.findall(URL_REGEX, line) if line.strip() else ()\n for url in urls:\n yield {\n 'url': url,\n 'timestamp': str(datetime.now().timestamp()),\n 'title': None,\n 'tags': '',\n 'sources': [text_file.name],\n }", "def process_raw_phrases(file_path):", "def get_transcription(url):\n\n # Checks the format of the URL\n if \"https://www.youtube.com/watch?v=\" in url:\n input_url_id = url.replace(\"https://www.youtube.com/watch?v=\", \"\")\n elif \"https://youtu.be/\" in url:\n input_url_id = url.replace(\"https://youtu.be/\", \"\")\n\n # Creates a blank list to iterate over\n text_parts = []\n\n # Gets a list of all available transcripts\n try:\n\n list_of_transcripts = YouTubeTranscriptApi.list_transcripts(input_url_id)\n print(\"Checking for Transcriptions...\")\n\n # Checks to see if a manual transcript is created if not, checks to see if a generated one is created\n if 'en-US' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en-US'])\n elif 'en' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en'])\n elif 'en' in list_of_transcripts._generated_transcripts:\n print(\"Auto-Generated Transcription Found.\")\n transcript = list_of_transcripts.find_generated_transcript(['en'])\n\n # Saves the transcript into a variable to iterate over\n raw_transcription = transcript.fetch()\n\n # Indexing of raw transcripts\n iteration_of_raw = 0\n\n # Iterates over each dictionary and extracts 'text' key then appends the blank text_parts list\n for i in raw_transcription:\n indexed_dictionary = raw_transcription[iteration_of_raw]\n text_from_dictionary = indexed_dictionary['text']\n text_parts.append(text_from_dictionary)\n iteration_of_raw += 1\n # Defines how we want each text element to be separated with\n separator_for_each_text = \" \"\n\n # Joins the separator with the text_parts\n clean_transcription = separator_for_each_text.join(text_parts)\n\n # Returns the cleaned transcripts\n return clean_transcription\n\n except:\n print(\"No Transcriptions Found\")\n clean_transcription = \"No Transcriptions Found\"\n return clean_transcription", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def parse(self):\n\n with open(self.fasta_file) as file:\n content = file.readlines()\n\n sequences = []\n sequence_ids = []\n sequence = []\n for line in content:\n if line.startswith('>'):\n sequence_ids.append(line.strip())\n if len(sequence) != 0:\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n sequence = []\n elif line.startswith(\"A\") or line.startswith(\"T\") or \\\n line.startswith(\"C\") or line.startswith(\"G\"):\n sequence.append(line.strip())\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n\n if len(sequences) > MAX_SEQUENCES:\n 
print WARNING_MAX_SEQUENCES_EXCEEDED\n\n return sequences, sequence_ids", "def process_file(self) -> List:\n squad_data = self.json_from_file(self.input_path)['data']\n samples = []\n\n for article in squad_data:\n pars = article[\"paragraphs\"]\n for par in pars:\n context = par[\"context\"]\n\n tokenized_context = self.word_tokenizer.tokenize(context)\n sentences = list(self.sentence_tokenizer.tokenize(context.strip()))\n\n if len(sentences) < 2: # There must be at least two sentences in the paragraph\n continue\n\n for qa in par[\"qas\"]:\n targets = []\n answer = qa[\"answers\"][0]\n question = qa[\"question\"]\n question_id = qa[\"id\"]\n\n tokenized_question = self.word_tokenizer.tokenize(question)\n question_length = len(tokenized_question)\n sample_text = \" \".join(tokenized_question) + \" \"\n\n answer_char_position = answer[\"answer_start\"]\n answer_sentence_index = self.get_sentence_index_from_char_position(answer_char_position, sentences)\n\n found_answer_sentence_in_context = False\n\n # go through all sentences in context\n for sentence_index, sentence in enumerate(sentences):\n\n tokenized_sentence = self.word_tokenizer.tokenize(sentence)\n sample_text += \" \".join(tokenized_sentence) + \" \"\n\n # get token start position for sentence in context\n sentence_pos = self.find_sentence_position_in_context(tokenized_context, tokenized_sentence)\n\n if sentence_pos is None:\n continue\n\n # define sentence token span for jiant target\n start_index = sentence_pos + question_length\n end_index = start_index + len(tokenized_sentence)\n sentence_span = [start_index, end_index]\n\n # if sentence contains answer, set label to \"1\"\n if sentence_index == answer_sentence_index:\n label = \"1\"\n found_answer_sentence_in_context = True\n else:\n label = \"0\"\n\n targets.append(self.create_target(question_length, sentence_span, label))\n\n if not found_answer_sentence_in_context:\n # could not find answer in context, skip this example\n continue\n\n sample = {\"info\": {\"doc_id\": self.DOC_ID, \"q_id\": question_id},\n \"text\": sample_text.strip(),\n \"targets\": targets}\n\n samples.append(sample)\n\n return samples", "def _get_transcript_entries(transcript_directory):\n transcript_files = iglob_recursive(transcript_directory, '*.trans.txt')\n for transcript_file in transcript_files:\n with open(transcript_file, 'r') as f:\n for line in f:\n # Strip included new line symbol\n line = line.rstrip('\\n')\n\n # Each line is in the form\n # 00-000000-0000 WORD1 WORD2 ...\n splitted = line.split(' ', 1)\n yield splitted", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def loadTIText(self, file):\n next = 1\n currentAddr = 0\n startAddr = 0\n segmentdata = []\n #Convert data for MSP430, TXT-File is parsed line by line\n while next >= 1:\n #Read one line\n l = file.readline()\n if not l: break #EOF\n l = l.strip()\n if l[0] == 'q': break\n elif l[0] == '@': #if @ => new address => send frame and set new addr.\n address = int(l[1:],16)\n else:\n for i in l.split():\n value = int(i,16)\n self._set(address, value, bytemode=1)\n address += 1", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = 
content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)", "def load_transcriptions(data_dir):\n\n def extract_transcriptions(talk_title, data_path):\n path1 = \"https://www.ted.com/talks/%s/transcript\" % talk_title\n r1 = urllib.urlopen(path1).read()\n soup1 = bs(r1, \"html.parser\")\n df1 = pd.DataFrame()\n for i in soup1.findAll('link'):\n if i.get('href') != None and i.attrs['href'].find('?language=') != -1:\n lang = i.attrs['hreflang']\n path2 = i.attrs['href']\n r2 = urllib.urlopen(path2).read()\n soup2 = bs(r2, \"html.parser\")\n time_frame = []\n text_talk = []\n\n for j in soup2.findAll('span', class_='talk-transcript__fragment'):\n time_frame.append(j.attrs['data-time'])\n text_talk.append(j.text.replace('\\n', ' '))\n\n df2 = pd.DataFrame()\n df2[lang] = text_talk\n df2[lang + '_time_frame'] = time_frame\n df1 = pd.concat([df1, df2], axis=1)\n df1.to_csv(os.path.join(data_path, 'orig', '%s.csv' % talk_title), sep='\\t', encoding='utf-8')\n\n if not os.path.exists(os.path.join(data_dir, 'orig')):\n os.mkdir(os.path.join(data_dir, 'orig'))\n\n # Load titles\n title_df = load_titles(data_dir)\n files = []\n print(\"\\tLoading ted talk transcriptions...\")\n for doc_id, row in title_df.iterrows():\n title = row['title']\n orig_path = os.path.join(data_dir, 'orig', '%s.csv' % title)\n if not os.path.exists(orig_path):\n extract_transcriptions(title, data_dir)\n orig_df = pd.read_csv(orig_path, sep='\\t', encoding='utf-8', index_col=0)\n orig_df['sent_id'] = orig_df.index\n orig_df['doc_id'] = pd.Series([doc_id] * len(orig_df), index=orig_df.index)\n\n files.append(orig_df)\n df = pd.concat(files, ignore_index=True)\n\n # languages list\n #languages = [c for c in df.columns.values if not c.endswith('_time_frame') and not in ['sent_id', 'doc_id']]\n languages = []\n\n # Save files by language\n for lang in df:\n if lang.endswith('_time_frame') or lang in ['doc_id', 'sent_id']:\n continue\n languages.append(lang)\n path = os.path.join(data_dir, 'raw', '%s.csv' % lang)\n if not os.path.exists(path):\n filtered = df[pd.notnull(df[lang])][lang]\n filtered.to_csv(path, sep='\\t', encoding='utf-8', index=False, header=False)\n\n print('\\t\\t%d languages extracted.' 
% len(languages))", "def read_the_transcript_file(file_path):\n # open and read the transcript file.\n file_obj = open(file_path, 'r')\n # removing unnecessary spaces.\n text = file_obj.read()\n return text", "def loadTIText(self, file):\n next = 1\n startAddr = 0\n segmentdata = []\n #Convert data for MSP430, TXT-File is parsed line by line\n while next >= 1:\n #Read one line\n l = file.readline()\n if not l: break #EOF\n l = l.strip()\n if l[0] == 'q': break\n elif l[0] == '@': #if @ => new address => send frame and set new addr.\n #create a new segment\n if segmentdata:\n self.segments.append( Segment(startAddr, ''.join(segmentdata)) )\n startAddr = int(l[1:],16)\n segmentdata = []\n else:\n for i in l.split():\n segmentdata.append(chr(int(i,16)))\n if segmentdata:\n self.segments.append( Segment(startAddr, ''.join(segmentdata)) )", "def parse_transcripts(transcript_lines):\n LOG.info(\"Parsing transcripts\")\n transcripts = parse_ensembl_transcripts(transcript_lines)\n\n # Since there can be multiple lines with information about the same transcript\n # we store transcript information in a dictionary for now\n parsed_transcripts = {}\n # Loop over the parsed transcripts\n for tx in transcripts:\n tx_id = tx[\"ensembl_transcript_id\"]\n ens_gene_id = tx[\"ensembl_gene_id\"]\n\n # Check if the transcript has been added\n # If not, create a new transcript\n if not tx_id in parsed_transcripts:\n tx_info = {\n \"chrom\": tx[\"chrom\"],\n \"transcript_start\": tx[\"transcript_start\"],\n \"transcript_end\": tx[\"transcript_end\"],\n \"mrna\": set(),\n \"mrna_predicted\": set(),\n \"nc_rna\": set(),\n \"ensembl_gene_id\": ens_gene_id,\n \"ensembl_transcript_id\": tx_id,\n }\n parsed_transcripts[tx_id] = tx_info\n\n tx_info = parsed_transcripts[tx_id]\n # Add the ref seq information\n if tx.get(\"refseq_mrna_predicted\"):\n tx_info[\"mrna_predicted\"].add(tx[\"refseq_mrna_predicted\"])\n if tx.get(\"refseq_mrna\"):\n tx_info[\"mrna\"].add(tx[\"refseq_mrna\"])\n if tx.get(\"refseq_ncrna\"):\n tx_info[\"nc_rna\"].add(tx[\"refseq_ncrna\"])\n\n return parsed_transcripts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes all contents of the specified folder (but not the folder itself).
    Returns True if successful, False if an error was thrown or the number of running processes is not zero.
def cleanupFolder(folderName):
    try:
        if(Tools.numRunningProcesses() == 0):
            process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
            return True
        else:
            return False
    except Exception as e:
        Tools.writeException("cleanupFolder", e)
        return False
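A minimal usage sketch for the function above; "podcasts" matches the working folder used by the other helpers in this set, but any folder name under the current directory would work the same way.

# Hypothetical usage sketch -- only clears the folder when no decoders are running.
if cleanupFolder("podcasts"):
    print("podcasts folder emptied")
else:
    print("cleanup skipped: transcriptions still running or an error occurred")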
[ "def del_folder(folder):\n if os.path.exists(folder):\n print 'Deleted: ' + folder\n shutil.rmtree(folder)\n else:\n print 'Not exist: ' + folder", "def empty_trash():\n drive_service().files().emptyTrash().execute()\n\n return True", "def directoryDeleteContent (\n \n self,\n path = None,\n ) :\n\n path = self.normalizePath( path, normalize = False )\n \n if path is None : return False\n \n entries = self.directoryContent( path, annotate = True )\n\n if entries is None : return False\n\n for entry in entries :\n\n # does nothing for current and parent directories\n # normally, they are not in directoryContent() but...\n\n if entry == os.curdir : continue\n \n if entry == os.pardir : continue\n\n entryPath = self.normalizePath( path + os.sep + entry, normalize = False )\n\n # directory\n\n if entry.endswith( os.sep ) : self.directoryDelete( entryPath )\n \n else : self.fileDelete( entryPath )\n \n \n\n # here, anything OK\n \n return True", "def CleanJobFolder_Subcons(rstdir):# {{{\n flist =[\n \"%s/remotequeue_seqindex.txt\"%(rstdir),\n \"%s/torun_seqindex.txt\"%(rstdir)\n ]\n for f in flist:\n if os.path.exists(f):\n try:\n os.remove(f)\n except:\n pass", "def CleanJobFolder_PRODRES(rstdir):# {{{\n flist =[\n \"%s/remotequeue_seqindex.txt\"%(rstdir),\n \"%s/torun_seqindex.txt\"%(rstdir)\n ]\n for f in flist:\n if os.path.exists(f):\n try:\n os.remove(f)\n except:\n pass", "def rmtree(folder: pathlib.Path):\n for item in folder.iterdir():\n if item.is_dir():\n rmtree(item)\n item.rmdir()\n else:\n item.unlink()", "def folder_delete(folder_to_delete: Path) -> NoReturn:\n\n path_check(folder_to_delete)\n\n for files in folder_to_delete.glob(r\"**/*\"):\n files.unlink()\n\n folder_to_delete.rmdir()", "def clear_folder(folder: str) -> None:\n\n for file in os.listdir(folder):\n file_path = os.path.join(folder, file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)", "def delete_folder(path):\n command = ['rm', '-rf', TEST_DIR]\n file_operation(path, command)", "def CleanJobFolder_Scampi(rstdir):# {{{\n flist =[\n \"%s/remotequeue_seqindex.txt\"%(rstdir),\n \"%s/torun_seqindex.txt\"%(rstdir)\n ]\n for f in flist:\n if os.path.exists(f):\n try:\n os.remove(f)\n except:\n pass", "def directoryPurge (\n \n self,\n path = None,\n mode = None\n ) :\n\n mode = self.string(\n mode,\n texts = [ \"all\", \"normal\", \"other\" ],\n default = \"all\"\n )\n \n # not a directory\n\n if not self.directoryPresent( path ) : return False\n\n # normalizes path\n \n path = self.normalizePath( path, normalize = False )\n\n # checks that it is not a complete volume. 
In this case, there is no last name\n \n name = self.pathLastNameWithExtension( path )\n \n if self.pathDrive( path ) == path.rstrip( os.sep ) : return False\n\n # loop on directory content\n \n entries = self.directoryContent( path, annotate = True )\n\n result = True\n\n for entry in entries :\n\n item = path + os.sep + entry\n\n # decides whether to delete or not\n\n other = ( os.sep + \".\" in item ) or ( os.sep + \"~\" in item )\n\n delete = ( mode == \"all\" ) or ( ( mode == \"other\" ) == other )\n\n\n # file\n\n if not item.endswith( os.sep ) :\n\n if not delete : continue\n\n ok = self.fileDelete( item )\n\n # directory\n \n else :\n\n # must delete: deletes completely\n\n if delete : ok = self.directoryPurge( item, mode = \"all\" )\n\n # special directory, must only delete normal : does nothing\n\n elif other : continue\n \n # normal directory, must only delete others, deletes selectively\n\n else : ok = self.directoryPurge( item, mode = mode )\n \n if not ok : result = False\n\n # deletes directory itself only if there is no error and must delete all\n \n if not mode == \"all\" : return result\n\n if not result : return False\n\n # gets delete rights\n \n try :\n \n os.chmod( path, stat.S_IRWXU )\n\n except Exception, exception :\n\n None\n \n try :\n \n os.rmdir( path )\n \n except Exception, exception :\n\n self.error = str( exception )\n \n return False\n\n return True", "def CleanJobFolder_PconsC3(rstdir):# {{{\n flist =[\n \"%s/remotequeue_seqindex.txt\"%(rstdir),\n \"%s/torun_seqindex.txt\"%(rstdir)\n ]\n for f in flist:\n if os.path.exists(f):\n try:\n os.remove(f)\n except:\n pass", "def rmEmptyDir(path):\n try:\n os.rmdir(path)\n except OSError as exc:\n return False\n return True", "def delete_files(*args):\n errorFlag=False\n for filePath in args:\n status=delete_file(filePath)\n if not status:\n errorFlag=True\n \n return not(errorFlag)", "def delete_with_retry(folder):\n\n folder = os.path.abspath(os.path.normpath(folder))\n\n for _i in range(0, 5):\n try:\n if os.path.exists(folder):\n shutil.rmtree(folder)\n\n return\n except Exception:\n time.sleep(0.1)\n\n print(\"Could not delete directory after 5 attempts: %s\" % folder)\n sys.exit(1)", "def delete_empty_dirs(folder: str, delete_root: bool = True) -> bool:\n all_files = os.listdir(folder)\n file_set = set(all_files)\n for filename in all_files:\n full_filename = os.path.join(folder, filename)\n if not os.path.isdir(full_filename):\n continue\n\n deleted_root = delete_empty_dirs(full_filename)\n if deleted_root:\n file_set.remove(filename)\n\n if len(file_set) == 0 and delete_root:\n os.rmdir(folder)\n return True\n\n return False", "def deleteImageFolder(pause=5):\n try:\n shutil.rmtree(imageFolder)\n except PermissionError:\n # Still busy creating the montage or something. 
Try once more\n time.sleep(pause)\n shutil.rmtree(imageFolder)\n except FileNotFoundError:\n # Folder already gone\n pass", "def delete_with_retry(folder):\n\n for _i in range(0, 5):\n try:\n if os.path.exists(folder):\n shutil.rmtree(folder)\n\n return\n except:\n time.sleep(0.1)\n\n print(\"Could not delete directory after 5 attempts: %s\" % folder)\n sys.exit(1)", "def delete_folder(content, dc: vim.Datacenter, folder_name: str):\n folder = get_obj(content, dc, [vim.Folder], folder_name)\n if folder is None:\n raise ObjectNotFoundError(f'Folder \"{folder_name}\" not found')\n \n # VMs have to be powered off\n for vm in folder.childEntity:\n if isinstance(vm, vim.VirtualMachine):\n try:\n vm.PowerOff()\n except vim.fault.VimFault as f:\n raise DestroyError(f'could not power off virtual machine \"{vm}: {str(f)}\"')\n\n # removes the folder and all children, i.e. VMs, from disk\n # give the power down tasks some time\n sleep(5)\n try:\n folder.Destroy_Task()\n except vim.fault.VimFault as f:\n raise DestroyError(f'could not delete folder \"{folder_name}\": {str(f)}')\n except vmodl.fault.ManagedObjectNotFound:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the number of running transcription processes.
def numRunningProcesses():
    try:
        proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
        np = (len(str(proc.stdout).split("\\n")) - 3)
        if(np == None):
            np = 0
        return np
    except Exception as e:
        Tools.writeException("numRunningProcesses", e)
        return -1
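A minimal polling sketch built on the function above; the 30-second interval is an arbitrary choice, not something the original code prescribes.

import time

# Hypothetical usage sketch: wait until every decoder process has exited.
while numRunningProcesses() > 0:
    time.sleep(30)  # arbitrary polling interval
print("no transcription processes left; safe to clean up")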
[ "def getRunningTaskCount():", "def num_processes():\n return 1", "def get_runs(self) -> int:", "def getWaitingTaskCount():", "def run_count(self) -> int:\n return self._run_count", "def run_count(self):\n return self._run_count", "def GetNumberOfResultsProcessed(self) -> int:\n return self.i", "def number_jobs(self):\n if self.array:\n return len(self.commands)\n else:\n return 1", "def numRunning(self):\n #with self.__queueLock:\n # The size of the list does not change, only its contents, so I don't\n # think there should be any conflict if we are reading a variable from\n # one thread and updating it on the other thread.\n activeRuns = sum(run is not None for run in self.__running)\n\n return activeRuns", "def number_of_jobs_in_queue():\n\n # Initialize #\n user_name = get_username()\n\n process = subprocess.check_output([\"squeue\", \"-u\", user_name])\n\n return len([line for line in process.split(\"\\n\") if user_name in line])", "def _n_workers(self, processes: int = 2) -> int:\n if 2 <= processes <= cpu_count():\n n_workers = processes\n else:\n n_workers = cpu_count()\n return n_workers", "def initializeRunningTasksCount():", "def get_num_procs():\n return get_num_MPI_workers()", "def processes(self):\n return self._getint('processes')", "def numRunningTotal(self):\n activeRuns = sum(run is not None for run in self.__running + self.__clientRunning)\n return activeRuns", "def test_countsProcesses(self):\n self.cmd.result.output = \"\\n\".join(\n (\n \" PID RSS TIME COMMAND\",\n \"345 1 10:23 notherJob1 a b c\",\n \"123 1 00:05:01 someJob a b c\",\n \"657 1 2-03:00:00 someJob a b c\",\n \"8766 1 00:10:00 unrelatedTask a b c\",\n )\n )\n results = ParsedResults()\n ps().processResults(self.cmd, results)\n count, countValue = _getDatapoint(results.values, \"count\")\n self.assertEqual(countValue, 3)", "def count_transcriptome_reads(results):\n #count total number of reads in transcriptiome\n transcriptome_reads = 0\n\n for gene_result in results:\n if gene_result is not None:\n logging.info(\"nreads: %d\" % (gene_result['nreads']))\n transcriptome_reads += gene_result['nreads']\n \n \n return transcriptome_reads", "def get_iter_num(self):\n iter_num = {}\n for topic_id, topic_data in self.run_result.items():\n iter_num[topic_id] = len(topic_data)\n\n return iter_num", "def get_job_count():\n return TxJob().count()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does everything needed to transcribe a podcast given its URL and file name:
    download the podcast, wait 40 seconds, convert it to .wav, wait 10 seconds,
    remove the .mp3 file, then run the transcription.
def transcribeAll(service, url, fileName):
    if(service == "omny.fm"):
        url = url.replace(".mp3","") + ".mp3"
    subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
        + fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
        + fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
        + "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
        + "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
        + " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True)
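A minimal usage sketch of the function above; the service string matches the one special-cased in the code, while the URL and episode id are placeholders.

# Hypothetical usage sketch -- the URL and episode id are placeholders.
transcribeAll("omny.fm", "https://example.com/episodes/12345.mp3", "12345")
# Popen returns immediately: the download, conversion, and decoding run as a
# background shell pipeline, so progress is checked via numRunningProcesses()
# and the nohup output is parsed later with nohupTranscriptionContent().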
[ "def say_something(self, text, directory='/tmp/', in_background=True):\n print(\"[ICUB][ACAPELA] Downloading the mp3 file...\")\n\n tts_acapela = acapela.Acapela(self.acapela_account_login, self.acapela_application_login,\n self.acapela_application_password, self.acapela_service_url,\n quality='22k', directory=directory)\n tts_acapela.prepare(text=text, lang='US', gender='M', intonation='NORMAL')\n output_filename = tts_acapela.run()\n print \"[ICUB][ACAPELA] Recorded TTS to %s\" % output_filename\n subprocess.Popen([\"play\",\"-q\",directory + str(output_filename)])\n print \"[ICUB][PLAY] reproducing the acapela file\"", "def transcribe_audio_file(filename):\n url = 'https://api.nexiwave.com/SpeechIndexing/file/storage/' + USERNAME +'/recording/?authData.passwd=' + PASSWORD + '&auto-redirect=true&response=application/json'\n\n # To receive transcript in plain text, instead of html format, comment this line out (for SMS, for example)\n #url = url + '&transcriptFormat=html'\n\n\n # Ready to send:\n sys.stderr.write(\"Send audio for transcript with \" + url + \"\\n\")\n r = requests.post(url, files={'mediaFileData': open(filename,'rb')})\n data = r.json()\n transcript = data['text']\n foo = data['text']\n f = open('newf.txt', 'w')\n f.write(foo)\n f.close() \n # Perform your magic here:\n print \"Transcript for \"+filename+\"=\" + transcript", "def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None", "def main():\n temp_dir = tempfile.mkdtemp()\n print('temp_dir is %s' % temp_dir)\n silence_file = generate_silence_file(temp_dir)\n urls = get_song_urls()\n files = fetch_songs(temp_dir, urls)\n wav = merge_files(files, silence_file)\n mp3 = convert_to_mp3(wav)\n metadata = write_metadata(mp3, urls)\n print('Done. 
Completed mp3 is here: %s' % mp3)\n os.remove(wav)\n shutil.rmtree(temp_dir)\n return mp3, metadata", "def transcribe_from_file(path2mp3, path2transcription):\n\n if os.path.exists(path2transcription):\n os.remove(path2transcription)\n\n # https://stackoverflow.com/questions/64085443/audio-files-downloaded-from-youtube-dl-are-corrupted\n # Your audio file is probably encoded not as MP3. It is probably AAC (usually having file extension .aac),\n # which is the default format for .mp4 and AVC video codec and youtube.\n # sound = AudioSegment.from_mp3(path2mp3)\n sound = AudioSegment.from_file(path2mp3)\n length = len(sound)\n start = 0\n\n while start < length:\n header = \"starting : {:d} s, ending {:d} s, total {:d} s\".format(\n start // 1000,\n min((start + SIZE), length) // 1000,\n length // 1000\n )\n print(\"\\tprocessing segment \" + header)\n section = sound[start:start + SIZE]\n out = BytesIO()\n section.export(out, format=\"mp3\")\n audio_byte = out.read()\n transcription = gcp_transcribe(audio_byte)\n with open(path2transcription, \"a\") as f:\n f.write(\"\\n\" + header + \"\\n\" + transcription + \"\\n\")\n\n start = start + SIZE - BUFFER", "def play_alarm():\n os.system(\"afplay ./alarm.mp3 -v 3 -t 8\")", "def transcribe_from_folder(path2folder):\n for fname in os.listdir(path2folder):\n if fname[-3:].lower() == \"mp3\":\n print(\"\\nstart processing {}\".format(fname))\n transcribe_from_file(\n os.path.join(path2folder, fname),\n os.path.join(path2folder, fname[:-3] + \"txt\"),\n )\n print(\"finish processing {}\\n\".format(fname))", "def transcribe_file(speech_file):\n from google.cloud import speech\n speech_client = speech.Client()\n\n\n \"\"\"\n with io.open(speech_file, 'rb') as audio_file:\n content = audio_file.read()\n audio_sample = speech_client.sample(\n content,\n source_uri=None,\n encoding=speech.Encoding.FLAC,\n sample_rate_hertz=48000)\n \"\"\"\n basename = os.path.basename(speech_file)\n filename, file_extensions = os.path.splitext(basename)\n\n try:\n path, blobName = upload_blob('audio-transcripts-regional', speech_file)\n\n audio_sample = speech_client.sample(\n content=None,\n source_uri=path,\n encoding='FLAC',\n sample_rate_hertz=48000)\n\n operation = audio_sample.long_running_recognize('en-AU',max_alternatives=4)\n\n\n\n max_retry_count = 100000\n retry_count = 0\n with progressbar.ProgressBar(max_value=progressbar.UnknownLength) as bar: \n while retry_count < max_retry_count and not operation.complete:\n retry_count += 1\n bar.update(retry_count)\n time.sleep(2)\n operation.poll()\n\n if not operation.complete:\n print('Operation not complete and retry limit reached.')\n return\n\n alternatives = operation.results\n\n \n with open('tmpOut/%s.tsv' % (basename), 'w+' ) as file:\n for i, alternative in enumerate(alternatives):\n file.write('{}\\t'.format(alternative.transcript))\n file.write('{}\\n'.format(alternative.confidence))\n # [END send_request]\n finally:\n print(\"Cleaning up %s\" % (blobName))\n delete_blob('audio-transcripts-regional', blobName)", "def process_speak_listen(device_index, mp3_filename, text, record, flag):\n\n mp3_filename = mp3_filename + \".mp3\"\n try:\n tts = gTTS(text=text, lang='en', slow=False)\n tts.save(mp3_filename)\n playsound(mp3_filename)\n os.remove(mp3_filename)\n\n if flag != 1:\n with sr.Microphone(device_index=device_index) as source:\n record.adjust_for_ambient_noise(source, duration=1)\n print(\"Speak:\")\n os.system(\"zenity --progress --width=400 --height=200 --title='Speak Now' \"\n \"--text='Speak 
Now......No need to click OK button' --no-cancel &\")\n try:\n audio = record.listen(source, timeout=5)\n text = record.recognize_google(audio)\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(text)\n except LookupError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : LookupError - Could not able to understand\")\n text = None\n except speech_recognition.WaitTimeoutError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : WaitTimeoutError - Could not able to listen anything for 5 seconds\")\n text = None\n except speech_recognition.UnknownValueError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : UnknownValueError - Could not able to listen anything for 5 seconds\")\n text = None\n except gtts.tts.gTTSError:\n print(\"ERROR : Connection Error : No internet connection.\")\n exit_program()\n except PermissionError:\n print(\"ERROR : No permission\")\n exit_program()\n\n return text", "def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)", "def do_play(*_args):\n print(last_wav_path)\n if last_wav_path and last_wav_path.is_file():\n threading.Thread(\n target=lambda: subprocess.check_call(\n [\"aplay\", \"-q\", str(last_wav_path)]\n )\n ).start()", "def transcribe(config):\n\n long_mode = True\n\n if 'audio_data' not in config:\n raise KeyError(\"`audio_data` not specified for transcription operation.\")\n\n if 'timeout' not in config:\n raise KeyError(\"`timeout` not specified for transcription operation.\")\n\n try:\n if config.pop('audio_duration') < 60: \n long_mode = False\n except KeyError:\n pass\n\n if long_mode:\n print(\"Running in long audio duration mode (audio is >60 seconds duration)...\")\n print(\"Uploading file...\")\n remote_object = gcloud_upload_file(config['audio_data'], config['storage_bucket'])\n file_name = remote_object.rsplit('/', 1)[-1]\n\n config['audio_data'] = \"gs://%s/%s\" % (config['storage_bucket'], file_name)\n storage_bucket = config.pop('storage_bucket')\n\n print(\"Transcribing file...\")\n result = gcloud_transcribe_long(config)\n\n print(\"Transcription successful, cleaning up...\")\n print(\"Deleting uploaded GCS file...\")\n gcloud_delete_file(file_name, storage_bucket)\n else:\n print(\"Transcribing file...\")\n config.pop('timeout')\n config.pop('storage_bucket')\n result = gcloud_transcribe_short(config)\n\n return result", "def download_audio(self):\n aac = self.f_name + '.' 
+ 'aac'\n track = self.f_name + self.file_type\n youtube_cmd = [\n \"youtube-dl\", self.link, \"-f\",\n \"bestaudio\", \"--extract-audio\",\n \"-o\", track, \"--audio-quality\",\n \"0\", \"--audio-format\", \"aac\"\n ]\n cmd = ' '.join(youtube_cmd)\n for std_out in popen(cmd):\n self.set_status_label(std_out)\n self.status_label.update_idletasks()\n ffmpeg_cmd = [\"ffmpeg\", \"-v\", \"quiet\", \"-i\", aac, track]\n cmd = ' '.join(ffmpeg_cmd)\n for stdout in popen(cmd):\n self.set_status_label(stdout)\n self.status_label.update_idletasks()\n try:\n # if aac:\n # remove(aac)\n move(track, self.downloads)\n # move(track, os.path.join(os.getcwd()) + '/downloads/')\n except Exception:\n self.set_status_label(\"ERROR DOWNLOADING\")", "def podcast_generate(date):\n c.execute('SELECT id, title, url, program, local_file, duration FROM article WHERE date = ?', (date, ))\n rows = c.fetchall()\n id, title, url, program, local_file, sduration = ([] for i in range(6))\n for row in rows:\n id.append(row[0])\n title.append(row[1])\n url.append(row[2])\n program.append(row[3])\n local_file.append(row[4])\n local_file.append(MP3_GAP) # Add a silent clip.\n sduration.append(row[5])\n\n podcast_fname = '{0}_npr_popular.mp3'.format(str(date))\n podcast_file = PODCAST_PATH + podcast_fname\n if not (os.path.isfile(podcast_file) and local_file):\n with open(podcast_file, \"w\") as outfile:\n call(['cat'] + local_file, stdout=outfile) # \"Cat\" the files via a shell cmd\n audiofile = eyed3.load(podcast_file)\n audiofile.tag.artist = u\"NPR\"\n audiofile.tag.release_date = str(date)\n audiofile.tag.genre = 101\n audiofile.tag.title = u\"NPR Populer Stories for \" + str(date)\n audiofile.tag.save()\n print 'Podcast {0} has been generated.'.format(str(date))\n\n # Now write the new podcast to the db.\n c.execute('SELECT COUNT(pid) FROM podcast WHERE pub_date = ?', (date, ))\n row = c.fetchone();\n if row[0] > 0:\n print 'A podcast for {0} already exists in the db.'.format(str(date))\n pass # Podcast already exists.\n else:\n human_date = date.strftime(\"%a, %b %-d\")\n pub_date = utils.formatdate(time.mktime(date.timetuple())) # RFC2822 Date\n ptitle = human_date + ' Most Popular Stories'\n description = \"\"\n dur = 0\n for index, item in enumerate(title):\n if index > 0:\n dur = dur + sduration[index-1] + 2\n m, s = divmod(dur, 60)\n hdur = \"%d:%02d\" % (m, s)\n description += str(index + 1) +\". 
\"+ item +\"(\"+ hdur +\") - \"+ url[index] +\"\\n\"\n length = os.path.getsize(podcast_file)\n duration = eyed3.load(podcast_file).info.time_secs\n c.execute(\n 'INSERT OR IGNORE INTO podcast (title, description, url, date, pub_date, length, type, duration) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n (ptitle, description.encode('ascii', 'xmlcharrefreplace'), podcast_fname, date, pub_date, length,\n 'audio/mpeg', duration))\n db.commit()\n print \"Podcast {0} has been saved.\".format(str(date))\n\n return podcast_fname", "def pron(word):\n\n return send_from_directory('prons', word + \".mp3\", mimetype=\"audio/mpeg\")", "def process_transcript(transcript_label):\n transcript_key = f\"{transcript_label}.json\"\n\n # Load Transcribe output from S3.\n raw_transcript = get_transcribe_output(transcript_key)\n\n # Parse to assign speaker parts.\n speaker_parts = assign_speakers(raw_transcript)\n\n # Identify Karen and Georgia.\n assigned = karen_or_georgia(speaker_parts)\n\n # Update the full transcript.\n build_transcript(assigned)\n\n # Upload the latest transcript to S3.\n s3 = boto3.resource(\"s3\")\n s3.Bucket(os.getenv(\"S3_BUCKET\")).upload_file(\"main_transcript.txt\", \"main_transcript.txt\")", "def speak(self, sentence):\n temp_dir = \"/tmp/\"\n filename = \"gtts.mp3\"\n file_path = \"{}/{}\".format(temp_dir, filename)\n\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n\n def delete_file():\n try:\n os.remove(file_path)\n if not os.listdir(temp_dir):\n try:\n os.rmdir(temp_dir)\n except OSError:\n pass\n except:\n pass\n\n if self.logger is not None:\n self.logger.info(\"Google TTS: {}\".format(sentence))\n tts = gTTS(text=sentence, lang=self.locale)\n tts.save(file_path)\n AudioPlayer.play_async(file_path, delete_file)", "def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")", "def run(self):\n if len(sys.argv) == 5:\n\n pcmfn = sys.argv[2]\n opusfn = pcmfn.replace(\".pcm_raw\", \".opus_hex\")\n wavefn = os.path.join(self.datapath, sys.argv[4] + '.wav')\n\n memberid = sys.argv[3]\n timestamp = sys.argv[4]\n\n with open(pcmfn, 'rb') as pcm:\n pcmdata = pcm.read()\n\n with wave.open(wavefn, 'wb') as wavfile: # Converts pcm to wave\n wavfile.setparams((2, 2, 48000, 0, 'NONE', 'NONE'))\n wavfile.writeframes(pcmdata)\n frames = wavfile.getnframes()\n\n if frames > self.frame_threshold: # Checks for minimum time requirement\n \n r = sr.Recognizer()\n with sr.AudioFile(wavefn) as source:\n audio = r.record(source)\n result = r.recognize_google_cloud(audio, credentials_json=self.API_JSON).strip()\n\n try:\n self.process(result, memberid, timestamp, wavefn)\n except Exception as e:\n print(e)\n\n if self.clearfiles:\n os.remove(pcmfn)\n os.remove(wavefn)\n\n else:\n raise Exception(\"Bot must be run with commands passed from main.js\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
audiourl > url where the transcription's mp3 is stored (NOT NULL)
    PodcastName > The name of the show (references podcast(name))
    Description > The provided summary of that day's podcast
    Date > The date that podcast aired (parsed to mmddyyyy)
    Title > The title of that specific podcast
    Duration > the running time of that podcast (use strptime to parse, need mmddyyyy)
    pending > right now will be false because we're not transcribing
    (dateTranscribed) > date of transcription (updated later)
def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):
    try:
        cursor = dbConnection.cursor()
        title = title.replace("'", "''")
        cursor.execute("INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('" + audiourl + "', NULL, '" + podcastName + "', NULL, '" + description + "', '" + parsedDate + "', '" + title + "', FALSE, NULL);")
        dbConnection.commit()
        cursor.close()
        return True
    except:
        return False
    return False
[ "def podcast_generate(date):\n c.execute('SELECT id, title, url, program, local_file, duration FROM article WHERE date = ?', (date, ))\n rows = c.fetchall()\n id, title, url, program, local_file, sduration = ([] for i in range(6))\n for row in rows:\n id.append(row[0])\n title.append(row[1])\n url.append(row[2])\n program.append(row[3])\n local_file.append(row[4])\n local_file.append(MP3_GAP) # Add a silent clip.\n sduration.append(row[5])\n\n podcast_fname = '{0}_npr_popular.mp3'.format(str(date))\n podcast_file = PODCAST_PATH + podcast_fname\n if not (os.path.isfile(podcast_file) and local_file):\n with open(podcast_file, \"w\") as outfile:\n call(['cat'] + local_file, stdout=outfile) # \"Cat\" the files via a shell cmd\n audiofile = eyed3.load(podcast_file)\n audiofile.tag.artist = u\"NPR\"\n audiofile.tag.release_date = str(date)\n audiofile.tag.genre = 101\n audiofile.tag.title = u\"NPR Populer Stories for \" + str(date)\n audiofile.tag.save()\n print 'Podcast {0} has been generated.'.format(str(date))\n\n # Now write the new podcast to the db.\n c.execute('SELECT COUNT(pid) FROM podcast WHERE pub_date = ?', (date, ))\n row = c.fetchone();\n if row[0] > 0:\n print 'A podcast for {0} already exists in the db.'.format(str(date))\n pass # Podcast already exists.\n else:\n human_date = date.strftime(\"%a, %b %-d\")\n pub_date = utils.formatdate(time.mktime(date.timetuple())) # RFC2822 Date\n ptitle = human_date + ' Most Popular Stories'\n description = \"\"\n dur = 0\n for index, item in enumerate(title):\n if index > 0:\n dur = dur + sduration[index-1] + 2\n m, s = divmod(dur, 60)\n hdur = \"%d:%02d\" % (m, s)\n description += str(index + 1) +\". \"+ item +\"(\"+ hdur +\") - \"+ url[index] +\"\\n\"\n length = os.path.getsize(podcast_file)\n duration = eyed3.load(podcast_file).info.time_secs\n c.execute(\n 'INSERT OR IGNORE INTO podcast (title, description, url, date, pub_date, length, type, duration) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n (ptitle, description.encode('ascii', 'xmlcharrefreplace'), podcast_fname, date, pub_date, length,\n 'audio/mpeg', duration))\n db.commit()\n print \"Podcast {0} has been saved.\".format(str(date))\n\n return podcast_fname", "def tokenize_podcast_transcript(args):\n DATA_DIR = os.path.join(os.getcwd(), 'data', args.project_id)\n story_file = os.path.join(DATA_DIR, 'podcast-transcription.txt')\n\n # Read all words and tokenize them\n with open(story_file, 'r') as fp:\n data = fp.readlines()\n\n data = [item.split(' ') for item in data]\n data = [\n item[:-2] + [' '.join(item[-2:])] if item[-1] == '\\n' else item\n for item in data\n ]\n data = [item for sublist in data for item in sublist]\n\n df = pd.DataFrame(data, columns=['word'])\n df['conversation_id'] = 1\n\n return df", "def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = 
os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None", "def load_podcasts(channel, channel_id):\n\n print \"Podcasts\", channel[\"feed\"].get(\"title\")\n\n # channel_name = channel[\"feed\"].get(\"title\")\n\n # channel_id = Channel.query.filter_by(channel_name=channel_name).one().channel_id\n\n for podcast in channel[\"items\"]:\n # iterating through keys in items dict\n all_links = podcast.get(\"links\")\n all_images = podcast.get(\"image\")\n\n author = podcast.get(\"author\")\n title = podcast.get(\"title\")\n podcast_url = podcast.get(\"link\")\n summary = podcast.get(\"summary\")\n\n # duration is in a different format for each rss feed, so this uniforms all into seconds\n podcast_duration = podcast.get(\"itunes_duration\")\n\n if podcast_duration and \":\" in podcast_duration.encode('utf-8'):\n\n splitted = podcast_duration.encode('utf-8').split(\":\")\n\n if len(splitted) == 3:\n hours_to_secs = int(splitted[0]) * 3600\n mins_to_secs = int(splitted[1]) * 60\n podcast_duration = hours_to_secs + mins_to_secs + int(splitted[2])\n\n if len(splitted) == 2:\n mins_to_secs = int(splitted[0])*60\n podcast_duration = mins_to_secs + int(splitted[1])\n else:\n podcast_duration = 0\n\n # converting to a datetime from a python timestruct:\n # http://stackoverflow.com/questions/1697815/how-do-you-convert-a-python-time-struct-time-object-into-a-datetime-object/18726020\n python_timestruct = podcast.get(\"published_parsed\")\n\n if python_timestruct:\n released_at = datetime.fromtimestamp(mktime(python_timestruct))\n\n # creating an image url variable, to avoid 'referenced before created error'\n image_url = None\n\n if all_images:\n image_url = all_images.get(\"href\")\n\n # creating an play url variable, to avoid 'referenced before created error'\n play_url = None\n\n if all_links:\n for link in all_links:\n if link.type == \"audio/mpeg\" or link.type == \"video/mp4\" or link.type == \"audio/x-mpeg\":\n # checking for type of link so we get the correct url\n play_url = link.get(\"href\")\n\n # we only want to add episodes that have a play_url to the database\n if play_url:\n podcast = Podcast(channel_id=channel_id,\n author=author,\n title=title,\n podcast_url=podcast_url,\n play_url=play_url,\n released_at=released_at,\n image_url=image_url,\n summary=summary,\n podcast_duration=podcast_duration,)\n\n # adds instance to the session so it will be stored\n db.session.add(podcast)\n\n # committing to the database\n db.session.commit()", "def _parse_transcript_link(episode_id):\n # transcript files are 3 digit\n # see here a nice way to convert a number in 3 digits using 
f-strings:\n # https://github.com/pybites/challenges/pull/431\n number = f'{episode_id:0>3}'\n transcript = TRANSCRIPT_URL.format(number)\n resp = requests.get(transcript)\n return resp.content if resp.status_code == 200 else ''", "def construct_metadata(song):\n print(song) #temp", "def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")", "def track_info(filename):\n tag = id3.Tag()\n tag.parse(filename)\n a = load(filename)\n print(\"# {}\".format('=' * 78))\n print(\"Track Name: {}\".format(tag.title))\n print(\"Track Artist: {}\".format(tag.artist))\n print(\"Track Album: {}\".format(tag.album))\n print(\"Track Duration: {}\".format(duration_from_seconds(a.info.time_secs)))\n print(\"Track Number: {}\".format(tag.track_num))\n print(\"Track BitRate: {}\".format(a.info.bit_rate))\n print(\"Track BitRate: {}\".format(a.info.bit_rate_str))\n print(\"Sample Rate: {}\".format(a.info.sample_freq))\n print(\"Mode: {}\".format(a.info.mode))\n print(\"# {}\".format('=' * 78))\n print(\"Album Artist: {}\".format(tag.album_artist))\n print(\"Album Year: {}\".format(tag.getBestDate()))\n print(\"Album Recording Date: {}\".format(tag.recording_date))\n print(\"Album Type: {}\".format(tag.album_type))\n print(\"Disc Num: {}\".format(tag.disc_num))\n print(\"Artist Origin: {}\".format(tag.artist_origin))\n print(\"# {}\".format('=' * 78))\n print(\"Artist URL: {}\".format(tag.artist_url))\n print(\"Audio File URL: {}\".format(tag.audio_file_url))\n print(\"Audio Source URL: {}\".format(tag.audio_source_url))\n print(\"Commercial URL: {}\".format(tag.commercial_url))\n print(\"Copyright URL: {}\".format(tag.copyright_url))\n print(\"Internet Radio URL: {}\".format(tag.internet_radio_url))\n print(\"Publisher URL: {}\".format(tag.publisher_url))\n print(\"Payment URL: {}\".format(tag.payment_url))\n print(\"# {}\".format('=' * 78))\n print(\"Publisher: {}\".format(tag.publisher))\n print(\"Original Release Date: {}\".format(tag.original_release_date))\n print(\"Play Count: {}\".format(tag.play_count))\n print(\"Tagging Date: {}\".format(tag.tagging_date))\n print(\"Release Date: {}\".format(tag.release_date))\n print(\"Terms Of Use: {}\".format(tag.terms_of_use))\n print(\"isV1: {}\".format(tag.isV1()))\n print(\"isV2: {}\".format(tag.isV2()))\n print(\"BPM: {}\".format(tag.bpm))\n print(\"Cd Id: {}\".format(tag.cd_id))\n print(\"Composer: {}\".format(tag.composer))\n print(\"Encoding date: {}\".format(tag.encoding_date))\n print(\"# {}\".format('=' * 78))\n print(\"Genre: {}\".format(tag.genre.name))\n print(\"Non Std Genre Name: {}\".format(tag.non_std_genre.name))\n print(\"Genre ID: {}\".format(tag.genre.id))\n print(\"Non Std Genre ID: {}\".format(tag.non_std_genre.id))\n print(\"LAME Tag: {}\".format(a.info.lame_tag))\n print(\"# {}\".format('=' * 78))\n print(\"Header Version: {}\".format(tag.header.version))\n print(\"Header Major Version: {}\".format(tag.header.major_version))\n print(\"Header Minor Version: {}\".format(tag.header.minor_version))\n print(\"Header Rev Version: {}\".format(tag.header.rev_version))\n print(\"Header Extended: {}\".format(tag.header.extended))\n print(\"Header Footer: {}\".format(tag.header.footer))\n print(\"Header Experimental: {}\".format(tag.header.experimental))\n print(\"Header SIZE: {}\".format(tag.header.SIZE))\n print(\"Header Tag Size: {}\".format(tag.header.tag_size))\n print(\"Extended 
Header Size: {}\".format(tag.extended_header.size))\n print(\"# {}\".format('=' * 78))\n print(\"File Name: {}\".format(tag.file_info.name))\n print(\"File Tag Size: {}\".format(tag.file_info.tag_size))\n print(\"File Tag Padding Size: {}\".format(tag.file_info.tag_padding_size))\n print(\"File Read Only: {}\".format(tag.read_only))\n print(\"File Size: {}\".format(a.info.size_bytes))\n print(\"Last Modified: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(tag.file_info.mtime))))\n print(\"Last Accessed: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(tag.file_info.atime))))\n print(\"# {}\".format('=' * 78))", "def getPodcastDetails(url):\n try:\n headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}\n req = requests.get(url, headers=headers)\n root = etree.fromstring(req.text)\n resArray = []\n homepage = root[0].find(\"link\").text\n name = root[0].find(\"title\").text\n description = \"\"\n try:\n description = root[0].find(\"{http://www.itunes.com/dtds/podcast-1.0.dtd}summary\").text\n except:\n pass\n try:\n description = root[0].find(\"description\").text\n except: \n pass\n category = root[0].find(\"{http://www.itunes.com/dtds/podcast-1.0.dtd}category\").attrib[\"text\"]\n image = root[0].find(\"{http://www.itunes.com/dtds/podcast-1.0.dtd}image\").attrib[\"href\"]\n if(len(name) > 0 and len(description) > 0 and len(category) > 0 and len(image) > 0 and len(homepage) > 0):\n print(\"all pass.. got \")\n print(name)\n print(homepage)\n print(description)\n print(category)\n print(image)\n except Exception as e:\n pass\n # Modules.Tools.writeException(\"NPR getXML\", e)", "async def ntrstream(self):\n embed = discord.Embed(title=\"NTR Streaming Guide\", color=discord.Color.blue())\n embed.url = \"https://gbatemp.net/threads/tutorial-3ds-screen-recording-without-a-capture-card-ntr-cfw-method.423445/\"\n embed.description = \"How to use NTR CFW with Nitro Stream to Wirelessly Stream\"\n embed.add_field(name=\"4 common fixes\", value=\"• Are you connected to the Internet?\\n• Is your antivirus program blocking the program?\\n• Make sure you are not putting the port (:####) into the IP box of Nitro Stream.\\n• Does your NTR menu say NTR CFW 3.4 Preview2?\")\n await self.bot.say(\"\", embed=embed)", "def transcribeAll(service, url, fileName):\n if(service == \"omny.fm\"):\n url = url.replace(\".mp3\",\"\") + \".mp3\"\n subprocess.Popen(\"wget -c -O ./podcasts/\" + fileName + \".mp3 \" + url + \" && sleep 40 && ffmpeg -i ./podcasts/\"\n + fileName + \".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/\" + fileName + \".wav && sleep 10 && rm ./podcasts/\" \n + fileName + \".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false \"\n + \"--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 \"\n + \"--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id\" + fileName \n + \" utterance-id\" + fileName + \"|' 'scp:echo utterance-id\" + fileName + \" ./podcasts/\" + fileName + \".wav|' 'ark:/dev/null' &\", shell=True)", "def get_transcript(url):\n path = urlparse(url)\n url_path = re.search(r\"[a-z].*[full-transcript]\", str(path.path)).string.split('/')[1:-1]\n date = '-'.join(url_path[:3])\n title = '-'.join(url_path[3].split('-')[2:])\n name = 
'-'.join(url_path[3].split('-')[:2])\n fname = f\"{name}/{'_'.join([name, date, title])}.txt\"\n\n print(name, title)\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n raw_html = soup.find('div', attrs = {'class':'elementor-widget-theme-post-content'})\n\n # TODO: this is some test text\n # text = \"content\"\n\n text = \"\"\n for p in raw_html.find_all('p'):\n text += f\"{p.get_text()}\\n\"\n \n return [name, date, title, fname, text]", "def format_subtitle_track(self, name):\n track = {\"language\": None, \"bitrate\": None}\n name = name.strip()\n if \" / \" in name:\n name_parts = name.split(\" / \", 1)\n track[\"language\"] = name_parts[0].strip()\n track[\"bitrate\"] = name_parts[1].strip()\n return track", "def getPodcastDetailsDebug(url):\n try:\n headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}\n req = requests.get(url, headers=headers)\n root = etree.fromstring(req.text)\n resArray = []\n print(root[0].find(\"link\").text)\n print(root[0].find(\"title\").text)\n print(root[0].find(\"{http://www.itunes.com/dtds/podcast-1.0.dtd}summary\").text)\n print(root[0].find(\"{http://www.itunes.com/dtds/podcast-1.0.dtd}category\").attrib[\"text\"])\n print(root[0].find(\"{http://www.itunes.com/dtds/podcast-1.0.dtd}image\").attrib[\"href\"])\n except Exception as e:\n pass\n # Modules.Tools.writeException(\"NPR getXML\", e)", "def compile_episode_transcript(trans_id, db):\n transcript = []\n trans = get_transcript(db, trans_id).sort_values(by=\"start_time\")\n\n # line contents: [start_time, end_time, utterance, speaker_id]\n for idx in range(trans.shape[0]):\n speaker = trans['speaker_id'][idx]\n text = clean_text(trans['text'][idx])\n start = trans['start_time'][idx]/60.\n end = trans['end_time'][idx]/60.\n\n if speaker is None or np.isnan(speaker):\n speaker = -1\n\n # this happens a lot in the audiosearch db..\n if text == '.':\n continue\n\n line = [start, end, text, speaker]\n\n # skip duplicate lines\n if idx > 0 and line[2] == transcript[-1][2]:\n continue\n\n transcript.append(line)\n return np.asarray(transcript)", "def subtitle(request, playlist_id):\n\ttry: \n\t\targs = json.loads(request.body)\n\t\tplaylist = Playlist.objects.get(pk=playlist_id)\n\t\tplaylist.subtitle = args['subtitle']\n\texcept Playlist.DoesNotExist:\n\t\treturn error('Invalid playlist id')\n\texcept KeyError:\n\t\treturn error('Query does not contain required arguments')\n\n\tplaylist.save()\n\treturn success()", "def test_gathering_links_for_audio_track(\n lep_dl: LepDL,\n) -> None:\n json_test = \"\"\"\\\n [\n {\n \"episode\": 3,\n \"date\": \"2000-01-01T00:00:00+00:00\",\n \"url\": \"https://teacherluke.co.uk/2009/04/15/episode-3-musicthe-beatles/\",\n \"post_title\": \"3. 
Music/The Beatles\",\n \"post_type\": \"\",\n \"files\": {\n \"audios\": [],\n \"atrack\": [\n [\n \"https://someurl1.local\", \"https://someurl2.local\", \"https://someurl3.local\"\n ]\n ]\n },\n \"parsed_at\": \"2021-10-14T07:35:24.575575Z\",\n \"index\": 2009041501,\n \"admin_note\": \"Check audio track.\"\n }\n ]\n \"\"\" # noqa: E501,B950\n db_episodes = Lep.extract_only_valid_episodes(json_test)\n lep_dl.files = downloader.gather_all_files(db_episodes)\n assert len(lep_dl.files) == 2\n assert lep_dl.files[0].primary_url == \"https://someurl1.local\"\n assert lep_dl.files[0].secondary_url == \"https://someurl2.local\"\n assert lep_dl.files[0].tertiary_url == \"https://someurl3.local\"\n assert isinstance(lep_dl.files[0], ATrack)\n assert (\n lep_dl.files[0].filename == \"[2000-01-01] # 3. Music/The Beatles _aTrack_.mp3\"\n )", "def set_meta_mp3(file):\n\n list_str_prop_mp3 = ['album', 'artist', 'title']\n list_other_prop_mp3 = ['comment', 'genre', 'year']\n dict_file_mp3 = {}\n # For each string properties into the tag\n for prop in list_str_prop_mp3:\n # If the tag exist (i.e it's not empty for the music file)\n if file.tag.d.has_key(prop.upper()):\n # We delete spe char and we format it\n dict_file_mp3[prop] = delete_spe_char_and_format(file.tag[prop.upper()])\n else:\n # Or we define it's value as 'Unknow ' + prop\n # For instance 'Unknow Artist'\n dict_file_mp3[prop] = 'Unknow ' + prop.capitalize()\n # For each other properties\n for prop in list_other_prop_mp3:\n if file.tag.d.has_key(prop.upper()):\n # We just copy them\n dict_file_mp3[prop] = file.tag[prop.upper()]\n else:\n dict_file_mp3[prop] = ''\n # To try to find the tracknumber, we need 'title'\n if dict_file_mp3.has_key('title'): \n # But before, we delete the duplicate\n list_duplicate = [dict_file_mp3['artist'], dict_file_mp3['album']]\n # Now we delete the duplicates\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], list_duplicate)\n # So we are able to find the tracknumber\n number = ''\n # If ID3 already find it\n if file.tag.d.has_key(\"TRACKNUMBER\"):\n number = file.tag[\"TRACKNUMBER\"]\n # Else we try to find by ourself\n else:\n number = find_tracknumber(dict_file_mp3['title'])\n # If we found a tracknumber, we delete it from 'title'\n if number:\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], [number])\n dict_file_mp3['tracknumber'] = number\n # And we format the new title\n dict_file_mp3['title'] = build_track_name(dict_file_mp3['title'], number)\n dict_file_mp3['name'] = dict_file_mp3['title'] + '.mp3'\n dict_file_mp3['path'] = build_path([dict_file_mp3['artist'], dict_file_mp3['album']])\n return dict_file_mp3", "def audio(datatype, stream_id, data, control, timestamp):\n msg = {'msg': datatype, 'stream_id': stream_id,'timestamp': timestamp, 'body': {'control': control, 'data': data}}\n return msg" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checks the database for empty transcription entries; returns a single row with

    index 0 audiourl
    index 1 id
    index 2 podcast name
    index 3 service of podcast
def checkPre(dbConnection):
    cursor = dbConnection.cursor()
    cursor.execute("SELECT audiourl, T.id, podcastName, source FROM transcriptions AS T JOIN podcasts as P ON P.name = T.podcastname WHERE COALESCE(T.transcription, '') = '' AND pending = FALSE LIMIT 1;")
    entry = cursor.fetchone()
    cursor.close()
    return entry
[ "def empty(cls) -> \"Transcription\":\n return Transcription(text=\"\", likelihood=0, transcribe_seconds=0, wav_seconds=0)", "async def get_not_sent(self) -> List[TradeRecord]:\n\n cursor = await self.db_connection.execute(\n \"SELECT * from trade_records WHERE sent<? and confirmed=?\",\n (\n 4,\n 0,\n ),\n )\n rows = await cursor.fetchall()\n await cursor.close()\n records = []\n for row in rows:\n record = TradeRecord.from_bytes(row[0])\n records.append(record)\n\n return records", "def test_speaker_list_with_no_speakers_in_database(self):\r\n url = reverse('videos-speaker-list')\r\n\r\n resp = self.client.get(url)\r\n eq_(resp.status_code, 200)\r\n self.assertTemplateUsed(resp, 'videos/speaker_list.html')", "def test_get_subscribers_empty(self):\n m1 = self.create_movement()\n self.session.commit()\n\n movement_id = m1.id\n self.assertListEqual([], get_subscribers(movement_id))", "def checkEmptyDocs(self):\r\n \r\n self.emptyIndexes = []\r\n for i, doc in enumerate(self.docsString):\r\n if len(doc) == 0:\r\n self.emptyIndexes.append(i)", "def test_cli_show_empty_list(self):\n runner = engine.engine()\n runner.run([], a.show)\n self.assertEqual(0, runner.logger.stats['processed'])", "def removeEmptySubscriptions(self):\r\n \r\n clean_data = []\r\n for subscription in self.data:\r\n if len(subscription['items']) == 0:\r\n continue\r\n clean_data.append(subscription)\r\n\r\n if len(clean_data) == 0:\r\n raise Exception(\"You don't have any unread feeds!\")\r\n else:\r\n self.data = clean_data", "def get_empty_list_message(self):\n return _l('There are no items in the table.')", "def test_170417_empty(self):\n spc = parser(get_file('PTSD48_empty.txt'))\n # spc.draw_outlooks()\n spc.sql(self.txn)\n jabber = spc.get_jabbers('')\n self.assertEquals(jabber[0][0],\n (\"The Storm Prediction Center issues Days 4-8 \"\n \"Convective Outlook at Dec 25, 9:41z \"\n \"http://www.spc.noaa.gov/products/exper/day4-8/\"\n \"archive/2008/day4-8_20081225.html\"))", "def nohupTranscriptionContent(filePath):\n try:\n continu = True\n fileContent = \"\"\n f = open(filePath, 'r')\n while (continu):\n temp = f.readline(900000)\n if(len(temp) == 0):\n continu = False\n else:\n fileContent += temp\n results = []\n realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)\n results.append(realTimeFactor)\n transcription = re.findall(r'utterance-id(.*?) (.*?)\\n', fileContent)\n transcriptionList = []\n transcriptionIDList = []\n for item in transcription:\n if(len(item[1]) > 1000):\n transcriptionIDList.append(item[0])\n transcriptionList.append(item[1])\n results.append(transcriptionList)\n results.append(transcriptionIDList)\n transcriptionTime = re.findall(r'seconds / (.*?) 
seconds\\.', fileContent)\n results.append(transcriptionTime)\n return results\n except Exception as e:\n Tools.writeException(\"nohupTranscriptionContent\", e)\n return False", "def test_cli_report_empty_list(self):\n runner = engine.engine()\n runner.run([], a.report)\n self.assertEqual(0, runner.logger.stats['processed'])", "def test_get_all_unassociated_no_tracks(self):\n self.assertEqual(self.get_track_count(), 0)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(tracks, [])", "def compile_episode_transcript(trans_id, db):\n transcript = []\n trans = get_transcript(db, trans_id).sort_values(by=\"start_time\")\n\n # line contents: [start_time, end_time, utterance, speaker_id]\n for idx in range(trans.shape[0]):\n speaker = trans['speaker_id'][idx]\n text = clean_text(trans['text'][idx])\n start = trans['start_time'][idx]/60.\n end = trans['end_time'][idx]/60.\n\n if speaker is None or np.isnan(speaker):\n speaker = -1\n\n # this happens a lot in the audiosearch db..\n if text == '.':\n continue\n\n line = [start, end, text, speaker]\n\n # skip duplicate lines\n if idx > 0 and line[2] == transcript[-1][2]:\n continue\n\n transcript.append(line)\n return np.asarray(transcript)", "def db_query_is_empty(result):\n\n rows = []\n # start iterating through GqlQuery\n for r in result:\n rows.append(r)\n break\n if len(rows) > 0:\n return False\n return True", "def test_cli_tag_empty_list(self):\n runner = engine.engine()\n runner.run([], a.tag)\n self.assertEqual(0, runner.logger.stats['processed'])", "def getNotEmptyEntries(self):\n not_empty = []\n catalog = utils.getToolByName(self.context, 'portal_catalog')\n for entry in catalog(portal_type='BlogEntry'):\n entry = entry.getObject()\n contained = entry.contentIds()\n if contained:\n not_empty.append(entry)\n return not_empty", "def unknown_words(self) -> List[str]:\n return [word for word in self.input_words if word not in self.words_already_studied]", "def _checkForBlankLines(self, datalines):\n empties = None\n count = 0\n rtlines = []\n for line in datalines:\n if line.strip() == \"\":\n empties = 1\n else:\n if empties == 1: # If data line found after empty line then raise\n raise Exception(\"Empty line found in data section at line: \" + str(count))\n else:\n rtlines.append(line)\n count = count + 1\n return rtlines", "def test_list_transcription_vocabularies(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a title, return True if the podcast is already in the database, and False if it does not exist in the database
def checkIfExists(dbconnection, title):
    cursor = dbconnection.cursor()
    output = ""
    title = title.replace("'", "''")
    try:
        cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
        dbconnection.commit()
        output = cursor.fetchone()
        cursor.close()
        if(output is None):
            return False
        else:
            return True
    except:
        dbconnection.rollback()
        cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
        dbconnection.commit()
        output = cursor.fetchone()
        cursor.close()
        if(output is None):
            return False
        else:
            return True
[ "def check_if_entry_exists(title: str) -> bool:\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n try:\n c.execute(\n \"\"\"select * from entries where title = ?\"\"\",\n (title,)\n )\n records = c.fetchall()\n return len(records) > 0\n except sqlite3.OperationalError as e:\n print(f'Exception {e} caught. Recreating database.')\n c.execute('drop table if exists entries')\n conn.commit()\n conn.close()\n create()\n return False", "def _titletaken(self, title):\n c = db.get(db.Key.from_path('Collection', title))\n return c is not None", "def _has_song(self, title):\r\n for i in range(len(self._playlist)):\r\n song_name = self._playlist[i].split(':')[0]\r\n if song_name == title:\r\n return True\r\n return False", "def verify_title(title,user_id):\n query =\"\"\"select entries.title from entries\n where user_id = {} and title = '{}'\n \"\"\".format(user_id, title)\n try:\n cur.execute(query)\n title = cur.fetchone()\n if title:\n return True\n return False\n except:\n conn.rollback()", "def link_exist(self, value):\n if value:\n self.cursor.execute(\"SELECT title FROM link WHERE title=?\", (value, ))\n result = self.cursor.fetchone()\n if result:\n return True\n else:\n return False\n else:\n return True", "def store_game_title(title, games_coll=games_coll):\n if games_coll.count_documents({'title': title}) == 0:\n games_coll.insert_one({'title': title})", "def check_movie_exists(self, movie_name):\r\n for db_row in self.id_cursor.execute(\"SELECT * FROM movie_info\"):\r\n if db_row[0] == movie_name:\r\n return True\r\n return False", "def movie_exists (self, title, year):\n title=re.sub(r'[?|$|!|:|#]',r'',title)\n movie_meta = '%s (%d)' % (title, year)\n return movie_meta in self.db[self.movies_label]", "def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False", "def check_song(conn, song_title, album_name):\n curs = dbi.dict_cursor(conn)\n curs.execute('''select count(song_id) from coda_song where song_title = %s \n and album_id = \n (select album_id from coda_album where album_title = %s)''',\n [song_title, album_name])\n result = curs.fetchone()\n return result['count(song_id)'] == 0", "def presentation_exists(self, presentation):\r\n result = QtSql.QSqlQuery('''SELECT * FROM presentations''')\r\n while result.next():\r\n if (unicode(presentation.title) == unicode(result.value(1).toString())\r\n and unicode(presentation.speaker) == unicode(result.value(2).toString())):\r\n return True\r\n return False", "def show_exists (self, title):\n title=re.sub(r'[?|$|!|:|#]',r'',title)\n show_meta = '%s' % (title)\n return show_meta in self.db[self.series_label]", "def episode_exists (self, title, season, episode):\n title=re.sub(r'[?|$|!|:|#]',r'',title)\n if self.show_exists(title) == False:\n return False\n show_entry = self.db[self.series_label][title]\n episode_entry = 'S%02dE%02d' % (season, episode)\n return episode_entry in show_entry['episodes']", "def get_movie_if_exist(item):\n query = Session.query(Movie).filter(Movie.title == item.title)\n result = query.first()\n return result", "def check_repeat(db, record):\n models = [TechRepublicData, SecurityNewsData, PyjobData, RedditData]\n temp = db.query(*models)\n\n for model in models:\n if temp.filter(model.title == record.title).count():\n return True", "def is_category_existed(url):\n \n existed_category = db.execute_query(\"SELECT 1 FROM categories WHERE url = '\" + url + \"'\")\n \n # if the current 
category is not in the database\n if existed_category:\n return True\n else:\n return False", "def is_unique(title, type_):\r\n data = access_file(type_)\r\n if len(data) == 0:\r\n return True\r\n for obj in data:\r\n if obj[\"title\"] == title:\r\n return False\r\n return True", "def __contains__(self, title):\n return title in self.__articles", "def is_duplicate(title, movies):\n for movie in movies:\n if movie.lower() == title.lower():\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generate the CUSPARSE FFI definition
def generate_cffi_cdef(
        cuda_include_path=cuda_include_path, cusparse_header=cusparse_header,
        cffi_out_file=None):

    with open(cusparse_header, 'r') as f:
        cusparse_hdr = f.readlines()

    # in some version cusparse_v2.h just points to cusparse.h, so read it
    # instead
    for line in cusparse_hdr:
        # if v2 header includes cusparse.h, read that one instead
        if line.startswith('#include "cusparse.h"'):
            cusparse_header = os.path.join(cuda_include_path, 'cusparse.h')
            with open(cusparse_header, 'r') as f:
                cusparse_hdr = f.readlines()

    cusparse_hdr = [_remove_comment(l) for l in cusparse_hdr]

    # skip lines leading up to first typedef
    for idx, line in enumerate(cusparse_hdr):
        if line.startswith('typedef'):
            start_line = idx
            break

    # skip closing #if defined logic
    for idx, line in enumerate(cusparse_hdr[start_line:]):
        if line.startswith('#if defined(__cplusplus)') or \
                'Define the following symbols for the new API' in line:
            # second match is to avoid CFFI compilation error due to the final
            # define statements in v4.1 through v5.5
            end_line = start_line + idx
            break

    # define other data types needed by FFI
    # ... will be filled in from cuComplex.h by the C compiler
    cffi_cdef = """
typedef struct CUstream_st *cudaStream_t;

typedef struct float2 {
    ...;
} float2;
typedef float2 cuFloatComplex;
typedef float2 cuComplex;

typedef struct double2 {
    ...;
} double2;
typedef double2 cuDoubleComplex;

typedef float cufftReal;
typedef double cufftDoubleReal;

typedef cuComplex cufftComplex;
typedef cuDoubleComplex cufftDoubleComplex;

typedef enum cudaDataType_t {
    CUDA_R_16F= 2,   // real as a half
    CUDA_C_16F= 6,   // complex as a pair of half numbers
    CUDA_R_32F= 0,   // real as a float
    CUDA_C_32F= 4,   // complex as a pair of float numbers
    CUDA_R_64F= 1,   // real as a double
    CUDA_C_64F= 5,   // complex as a pair of double numbers
    CUDA_R_8I= 3,    // real as a signed char
    CUDA_C_8I= 7,    // complex as a pair of signed char numbers
    CUDA_R_8U= 8,    // real as a unsigned char
    CUDA_C_8U= 9,    // complex as a pair of unsigned char numbers
    CUDA_R_32I= 10,  // real as a signed int
    CUDA_C_32I= 11,  // complex as a pair of signed int numbers
    CUDA_R_32U= 12,  // real as a unsigned int
    CUDA_C_32U= 13   // complex as a pair of unsigned int numbers
} cudaDataType;

typedef enum libraryPropertyType_t  // GRL: added this for cuda 8.0
{
    MAJOR_VERSION,
    MINOR_VERSION,
    PATCH_LEVEL
} libraryPropertyType;

/* definitions from cusparse header below this point */
"""

    cffi_cdef += ''.join(cusparse_hdr[start_line:end_line])

    """
    don't use the _v2 versions of the function names defined in CUDA
    v4.1 through v5.5
    """
    cffi_cdef = cffi_cdef.replace('_v2(', '(')

    if os.name == 'nt':  # Win
        cffi_cdef = cffi_cdef.replace('CUSPARSEAPI', '__stdcall')
    else:  # posix, etc
        cffi_cdef = cffi_cdef.replace('CUSPARSEAPI', '')

    if cffi_out_file is not None:
        # create specified output directory if it doesn't already exist
        out_dir = os.path.dirname(cffi_out_file)
        if out_dir and not os.path.exists(out_dir):
            os.makedirs(out_dir)
        with open(cffi_out_file, 'w') as f:
            f.write(cffi_cdef)

    return cffi_cdef
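Usage note (not part of the dataset entry): the cdef string that generate_cffi_cdef returns is intended to be handed to cffi. A minimal sketch of feeding it into cffi's API mode follows; the header and library names are assumptions, and in practice linking against cuSPARSE also needs the CUDA library directories on the build path.

import cffi

def build_cusparse_module(cffi_cdef, cuda_include_path,
                          module_name="_cusparse_cffi"):
    ffi = cffi.FFI()
    # The cdef uses '...;' placeholders, so API mode (set_source + compile)
    # is required; the C compiler fills in the real struct layouts.
    ffi.cdef(cffi_cdef)
    ffi.set_source(
        module_name,
        '#include <cusparse_v2.h>',      # assumed header name
        include_dirs=[cuda_include_path],
        libraries=['cusparse'],          # assumed library name
    )
    ffi.compile()
    return module_name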
[ "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += 
indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def build_cffi():\r\n print_banner(\"Building CFFI Module\")\r\n ffi = cffi.FFI()\r\n\r\n this_dir = pathlib.Path().resolve()\r\n h_file_name = this_dir / \"cmult.h\"\r\n with open(h_file_name) as h_file:\r\n # cffi does not like our preprocessor directives, so we remove them\r\n lns = h_file.read().splitlines()\r\n flt = filter(lambda ln: not re.match(r\" *#\", ln), lns)\r\n flt = map(lambda ln: ln.replace(\"EXPORT_SYMBOL \", \"\"), flt)\r\n ffi.cdef(str(\"\\n\").join(flt))\r\n\r\n ffi.set_source(\r\n \"cffi_example\",\r\n # Since we are calling a fully built library directly no custom source\r\n # is necessary. We need to include the .h files, though, because behind\r\n # the scenes cffi generates a .c file which contains a Python-friendly\r\n # wrapper around each of the functions.\r\n '#include \"cmult.h\"',\r\n # The important thing is to include the pre-built lib in the list of\r\n # libraries we are linking against:\r\n libraries=[\"cmult\"],\r\n library_dirs=[this_dir.as_posix()],\r\n extra_link_args=[\"-Wl,-rpath,.\"],\r\n )\r\n\r\n ffi.compile()\r\n print(\"* Complete\")", "def gguf_init_from_file(fname: ffi.CData, params: ffi.CData) -> ffi.CData:\n ...", "def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s", "def gen_csource(protocol):\n\tdef format_default(reg):\n\t\t\"\"\"Given a reg, return its default value formatted as a string for inclusion in\n\t\t a C source file.\"\"\"\n\t\tif reg.size == \"accum\":\n\t\t\treturn str(float(reg.default)) + \"k\"\n\t\telse:\n\t\t\treturn str(int(reg.default)) + \"L\"\n\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <avr/interrupt.h>\n#include <util/atomic.h>\n#include \"protocol.h\"\n#include \"spi.h\"\n\n\"\"\"\n\ts += \"volatile struct comm_data_t Data = {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t.\" + r.name + \" = \" + format_default(r) + \", /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\ts += \"\\n\"\n\t\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void){ /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"\"\"\\t%s 
v;\n\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tv = Data.%s;\n\t}\n\treturn v;\n}\n\"\"\"%(r.size, r.name)\n\t\ts += \"void set_%s(%s v){ /* %s */\\n\"%(r.name, r.size, r.desc)\n\t\ts += \"\"\"\\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tData.%s = v;\n\t}\n}\n\n\"\"\"%(r.name)\n\ts += \"\"\"ISR(SPI0_STC_vect){\n\tuint8_t reg_num = SPDR0;\n\tswitch(reg_num){\n\"\"\"\n\t\n\tfor r in protocol:\n\t\tif r.write:\n\t\t\ts += \"\\t\\tcase % 2d: /* Write %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_rx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\t\tif r.read:\n\t\t\ts += \"\\t\\tcase 0x80 + % 2d: /* Read %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_tx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\ts += \"\"\"\t}\n\n\t/* Clear SPIF flag */\n\treg_num = SPSR0;\n\treg_num = SPDR0;\n}\n\"\"\"\t\n\treturn s", "def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return &___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)", "def build_cffi(c):\n print(\"Building CFFI Module\")\n invoke.run(\"gcc -c -Wall -Werror -fpic cmult.c -I /usr/include/python3.7\")\n invoke.run(\"gcc -shared -o libcmult.so cmult.o\")\n\n ffi = cffi.FFI()\n\n this_dir = pathlib.Path().resolve()\n h_file_name = this_dir / \"cmult.h\"\n with open(h_file_name) as h_file:\n ffi.cdef(h_file.read())\n\n ffi.set_source(\n \"cffi_example\",\n # Since we are calling a fully built library directly no custom source\n # is necessary. We need to include the .h files, though, because behind\n # the scenes cffi generates a .c file which contains a Python-friendly\n # wrapper around each of the functions.\n '#include \"cmult.h\"',\n # The important thing is to include the pre-built lib in the list of\n # libraries we are linking against:\n libraries=[\"cmult\"],\n library_dirs=[this_dir.as_posix()],\n extra_link_args=[\"-Wl,-rpath,.\"],\n )\n\n ffi.compile()\n print(\"* Complete\")", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if 
declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def generate_headers(sdfg) -> str:\n proto = \"\"\n params = (sdfg.name, sdfg.signature(with_types=True, for_call=False))\n proto += 'extern \"C\" int __dace_init_%s(%s);\\n' % params\n proto += 'extern \"C\" int __dace_exit_%s(%s);\\n' % params\n proto += 'extern \"C\" void __program_%s(%s);\\n' % params\n return proto", "def _gen_code(self):\r\n #TODO: maybe generate one C 
function only to save compile time? Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. #\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // 
_K_code\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // 
_dKdiag_dtheta_code\r\n // Code for computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. 
\r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def translate_to_c(Newast):\n ast = parse_file('exampleMin.c', use_cpp=True)\n\n ast.show()\n #print(\"newast: \", Newast.ext[0].decl.type.args.params[0].type.type==ast.ext[0].decl.type.args.params[0].type.type)\n #print(\"newast2: \", Newast.ext[0].decl.type.args.params[0].type.type.coord)\n #print(\"ast2: \", ast.ext[0].decl.type.args.params[0].type.type.coord)\n\n #Newast.show()\n \n # print(ast.ext[0].decl.bitsize)\n # print(Newast.ext[0].decl.bitsize)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.coord)\n # print(Newast.ext[0].decl.type.args.coord)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params)\n # print(Newast.ext[0].decl.type.args.params)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0])\n # print(Newast.ext[0].decl.type.args.params[0])\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type)\n # print(Newast.ext[0].decl.type.args.params[0].type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type)\n # print(Newast.ext[0].decl.type.args.params[0].type.type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type.names)\n # print(Newast.ext[0].decl.type.args.params[0].type.type.names)\n # print(\"----------------------------------\")\n\n generator = c_generator.CGenerator()\n #ast.show()\n\n # tracing the generator for debugging\n # import trace\n # tr = trace.Trace(countcallers=1)\n # tr.runfunc(generator.visit, Newast)\n # tr.results().write_results()\n\n print(generator.visit(Newast))", "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" 
+\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n 
Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def dump_func_table_c(c_file, mod):\n MOD = mod.upper()\n print(\" --> [%s]\" % c_file)\n with open(c_file, \"w\") as Out:\n dump_copyright(Out)\n print(\"#ifndef NETMOD_INLINE\", file=Out)\n print(\"#define NETMOD_DISABLE_INLINES\", file=Out)\n print(\"#include <mpidimpl.h>\", file=Out)\n print(\"MPL_SUPPRESS_OSX_HAS_NO_SYMBOLS_WARNING;\", file=Out)\n print(\"\", file=Out)\n print(\"#include \\\"netmod_inline.h\\\"\", file=Out)\n print(\"MPIDI_NM_funcs_t MPIDI_NM_%s_funcs = {\" % mod, file=Out)\n for a in G.apis:\n if 'nm_params' not in a:\n continue\n if not a['native']:\n if a['nm_inline']:\n print(\" .%s = MPIDI_NM_%s,\" % (a['name'], a['name']), file=Out)\n else:\n print(\" .%s = MPIDI_%s_%s,\" % (a['name'], MOD, a['name']), file=Out)\n print(\"};\", file=Out)\n print(\"\", file=Out)\n print(\"MPIDI_NM_native_funcs_t MPIDI_NM_native_%s_funcs = {\" % mod, file=Out)\n for a in G.apis:\n if 'nm_params' not in a:\n continue\n if a['native']:\n if a['nm_inline']:\n print(\" .%s = MPIDI_NM_%s,\" % (a['name'], a['name']), file=Out)\n else:\n print(\" .%s = MPIDI_%s_%s,\" % (a['name'], MOD, a['name']), file=Out)\n print(\"};\", file=Out)\n print(\"#endif\", file=Out)", "def generate_dummy(sdfg) -> str:\n includes = \"#include <stdlib.h>\\n\"\n includes += \"#include \\\"\" + sdfg.name + \".h\\\"\\n\\n\"\n header = \"int main(int argc, char** argv) {\\n\"\n allocations = \"\"\n deallocations = \"\"\n sdfg_call = \"\"\n footer = \" return 0;\\n}\\n\"\n\n al = sdfg.arglist()\n\n # first find all scalars and set them to 42\n for argname, arg in al.items():\n if isinstance(arg, data.Scalar):\n allocations += \" \" + str(arg.as_arg(name=argname,\n with_types=True)) + \" = 42;\\n\"\n\n # allocate the array args using calloc\n for argname, arg in al.items():\n if isinstance(arg, data.Array):\n dims_mul = cpp.sym2cpp(\n functools.reduce(lambda a, b: a * b, arg.shape, 1))\n basetype = str(arg.dtype)\n allocations += \" \" + str(arg.as_arg(name=argname, with_types=True)) + \\\n \" = (\" + basetype + \"*) calloc(\" + dims_mul + \", sizeof(\"+ basetype +\")\" + 
\");\\n\"\n deallocations += \" free(\" + argname + \");\\n\"\n\n sdfg_call = '''\n __dace_init_{name}({params});\n __program_{name}({params});\n __dace_exit_{name}({params});\\n\\n'''.format(name=sdfg.name,\n params=sdfg.signature(\n with_types=False,\n for_call=True))\n\n res = includes\n res += header\n res += allocations\n res += sdfg_call\n res += deallocations\n res += footer\n return res", "def fortran_interface(self) -> str:\n result = ''\n if self.fc_override == '':\n return result\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n # declaration\n in_parameters = self._fi_in_parameters()\n return_type, out_parameters = self._fi_out_parameters()\n if self.may_throw:\n out_parameters.append(('integer (c_int)', 'err_code'))\n out_parameters.append(('type (c_ptr)', 'err_msg'))\n out_parameters.append(('integer (c_size_t)', 'err_msg_len'))\n\n arg_list = [par_name for _, par_name in in_parameters + out_parameters]\n if len(arg_list) > 1:\n arg_vlist = ' &\\n' + indent(', &\\n'.join(arg_list), 8*' ')\n else:\n arg_vlist = ', '.join(arg_list)\n\n if return_type != '':\n result += '{} function {}({}) &\\n'.format(\n return_type, func_name, arg_vlist)\n else:\n result += 'subroutine {}({}) &\\n'.format(func_name, arg_vlist)\n result += ' bind(C, name=\"{}\")\\n'.format(func_name)\n result += '\\n'\n result += ' use iso_c_binding\\n'\n\n # parameter declarations\n for par_type, par_name in in_parameters:\n result += ' {}, intent(in) :: {}\\n'.format(\n par_type, par_name)\n for par_type, par_name in out_parameters:\n result += ' {}, intent(out) :: {}\\n'.format(par_type, par_name)\n\n # end\n if return_type != '':\n result += 'end function {}\\n\\n'.format(func_name)\n else:\n result += 'end subroutine {}\\n\\n'.format(func_name)\n return indent(result, 8*' ')", "def generate_c_file(c_file,\n caller, header,\n main_generator):\n c_file.write('/* Generated by {} */'\n .format(caller))\n c_file.write('''\n#include <stdio.h>\n''')\n c_file.write(header)\n c_file.write('''\nint main(void)\n{\n''')\n main_generator(c_file)\n c_file.write(''' return 0;\n}\n''')", "def codegen():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up the Opple light platform.
def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: name = config[CONF_NAME] host = config[CONF_HOST] entity = OppleLight(name, host) add_entities([entity]) _LOGGER.debug("Init light %s %s", host, entity.unique_id)
[ "def platform_start(self):\n self.platform.start()", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)", "def setupScene():\n global ogre_scene_manager\n global ogre_render_window\n global ogre_root_node\n\n ogre_render_window = ogre_root.getAutoCreatedWindow()\n ogre_scene_manager = ogre_root.createSceneManager(ogre.ST_GENERIC,\n \"Default SceneManager\")\n camera = ogre_scene_manager.createCamera(\"Camera\")\n ogre_root.getAutoCreatedWindow().addViewport(camera)\n\n camera.setPosition(ogre.Vector3(0, 0, 120))\n camera.lookAt(ogre.Vector3(0, 0, 0))\n\n ogre_scene_manager.setAmbientLight(ogre.ColourValue(0.7, 0.7, 0.7))\n ogre_scene_manager.setFog(ogre.FOG_EXP, ogre.ColourValue(1, 1, 1), 0.0002)\n light = ogre_scene_manager.createLight('lightMain')\n light.setPosition(ogre.Vector3(10, 10, 10))\n\n ogre_root_node = ogre_scene_manager.getRootSceneNode()", "def setup(hass, config):\n import insteon\n\n username = config[DOMAIN][CONF_USERNAME]\n password = config[DOMAIN][CONF_PASSWORD]\n api_key = config[DOMAIN][CONF_API_KEY]\n\n global INSTEON\n INSTEON = insteon.Insteon(username, password, api_key)\n\n if INSTEON is None:\n _LOGGER.error(\"Could not connect to Insteon service\")\n return False\n\n discovery.load_platform(hass, 'light', DOMAIN, {}, config)\n\n return True", "def setup_platform(hass, config, add_entities, discovery_info=None):\n data: KohlerData = hass.data[DATA_KOHLER]\n\n # Add devices\n lights: list[KohlerLight] = []\n for light in data.lights:\n if light.installed:\n lights.append(KohlerLight(data, light))\n\n add_entities(lights)", "def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def startup_platform(self):\n from pyon.public import CFG\n self.receive_timeout = CFG.endpoint.receive.timeout\n\n state = self.platform_agent.get_agent_state()\n\n if state != PlatformAgentState.COMMAND:\n # Initialize platform\n if 
state == PlatformAgentState.UNINITIALIZED:\n attempt = 0\n while (attempt < self.max_attempts and state != PlatformAgentState.INACTIVE):\n attempt += 1\n self.platform_inactive_state()\n state = self.platform_agent.get_agent_state()\n\n # Go active\n if state == PlatformAgentState.INACTIVE:\n attempt = 0\n while (attempt < self.max_attempts and state != PlatformAgentState.IDLE):\n attempt += 1\n self.platform_idle_state()\n state = self.platform_agent.get_agent_state()\n\n # Run\n if state == PlatformAgentState.IDLE:\n attempt = 0\n while (attempt < self.max_attempts and state != PlatformAgentState.COMMAND):\n attempt += 1\n self.platform_command_state()\n state = self.platform_agent.get_agent_state()\n\n # # Run Mission\n # if state == PlatformAgentState.COMMAND:\n # attempt = 0\n # while (attempt < self.max_attempts and state != PlatformAgentState.MISSION_COMMAND):\n # attempt += 1\n # self.platform_mission_running_state()\n # state = self.platform_agent.get_agent_state()", "def main():\r\n LEDStrip = createNeoPixelObject()\r\n setup(LEDStrip)\r\n clock(LEDStrip)", "def setup(self):\n\n caps = {}\n caps[\"platformName\"] = \"Android\"\n caps[\"automationName\"] = \"uiautomator2\"\n caps[\"deviceName\"] = \"xiaomi-mi_5s-238c8e2f\"\n# \"\"\"hogwarts\"\n\n caps[\"appPackage\"] = \"com.xueqiu.android\"\n caps[\"appActivity\"] = \".view.WelcomeActivityAlias\"\n # caps[\"noReset\"] = True\n\n self.driver = webdriver.Remote(\"http://localhost:4723/wd/hub\", caps)\n self.driver.implicitly_wait(10)", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def setupScene():\n global ogre_scene_manager\n global ogre_render_window\n global ogre_root_node\n global ogre_camera\n\n ogre_render_window = ogre_root.getAutoCreatedWindow()\n ogre_scene_manager = ogre_root.createSceneManager(ogre.ST_GENERIC,\n \"Default SceneManager\")\n ogre_camera = ogre_scene_manager.createCamera(\"Camera\")\n ogre_root.getAutoCreatedWindow().addViewport(ogre_camera)\n\n ogre_camera.setPosition(ogre.Vector3(0, 40, 5))\n ogre_camera.lookAt(ogre.Vector3(0, 0, 0))\n ogre_camera.nearClipDistance = 5\n\n ogre_scene_manager.setAmbientLight(ogre.ColourValue(0.05, 0.05, 0.05))\n ogre_scene_manager.setShadowTechnique(ogre.SHADOWTYPE_STENCIL_ADDITIVE)\n ogre_scene_manager.setFog(ogre.FOG_EXP, ogre.ColourValue(1, 1, 1), 0.002)\n\n directional_light = ogre_scene_manager.createLight('Light-Directional')\n directional_light.setType(ogre.Light.LT_DIRECTIONAL)\n directional_light.setDirection(0.1, -1, 0.5)\n directional_light.setDiffuseColour(0.5, 0.5, 0.5)\n directional_light.setSpecularColour(0.02, 0, 0)\n\n ogre_root_node = ogre_scene_manager.getRootSceneNode()", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if 
discovery_info is None:\n return\n add_entities([EufyHomeSwitch(discovery_info)], True)", "def test_setup_adds_proper_devices(self, mock_light):\n good_config = {\n \"mochad\": {},\n \"light\": {\n \"platform\": \"mochad\",\n \"devices\": [{\"name\": \"Light1\", \"address\": \"a1\"}],\n },\n }\n assert setup_component(self.hass, light.DOMAIN, good_config)", "def main():\n # Parse arguments for configuration and light type\n parser = argparse.ArgumentParser()\n parser.add_argument(\"light_type\", help=\"lifx or hue\", choices=['lifx', 'hue'], type = str.lower)\n parser.add_argument(\"-c\", \"--config_mode\", action='store_true', help=\"runs the client in config mode which prints out the light data\")\n \n args = parser.parse_args()\n \n config_mode = args.config_mode\n light_type = args.light_type\n \n # Get light information \n # *Note*\n # Only LIFX is supported at this point in time\n light_service = None\n if light_type == 'lifx':\n light_service = lightservice.LIFXLightService(\"https://api.lifx.com/v1/\")\n \n data = light_service.refresh_light_data(config_mode)\n \n button_handler = None\n if config_mode:\n button_handler = buttonhandler.ConfigButtonHandler()\n button_handler.start()\n else:\n button_handler = buttonhandler.ButtonHandler(data)\n button_handler.start(light_service)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n add_devices([\n DemoThermostat(\"Nest\", 21, TEMP_CELSIUS, False, 19, False),\n DemoThermostat(\"Thermostat\", 68, TEMP_FAHRENHEIT, True, 77, True),\n ])", "def setup_platform(hass, config, add_devices, discovery_info=None):\n from pybotvac import Account\n\n try:\n auth = Account(config[CONF_USERNAME], config[CONF_PASSWORD])\n except HTTPError:\n _LOGGER.error(\"Unable to connect to Neato API\")\n return False\n\n dev = []\n for robot in auth.robots:\n for type_name in SWITCH_TYPES:\n dev.append(NeatoConnectedSwitch(robot, type_name))\n add_devices(dev)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }