query: string (length 9 – 9.05k)
document: string (length 10 – 222k)
negatives: sequence (length 19 – 20)
metadata: dict
Calculate the bounding rectangle. It is a straight (axis-aligned) rectangle that does not consider the rotation of the object, so its area is not the minimum possible. It is found with the function cv2.boundingRect().
def __CalculateBoundingBox(self, contour):
    return cv2.boundingRect(contour)
[ "def boundingRect(self):\r\n bias_x_1 = 50\r\n bias_y_1 = 20\r\n i = self.x - 14\r\n j = self.y - 9\r\n\r\n parity_ind = self.y % 2\r\n side_size = 10\r\n\r\n return QRectF(bias_x_1 + int((i*1.732 + parity_ind*0.866 + 0) * side_size), bias_y_1 + int((j * 1.5 + 0) * side_size),\r\n side_size*0.866*2, side_size*2)", "def boundingRect(self):\n o = self.handleSize + self.handleSpace\n return self.rect().adjusted(-o, -o, o, o)", "def boundingBox(self):\n minx, miny, maxx, maxy = self.substrates.bounds\n return pcbnew.BOX2I(\n pcbnew.VECTOR2I(int(minx), int(miny)),\n pcbnew.VECTOR2I(int(maxx - minx), int(maxy - miny)))", "def get_bounding_box(current_building_contour):\n x, y, w, h, = cv.boundingRect(current_building_contour[0])\n return x, y, w, h", "def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2", "def calc_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n return rmin, cmin, rmax, cmax", "def rect_to_bounding_box(rect):\n x = rect.left()\n y = rect.top()\n w = rect.right() - x\n h = rect.bottom() - y\n\n return x, y, w, h", "def boundingRect(self) -> QRectF:\n return self._rect.adjusted(-10, -10, 10, 10)", "def _get_available_bounding_box(self):\n width, height = self.get_material_width(),self.get_material_height()\n top, right, bottom, left = self.get_padding()\n return [left,width-right,bottom,height-top]", "def calculateRect(k: Koordinate):\n x = FENSTER_RAND_ABSTAND + k.x * k.laenge + (k.laenge / 4)\n y = FENSTER_RAND_ABSTAND + k.y * k.laenge + (k.laenge / 4)\n width = k.laenge - (k.laenge / 2)\n height = k.laenge - (k.laenge / 2)\n return Rect(x, y, width, height)", "def boundingRectPoints(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\tfirst = (x, y)\n\tend = (x+w, y+h)\n\treturn {\"top-left\": first, \"bottom-right\":end}", "def get_boundingbox(face, width, height, scale=1.3, minsize=None):\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb", "def get_area(self, bounding_rect):\n # every bounding box is composed of: x,y,w,h\n return bounding_rect[2] * bounding_rect[3]", "def boundingBox(self):\n pmodel = (glm.vec3(1, -self.y_sign, 0)\n * self.model.pos * self.transform.scale)\n x, y, _ = self.transform.pos + pmodel\n y += -self.y_sign * self.font.table['ascent'] * self.transform.scale[1]\n return x, y, self.pixwidth(), self.pixheight()", "def bounding_box(self):\n # We use the solution described in\n # https://stackoverflow.com/a/14163413\n cos_theta = np.cos(self.angle)\n sin_theta = np.sin(self.angle)\n width_x = 0.5 * self.width * cos_theta\n 
width_y = 0.5 * self.width * sin_theta\n height_x = 0.5 * self.height * -sin_theta\n height_y = 0.5 * self.height * cos_theta\n dx = np.sqrt(width_x**2 + height_x**2)\n dy = np.sqrt(width_y**2 + height_y**2)\n\n xmin = self.center.x - dx\n xmax = self.center.x + dx\n ymin = self.center.y - dy\n ymax = self.center.y + dy\n\n return RegionBoundingBox.from_float(xmin, xmax, ymin, ymax)", "def bounds(self):\n return self.rectangle.bounds", "def find_bbox(self):\n lower_wal = (17, 250, 241) # (25, 22, 19) (22, 255, 246)\n upper_wal = (27, 256, 251) # (25, 22, 19)\n lower_bp = (99, 211, 241) # (98, 90, 77) (104, 216, 246)\n upper_bp = (109, 221, 251) # (98, 90, 77)\n lower_rc = (99, 211, 208) # (39, 35, 25) (104, 216, 213)\n upper_rc = (109, 221, 218) # (39, 35, 25)\n lower_key = (16, 13, 11) # (21, 18, 16)\n upper_key = (26, 23, 21) # (21, 18, 16)\n\n self.img_wal = cv2.inRange(self.img_bgr, lower_wal, upper_wal)\n\n self.img_bp = cv2.inRange(self.img_bgr, lower_bp, upper_bp)\n\n self.img_rc = cv2.inRange(self.img_bgr, lower_rc, upper_rc)\n\n self.img_key = cv2.inRange(self.img_bgr, lower_key, upper_key)\n\n \n\n \"\"\"\n # 로직 4. 물체의 contour 찾기\n # 지갑, 키 등의 물체들이 차지한 픽셀만 흰색으로 이진화되어 있는 이미지에 대해서,\n # 흰색 영역을 감싸는 contour들을 구하십시오.\n # cv2.findContours를 가지고 \n \"\"\"\n contours_wal, _ = cv2.findContours(self.img_wal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n contours_bp, _ = cv2.findContours(self.img_bp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n contours_rc, _ = cv2.findContours(self.img_rc, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n contours_key, _ = cv2.findContours(self.img_key, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n \n\n \"\"\"\n # 로직 5. 물체의 bounding box 좌표 찾기\n \"\"\"\n self.find_cnt(contours_wal)\n \n self.find_cnt(contours_bp)\n \n self.find_cnt(contours_rc)\n \n self.find_cnt(contours_key)", "def calc_bounding_box(self):\n # This may get overwritten in some subclasses\n self.boundingBox = BBox.as_bbox((self.position, self.position))", "def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the centroid of the contour from its moments (computed up to the third order for a polygon or rasterized shape).
def __CalculateCentroid(self, contour):
    moments = cv2.moments(contour)
    centroid = (-1, -1)
    if moments["m00"] != 0:
        centroid = (int(round(moments["m10"] / moments["m00"])),
                    int(round(moments["m01"] / moments["m00"])))
    return centroid
[ "def contour_centroid(contour):\n\n moments = cv2.moments(contour)\n centroid = np.array(\n [moments['m10'] / moments['m00'], moments['m01'] / moments['m00']])\n\n return centroid[0], centroid[1]", "def centroid(self):\n A = 1 / (6*self.area)\n cx,cy = 0,0\n for ind in xrange(-1, len(self.vertices)-1):\n pi = self.vertices[ind]\n pii = self.vertices[ind+1]\n v = pi[0]*pii[1]-pii[0]*pi[1]\n cx += v*(pi[0] + pii[0])\n cy += v*(pi[1] + pii[1])\n return Point(simplify(A*cx), simplify(A*cy))", "def centroid(self) -> Point:\n return self._context.polygon_centroid(self)", "def centroid(self):\n return self._topology(capi.geos_centroid(self.ptr))", "def calc_centroid(image):\r\n x_array, y_array = mesh_grid_from_array(image)\r\n centroid = Pixel(units='pixels')\r\n centroid.x = (image * x_array).sum() / image.sum()\r\n centroid.y = (image * y_array).sum() / image.sum()\r\n return centroid", "def centroid(self):\n l1 = self.angleBisector(idx=0)\n l2 = self.angleBisector(idx=1)\n return l1.intersectionWith(l2)", "def centroid(cnt):\n m = cv2.moments(cnt)\n cx = int(m['m10'] / m['m00'])\n cy = int(m['m01'] / m['m00'])\n return cx, cy", "def centroid(X):\n C = X.mean(axis=0)\n return C", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def get_contour_centroid(contour):\n M = cv2.moments(contour)\n cx = int(M[\"m10\"] / M[\"m00\"])\n cy = int(M[\"m01\"] / M[\"m00\"])\n return (cx, cy)", "def compute_polygon_centroid_2d(polygon):\r\n return geometry.gmComputePolygonCentroid(polygon)", "def centroid(t, v):\n c = numpy.zeros(v[0].shape)\n total_area = 0\n for i in range(len(t)):\n p = vertices(t[i], v)\n ct = triangle.centroid(p)\n area = triangle.area(p)\n c += area * ct\n total_area += area\n c /= total_area\n return c", "def centroid(arr):\n length = arr.shape[0]\n sum_x = np.sum(arr[:, 0])\n sum_y = np.sum(arr[:, 1])\n sum_z = np.sum(arr[:, 2])\n return sum_x / length, sum_y / length, sum_z / length", "def centroid_polygon(polygon):\n p = len(polygon)\n\n assert p > 2, \"At least three points required\"\n\n if p == 3:\n return centroid_points(polygon)\n\n o = centroid_points(polygon)\n a = polygon[-1]\n b = polygon[0]\n oa = subtract_vectors(a, o)\n ob = subtract_vectors(b, o)\n n0 = cross_vectors(oa, ob)\n\n x, y, z = centroid_points([o, a, b])\n a2 = length_vector(n0)\n\n A2 = a2\n cx = a2 * x\n cy = a2 * y\n cz = a2 * z\n\n for i in range(1, p):\n a = b\n b = polygon[i]\n\n oa = ob\n ob = subtract_vectors(b, o)\n\n n = cross_vectors(oa, ob)\n x, y, z = centroid_points([o, a, b])\n\n if dot_vectors(n, n0) > 0:\n a2 = length_vector(n)\n else:\n a2 = -length_vector(n)\n\n A2 += a2\n cx += a2 * x\n cy += a2 * y\n cz += a2 * z\n\n if A2 == 0:\n return polygon[0]\n\n return [cx / A2, cy / A2, cz / A2]", "def centroid(points):\n points = numpy.array(points)\n num, dim = points.shape\n return numpy.add.reduce(points)/float(num)", "def centroid(sign, FS):\n\n time = compute_time(sign, FS)\n\n energy, time_energy=signal_energy(sign, time)\n\n total_energy = np.dot(np.array(time_energy),np.array(energy))\n 
energy_sum = np.sum(energy)\n\n if energy_sum == 0 or total_energy == 0:\n centroid = 0\n else:\n centroid = total_energy / energy_sum\n return centroid", "def find_centroid(image):\n # is this row major or column major????\n\tM = cv2.moments(image)\n\tcx = int(M['m10'] / M['m00']) # x position of the center\n\tcy = int(M['m01'] / M['m00']) # y position of the center\n\tcentroid = [cy, cx] # saved as row major, to work easily with numpy\n\treturn centroid", "def centroid(self) -> Point[Scalar]:\n return self._context.multipoint_centroid(self)", "def centroid(self):\n\n ycen, xcen = self.cutout_centroid.value\n return (ycen + self._slice[0].start,\n xcen + self._slice[1].start) * u.pix" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the circumcircle of an object using the function cv2.minEnclosingCircle(). It is a circle which completely covers the object with minimum area.
def __CalculateCircle(self, contour):
    return cv2.minEnclosingCircle(contour)
[ "def boundingCircle(self):\n\n try:\n import cv2\n except:\n logger.warning(\"Unable to import cv2\")\n return None\n\n # contour of the blob in image\n contour = self.contour()\n\n points = []\n # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()\n for pair in contour:\n points.append([[pair[0], pair[1]]])\n\n points = np.array(points)\n\n (cen, rad) = cv2.minEnclosingCircle(points);\n\n return (cen[0], cen[1], rad)", "def extract_circle(img):\r\n ret, thresh = cv2.threshold(img, 177, 200, cv2.THRESH_BINARY)\r\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\r\n cnt = contours[0]\r\n circle = cv2.minEnclosingCircle(cnt)\r\n return circle", "def find_min_circle(contours):\n center = (0, 0)\n radius = 0\n\n if len(contours) > 0:\n #compute the minimum enclosing circle and centroid\n c = max(contours, key=cv2.contourArea)\n (x, y), radius = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n else:\n #ball not found\n center = None\n radius = None\n return center, radius", "def circumcircle(self):\n from Drawables.Circle import Circle\n x = self.circumcenter()\n return Circle.fromMetrics(x, x.distanceTo(point=self.vertices[0]))", "def getCircleCircumscribed(self):\n p1, p2, p3 = self.points\n a1 = - (p2.x - p1.x) / (p2.y - p1.y)\n b1 = (p2.x ** 2 - p1.x ** 2 + p2.y ** 2 - p1.y ** 2) / (2 * (p2.y - p1.y))\n a2 = - (p3.x - p2.x) / (p3.y - p2.y)\n b2 = (p3.x ** 2 - p2.x ** 2 + p3.y ** 2 - p2.y ** 2) / (2 * (p3.y - p2.y))\n x = (b1 - b2) / (a2 - a1)\n y = a1 * x + b1\n radius = math.hypot(p1.x - x, p1.y - y)\n return Circle(x, y, radius=radius)", "def get_contour_centre(self, contour):\n return cv2.minEnclosingCircle(contour)", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def circumradius(self):\n return Point.distance(self.circumcenter, self.vertices[0])", "def detectCircle(img,gausSize,minR,maxR):\n if len(img.shape)>2:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n else:\n gray = img.copy()\n\n grayGaus = cv2.GaussianBlur(gray,(gausSize,gausSize),0)\n\n # Apply Hough transform on the blurred image. 
\n circles = cv2.HoughCircles(grayGaus,cv2.HOUGH_GRADIENT, 1, 20, param1 = 50,\n param2 = 30, minRadius = minR, maxRadius = maxR)\n\n if circles is None:\n return None\n else:\n return circles[0]", "def _circumcircle(self):\n ax, ay = self.p1\n bx, by = self.p2\n cx, cy = self.p3\n d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2\n x = ((ax**2 + ay**2)*(by - cy) +\n (bx**2 + by**2)*(cy - ay) +\n (cx**2 + cy**2)*(ay - by)) / d\n y = ((ay**2 + ax**2)*(cx - bx) +\n (by**2 + bx**2)*(ax - cx) +\n (cy**2 + cx**2)*(bx - ax)) / d\n r = math.sqrt((x - ax)**2 + (y - ay)**2)\n return ((x, y), r)", "def incircle(self):\n return Circle(self.incenter, self.inradius)", "def fit_central_circle(image, radius_lower_bound=170, radius_upper_bound=190):\n\n smoothed = smooth_gaussian(image.astype(np.float), sigma=5)\n edges = find_edges_sobel(smoothed)\n thresh = threshold_otsu(edges)\n\n hmm = 170, 190\n hough_radii = np.arange(140, 170, 2)\n hough_res = hough_circle(thresh, hough_radii)\n\n circles = find_n_best_hough_circles(hough_radii, hough_res, 1)\n circle = circles[0]\n\n return circle", "def find_circle_size(x, y, r,\n camera_intrinsic,\n ground_plane_origin, ground_plane_normal,\n ):\n # compute vector normal to plane which represents camera center\n G = ((np.dot( ground_plane_origin, ground_plane_origin)/\n np.dot( ground_plane_origin, ground_plane_normal)) *\n ground_plane_normal\n )\n # extract vertical camera intrinsics\n fy = camera_intrinsic[1,1]\n cy = camera_intrinsic[1,2]\n # vertical angle to object\n psi = np.arctan2(y-cy, fy)\n fx = camera_intrinsic[0,0]\n cx = camera_intrinsic[0,2]\n # horizontal angle to object\n phi = np.arctan2(x-cx, fx)\n # vertical angle from camera z-axis to G\n theta = np.arctan2(-G[2], G[0])\n # Distance from camera to ground plane in object direction\n R = np.cos(phi)*linalg.norm(G)/np.cos(psi+theta)\n return r*R/fx", "def find_largest_enclosing_circle(img):\n if img.dtype is not np.dtype(np.uint8):\n raise ValueError('The input image data type should be uint8.')\n\n # Calculate histogram.\n hist = cv.calcHist([img], [0], None, [256], [0, 256])\n\n # Find the min and max intensity value on the image.\n min_i, max_i = find_histogram_range(hist)\n\n # Threshold the image at the median intensity.\n _, binary_img = cv.threshold(img, (max_i + min_i) / 2, 255, cv.THRESH_BINARY)\n\n # Find contours.\n contours, _ = cv.findContours(binary_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_TC89_L1)\n if len(contours) == 0:\n return (0, 0), 0\n\n # Find a minimum enclosing circle for each contour, and find the largest one.\n circles = [cv.minEnclosingCircle(contour) for contour in contours]\n max_circle = max(circles, key=lambda circle: circle[1])\n (center_x, center_y), radius = max_circle\n return (int(center_x), int(center_y)), int(radius)", "def circumcenter(self):\n from Drawables.Line import Line\n from Drawables.Point import Point\n (a, b, c) = self.vertices[0:3]\n l1 = Point.bisect(a, b)\n l2 = Point.bisect(a, c)\n return Line.intersectionWith(l1, l2)", "def circumcenter(self):\n a,b,c = [x.perpendicular_bisector() for x in self.sides]\n return GeometryEntity.do_intersection(a, b)[0]", "def circle_rating(contour, area_factor=0.9, radius_factor=0.8):\n fill_ratio, radius = circle_fill_ratio(contour)\n _, _, width, height = cv2.boundingRect(contour)\n radius_ratio = ((((radius * 2) ** 2) / float(width) * height) ** 0.5)\n rating = (radius_ratio * radius_factor) * (fill_ratio * area_factor)\n return rating", "def object_circularity(labelmask, label):\n # Find z slice 
with most pixels from object.\n z, i, j = np.where(labelmask == label)\n zmax = mode(z)[0][0]\n # Select 2D image representing object's max Z-slice.\n im = np.where(labelmask[zmax] == label, 1, 0)\n # Calculate circularity from object perimeter and area.\n regions = regionprops(im)\n perimeter = regions[0].perimeter\n area = regions[0].area\n if (perimeter == 0):\n perimeter = 0.5\n circularity = 4 * np.pi * area / (perimeter ** 2) \n return circularity", "def find_best_circle(x, y):\n method_2 = \"leastsq\"\n\n x_m, y_m = np.mean(x), np.mean(y)\n def calc_R(xc, yc):\n \"\"\"\n calculate the distance of each 2D points from the center (xc, yc)\n \"\"\"\n return ((x-xc)**2 + (y-yc)**2) ** (1/2)\n\n def f_2(c):\n \"\"\"\n calculate the algebraic distance between the data points and\n the mean circle centered at c=(xc, yc)\n \"\"\"\n Ri = calc_R(*c)\n return Ri - Ri.mean()\n\n center_estimate = x_m, y_m\n center_2, ier = optimize.leastsq(f_2, center_estimate)\n\n xc_2, yc_2 = center_2\n Ri_2 = calc_R(*center_2)\n R_2 = Ri_2.mean()\n residu_2 = sum((Ri_2 - R_2)**2)\n\n return xc_2, yc_2, R_2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the contour extent: the ratio of the contour area to the area of its bounding rectangle.
def __CalculateExtend(self, contour):
    area = self.__CalculateArea(contour)
    boundingBox = self.__CalculateBoundingBox(contour)
    return area / (boundingBox[2] * boundingBox[3])
[ "def calc_base_eff_and_infl(self):\n return 2 + (self.level - 1)", "def life_insurance_to_recive_total(self):\n pass", "def calcular_incremento_costo_base(self):\n pass #TODO: Reemplazar \"pass\" y completar el método según la documentación", "def estimate_incumbent(self, startpoints):\n\n pass", "def extras_total(self):\n total = self.wides + self.no_balls + self.byes + self.leg_byes\n return total", "def calc_calories(gpx_track, wt = 175, activity='Run'):", "def calc_intertie_offset_generation (self):\n self.generation = \\\n self.forecast.get_generation(self.start_year,self.end_year)\n dist = self.comp_specs['distance to community']\n self.annual_transmission_loss = \\\n 1 - (\n (1- (self.comp_specs['transmission loss per mile']/ 100.0))\n ** dist)\n self.intertie_offset_generation = \\\n self.generation * (1 + self.annual_transmission_loss)\n\n gen_eff = self.intertie_generation_efficiency\n self.intertie_offset_generation_fuel_used = \\\n self.intertie_offset_generation / gen_eff\n #~ print 'self.proposed_generation',self.proposed_generation\n #~ print con", "def molarExtinction (self):\r\n extintCoef = 0\r\n for aa in ProteinParam.aa2abs280.keys():\r\n extintCoef += self.protString.count(aa) * ProteinParam.aa2abs280[aa]\r\n return extintCoef", "def private_pension_total(self):\n pass", "def total_annual_water_make_up(self):\n return np.array(self.cooling_water_flow).sum()", "def molarExtinction(self):\n extinction = sum(self.aaComp[aa] * self.aa2abs280[aa] for aa in self.aa2abs280)\n return extinction", "def _calculate_subtotal(self):\n raise NotImplementedError", "def calc_base_eff_and_infl(level):\n return 2 + (level - 1)", "def calculate_activities(self):\n # Sleep\n sleep = self.sleep_hours * 0.95\n\n # Work\n if self.work_intensity == self.INTENSITY_LOW:\n work_factor = 1.5\n elif self.work_intensity == self.INTENSITY_MEDIUM:\n work_factor = 1.8\n else:\n work_factor = 2.2\n work = self.work_hours * work_factor\n\n # Sport (entered in hours/week, so we must divide)\n if self.sport_intensity == self.INTENSITY_LOW:\n sport_factor = 4\n elif self.sport_intensity == self.INTENSITY_MEDIUM:\n sport_factor = 6\n else:\n sport_factor = 10\n sport = (self.sport_hours / 7.0) * sport_factor\n\n # Free time\n if self.freetime_intensity == self.INTENSITY_LOW:\n freetime_factor = 1.3\n elif self.freetime_intensity == self.INTENSITY_MEDIUM:\n freetime_factor = 1.9\n else:\n freetime_factor = 2.4\n freetime = self.freetime_hours * freetime_factor\n\n # Total\n total = (sleep + work + sport + freetime) / 24.0\n return decimal.Decimal(str(total)).quantize(TWOPLACES)", "def getTotalDIECount(self) -> int:\n ...", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def CalculateAllIntervalsRelatedToATransition():", "def calculate_inductance_requirement(self):\n self.inductance_goal = 385/4 /self.frequency", "def Interval(self) -> float:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a curve is convex or not.
def __IsConvex(self, contour):
    return cv2.isContourConvex(contour)
[ "def isConvex(self):\n \n pass", "def convex(self):\n # Convex has positive curvature (2nd derivative)\n # f\"(x) = 2a, so a > 0 corresponds to convex\n return (self.a > 0)", "def isConvex(self):\n\n target = None\n for i in range(self.n):\n # Check every triplet of points\n A = self.points[i % self.n]\n B = self.points[(i + 1) % self.n]\n C = self.points[(i + 2) % self.n]\n\n if not target:\n target = ccw3(A, B, C, self.normal)\n else:\n if ccw3(A, B, C,self.normal) != target:\n return False\n\n return True", "def _is_on_curve(p):\n x = p[0]\n y = p[1]\n result = (-x * x + y * y - 1 - D * x * x * y * y) % PRIME\n return result == 0", "def is_convex(self):\n def tarea(a, b, c):\n return (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])\n\n def isright(a, b, c):\n return bool(tarea(a, b, c) <= 0)\n\n # Determine orientation of points\n cw = isright(self.vertices[-2], self.vertices[-1], self.vertices[0])\n\n for i in xrange(0, len(self.vertices)):\n if cw ^ isright(self.vertices[i - 2], self.vertices[i - 1], self.vertices[i]):\n return False\n\n return True", "def is_on_curve(point):\n if point is None:\n # None represents the point at infinity.\n return True\n\n x, y = point\n on_curve = (y * y - x * x * x - curve.a * x - curve.b) % curve.p == 0\n return on_curve", "def convex(self):\n x, y = self.center\n angles = []\n l = len(self.points)\n for i in range(l - 1):\n A = self.points[(i + l - 1) % l]\n B = self.points[i % l]\n C = self.points[(i + 1) % l]\n u = Vector.createFromTwoPoints(A, B)\n v = Vector.createFromTwoPoints(C, B)\n angle = v ^ u\n if angle > pi:\n return True\n return False", "def is_on_curve(point):\n if point is None:\n # None represents the point at infinity.\n return True\n\n x, y = point\n\n return (y * y - x * x * x - curve.a * x - curve.b) % curve.p == 0", "def isSetCurve(self):\n return _libsbml.GeneralGlyph_isSetCurve(self)", "def check_curve(value_db_RAU):\n\n warn_monot = False\n warn_convex = False\n convex_u = False\n convex_d = False\n slope_list = []\n for idx in xrange(len(value_db_RAU['agg_obj']) - 1):\n slope = (value_db_RAU['agg_obj'][idx + 1] -\n value_db_RAU['agg_obj'][idx]) / (value_db_RAU['cost']\n [idx + 1] - value_db_RAU['cost'][idx])\n if slope < 0:\n warn_convex = True\n slope_list.append(slope)\n for idx in xrange(len(slope_list) - 1):\n if slope_list[idx + 1] == slope_list[idx]:\n continue\n if slope_list[idx + 1] > slope_list[idx]:\n convex_u = True\n if convex_d:\n warn_monot = True\n else:\n convex_d = True\n if convex_u:\n warn_monot = True\n return warn_convex, warn_monot", "def isSetCurve(self):\n return _libsbml.ReferenceGlyph_isSetCurve(self)", "def is_convex(self):\n n = len(self) - 1\n if n < 5:\n return True\n\n # initialize *sign* with value at last vertex\n sign = np.cross(self[n]-self[n-1], self[0]-self[n]) > 0\n for i in range(n-1):\n if sign != (np.cross(self[i]-self[i-1], self[i+1]-self[i]) > 0):\n return False\n return True", "def is_point_on_curve(self, P):\n x, y, = P[0], P[1]\n left = y * y\n right = (x * x * x) + (self.a * x) + self.b\n return (left - right) % self.p == 0", "def hascurves(self, shape):\n import Part\n for e in shape.Edges:\n if not isinstance(e.Curve, (Part.Line, Part.LineSegment)):\n return True\n return False", "def isSetCurve(self):\n return _libsbml.ReactionGlyph_isSetCurve(self)", "def assert_continuous(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_continuous() cannot be called on an empty list\")\n\n previous_curve = curves[0]\n for curve in 
curves[1:]:\n if previous_curve.p1 != curve.p0:\n return False\n previous_curve = curve\n return True", "def isSetCurve(self):\n return _libsbml.SpeciesReferenceGlyph_isSetCurve(self)", "def IsTypecurve(self):\r\n\r\n return self.IsType(ID_TYPECURVE)", "def is_on_curve(P, a, b, p):\n\tx, y = P\n\treturn ((y ** 2) % p) == ((x ** 3 + a * x + b) % p)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the contour moments, which can then be used to compute features such as the object's center of mass and area.
def __CalculateMoments(self, contour):
    return cv2.moments(contour)
[ "def moments2e(image):\n assert len(image.shape) == 2 # only for grayscale images\n x, y = mgrid[:image.shape[0],:image.shape[1]]\n moments = {}\n moments['mean_x'] = sum(x*image)/sum(image)\n moments['mean_y'] = sum(y*image)/sum(image)\n\n # raw or spatial moments\n moments['m00'] = sum(image)\n moments['m01'] = sum(x*image)\n moments['m10'] = sum(y*image)\n moments['m11'] = sum(y*x*image)\n moments['m02'] = sum(x**2*image)\n moments['m20'] = sum(y**2*image)\n moments['m12'] = sum(x*y**2*image)\n moments['m21'] = sum(x**2*y*image)\n moments['m03'] = sum(x**3*image)\n moments['m30'] = sum(y**3*image)\n\n # central moments\n # moments['mu01']= sum((y-moments['mean_y'])*image) # should be 0\n # moments['mu10']= sum((x-moments['mean_x'])*image) # should be 0\n moments['mu11'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])*image)\n moments['mu02'] = sum((y-moments['mean_y'])**2*image) # variance\n moments['mu20'] = sum((x-moments['mean_x'])**2*image) # variance\n moments['mu12'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])**2*image)\n moments['mu21'] = sum((x-moments['mean_x'])**2*(y-moments['mean_y'])*image)\n moments['mu03'] = sum((y-moments['mean_y'])**3*image)\n moments['mu30'] = sum((x-moments['mean_x'])**3*image)\n\n # opencv versions\n #moments['mu02'] = sum(image*(x-m01/m00)**2)\n #moments['mu02'] = sum(image*(x-y)**2)\n\n # wiki variations\n #moments['mu02'] = m20 - mean_y*m10\n #moments['mu20'] = m02 - mean_x*m01\n\n # central standardized or normalized or scale invariant moments\n moments['nu11'] = moments['mu11'] / sum(image)**(2/2+1)\n moments['nu12'] = moments['mu12'] / sum(image)**(3/2+1)\n moments['nu21'] = moments['mu21'] / sum(image)**(3/2+1)\n moments['nu20'] = moments['mu20'] / sum(image)**(2/2+1)\n moments['nu03'] = moments['mu03'] / sum(image)**(3/2+1) # skewness\n moments['nu30'] = moments['mu30'] / sum(image)**(3/2+1) # skewness\n return moments", "def build_composition_features(self):\n self.aspect_ratio = 1. 
* self.image.shape[1]/self.image.shape[0]\n self.extract_symmetry()\n i = np.zeros((20, 20), dtype=np.double)\n i[13:17, 13:17] = 1\n m = measure.moments(i)\n cr = m[0, 1] / m[0, 0]\n cc = m[1, 0] / m[0, 0]\n self.image_central_moments = measure.moments_central(i, cr, cc)", "def __init__(self, image):\n # compute all desired moments here (recommended)\n x, y = np.meshgrid(np.arange(image.shape[1]), \n np.arange(image.shape[0]))\n M00 = (1.0 * (x**0) * (y**0) * image).sum()\n x_mean = (1.0 * (x**1) * (y**0) * image).sum() / M00\n y_mean = (1.0 * (x**0) * (y**1) * image).sum() / M00\n x2, y2 = 1.0 * x - x_mean, 1.0 * y - y_mean\n x2_pow = [x2**i for i in xrange(4)]\n y2_pow = [y2**i for i in xrange(4)]\n \n # array: [mu20, mu11, mu02, mu30, mu21, mu12, mu03, mu22]\n moments = np.array([(2,0),(1,1),(0,2),(3,0),(2,1),(1,2),(0,3),(2,2)])\n self.central_moments = [(x2_pow[p]*y2_pow[q]*image).sum() for p,q in moments]\n \n u00 = ((x2**0) * (y2**0) * image).sum()\n # array: [nu20, nu11, nu02, nu30, nu21, nu12, nu03, nu22]\n self.scaled_moments = [self.central_moments[i] / (u00**(1.0+(moments[i,:].sum()/ 2.0))) for i in xrange(moments.shape[0])]\n # Note: Make sure computed moments are in correct order\n \n \"\"\"\n test = cv2.moments(image)\n test1 = [test[i] for i in ['mu20', 'mu11', 'mu02', 'mu30', 'mu21', 'mu12', 'mu03']]\n test2 = [test[i] for i in ['nu20', 'nu11', 'nu02', 'nu30', 'nu21', 'nu12', 'nu03']]\n test_x, test_y = test['m10'] / test['m00'], test['m01'] / test['m00']\n if abs(M00 - test['m00']) > 1e-10:\n print M00, test['m00']\n if abs(test_x - x_mean) > 1e-10:\n print x_mean, test_x\n if abs(test_y - y_mean) > 1e-10:\n print y_mean, test_y\n for i in xrange(len(test1)):\n if abs(test1[i]-self.central_moments[i]) > 1e-3:\n print 'c', i, self.central_moments[i], test1[i]\n for i in xrange(len(test2)):\n if abs(test2[i]-self.scaled_moments[i]) > 1e-10:\n print 's', i, self.scaled_moments[i], test2[i]\n \"\"\"", "def moments(cnt):\n\treturn cv2.moments(cnt)", "def moments(self):", "def center_calculation(self,contours):\n # Temp variables for storing the center position calculated for different contours \n centers = []\n\n if len(contours) != 0:\n\n if self.debug_intr == True:\n print(\"contours found\", len(contours))\n\n for cnt in contours:\n\n if len(cnt) >= 3:\n M = cv.moments(cnt)\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n centers.append([cx,cy])\n\n else:\n centers.append(cnt[0][0])\n\n if self.debug_intr == True:\n print(\"Centers:\",centers)\n\n return centers", "def measure_image_moments(image):\n data = image.quantity\n\n coords = image.geom.get_coord().skycoord\n x, y = coords.data.lon.wrap_at(\"180d\"), coords.data.lat\n\n A = data[np.isfinite(data)].sum()\n\n # Center of mass\n x_cms = (x * data)[np.isfinite(data)].sum() / A\n y_cms = (y * data)[np.isfinite(data)].sum() / A\n\n # Second moments\n x_var = ((x - x_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n y_var = ((y - y_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n x_sigma = np.sqrt(x_var)\n y_sigma = np.sqrt(y_var)\n\n return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)", "def moments(self, *args, **kwargs):\n return _image.image_moments(self, *args, **kwargs)", "def contour_centroid(contour):\n\n moments = cv2.moments(contour)\n centroid = np.array(\n [moments['m10'] / moments['m00'], moments['m01'] / moments['m00']])\n\n return centroid[0], centroid[1]", "def find_center( contours ):\r\n ret = []\r\n\r\n for x in contours:\r\n M = cv2.moments( x )\r\n pt = Point()\r\n 
pt.x = int( M['m10']/M['m00'] )\r\n pt.y = int( M['m01']/M['m00'] )\r\n\r\n ret.append( pt )\r\n\r\n return( ret );", "def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid", "def find2D_higher_moments(image, centroid, halfwidths, c_sum):\n \n # Unpack centroid to seperate values\n xcen, ycen = np.floor(centroid)\n xhw, yhw = halfwidths\n \n xmoment2 = 0\n xmoment3 = 0\n ymoment2 = 0\n ymoment3 = 0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n y_range = np.array((np.floor(ycen - yhw) - 1, np.ceil(ycen + yhw) - 1))\n \n \n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n for jj in xrange(np.int(y_range[0]), np.int(y_range[1])):\n \n xloc = ii - np.floor(xcen)\n yloc = jj - np.floor(ycen)\n \n xweight = 0\n yweight = 0\n \n xoff = np.abs(ii - xcen)\n yoff = np.abs(jj - ycen)\n \n if xoff <= xhw:\n xweight = 1\n elif xhw < xoff < (xhw + 1):\n xweight = xhw + 1 - xoff\n \n if yoff <= yhw:\n yweight = 1\n elif yhw < yoff < (yhw + 1):\n yweight = yhw + 1 - yoff\n \n weight = xweight * yweight\n\n xmoment2 += xloc ** 2 * image[jj, ii] * weight\n xmoment3 += xloc ** 3 * image[jj, ii] * weight\n ymoment2 += yloc ** 2 * image[jj, ii] * weight\n ymoment3 += yloc ** 3 * image[jj, ii] * weight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n ymoment2 = ymoment2 / c_sum\n ymoment3 = ymoment3 / c_sum\n\n # Pack the x and y moments to return to main program\n x_moment = np.array((xmoment2, xmoment3))\n y_moment = np.array((ymoment2, ymoment3))\n \n return x_moment, y_moment", "def find1D_higher_moments(image, xcen, xhw, c_sum):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n xmoment2 = 0.0\n xmoment3 = 0.0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n\n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n xloc = (ii + 1) - np.floor(xcen)\n \n xweight = 0\n xoff = np.abs(ii - xcen)\n \n if xoff <= xhw:\n xweight = 0\n elif xhw < xoff < xhw + 1:\n xweight = xhw + 1 - xoff\n\n xmoment2 += xloc ** 2 * vector[ii] * xweight\n xmoment3 += xloc ** 3 * vector[ii] * xweight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n \n # Pack moments for return to main program\n x_mom = np.array((xmoment2, xmoment3))\n \n return x_mom", "def get_center_of_mass(contour):\n M = cv2.moments(contour)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n\n return cX, cY", "def find1D_higher_moments(image, xcen, xhw, c_sum):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n xmoment2 = 0.0\n xmoment3 = 0.0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n\n for ii in xrange(np.int(x_range[0]), np.int(x_range[1]) +1): # the +1 is because python stops the loop at idx=n\n xloc = (ii + 1) - np.floor(xcen)\n \n xweight = 0\n xoff = np.abs(ii - xcen)\n \n if xoff <= xhw:\n xweight = 0\n elif xhw < xoff < xhw + 1:\n xweight = xhw + 1 - xoff\n\n xmoment2 += xloc ** 2 * vector[ii] * xweight\n xmoment3 += xloc ** 3 * vector[ii] * xweight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n \n # Pack moments for return to 
main program\n x_mom = np.array((xmoment2, xmoment3))\n \n return x_mom", "def hu_moment_calculate(img):\n image = cv2.imread(img)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n _, thresh = cv2.threshold(image, 128, 255, cv2.THRESH_BINARY)\n moments = cv2.moments(thresh)\n hu_moments = cv2.HuMoments(moments)\n\n for i in range(0, 7):\n hu_moments[i][0] = np.round(\n (\n -1\n * math.copysign(1.0, hu_moments[i][0])\n * math.log10(abs(hu_moments[i][0]))\n ),\n 3,\n )\n return hu_moments", "def center_point(contour):\n m = cv2.moments(contour)\n if m['m00'] == 0:\n return None\n\n return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])", "def centroid(cnt):\n m = cv2.moments(cnt)\n cx = int(m['m10'] / m['m00'])\n cy = int(m['m01'] / m['m00'])\n return cx, cy", "def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates a contour perimeter or a curve length.
def __CalculatePerimeter(self, curve):
    return cv2.arcLength(curve, True)
[ "def perimeter(self):\r\n option = input('Lengths or coordinate points? \\'l\\' for lengths, \\'c\\' for coordinates')\r\n if option == 'l':\r\n x = float(input('Input length1: '))\r\n y = float(input('Input length2: '))\r\n return 2 * (x + y)\r\n elif option == 'c':\r\n point1 = input('Input top-left coordinate (x y): ')\r\n point_1 = point1.split(' ')\r\n first_points = []\r\n for i in point_1:\r\n first_points.append(float(i))\r\n point2 = input('Input bottom-right coordinate (x y): ')\r\n point_2 = point2.split(' ')\r\n second_points = []\r\n for i in point_2:\r\n second_points.append(float(i))\r\n perim = (second_points[0] - first_points[0]) + (first_points[1] - second_points[1])\r\n return 2 * perim", "def perimeter(cnt):\n return cv2.arcLength(cnt,True)", "def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2", "def perimeter(self):\n return sum(self.side_length(k) for k in range(self.number_sides))", "def open_arc_length(contour: np.ndarray) -> float:\n return cv2.arcLength(contour, False)", "def perimeter_separate(cnt):\n return sum([cv2.arcLength(x,True) for x in cnt])", "def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)", "def perimeter(base, height):\n return (2.0 * base) + (2.0 * height)", "def polygonal_perimeter(shape, tolerance=1):\n contours = find_contours(shape, 0.5, fully_connected=\"high\")\n total = 0\n for contour in contours:\n coords = approximate_polygon(contour, tolerance=tolerance)\n # Distance from last coordinate to first\n perimeter = np.linalg.norm(coords[-1] - coords[0])\n # Add the distances between the rest of the successive coordinate pairs\n for i in range(1, coords.shape[0]):\n segment_length = np.linalg.norm(coords[i - 1] - coords[i])\n perimeter += segment_length\n total += perimeter\n return total", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def perimeter(self):\n return math.pi * (3 * (self.r + self.b) - ((3 * self.r + self.b) *\n (self.r + 3 * self.b)) ** 0.5)", "def perimeter(self):\n\t\treturn 2 * (self.width + self.height)", "def perimeter(self):\n return self._radius * 2 * math.pi", "def getPerimeter(self):\n return 2 * math.pi * self.__radius", "def perimeter(self):\r\n perimeter = (self.width + self.height)*2\r\n return perimeter", "def getPerimeter(self):\n pass", "def get_perimeter_formula(cls):\n pass", "def perimeter(self):\n return self.width * 2 + self.height *2", "def calorimeter_fn(x, p):\n try:\n inval = x[0]\n peak, resolution, exp_scale, peak_fraction, norm = p\n erf = math.erf\n exp = math.exp\n sqrt2 = math.sqrt(2)\n sqrt2pi = math.sqrt(2*math.pi)\n peak_norm = peak_fraction / (resolution * sqrt2pi)\n peak_exp = exp(-(inval - peak)**2/(2 * resolution**2))\n peak_term = peak_norm * peak_exp\n\n tail_coeff = (1 - peak_fraction) * exp_scale / (exp(exp_scale * peak) - 1)\n tail_exp = exp(((resolution * exp_scale)**2 + 2 * exp_scale * inval)/2)\n tail_erf1 = erf((peak - inval - resolution**2 * exp_scale) / (sqrt2 *\n resolution))\n tail_erf2 = erf((-inval - resolution**2 * exp_scale) / (sqrt2 *\n resolution))\n tail_term = tail_coeff * tail_exp * (tail_erf1 - tail_erf2)\n return norm * (peak_term + tail_term)\n except:\n print([y for y in x])\n print([y for y in p])\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the Elasticsearch index every hour.
def update_es_index():
    for job in scheduler.get_jobs():
        if 'task_type' in job.meta and job.meta['task_type'] == "update_index":
            scheduler.cancel(job)
    scheduler.schedule(
        scheduled_time=datetime.now(),
        func='haystack.management.commands.update_index.Command().handle()',
        interval=60 * 60,
        repeat=None,
    )
    for job in scheduler.get_jobs():
        index_job = job
        if index_job.func_name == 'haystack.management.commands.update_index.Command().handle()':
            break
    index_job.meta['task_type'] = "update_index"
    index_job.save()
[ "def refresh(self, timesleep=0):\n get_es().refresh(self.index_name, timesleep=timesleep)", "def index_later(self):\n return", "def refresh(self):\n now = datetime.now()\n an_hour_from_now = now + timedelta(hours=1)\n if self.expire_time < an_hour_from_now:\n self.update(expire_time=an_hour_from_now).execute()", "def update_index(self):\n if not self.afk:\n return\n\n url = config.index_route.format(self.id)\n\n try:\n requests.get(url,\n headers={'User-Agent': config.user_agent},\n auth=(config.index_user, config.index_pass)\n )\n except:\n logging.exception(\"Failed song indexing ping\")", "def flush_and_refresh(self, index):\n self.client.indices.flush(wait_if_ongoing=True, index=index)\n self.client.indices.refresh(index=index)\n self.client.cluster.health(\n wait_for_status='yellow', request_timeout=30)\n return True", "async def update_patron_cache_hour(self):\n # this is to make sure on the first run it doesn't update since it is created elsewhere.\n if self.ex.loop_count:\n await self.process_cache_time(self.create_patreons, \"Patrons\")\n self.ex.loop_count += 1", "def hourly_scan():\n\n for event in Event.objects.all():\n e = bot.get_event_info(event.fb_id)\n if e:\n try:\n node = e[event.fb_id]\n if event.data == node:\n pass\n else:\n data = extract_event_data(node)\n serializer = EventSerializer(\n event,\n data=data,\n partial=True\n )\n if serializer.is_valid():\n serializer.save()\n except Exception as e:\n send_msg(e)\n pass\n else:\n event.delete()", "def run(self):\n cmd = \"esbulk -z -verbose -server http://{host}:{port} -w {workers}\"\"\".format(\n **self.config)\n for k, v in self.config.get(\"indices\").items():\n shellout(\n \"curl -XDELETE http://{host}:{port}/{index}\".format(**self.config, index=v))\n put_dict(\"http://{host}/{index}\".format(**self.config,\n index=v), {\"mappings\": {k: {\"date_detection\": False}}})\n shellout(\n cmd+\"\"\" -index {index} -type {type} -id id {type}s.ldj.gz\"\"\".format(index=v, type=k))", "def poll_kibana_index_pattern():\n\n indices = [\"requests\", \"logging\", \"container_logs\"]\n tries = 30\n while indices and tries > 0:\n for index in indices:\n payload = {\"attributes\": {\"title\": index, \"timeFieldName\": \"@timestamp\"}}\n try:\n res = requests.post(\"http://kibana:5601/api/saved_objects/index-pattern/{}\".format(index), json=payload, headers={\"kbn-xsrf\": \"true\"}, verify=False)\n if res.status_code == 200:\n indices.remove(index)\n else:\n time.sleep(10)\n tries -= 1\n except Exception as e:\n time.sleep(10)\n tries -= 1\n continue", "def step010():\n logger.logMessage('Begin: Getting candidate documents from elasticsearch')\n\n def limitHour(d):\n thish = d.start_time.tz_localize(tz='UTC')\n nexth = thish + dt.timedelta(hours=1)\n return { 'range': { 'time': {'gte':thish, 'lt':nexth } } }\n \n conn = sql.create_engine(pgurl)\n client = es.Elasticsearch(hostlist)\n dupesDF = pd.read_sql_table('weather_dupes',conn).set_index('time')\n hours =dupesDF.to_period('H').reset_index()['time'].unique()\n ranges = [ limitHour(h) for h in hours ]\n query = { \n '_source': [ 'tsa','time' ],\n 'query': { \n 'bool': { 'should': ranges } \n } \n }\n #logger.logMessage(level='DEBUG',message='Query body: {0}'.format(query))\n hits = eshelp.scan(client=client,index=indexName,doc_type='doc',query=query)\n numRecs = 0\n with open(candidatesFile,'w') as f:\n for h in hits:\n src = h['_source']\n tsa = int(src['tsa'])\n time = src['time']\n docid = h['_id']\n idx = h['_index']\n 
f.write(f'{tsa:014d};{time:25s};{docid:32s};{idx:32s}\\n') \n numRecs += 1\n if numRecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records written\".format(numRecs))\n logger.logMessage(message=\"{0:9d} total records written\".format(numRecs))\n logger.logMessage('End: Getting candidate documents from elasticsearch')", "def cron_refresh_spacetrack_cache():\n s = SpaceTrackApi()\n updated_tles_str = s.get_all_tles()\n storage.save_tle_cache(updated_tles_str)\n last_updated[0] = int(time.time())\n metadata = {\n 'last_updated': last_updated[0],\n }\n storage.save_metadata(metadata)", "def reindex(self):", "def hourly():\r\n\r\n # get the full history for any new stock added to the database\r\n CompanyHistory().init()", "def reset_index(waring_time: int, reset_periode: Optional[int]):\n\n # check if it's time to reset elasticsearch index\n if reset_periode:\n unixtime_secounds: int = int(time.time())\n unixtime_minutes: int = int(unixtime_secounds / 60)\n unixtime_hours: int = int(unixtime_minutes / 60)\n unixtime_days: int = int(unixtime_hours / 24)\n unixtime_weeks: int = int(unixtime_days / 7)\n print(unixtime_weeks)\n if unixtime_weeks % reset_periode != 0:\n print(\"Skip reset elasticsearch index, because it's not on schedule!\")\n exit(0)\n\n print(\"You try to drop the current elasticsearch index & to reload them again!\")\n print(\"You have 30 secounds to stop this operation, you can't undo this action!\")\n print(\"To stop this action press 'crtl + c'\")\n\n try:\n for secounds in range(waring_time, 0, -1):\n sleep(1)\n print(\"{} secounds left ...\".format(secounds))\n except KeyboardInterrupt:\n print(\"You stop this action, nothing changed...\")\n return\n\n print(\"The Elasticsearch index will now be delted ...\")\n\n # delete group & meetup zip index\n # fixme load es from flask base config!\n env: Env = Env()\n elasticsearch: Elasticsearch = Elasticsearch(\n [{\"host\": env(\"http.host\"), \"port\": env(\"http.port\")}]\n )\n elasticsearch.indices.delete(index=Group.Index.name, ignore=[400, 404])\n elasticsearch.indices.delete(index=MeetupZip.Index.name, ignore=[400, 404])\n\n sleep(2)\n\n print(\"The Elasticsearch index are now deletet!\")\n\n # migrate models\n migrate_models()\n\n sleep(1)\n\n boundingboxes: dict = env.dict(\"LOCATION_BOUNDINGBOX\", subcast=str)\n for boundingbox in boundingboxes:\n print(\"Load meetup.com zip codes for {}\".format(boundingbox))\n boundingbox_list: List[str] = boundingboxes[boundingbox].split(\" \")\n load_zip_codes(\n lat_min=float(boundingbox_list[0]),\n lat_max=float(boundingbox_list[1]),\n lon_min=float(boundingbox_list[2]),\n lon_max=float(boundingbox_list[3]),\n )\n\n sleep(2)\n\n for country in env.list(\"LOCATION_COUNTRIES\"):\n print(\"Load groups with all events from {}!\".format(country))\n load_groups(load_events=True, country=country)\n\n print(\"All done :)\")", "def _update_on_refresh():\n cities = City.query.all()\n\n #Iterates over all cities in the database and updates their value\n for city in cities:\n metric_resp, imperial_resp = _get_open_weather_requests(city.name)\n\n metric_json = metric_resp.json()\n imperial_json = imperial_resp.json()\n\n city.temp_celsius = int(metric_json[MAIN][TEMPERATURE])\n city.temp_fahrenheit = int(imperial_json[MAIN][TEMPERATURE])\n db.session.commit()", "def refresh(self):\n if (datetime.now() - self.updated_at()) > 60:\n self.refresh_yahoo_api_data()\n self.refresh_yahoo_intraday_data()", "async def _timein_refresh(self):\n\t\t\n\t\tawait self.refresh_cache()", "def 
reindex(cls):\n for obj in cls.query:\n add_to_index(cls.__tablename__, obj)", "def schedule_update(self):\r\n self.update_event = Clock.schedule_interval(self.update, 1.0 / self.config_dict['Tasks']['Boids']['update_frequency'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
JavaProcess.__init__(self, class_loc, args=[]) Initializes an external Java process.
def __init__(self, config, class_loc, args=[]):
    JavaProcess.config = JavaProcessConfig.configFrom_dict(config)
    self._cp = self._construct_classpath_str()
    self.class_loc = class_loc
    self.args = args
    self._process = None
    self._stdout = None
    self._stderr = None
    LOG.debug("JavaProcess constructed for %s", self.class_loc)
    return
[ "def __init__(self, process=None, parent=None, **kwargs):\n super(ProcessIO, self).__init__(**kwargs)\n self.process = process\n self.parent = parent\n self.default_output = process.default_output", "def _start_process(self):\n\t\tself._proc = subprocess.Popen(self.argv)", "def __init__(self, pid=None, name=None, debug=True):\n super(OSXProcess, self).__init__()\n if pid is not None:\n self.pid = pid\n elif name is not None:\n self.pid = OSXProcess.pid_from_name(name)\n else:\n raise ValueError(\n \"You need to instanciate process with at least a name or a pid\"\n )\n self.task = None\n self.mytask = None\n self._open()", "def java_run():\n\n # Get the java excutable from the Java home\n java_exec = P.realpath(P.join(\n os.environ['JAVA_HOME'],\n 'bin',\n 'java'\n ))\n\n # Get the java.library.path from the LD_LIBRARY_PATH and compiled\n # JNI stubs.\n java_library_path = P.pathsep.join([\n P.join(bindings_dir, 'jni'),\n os.environ['LD_LIBRARY_PATH'],\n ])\n\n # Prepare the command to run the Java main\n args = [\n java_exec,\n '-cp', class_path,\n '-Dfile.encoding=UTF-8',\n f\"-Djava.library.path={java_library_path}\",\n ]\n if 'graalvm' in os.environ['JAVA_HOME']:\n args.append((\n '--add-opens=org.graalvm.truffle/com.oracle.truffle.api.'\n 'strings=ALL-UNNAMED'\n ))\n args += [main_java, project_path]\n\n # Run the test\n self.run_and_check(args)", "def __init__(self, cmd):\n self.cmd = cmd\n self.process = None", "def __init__(self, binPath, numProc, wd, platform):\n self.binPath = binPath\n self.numProc = numProc\n self.wd = wd\n self.platform = platform", "def _runner(self, classpath, main, jvm_options, args):", "def __init__(self):\n EDPluginExecProcessScript.__init__(self)\n self.setXSDataInputClass(XSDataInputExecCommandLine)", "def __init__(self, analysis, fifofile, histointerval, _q, _ws, _y):\n\n multiprocessing.Process.__init__(self)\n self.analysis = analysis\n self.fifofile = fifofile\n self.histointerval = histointerval\n self._q = _q\n self._ws = _ws\n self._y = _y", "def _launch(self):\n annotators = ['tokenize', 'ssplit']\n if 'ner' in self.annotators:\n annotators.extend(['pos', 'lemma', 'ner'])\n elif 'lemma' in self.annotators:\n annotators.extend(['pos', 'lemma'])\n elif 'pos' in self.annotators:\n annotators.extend(['pos'])\n annotators = ','.join(annotators)\n options = ','.join(['untokenizable=noneDelete',\n 'invertible=true'])\n # if you work on English, use this this command\n cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators',\n annotators, '-tokenize.options', options,\n '-outputFormat', 'json', '-prettyPrint', 'false']\n \n # if you work on arabic, use this this command\n \n # cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n # # 'edu.stanford.nlp.pipeline.StanfordCoreNLP','-annotators',\n # 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-props', 'StanfordCoreNLP-arabic.properties','-annotators',\n # annotators, '-tokenize.options', options, #'-tokenize.whitespace', 'true',\n # '-outputFormat', 'json', '-prettyPrint', 'false']\n print(' '.join(cmd))\n\n # We use pexpect to keep the subprocess alive and feed it commands.\n # Because we don't want to get hit by the max terminal buffer size,\n # we turn off canonical input processing to have unlimited bytes.\n self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)\n self.corenlp.setecho(False)\n self.corenlp.sendline('stty -icanon')\n self.corenlp.sendline(' '.join(cmd))\n self.corenlp.delaybeforesend 
= 0\n self.corenlp.delayafterread = 0\n self.corenlp.expect_exact('NLP>', searchwindowsize=100)", "def _from_java(cls, java_obj):\n # Create a new instance of this stage.\n py_obj = cls()\n py_obj._java_obj = java_obj\n if java_obj is None and java_obj.parentPipeline().isDefined():\n py_parent = MLPipeline()\n py_parent._java_obj = java_obj.parentPipeline().get()\n py_obj._parent = py_parent\n return py_obj", "def __init__(self, target=None, *args, **kwargs):\n super(PyonThread, self).__init__()\n\n if target is not None or not hasattr(self, 'target'): # Allow setting target at class level\n self.target = target\n self.spawn_args = args\n self.spawn_kwargs = kwargs\n\n # The instance of Greenlet or subprocess or similar\n self.proc = None\n self.supervisor = None\n\n self.ev_exit = Event()", "def __init__(self):\n super(MultiProcessEngine, self).__init__()\n self._debug_output = False\n self._name = 'Main'\n self._last_worker_number = 0\n self._log_filename = None\n self._pid = os.getpid()\n self._process_information = process_info.ProcessInfo(self._pid)\n self._process_information_per_pid = {}\n self._processes_per_pid = {}\n self._quiet_mode = False\n self._rpc_clients_per_pid = {}\n self._rpc_errors_per_pid = {}\n self._status_update_active = False\n self._status_update_thread = None\n self._storage_writer = None\n self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT", "def __init__(self):\n self.pid = os.getpid()", "def spawn(self, classpath, main, jvm_options=None, args=None, **subprocess_args):\r\n cmd = self._create_command(*self._scrub_args(classpath, main, jvm_options, args))\r\n return self._spawn(cmd, **subprocess_args)", "def __init__(self, command_arguments):", "def __init__(self, class_name='gumtree.GumTreeApi'):\n if Gumtree.gumtree is None:\n # class path\n jvm_arg = \"-Djava.class.path=\" + my_constant.JAVA_CLASS_PATH\n startJVM(getDefaultJVMPath(), '-d64', jvm_arg)\n # initial class and object\n GumtreeApi = JClass(class_name)\n Gumtree.gumtree = GumtreeApi()", "def __init__(self, connected, jlink_exe=None, jlink_path='', params=None):\n self._connected = connected\n # If not provided, pick the appropriate JLinkExe name based on the\n # platform:\n # - Linux = JLinkExe\n # - Mac = JLinkExe\n # - Windows = JLink.exe\n if jlink_exe is None:\n system = platform.system()\n if system == 'Linux':\n jlink_exe = 'JLinkExe'\n elif system == 'Windows':\n jlink_exe = 'JLink.exe'\n elif system == 'Darwin':\n jlink_exe = 'JLinkExe'\n else:\n raise AdaLinkError('Unsupported system: {0}'.format(system))\n # Store the path to the JLinkExe tool so it can later be run.\n self._jlink_path = os.path.join(jlink_path, jlink_exe)\n logger.info('Using path to JLinkExe: {0}'.format(self._jlink_path))\n # Apply command line parameters if specified.\n self._jlink_params = []\n if params is not None:\n self._jlink_params.extend(params.split())\n logger.info('Using parameters to JLinkExe: {0}'.format(params))\n # Make sure we have the J-Link executable in the system path\n self._test_jlinkexe()", "def __init__( self,\n name,\n desc,\n program,\n args,\n workingDir,\n logFile = None,\n env = None,\n uid = None,\n gid = None,\n stopSignal = None ):\n\n if stopSignal is None:\n import signal\n stopSignal = signal.SIGKILL\n\n if env is None:\n env = {}\n\n self.name = name\n self.desc = desc\n self.program = program\n self.args = [ program ]\n self.args.extend( args )\n self.workingDir = workingDir\n self.stopSignal = stopSignal\n self.env = env\n self.logFile = logFile\n\n if gid and 
uid:\n import grp\n import pwd\n\n self.gid = grp.getgrnam( gid )[2]\n self.uid = pwd.getpwnam( uid )[2]\n elif gid or uid:\n raise ValueError(\n \"For process '%s', either gid or uid must both be None, \"\n \"or both must be set.\" % self.name\n )\n else:\n self.gid = None\n self.uid = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
slick solution in python ONLY:
zeros = [0 for i in range(zeros_and_ones.count(0))]
ones = [1 for j in range(zeros_and_ones.count(1))]
return zeros + ones
def zeros_before_ones(zeros_and_ones):
    index_i = 0
    last_index = len(zeros_and_ones) - 1
    while index_i < last_index:
        if zeros_and_ones[index_i] == 1 and zeros_and_ones[last_index] == 0:
            zeros_and_ones[index_i], zeros_and_ones[last_index] = zeros_and_ones[last_index], zeros_and_ones[index_i]
        index_i += 1
        last_index -= 1
    # print(zeros_and_ones)
    # TODO: NEEDS IMPROVEMENTS!
    zeros_and_ones
[ "def _ones_and_zero_pows(self, element, index):\n # Compute win_lst, sqr_lst\n b, n = arith1.vp(index, 2)\n win_lst, sqr_lst = [], [b]\n maxa = 1\n while True:\n ones = 0\n while n & 1:\n n >>= 1\n ones += 1\n win_lst.append(ones)\n if maxa < ones:\n maxa = ones\n if n == 0:\n break\n zeros = 0\n while not (n & 1):\n n >>= 1\n zeros += 1\n sqr_lst.append(zeros + win_lst[-1])\n e = len(win_lst) - 1\n f = e\n # Precomputation\n sqrs = element\n pre_table = [element]\n for i in range(maxa - 1):\n sqrs = self.square(sqrs)\n pre_table.append(self.mul(pre_table[-1], sqrs))\n # Main Loop\n while f >= 0:\n if f == e:\n sol = pre_table[win_lst[f] - 1]\n else:\n sol = self.mul(sol, pre_table[win_lst[f] - 1])\n for i in range(sqr_lst[f]):\n sol = self.square(sol)\n f -= 1\n return sol", "def number_of_ones2(integer_value):\n \n one_count = 0\n while integer_value != 0:\n one_subtracted = integer_value - 1\n integer_value = one_subtracted & integer_value\n one_count = one_count + 1", "def zeros(n):\n return [0] * n", "def count_ones(self):\n self.ones = 0\n for memory_function in self.memory:\n self.ones += memory_function.count(1)", "def eqcounter(nodes):\n nnodes = nodes.shape[0]\n IBC = np.zeros([nnodes, 6], dtype=np.integer)\n IBC[:,:] = nodes[:,4:]\n neq = 0\n for i in range(nnodes):\n for j in range(6):\n if IBC[i, j] == 0:\n IBC[i, j] = neq\n neq = neq + 1\n # End if\n # End for j\n # End for i\n return neq, IBC", "def count_ones(i):\n count = 0\n while i > 0:\n count += i & 1\n i >>= 1\n return count", "def build_pad0_state(s, s_qubits, a_qubits):\n u = np.array([1, 0])\n d = np.array([0, 1])\n dict_s = utils.get_dict_rep_of_vec(s)\n for key in dict_s.keys():\n basis_rep = key\n sidx = 0\n full_s = 1\n for q in a_qubits:\n if q in s_qubits:\n if basis_rep[sidx] == 0:\n full_s = np.kron(full_s, u)\n else:\n full_s = np.kron(full_s, d)\n sidx += 1\n else:\n full_s = np.kron(full_s, u)\n\n return full_s", "def single_number(nums: List[int]) -> int:\n ones = 0\n twos = 0\n\n for num in nums:\n # Record number that appears twice.\n twos |= (ones & num)\n\n # Record number that appears once.\n ones ^= num\n\n # Remove number that is on ones and twos.\n common_bit_mask = ~(ones & twos)\n ones &= common_bit_mask\n twos &= common_bit_mask\n return ones", "def check_for_all_zeros(X, i, j):\n non_zeros = []\n first_non_zero = -1\n for m in range(i, len(X)):\n non_zero = X[m][j] != 0\n non_zeros.append(non_zero)\n if first_non_zero == -1 and non_zero:\n first_non_zero = m\n zero_sum = sum(non_zeros)\n return zero_sum, first_non_zero", "def summarize_binary_list(lista):\r\n ans = []\r\n x_0 = None\r\n tamano = len(lista)\r\n for i in xrange(tamano):\r\n if lista[i] == 1 and x_0 is None:\r\n x_0 = i\r\n end_of_sequence = lista[i] == 0\r\n end_of_array = i == (tamano-1) and lista[i] == 1\r\n if (end_of_sequence or end_of_array) and x_0 is not None:\r\n if end_of_sequence:\r\n ans.append((x_0, i-1))\r\n if end_of_array:\r\n ans.append((x_0, i))\r\n x_0 = None\r\n return ans", "def test_count_binary_decisions(self):\n abs_centered_quantized_data_0 = numpy.array([0.75, 0.05, 0.1, 0.2, 0.2, 0.15], dtype=numpy.float32)\n bin_width_test_0 = 0.05\n abs_centered_quantized_data_1 = numpy.array([210., 6., 9., 6.], dtype=numpy.float32)\n bin_width_test_1 = 3.\n truncated_unary_prefix = 7\n \n (cumulated_zeros_0, cumulated_ones_0) = \\\n lossless.stats.count_binary_decisions(abs_centered_quantized_data_0,\n bin_width_test_0,\n truncated_unary_prefix)\n (cumulated_zeros_1, cumulated_ones_1) = \\\n 
lossless.stats.count_binary_decisions(abs_centered_quantized_data_1,\n bin_width_test_1,\n truncated_unary_prefix)\n print('1st experiment:')\n print('Number of occurrences of 0 for each binary decision computed by the function:')\n print(cumulated_zeros_0)\n print('Number of occurrences of 0 for each binary decision computed by hand:')\n print(numpy.array([0, 1, 1, 1, 2, 0, 0]))\n print('Number of occurrences of 1 for each binary decision computed by the function:')\n print(cumulated_ones_0)\n print('Number of occurrences of 1 for each binary decision computed by hand:')\n print(numpy.array([6, 5, 4, 3, 1, 1, 1]))\n print('\\n2nd experiment:')\n print('Number of occurrences of 0 for each binary decision computed by the function:')\n print(cumulated_zeros_1)\n print('Number of occurrences of 0 for each binary decision computed by hand:')\n print(numpy.array([0, 0, 2, 1, 0, 0, 0]))\n print('Number of occurrences of 1 for each binary decision computed by the function:')\n print(cumulated_ones_1)\n print('Number of occurrences of 1 for each binary decision computed by hand:')\n print(numpy.array([4, 4, 2, 1, 1, 1, 1]))", "def OneHot0(*xs, simplify=True, conj=True):\n xs = [Expression.box(x).node for x in xs]\n terms = []\n if conj:\n for x0, x1 in itertools.combinations(xs, 2):\n terms.append(exprnode.or_(exprnode.not_(x0),\n exprnode.not_(x1)))\n y = exprnode.and_(*terms)\n else:\n for xs_ in itertools.combinations(xs, len(xs) - 1):\n terms.append(exprnode.and_(*[exprnode.not_(x) for x in xs_]))\n y = exprnode.or_(*terms)\n if simplify:\n y = y.simplify()\n return _expr(y)", "def majority_logical(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = itertools.combinations(bit_arrays, MINIMUM_MAJORITY)\n answer = map(all, answer)\n answer = any(answer)\n return answer", "def onesNumber(self):\n onemask = self.onmask & ~self.dcmask\n ones_number = 0\n while onemask > 0:\n ones_number += onemask & 1\n onemask >>= 1\n return ones_number", "def remove_zeros(sLU, iLU, jLU):\n N = len(iLU) - 1\n nz_idx = np.nonzero(sLU) # Gets the indices of the non-zero elements of sLU\n sLU = sLU[nz_idx] # We only keep non-zero elements in sLU\n jLU = jLU[nz_idx] # We only keep the column indices of non-zero elements\n\n for i in range(1, N + 1):\n # Updates iLU[i] by counting the number of non-zero in the lines < i\n iLU[i] = np.sum(nz_idx < iLU[i])\n return sLU, iLU, jLU", "def map_zero_one(x, a, b):\n assert b > a\n s = 1./(b - a)\n t = a/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<0] = 0\n return y", "def hammingpattern(bits):\n\tmax = int(2) ** bits; \n\tpattern1 = [];\n\tpattern2 = [];\n\t\n\tfor i in range(0, max):\n\t\tcount = bin(i).count(\"1\")\n\t\tif count == 1: \n\t\t\tpattern1.append(i); \n\t\tif count == 2:\n\t\t\tpattern2.append(i); \n\n\t\"\"\"\n\tprint \"--> pattern1\"\n\tprintbinary(pattern1)\n\tprint \"--> pattern2\"\n\tprintbinary(pattern2)\n\t\"\"\"\n\treturn pattern1, pattern2", "def compareones_c(w1,w2,tn):\n nw1 = np.int_(np.copy(w1))\n nw2 = np.int_(np.copy(w2))\n code = \"\"\"\n int s;\n s = 0;\n for(int i = 0; i < n; i++)\n {\n if((nw1[i] == 1)&(nw2[i] == 1))\n {\n s += 100;\n }\n }\n return_val = s;\n \"\"\"\n n = len(w2)\n res = inline(code, ['nw1','nw2','n'], headers = ['<math.h>'], compiler = 'gcc')\n return res / float(tn)", "def most_ones(seq0, seq1):\n cnt0 = len(utils.indices(seq0, 1))\n cnt1 = len(utils.indices(seq1, 1))\n if cnt0 >= cnt1:\n return 0\n else:\n return 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers all the JRPC overloaders in the jrpc server
def register_overloaders(jrpc_server: JRPCServer, receiver) -> None:
    jrpc_server.register_overloader(
        'Application.GetProperties',
        lambda server: GetPropertiesOverloader(server, receiver))
    jrpc_server.register_overloader(
        'Application.SetMute',
        lambda server: SetMuteOverloader(receiver))
    jrpc_server.register_overloader(
        'Application.SetVolume',
        lambda server: SetVolumeOverloader(receiver))
    jrpc_server.register_overloader(
        'Application.Quit',
        lambda server: ApplicationQuitOverloader(receiver))
    jrpc_server.register_overloader(
        'System.Hibernate',
        lambda server: ApplicationQuitOverloader(receiver))
    jrpc_server.register_overloader(
        'System.Shutdown',
        lambda server: ApplicationQuitOverloader(receiver))
    jrpc_server.register_overloader(
        'System.Suspend',
        lambda server: ApplicationQuitOverloader(receiver))
    jrpc_server.register_overloader(
        'System.GetProperties',
        lambda server: SystemPropertiesOverloader())
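The `register_overloader` signature shown in the first negative below takes the JSON-RPC method name and a provider callable that receives the handler and returns the overloader, which is the pattern used above. A minimal, hypothetical wiring sketch (both constructors are assumptions, not a verified API):

server = JRPCServer()   # assumed constructor
receiver = Receiver()   # placeholder for whatever object exposes volume/mute/power controls
register_overloaders(server, receiver)
# 'Application.Quit', 'System.Hibernate', 'System.Shutdown' and 'System.Suspend'
# are all routed to ApplicationQuitOverloader(receiver).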
[ "def start_rpc_listeners(self):\n pass", "def cb_xmlrpc_register(args):\n args['methods'].update({'pingback.ping': pingback })\n return args", "def registerMethodForRpc(self, uri, obj, proc):\r\n self.procs[uri] = (obj, proc, False)\r\n if self.debugWamp:\r\n log.msg(\"registered remote method on %s\" % uri)", "def AddRPCInstance(self, url, instance):\n\n dispatcher = SimpleJSONRPCServer.SimpleJSONRPCDispatcher()\n dispatcher.register_introspection_functions()\n dispatcher.register_instance(instance)\n self.add_dispatcher(url, dispatcher)", "def register_overloader(self, method: str, overloader_provider: Callable[[JRPCHandler], JRPCOverloader]) -> None:\n self.overloaders[method] = overloader_provider", "def _setup_rpc_server_clients(self):\n\n self.dcenter_to_rpc = {}\n\n # RPC server\n host_addr = socket.gethostbyname(socket.gethostname())\n rpc_server = SimpleXMLRPCServer((host_addr, CONF.rpc_port),\n allow_none=True)\n rpc_server.register_introspection_functions()\n rpc_server.register_instance(self.inception_rpc)\n # server_thread = threading.Thread(target=rpc_server.serve_forever)\n hub.spawn(rpc_server.serve_forever)\n\n # Create RPC clients\n for dcenter in self.dcenter_to_info:\n controller_ip, _ = self.dcenter_to_info[dcenter]\n rpc_client = ServerProxy(\"http://%s:%s\" %\n (controller_ip, CONF.rpc_port))\n self.dcenter_to_rpc[dcenter] = rpc_client", "def set_plugin_rpc(self, plugin_rpc):", "def register_servlets():\n global UPDATE_INFO_SERV\n global GET_FRIENDS_FEED\n global ADD_FRIEND\n global FRIEND_REQUEST_ACTION\n global GET_USERS_LIST\n global FRIEND_COMPARISON\n global GET_USER_FEED\n\n UPDATE_INFO_SERV = communicators.UpdateInfoServlet()\n GET_FRIENDS_FEED = communicators.FriendsFeedServlet()\n ADD_FRIEND = communicators.AddFriendServlet()\n FRIEND_REQUEST_ACTION = communicators.FriendRequestActionServlet()\n GET_USERS_LIST = communicators.UsersListServlet()\n FRIEND_COMPARISON = communicators.ComparisonWithFriendsServlet()\n GET_USER_FEED = communicators.UserFeedServlet()", "def _register_endpoints(self):\n self.api_server.start_api()\n # Register controller endpoints as /api/kytos/core/...\n self.api_server.register_core_endpoint('config/',\n self.configuration_endpoint)\n self.api_server.register_core_endpoint('metadata/',\n Controller.metadata_endpoint)\n self.api_server.register_core_endpoint(\n 'reload/<username>/<napp_name>/',\n self.rest_reload_napp)\n self.api_server.register_core_endpoint('reload/all',\n self.rest_reload_all_napps)\n self.auth.register_core_auth_services()", "def registerHandlerMethodForRpc(self, uri, obj, handler, extra = None):\r\n self.procs[uri] = (obj, handler, True, extra)\r\n if self.debugWamp:\r\n log.msg(\"registered remote handler method on %s\" % uri)", "def register_peer(self):\n try:\n self.get_file_list()\n num_files = len(self.file_list)\n total_ops = self.key_end - self.key_start\n run_ops = total_ops/num_files\n print \"Staring Benchmark Register Peer with Server...\"\n t1 = time.time()\n for i in range(run_ops):\n for file in self.file_list:\n self.service.put(file, self.peer_id)\n t2 = time.time()\n total = run_ops * num_files\n print \"%s Register operations = %s sec\" % (total,t2-t1)\n print \"per Register operation = %s sec\" % ((t2-t1)/total)\n print \"per Register operation = %s msec\" % (((t2-t1)/total)*1000)\n except Exception as e:\n print \"Registering Peer Error, %s\" % e\n sys.exit(1)", "def init_rpc(self):\n self._server = SimpleXMLRPCServer(self._addr, allow_none=True)\n # Registering commands\n 
self._server.register_function(self.start_monitor_rpc, \"start_monitor\")\n self._server.register_function(self.stop_monitor, \"stop_monitor\")\n self._server.register_function(self.get_traffic, \"get_traffic\")\n self._server.register_function(self.get_open_ports, \"get_open_ports\")", "def registerRPC(self, call, args = None):\n\n rpc = RemoteProcedureCall(self, len(self.rpc), args)\n self.rpc.append(call)\n return rpc", "def register_vnc_api_options():\n cfg.CONF.register_opts(vnc_opts, 'APISERVER')\n cfg.CONF.register_opts(vrouter_opts, 'VROUTER')", "def add_subdispatch(self, dispatcher: 'RPCDispatcher', prefix: str = ''):\n self.subdispatchers.setdefault(prefix, []).append(dispatcher)", "def register_resources(self):\n raise NotImplementedError", "def make_json_handler(rpc):\n\n class JSONRPCHandler(BaseHTTPRequestHandler):\n \"\"\"\n A request handler for http.server that speaks JSON-RPC.\n \"\"\"\n def _validate_http_request(self):\n \"\"\"\n Ensures that we understand the HTTP portion of the request.\n \"\"\"\n if self.path != '/':\n print('Invalid request path:', self.path)\n self.send_error(HTTPStatus.NOT_FOUND, 'Request Must Have Path Of /')\n raise ValueError\n\n content_type = self.headers.get('Content-Type', None)\n if content_type != 'application/json':\n print('Invalid request Content-Type:', self.path)\n self.send_error(HTTPStatus.BAD_REQUEST, 'Content-Type Must Be application/json')\n raise ValueError\n\n def _validate_rpc_request(self, request):\n \"\"\"\n Ensures that we understand the JSON-RPC portion of the request.\n \"\"\"\n if request.get('jsonrpc', None) != '2.0':\n raise ValueError('Invalid jsonrpc: must be \"2.0\"')\n\n id = request.get('id', None)\n if not (id is None or isinstance(id, (str, int, float))):\n raise ValueError('Invalid id: must be null, string or number')\n\n method = request.get('method', None)\n if not isinstance(method, str):\n raise ValueError('Invalid method: must be string')\n\n params = request.get('params', [])\n if not isinstance(params, (dict, list)):\n raise ValueError('Invalid params: must be array or object')\n\n def _build_rpc_error(self, id, error, exception, keep_null_id=False):\n \"\"\"\n Returns an error response that can be encoded to JSON.\n\n By default this respects the ID of the request, and returns None if the\n ID is also None. 
To override this behavior, set keep_null_id=True.\n \"\"\"\n if id is None and not keep_null_id:\n return None\n\n message = RPC_ERROR_MESSAGES.get(error, str(exception))\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'error': {\n 'code': error.value,\n 'message': message,\n 'data': {\n 'stacktrace': str(exception) + '\\n' + '\\n'.join(traceback.format_tb(exception.__traceback__))\n }\n }\n }\n\n def _build_rpc_result(self, id, result):\n \"\"\"\n Returns a result response that can be encoded to JSON.\n \"\"\"\n if id is None:\n return None\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'result': result\n }\n\n def _process_request(self, request):\n \"\"\"\n Calls a single RPC function and returns the result.\n \"\"\"\n try:\n self._validate_rpc_request(request)\n except ValueError as err:\n return self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err, keep_null_id=True)\n\n id = request.get('id', None)\n\n try:\n method = getattr(rpc, request['method'])\n except AttributeError as err:\n return self._build_rpc_error(id, RpcErrors.METHOD_NOT_FOUND, err)\n\n try:\n params = request.get('params', None)\n if params is None:\n result = method()\n elif isinstance(params, list):\n result = method(*params)\n elif isinstance(params, dict):\n result = method(**params)\n\n return self._build_rpc_result(id, result)\n\n except TypeError as err:\n return self._build_rpc_error(id, RpcErrors.INVALID_PARAMS, err)\n except Exception as err:\n return self._build_rpc_error(id, RpcErrors.INTERNAL_ERROR, err)\n\n def _send_json(self, value):\n \"\"\"\n Dumps the value to a JSON string, and sets the appropriate headers to\n return it\n \"\"\"\n raw_value = json.dumps(value).encode('utf-8')\n\n self.send_response(200, 'OK')\n for header, value in CORS_HEADERS.items():\n self.send_header(header, value)\n\n self.send_header('Content-Type', 'application/json')\n self.send_header('Content-Length', str(len(raw_value)))\n self.end_headers()\n\n self.wfile.write(raw_value)\n\n def do_POST(self):\n \"\"\"\n Parses and processes a single or batch JSON-RPC request.\n \"\"\"\n try:\n self._validate_http_request()\n except ValueError:\n return\n\n content_length = int(self.headers.get('Content-Length', '0'))\n request_bytes = self.rfile.read(content_length)\n while len(request_bytes) < content_length:\n request_bytes += self.rfile.read(content_length - len(request_bytes))\n\n request_raw = request_bytes.decode('utf-8')\n try:\n request = json.loads(request_raw)\n except ValueError as err:\n error = self._build_rpc_error(None, RpcErrors.PARSE_ERROR, err, keep_null_id=True)\n self._send_json(error)\n return\n\n if isinstance(request, list):\n responses = [self._process_request(single) for single in request]\n response = [r for r in responses if r is not None]\n elif isinstance(request, dict):\n response = self._process_request(request)\n else:\n try:\n raise ValueError\n except ValueError as err:\n error = self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err)\n self._send_json(error)\n return\n\n if response is not None:\n self._send_json(response)\n else:\n self.send_response(200, 'OK')\n self.end_headers()\n\n def do_OPTIONS(self):\n \"\"\"\n Sends back the headers necessary to support CORS\n \"\"\"\n print('Processing CORS OPTIONS request')\n self.send_response(200, 'OK')\n for header, value in CORS_HEADERS.items():\n self.send_header(header, value)\n\n self.end_headers()\n\n return JSONRPCHandler", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "def 
_dispatch_additional_instance_methods(self, instance):\n # self_methods = set([attr for attr in dir(self) if is_method(self, attr)])\n self_methods = [k[0] for k in inspect.getmembers(self, inspect.ismethod)\n if '_callback_pubsub' in k[1].__dict__]\n # instance_methods = set([attr for attr in dir(instance) if is_method(instance, attr)])\n instance_methods = [k[0] for k in inspect.getmembers(instance, inspect.ismethod)\n if '_callback_pubsub' in k[1].__dict__]\n methods_difference = list(set(instance_methods) - set(self_methods))\n map(export_rpc, methods_difference)\n self.protocol.register_object(\n \"http://localhost:{}/raiden#\".format(self.port), instance) # XXX check for name collisions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accumulate observed stars on the same dates.
def accumulate_dates(dates, stars):
    start = min(dates)
    stop = max(dates)
    t_range = (stop - start).days
    a_dates = [start + timedelta(days = n) for n in range(t_range + 1)]
    a_stars = [0 for n in range(t_range + 1)]
    for i in range(len(dates)):
        idx = (dates[i] - start).days
        a_stars[idx] = a_stars[idx] + stars[i]
    return a_dates, a_stars
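A small usage sketch with made-up inputs; observations sharing a date are summed and days with no observations stay at zero (the function itself assumes `timedelta` is imported in its module):

from datetime import date

dates = [date(2021, 1, 1), date(2021, 1, 3), date(2021, 1, 3)]
stars = [2, 1, 4]
a_dates, a_stars = accumulate_dates(dates, stars)
# a_dates -> [date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3)]
# a_stars -> [2, 0, 5]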
[ "def _update_rating_history(self, rating: float, date: Union[str, float]):\n self.rating_history.append((date, rating))", "def average_continuous_readings_on_same_day(d):\n print(\"Prior to averaging together readings on the same day, %i rows\" % len(d))\n assert list(d.columns) == list(['user_id_hash', 'date', 'value'])\n grouped_d = d.groupby(['user_id_hash', 'date']).mean()\n grouped_d = grouped_d.reset_index()\n grouped_d.index = range(len(grouped_d))\n print(\"After averaging together readings on the same day, %i rows\" % len(grouped_d))\n return grouped_d", "def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()", "def _rate_multiple(self, unstacked):\n unstacked['season'] = unstacked['dt'].map(org_ncaa.get_season)\n seasons = RatingsModel._get_seasons(unstacked)\n dfs = []\n for season in seasons:\n print 'Rating for season: %s' % season\n u = unstacked[unstacked['season'] == season]\n u = self.rate(u)\n dfs.append(u)\n\n return pd.concat(dfs)", "def _add(self, index, star):\n\n # We cannot do time-series photometry with stars which have no\n # information at all. It makes no sense, so don't even allow it.\n if not len(star):\n raise ValueError(\"star cannot be empty\")\n\n if not len(self.star_ids):\n self._star_ids.append(star.id)\n self.pfilter = star.pfilter\n self._unix_times = star._unix_times\n\n # A cache, mapping each Unix time to its index in phot_info; passed\n # to the constructor of DBStar for O(1) lookups of Unix times\n self._times_indexes = {}\n for time_index, unix_time in enumerate(self._unix_times):\n self._times_indexes[unix_time] = time_index\n\n # Three-dimensional array: first dimension maps to the star; second\n # to the type of info (index 0 for mag, 1 for SNR) and third to the\n # Unix time (in case we need to know to which time a given index\n # correspond, we can use self._unix_times). For example, to get all\n # the magnitudes of the third star, we will do self.phot_info[2][0]\n self._phot_info[index] = star._phot_info[1:]\n\n else:\n if star.pfilter != self.pfilter:\n msg = \"star with ID = %d has filter '%s', expected '%s'\"\n raise ValueError(msg % (star.id, star.pfilter, self.pfilter))\n\n if star.id in self.star_ids:\n raise ValueError(\"star with ID = %d already in the set\" % star.id)\n\n for unix_time in self._unix_times:\n if unix_time not in star._time_indexes:\n raise ValueError(\"stars must have info for the same Unix times\")\n\n self._star_ids.append(star.id)\n self._phot_info[index] = star._phot_info[1:]", "def repo_rate(created_start, created_end, minstars):\n gql_gen = gql_generator(created_start, minstars=minstars)\n delta = (created_end - created_start).total_seconds()\n day = created_start\n dates_repos = {\"dates\": [], \"repos\": []}\n while delta >= 0:\n # Iterate generator. 
No pagination required.\n repo_count = repocount(next(gql_gen))\n params = {\n \"CreatedStart\": day,\n \"RepoCount\": repo_count,\n }\n logging.info(params)\n dates_repos[\"dates\"].append(day)\n dates_repos[\"repos\"].append(repo_count)\n day = day.shift(days=+1)\n gql_gen = gql_generator(day, minstars=minstars)\n delta = (created_end - day).total_seconds()\n return dates_repos", "def addDate(self, date, count):\n if date.year not in self.data:\n self.data[date.year] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n year = self.data[date.year]\n year[date.month - 1] += count\n\n today = datetime.datetime.now()\n if (today - date).days <= self.recent:\n self.days += count", "def calc_avg_star_rating(ratings):\n avg_star_rating = -1\n\n if ratings:\n sum_stars = 0\n count_star_ratings = 0\n\n for rating in ratings:\n if rating.stars:\n sum_stars += rating.stars\n count_star_ratings += 1\n\n avg_star_rating = float(sum_stars) / count_star_ratings\n\n return avg_star_rating", "def daily_motion(cls, date):\n mean_motion = 360 / cls.SIDEREAL_YEAR\n anomaly = cls.mean_position(date, cls.ANOMALISTIC_YEAR)\n epicycle = 14/360 - abs(cls.sine(anomaly)) / 1080\n entry = quotient(float(anomaly), angle(0, 225, 0))\n sine_table_step = cls.sine_table(entry + 1) - cls.sine_table(entry)\n factor = -3438/225 * sine_table_step * epicycle\n return mean_motion * (factor + 1)", "def advance(self):\n for star in self.stars:\n star.advance()", "def _update(self):\n aps = []\n recall, precs = self._recall_prec()\n for ll, rec, prec in zip(range(len(precs)), recall, precs):\n ap = self._average_precision(rec, prec)\n aps.append(ap)\n if self.num is not None and ll < (self.num - 1):\n self.sum_metric[ll] = ap\n self.num_inst[ll] = 1\n if self.num is None:\n self.num_inst = 1\n self.sum_metric = np.nanmean(aps)\n else:\n self.num_inst[-1] = 1\n self.sum_metric[-1] = np.nanmean(aps)", "def iterate_on_dateaxis(self):\r\n return self._iterate_on_dateaxis('', self.metrica.multiplier)", "def _accumulate_rewards(self) -> None:\n for agent, reward in self.rewards.items():\n self._cumulative_rewards[agent] += reward", "def visitCalculated(self, date):\n raise NotImplementedError()", "def accumulate(self, mat):\n\n assert (mat.positions == self.positions), \"Positions not align\"\n self.rmse += mat.rmse\n self.mae += mat.mae\n self.p_at_k = np.add(self.p_at_k, mat.p_at_k)\n self.r_at_k = np.add(self.r_at_k, mat.r_at_k)\n self.mrr_at_k = np.add(self.mrr_at_k, mat.mrr_at_k)\n self.ndcg_at_k = np.add(self.ndcg_at_k, mat.ndcg_at_k)\n self.time += mat.time", "def update_stars(self):\n self.stars.update(self.dt, self.cursor, self.stars)\n self.star_timer += self.dt\n\n for star in self.stars:\n # check for active stars\n if star.active:\n icon_rect = pygame.Rect(star.rect.x - 3, star.rect.y - 3,\n star.rect.width + 6, star.rect.width + 6)\n pygame.draw.rect(self.screen, star.active_clr, icon_rect, 2)\n\n if star not in self.active_stars:\n self.active_stars.append(star)\n if star.rect.center not in self.active_points:\n self.active_points.append(star.rect.center)\n\n # check for inactive stars\n elif not star.active:\n if star in self.active_stars:\n self.active_stars.remove(star)\n if star.rect.center in self.active_points:\n self.active_points.remove(star.rect.center)\n\n # check for dead stars\n if star.dead:\n if star in self.active_stars:\n self.active_stars.remove(star)\n if star.rect.center in self.active_points:\n self.active_points.remove(star.rect.center)\n self.stars.remove(star)\n\n # STAGE 1: small words arrive one at a time\n 
if (self.timer > self.ts[\"star_1_start\"] and\n self.timer < self.ts[\"star_1_end\"]):\n self.star_delay = 9\n if self.star_timer >= self.star_delay:\n self.star_timer = 0\n self.make_star(size=\"small\")\n \n # STAGE 2\n elif (self.timer > self.ts[\"star_2_start\"] and\n self.timer < self.ts[\"star_2_end\"]):\n self.star_delay = random.randint(2, 4)\n if self.star_timer >= self.star_delay:\n self.star_timer = 0\n self.make_star(size=\"medium\")\n \n # STAGE 3\n elif (self.timer > self.ts[\"star_3_start\"]):\n self.star_delay = random.randint(1, 3)\n if self.star_timer >= self.star_delay:\n self.star_timer = 0\n self.make_star(size=\"medium\")", "def add_star(array, star_data, disk_star_ratio=0.001):\n left_bound = np.shape(star_data)[1]//2 - np.shape(array)[1]//2\n right_bound = np.shape(star_data)[1]//2 + np.shape(array)[1]//2\n\n # Cutting star data into the shape of the model\n star_data = star_data[:, left_bound:right_bound, left_bound:right_bound]\n star_data /= np.amax(star_data)\n\n star_addition = array * (disk_star_ratio) + star_data * (1-disk_star_ratio)\n\n return star_addition", "def get_daily_average(self):\n from politico.models import Story\n obj_list = Story.all().filter('bylines =', self.key())\n obj_list = obj_list.filter(\"update_date >=\", ANALYSIS_STARTDATE)\n date_diff = (datetime.now() - ANALYSIS_STARTDATE).days\n return obj_list.count() / float(date_diff)", "def add_star_team(client_id, team_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get the indexed name of the team.\n\t\tteam_indexed_name = session.query(Team.indexed_name)\\\n\t\t\t\t.filter(Team.id == team_id)\\\n\t\t\t\t.one()\\\n\t\t\t\t.indexed_name\n\t\t# Add the client's star for the team.\n\t\tstarred_team = StarredTeam(user_id=client_id,\n\t\t\t\tteam_id=team_id,\n\t\t\t\tindexed_name=team_indexed_name,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_team)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this team.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the team.\n\tsession.execute(Teams.update()\n\t\t\t.where(Team.id == team_id)\n\t\t\t.values({Team.num_stars: Team.num_stars + 1}))\n\n\t# If needed, add a CalendarEntry for each streamed match.\n\tmatches_cursor = session.query(MatchOpponent.match_id, Match)\\\n\t\t\t.join(Match, MatchOpponent.match_id == Match.id)\\\n\t\t\t.filter(MatchOpponent.team_id == team_id, MatchOpponent.is_streamed == True)\n\tfor match_id, match in matches_cursor:\n\t\t_increment_num_user_stars(client_id, match, now)\n\t\n\tsession.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate estimated number of stars observed during VLASS observation. Assume 4.2 sec per pointing as estimated by Paul.
def vlass_stars(duration, n_beams):
    n_pointings = duration//4.2
    n_observed = n_pointings*n_beams
    return n_observed
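A quick worked example with illustrative numbers: a one-hour (3600 s) observation with a hypothetical 36-beam receiver gives 3600 // 4.2 = 857 pointings, i.e. 857 * 36 = 30852 stars.

vlass_stars(3600, 36)   # -> 30852.0 (857.0 pointings * 36 beams)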
[ "def update_nps(self):\n self.avg_nps = (self.pos_index/(self.pos_index+1))*self.avg_nps + (1/(self.pos_index+1))*wait_for(self.info_handler,\"nps\")", "def articulation_rate(self):\r\n objects = self.__get_objects()\r\n z1 = str(objects[1]).strip().split()\r\n return int(z1[3])", "def supernovae_rate(self, time, timestep, metallicity):\n # get the mass limits of the timesteps the user passed in. The\n # lower time corresponds to the higher stellar mass\n m_low = self.lifetimes.turnoff_mass(time + timestep, metallicity)\n m_high = self.lifetimes.turnoff_mass(time, metallicity)\n\n # check the bounds, since supernovae can only happen for certain\n # mass stars\n min_mass = self.sn_ii_model.sn.mass_boundary_low\n max_mass = self.sn_ii_model.sn.mass_boundary_high\n m_low = max(m_low, min_mass)\n m_high = min(m_high, max_mass)\n if m_low > max_mass or m_high < min_mass:\n return 0\n\n # Here we just integrate over the IMF to count the stars in this mass\n # range that die\n number = self._integrate_mass_smart(self.imf.normalized_dn_dm,\n m_low, m_high, source=\"massive\")\n return number / timestep", "def on_neptune(self) -> float:\n period = 164.79132\n return self._calculate_age(period)", "def ventilation_rate_per_second(self):\n return self.volume * self.outdoor_air_ventilation * 1000 / 3600", "def calcul_moyenne_stars(liste_projets_json):\n\n if not liste_projets_json:\n return 0\n\n nombre_stars = 0.0\n for projet in liste_projets_json:\n # if projet['stargazers_count']:\n nombre_stars += projet['stargazers_count']\n\n moyenne_stars = nombre_stars / len(liste_projets_json)\n\n return moyenne_stars", "def on_venus(self) -> float:\n period = 0.61519726\n return self._calculate_age(period)", "def overall_rate(self):\n if self.time_elapsed() == 0:\n return 1\n return float(self.history[-1][0] - self.start[0]) / self.time_elapsed()", "def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)", "def ndpm(self):\n\n merged = pd.merge(left=self.test, right=self.predict, on=['user', 'item'], how='inner')[\n ['user', 'rating_x', 'rating_y']]\n ndpms = []\n for user in merged.user.unique():\n frame = merged[merged.user == user]\n if frame.shape[0] <= 1:\n continue\n C_plus = self.num_of_ordered_positive(frame, 'rating_x', 'rating_y')\n C_minus = self.num_of_ordered_negative(frame, 'rating_x', 'rating_y')\n C_u = self.num_of_ordered(frame, 'rating_x')\n if C_u == 0:\n continue\n C_s = self.num_of_ordered(frame, 'rating_y')\n C_u0 = C_u - (C_plus + C_minus)\n ndpms.append(1 - (C_minus + 0.5 * C_u0) / C_u)\n\n return sum(ndpms) / len(ndpms)", "def mse(predicted_ratings):\n diff = predicted_ratings['stars'] - predicted_ratings['predicted rating']\n return (diff**2).mean()", "def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def avg_path_length():\n # you must replace this value with the avg path length\n return 4.424", "def getActualNumObs(avgNumObs, proportion):\n result = round(avgNumObs*proportion)\n return result", "def get_usage_ratio() :\n return float(AthleteResults._athlete_results_counter # Ratio of how often AthleteResults subclass 
was called/used \n / AthleteResults._processing_counter)", "def lastProgessRate(self):\n\t\tp1 = self.tracker[-2]\n\t\tp2 = self.tracker[-1]\n\t\treturn (p1[1].cost - p2[1].cost) / (p2[0] - p1[0]) if len(self.tracker) > 1 else 0.0", "def ratio(N):\n\n return (N+math.sqrt(N*N+4))/2", "def _predicted_rate_avg(self):\n if len(self.history) < 2:\n return None\n work_done = self.history[-1][0]\n return float(self.history[-1][0] - self.start[0]) / \\\n (self.history[-1][1] - self.start[1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries a nearby weather underground station for temp data and rain data
def get_weather_data(weather_station):
    now = datetime.datetime.now()
    then = now - datetime.timedelta(days=7)
    query_date_start = ("%d%02d%02d" % (then.year, then.month, then.day))
    query_date_end = ("%d%02d%02d" % (now.year, now.month, now.day))
    api_key = '/api/%s' % WUNDERGROUND_KEY
    history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)
    query = '/q/%s.json?showObs=0&ttl=120' % weather_station
    weather_url = ("%s%s%s%s" % (WUNDERGROUND_HOST, api_key, history_key, query))
    logger.info('Weather URL: %s', weather_url)
    response = requests.get(weather_url).text
    max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']
    sum_precip = json.loads(response)['history']['summary']['precip_sum']
    return max_temp_avg, sum_precip
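The function leans on module-level names that are not part of the snippet: `WUNDERGROUND_HOST`, `WUNDERGROUND_KEY` and `logger`, plus `datetime`, `json` and `requests` imports. A hedged calling sketch with placeholder values (the host string follows the Wunderground URL seen in a negative below; the station ID is made up):

import logging
logger = logging.getLogger(__name__)
WUNDERGROUND_HOST = 'http://api.wunderground.com'   # assumed
WUNDERGROUND_KEY = '<your-api-key>'                 # placeholder

max_temp_avg, sum_precip = get_weather_data('KCASANFR70')   # hypothetical station ID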
[ "def get_underground_weather(lat, long, date, key):\n response = requests.get(\"http://api.wunderground.com/api/\" + key + \"/history_\" + str(date) + \"/q/\" + str(lat) + \",\" + str(long) +\".json\")\n response = response.json()\n\n rain = response['history']['dailysummary'][0]['rain'] # Binary indicator whether it rained or not (1 or 0).\n precipm = response['history']['dailysummary'][0]['precipm'] # Amount of precipitation (metric system).\n maxtempm = response['history']['dailysummary'][0]['maxtempm'] # Max. Temperature of day (°C).\n meantempm = response['history']['dailysummary'][0]['meantempm'] # Mean Temperature of day (°C).\n mintempm = response['history']['dailysummary'][0]['mintempm'] # Min. Temperature of day (°C).\n meanwindspdm = response['history']['dailysummary'][0]['meanwindspdm'] # Mean wind speed (metric system).\n\n return rain, precipm, maxtempm, meantempm, mintempm, meanwindspdm", "def get_weather(station_num):\n\n \n # MySQL query to get average hourly availability for a given station\n sql = \"\"\"replace this with SQL query for specified station number {};\"\"\".format(station_num) #TODO: Add query\n \n # Execute SQL query for weather\n# result = db.engine.execute(sql) # result is a RowProxy\n \n # Get data from queries and structure for JSON file (dictionary)\n data = {}\n \n # Add code to populate dictionary from result\n \n return data", "def _query_weather(self, start_time, end_time):\n # The weather data API needs microseconds from the epoch as a\n # string. Why this inconsistency? I don't know.\n payload = {'queryMeasurement': 'weather',\n 'queryFilter': {'startTime':\n utils.dt_to_s_from_epoch(start_time),\n 'endTime':\n utils.dt_to_s_from_epoch(end_time)},\n 'responseFormat': 'JSON'}\n\n topic = topics.TIMESERIES\n data = self.gad.get_response(topic=topic, message=payload,\n timeout=self.timeout)\n\n # Check to see if we actually have any data.\n if (data['data'] is None) or (len(data['data']) == 0):\n raise QueryReturnEmptyError(topic=topic, query=payload)\n\n return data", "def temperatures():\n\n return station_9281", "def YesterdaysWeatherCalculator(weather, snow):\n #Check to see if we already populated the data.\n \n raw_data = models.YesterdaysWeather.all()\n raw_data.filter('date_time_added >', age_threshold)\n data = raw_data.get()\n if not data:\n \n initial = weather[0]\n \n noaa_observation_location = initial.noaa_observation_location\n station_id = initial.station_id\n icon_url_base = initial.icon_url_base\n icon_url_name = initial.icon_url_name\n weather_for_db = initial.weather\n\n dewpoint_f_high = initial.dewpoint_f\n dewpoint_f_low = initial.dewpoint_f\n dewpoint_c_low = initial.dewpoint_c\n dewpoint_c_high = initial.dewpoint_c\n temp_c_high = initial.current_temp_c\n temp_c_low = initial.current_temp_c\n temp_f_high = initial.current_temp_f\n temp_f_low = initial.current_temp_f\n wind_mph_high = initial.wind_mph\n wind_mph_low = initial.wind_mph\n wind_gust_mph_high = initial.wind_gust_mph\n wind_gust_mph_low = initial.wind_gust_mph\n dewpoint_f_high = initial.dewpoint_f\n dewpoint_f_low = initial.dewpoint_f\n dewpoint_c_high = initial.dewpoint_c\n dewpoint_c_low = initial.dewpoint_c\n pressure_mb_high = initial.pressure_mb\n pressure_mb_low = initial.pressure_mb\n pressure_in_high = initial.pressure_in\n pressure_in_low = initial.pressure_in\n visibility_mi_high = initial.visibility_mi\n visibility_mi_low = initial.visibility_mi\n relative_humidity_high = initial.relative_humidity\n relative_humidity_low = 
initial.relative_humidity\n\n for datapoint in weather:\n if datapoint.dewpoint_f > dewpoint_f_high:\n dewpoint_f_high = datapoint.dewpoint_f\n if datapoint.dewpoint_f < dewpoint_f_low:\n dewpoint_f_low = datapoint.dewpoint_f\n if datapoint.dewpoint_c > dewpoint_c_high:\n dewpoint_c_high = datapoint.dewpoint_c\n if datapoint.dewpoint_c < dewpoint_c_low:\n dewpoint_c_low = datapoint.dewpoint_c\n if datapoint.current_temp_c > temp_c_high:\n temp_c_high = datapoint.current_temp_c\n if datapoint.current_temp_c < temp_c_low:\n temp_c_low = datapoint.current_temp_c\n if datapoint.current_temp_f > temp_f_high:\n temp_f_high = datapoint.current_temp_f\n if datapoint.current_temp_f < temp_f_low:\n temp_f_low = datapoint.current_temp_f\n if datapoint.wind_mph > wind_mph_high:\n wind_mph_high = datapoint.wind_mph\n if datapoint.wind_mph < wind_mph_low:\n wind_mph_low = datapoint.wind_mph\n if datapoint.wind_gust_mph > wind_gust_mph_high:\n wind_gust_mph_high = datapoint.wind_mph\n if datapoint.wind_gust_mph < wind_gust_mph_low:\n wind_gust_mph_low = datapoint.wind_mph\n if datapoint.pressure_mb > pressure_mb_high:\n pressure_mb_high = datapoint.pressure_mb\n if datapoint.pressure_mb < pressure_mb_low:\n pressure_mb_low = datapoint.pressure_mb\n if datapoint.pressure_in > pressure_in_high:\n pressure_in_high = datapoint.pressure_in\n if datapoint.pressure_in < pressure_in_low:\n pressure_in_low = datapoint.pressure_in\n if datapoint.visibility_mi > visibility_mi_high:\n visibility_mi_high = datapoint.visibility_mi\n if datapoint.visibility_mi < visibility_mi_low:\n visibility_mi_low = datapoint.visibility_mi\n if datapoint.relative_humidity > relative_humidity_high:\n relative_humidity_high = datapoint.relative_humidity\n if datapoint.relative_humidity < relative_humidity_low:\n relative_humidity_low = datapoint.relative_humidity\n\n \n\n new = models.YesterdaysWeather()\n\n new.noaa_observation_location = noaa_observation_location\n new.station_id = station_id\n new.icon_url_base = icon_url_base\n new.icon_url_name = icon_url_name\n new.temp_c_high = temp_c_high\n new.temp_c_low = temp_c_low\n new.temp_f_high = temp_f_high\n new.temp_f_low = temp_f_low\n new.wind_mph_high = wind_mph_high\n new.wind_mph_low = wind_mph_low\n new.wind_gust_mph_high = wind_gust_mph_high\n new.wind_gust_mph_low = wind_gust_mph_low\n new.dewpoint_f_high = dewpoint_f_high\n new.dewpoint_f_low = dewpoint_f_low\n new.dewpoint_c_high = dewpoint_c_high\n new.dewpoint_c_low = dewpoint_c_low\n new.pressure_mb_high = pressure_mb_high\n new.pressure_mb_low = pressure_mb_low\n new.pressure_in_high = pressure_in_high\n new.pressure_in_low = pressure_in_low\n new.weather = weather_for_db\n new.visibility_mi_high = visibility_mi_high\n new.visibility_mi_low = visibility_mi_low\n new.relative_humidity_high = relative_humidity_high\n new.relative_humidity_low = relative_humidity_low\n new.new_snow_8200ft_24_hours = snow\n\n new.put()\n logging.info('Adding data to datastore.')\n\n else:\n logging.info('Data already exists.')", "def weather_fetch(city_name):\n api_key = 'b8e63e97d870d37437a1bf6a70f3de3f'\n current=requests.get(\"http://api.openweathermap.org/data/2.5/weather?appid={}&q={}\".format(api_key,city_name))\n\n temperature=current.json()['main']['humidity']\n humidity=current.json()['main']['temp']\n\n\n\n\n\n temperature = round((temperature - 273.15), 2)\n return temperature, humidity", "def query_digital_temp_sensors(self):\n raw_temps = self.query('TEMP?')\n temp1 = float(raw_temps[4:8])/16.\n temp2 = 
float(raw_temps[9:])/16.\n return temp1, temp2", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def getRainfallData(station, day, month, year):\r\n url = \"http://www.wunderground.com/weatherstation/WXDailyHistory.asp?ID={station}&day={day}&month={month}&year={year}&graphspan=day&format=1\"\r\n full_url = url.format(station=station, day=day, month=month, year=year)\r\n # Request data from wunderground data\r\n response = requests.get(full_url, headers={'User-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'})\r\n data = response.text\r\n # remove the excess <br> from the text data\r\n data = data.replace('<br>', '')\r\n # Convert to pandas dataframe (fails if issues with weather station)\r\n try:\r\n dataframe = pd.read_csv(io.StringIO(data), index_col=False)\r\n dataframe['station'] = station\r\n except Exception as e:\r\n print(\"Issue with date: {}-{}-{} for station {}\".format(day,month,year, station))\r\n return None\r\n return dataframe", "def update_rain_temp(self, day_of_week, departure_time_seconds):\n\n current_time = t.time()\n today = datetime.today().weekday()\n\n if (departure_time_seconds < (current_time + 3600) \\\n and day_of_week == today):\n\n self.temp = self.current_temperature\n self.rain = self.current_rainfall\n\n elif (day_of_week == today):\n for i in range(24):\n if (departure_time_seconds > self.weather_forecast_json \\\n [\"hourly\"][\"data\"][i][\"time\"] and departure_time_seconds \\\n < self.weather_forecast_json[\"hourly\"][\"data\"][i + 1][\"time\"]):\n\n self.temp = self.weather_forecast_json \\\n ['hourly']['data'][i]['temperature']\n\n self.rain = self.weather_forecast_json['hourly'] \\\n ['data'][i]['precipIntensity']\n break\n else:\n continue\n else:\n day_difference = int((departure_time_seconds - current_time) / 86400)\n\n self.temp = (self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMax'] + \\\n self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMin']) / 2\n\n self.rain = self.weather_forecast_json['daily'] \\\n ['data'][day_difference]['precipIntensity']", "def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def query_API(key, latitude, longitude, time):\n\n if isinstance(time, str):\n\n # Get data for UTC day of time\n query = 'https://api.darksky.net/forecast/' + key + '/' + latitude + ',' + longitude + ',' + time\n queryresult = pd.read_json(query)\n all_data = queryresult['hourly']['data']\n for m, hour_data in enumerate(all_data):\n if m == 0:\n df = pd.DataFrame(hour_data, index=[m])\n else:\n df = pd.concat([df, pd.DataFrame(hour_data, index=[m])])\n return df\n\n elif isinstance(time, list):\n\n # Build data for first day\n query = 'https://api.darksky.net/forecast/' + key + '/' + latitude + ',' + longitude + ',' + time[0]\n queryresult = pd.read_json(query)\n all_data = queryresult['hourly']['data']\n for m, hour_data in enumerate(all_data):\n if m == 0:\n df = pd.DataFrame(hour_data, index=[m])\n 
else:\n df = pd.concat([df, pd.DataFrame(hour_data, index=[m])], sort=True)\n\n # Prepare time variables for all days\n st = dt.datetime.strptime(time[0], '%Y-%m-%dT%H:%M:%SZ')\n et = dt.datetime.strptime(time[1], '%Y-%m-%dT%H:%M:%SZ')\n doys = int(math.ceil((et - st).total_seconds() / 86400))\n\n # Build data for all days\n for doy in range(doys):\n time = (st + dt.timedelta(days=doy)).isoformat()\n query = 'https://api.darksky.net/forecast/' + key + '/' + latitude + ',' + longitude + ',' + time\n queryresult = pd.read_json(query)\n all_data = queryresult['hourly']['data']\n for n, hour_data in enumerate(all_data):\n m += n + 1\n df = pd.concat([df, pd.DataFrame(hour_data, index=[m])], sort=True)\n return df", "def get_weather(days, hours, db):\n days = format_list_for_db(days)\n hours = format_list_for_db(hours)\n sql = f\"SELECT * FROM weather WHERE day in {days} AND HOUR in {hours}\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n cursor.close()\n\n weathers = []\n if len(data) > 0:\n for weather in data:\n weather = {\"hour\": weather[1],\n \"day\": day_absolute_to_relative(weather[2]),\n \"temperature\": weather[3],\n \"apparenttemperature\": weather[4],\n \"precipitationintensity\": weather[5],\n \"precipitationprobability\": weather[6],\n \"humidity\": weather[7],\n \"dewpoint\": weather[8],\n \"windspeed\": weather[9],\n \"windbearing\": weather[10],\n \"windgust\": weather[11],\n \"pressure\": weather[12],\n \"cloudcover\": weather[13],\n \"uvindex\": weather[14],\n \"visibility\": weather[15]}\n weathers.append(weather)\n return weathers", "def GetWeather(query, api_key):\n try:\n owm = pyowm.OWM(api_key)\n observation = owm.weather_at_place(str(query))\n location = observation.get_location()\n weather = observation.get_weather()\n temp = weather.get_temperature('fahrenheit')\n status = CleanupWeatherStatus(weather.get_detailed_status())\n return 'It is %sF degrees with %s in %s right now.' % (int(temp['temp']),\n status,\n location.get_name())\n except:\n return 'I couldn\\'t find any weather for %s. I am sorry.' 
% (query)", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def GetWeather(self,timeStr=\"06\"):\n start = time.time()\n if type(timeStr)==type(int()) or type(timeStr)==type(float()):\n timeStr = \"%02d\"%(timeStr)\n\n if timeStr not in [\"06\",\"12\",\"24\"]:\n assert(0),\"Time must be 6, 12, or 24\"\n retVal1=dict()\n retVal2=dict()\n\n # Read data and prep for indexing\n rawData = urllib2.urlopen(\"http://aviationweather.gov/products/nws/all?fint=%s&lvl=lo\"%(timeStr))\n\n listData = list()\n for idx,val in enumerate(rawData):\n if val==\"000\\n\":\n dataStart = idx\n listData.append(val)\n idx = dataStart\n \n\n # Insert the time range into the Dict\n while(1):\n if listData[idx][0]==\"<\":\n break\n match = re.search(\"FOR USE (\\d{4})-(\\d{4})Z\",listData[idx])\n if match:\n retVal2[\"FOR USE\"]=[int(match.group(1)),int(match.group(2))]\n break\n idx+=1 \n\n\n # Insert the altitude headers into the Dict\n while(1):\n if listData[idx][0]==\"<\":\n break\n match = re.search(\"FT\\ +(\\d+)\\ +(\\d+)\\ +(\\d+)\\ +(\\d+)\\ +(\\d+)\\ +(\\d+)\\ +(\\d+)\\ +(\\d+)\\ +(\\d+)\",listData[idx])\n if match:\n # TODO: We will parse this in a different function\n # retVal[\"ALTS\"]=[\n # int(match.group(1)),\n # int(match.group(2)),\n # int(match.group(3)),\n # int(match.group(4)),\n # int(match.group(5)),\n # int(match.group(6)),\n # int(match.group(7)),\n # int(match.group(8)),\n # int(match.group(9))\n # ]\n idx+=1\n break\n idx+=1\n\n\n # Parse airports, and place them into the Dict\n aptData=list()\n while(1):\n if listData[idx][0]==\"<\":\n break\n # Beak line into the numbers\n match = re.split(\"(\\d{4,6}(?:[-+]\\d{2})?)\",listData[idx])\n if match:\n # Trim out the blank and new line matches\n for i,val in enumerate(match):\n if val[0]==' ' or val[0]=='\\n':\n del match[i]\n # Insert 'None' objects to replace empty locations\n while(len(match)<10):\n match.insert(1,None)\n # Trim white space\n match[0]=match[0].strip()\n aptData.append(match)\n idx+=1\n\n for indexApt,apt in enumerate(aptData):\n # Parse each airport found\n for indexW,weather in enumerate(apt):\n # Skip empties\n if(weather==None):\n continue\n # Skip the name\n if(len(weather)==3):\n continue\n # Parse the data and store as a three part list\n # TODO: This could be another function\n match = re.match(\"(\\d{2})(\\d{2})([-+])?(\\d{2})?\",weather)\n if match:\n tmpWData = [0,0,0]\n # print\n # print match.group(0) #All\n # print match.group(1) # Heading\n # print match.group(2) # Speed\n # print match.group(3) # -/+\n # print match.group(4) # Temp\n\n if match.group(1) == \"99\":\n tmpWData[0] = 0\n tmpWData[1] = 0\n elif int(match.group(1)) > 36:\n tmpWData[0] = (int(match.group(1)) - 50)*10\n tmpWData[1] = int(match.group(2)) + 100\n else:\n tmpWData[0] = (int(match.group(1)))*10\n tmpWData[1] = int(match.group(2))\n\n if match.group(3) == None and match.group(4) == None:\n pass\n elif match.group(3) == None or match.group(3) == \"-\":\n tmpWData[2] = -int(match.group(4))\n elif match.group(3) == \"+\":\n 
tmpWData[2] = int(match.group(4))\n aptData[indexApt][indexW] = tmpWData\n retVal1[apt[0]] = apt[1:]\n # End for indexW,i in enumerate(apt):\n # End for indexApt,apt in enumerate(aptData):\n return retVal1,retVal2", "def combine_weather(weather):\n\n weather1 = weather[weather[\"Station\"] == 1]\n weather2 = weather[weather[\"Station\"] == 2]\n\n\n pass", "def get_weather_data():\n fname = 'weather-2016.html'\n if not os.path.exists(fname):\n data = ('messw_beg=01.01.2016&messw_end=31.12.2016&'\n 'felder[]=Temp2m&felder[]=TempWasser&felder[]=Windchill&'\n 'felder[]=LuftdruckQFE&felder[]=Regen&felder[]=Taupunkt&'\n 'felder[]=Strahlung&felder[]=Feuchte&felder[]=Pegel&'\n 'auswahl=2&combilog=mythenquai&suchen=Werte anzeigen')\n data = data.encode('ascii')\n\n req = urllib.request.Request(\n 'https://www.tecson-data.ch/zurich/mythenquai/uebersicht/messwerte.php',\n method='POST',\n data=data,\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\",\n 'User-Agent': 'http://github.com/wildtreetech/explore-open-data'\n },\n )\n\n with urllib.request.urlopen(req) as web:\n with open(fname, 'w') as local:\n local.write(web.read().decode('iso-8859-1'))\n\n df = pd.read_html(fname, attrs={'border': '1'}, skiprows=1)\n # take the first data frame from the list of data frames\n df = df[0]\n # this refers to the first column of the data frame now\n df[0] = pd.to_datetime(df[0], dayfirst=True)\n df.columns = ['Date', 'Temp', 'WaterTemp', 'Windchill', 'Pressure', 'Rain',\n 'Dewpoint', 'Radiation', 'Humidity', 'Waterlevel']\n df = df.set_index('Date')\n\n return df", "def get_data_from_IOT():\n\n # Doc\n # API Key used to complete the request for openweathermap.com\n api_key = \"9b67791e84d728b1070be77395a687ad\"\n \n # base_url variable to store url\n base_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n \n # The city name\n city_name = \"Riccione\"\n \n # Complete Url to send a request for openweathermap.com\n complete_url = base_url + \"appid=\" + api_key + \"&q=\" + city_name\n \n # get method of requests module\n # return response json object\n response = requests.get(complete_url)\n \n # json method of response object \n # convert json format data into\n # python format data\n x = response.json()\n \n # x variable contain the city information\n # if the city return 404 the resource was not found\n # city is not found\n if x[\"cod\"] != \"404\":\n \n # store the value of \"main\"\n # key in variable y\n y = x[\"main\"]\n \n # store the value corresponding\n # to the \"temp\" key of y\n current_temperature = math.trunc(round(y[\"temp\"] - 273,15)) # Convert temperature in celsius from kelvin\n \n # store the value corresponding\n # to the \"humidity\" key of y\n current_humidity = y[\"humidity\"]\n\n else:\n\n print(\"City Not Found \")\n\n return current_temperature, current_humidity" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all zip streams and their positions in file.
def zipstreams(filename):
    with open(filename, 'rb') as fh:
        data = fh.read()
    i = 0
    while i < len(data):
        try:
            zo = zlib.decompressobj()
            yield i, zo.decompress(data[i:])
            i += len(data[i:]) - len(zo.unused_data)
        except zlib.error:
            i += 1
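The generator brute-force scans the raw bytes for anything zlib can inflate, so the defining module needs `import zlib`. A brief usage sketch with an illustrative file name:

import zlib  # required by zipstreams

for offset, payload in zipstreams('archive.bin'):   # hypothetical input file
    print(offset, len(payload))   # byte offset of each stream and its inflated size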
[ "def zip_streams(\n fileobject=\"/data/marble/nemweb/ARCHIVE/DispatchIS_Reports/PUBLIC_DISPATCHIS_20140208.zip\",\n):\n with ZipFileStreamer(fileobject) as zf:\n for filename in zf.namelist():\n yield filename, zf.extract_stream(filename)", "def get_zips(self):\n return self.zips", "def _GetStreamNames(self):\n if self._zipfile:\n for stream_name in self._zipfile.namelist():\n yield stream_name", "def get_zipped(path, progress=None):\n #breakpoint()\n with ZipFile(path) as ziphandle:\n zipped_mxf = list(file for file in ziphandle.namelist()\n if is_mediafile(file))\n if progress:\n progress.set_length(len(zipped_mxf))\n for entry in zipped_mxf:\n try:\n mxf = ziphandle.open(entry)\n except BadZipFile as e:\n msg = f'Could not open {entry} in {path}'\n warnings.warn(msg, e)\n continue\n with tempfile.NamedTemporaryFile(suffix='.mxf') as file:\n file.write(mxf.read())\n yield file.name, os.path.join(path, entry)\n mxf.close()", "def list_zip(src):\n with ZipFile(src, 'r') as zf:\n zf.printdir()", "def get_files(self):\n return self.zip.namelist()", "def get_cum_docs_per_zip(zips):\n docs_per_zip = {}\n count = 0\n for zipf in zips:\n with zipfile.ZipFile(zipf) as zf:\n count += len(zf.namelist())\n docs_per_zip[zipf] = count\n return docs_per_zip", "def zip_generator(files):\n\tcmd = [\"zip\", \"--quiet\", \"-r\", \"-\"] + files\n\tp = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n\tfor data in data_generator(p.stdout):\n\t\tyield data\n\t# Afterwards, check for errors:\n\tp.poll()\n\tif p.returncode != 0: \n\t\traise Exception(\"Return code from 'zip' was {}.\".format(p.returncode));", "def _extract_zip(self, zipfile):\n zf = ZipFile(zipfile)\n d = {}\n for n in zf.namelist():\n d[n] = zf.read(n)\n return d", "def subzip(self, *members):\n dmembers = {member:self.get_fp(member) for member in members}\n return utils.zip_files(dmembers)", "def get_zipped_sentences(datazip):\n for filename in os.listdir(smart_open.smart_open(datazip)):\n with open(filename, 'r') as input_stream:\n for line in input_stream:\n yield line.strip().split()", "def get_doc_infos(self, zipfile):\n docinfos = []\n for zipinfo in zipfile.infolist():\n num_data_fields_labeled = 0\n num_data_fields_total = 0\n found_docs = filter(lambda d: d.name == zipinfo.filename, self.docs)\n if len(found_docs) > 0:\n num_data_fields_total = found_docs[0].num_blocks\n docinfos.append({\n 'name': zipinfo.filename,\n 'size': zipinfo.file_size,\n 'num_data_fields_labeled': num_data_fields_labeled,\n 'num_data_fields_total': num_data_fields_total\n })\n return docinfos", "def get_zip(self):\n self.zip.rewind()\n return self.zip.in_memory_zip", "def zip(self):\n global pointer\n global error_flag\n global totalFiles\n while pointer < len(self.files) and ((self.t and not error_flag) or not self.t):\n # Se o modo e' t e a error_flag nao for false entao pode avancar\n # Se o modo nao for t pode avancar sem restricoes\n self.sem.acquire()\n iterator = pointer\n pointer += 1\n self.sem.release()\n if iterator < len(self.files): # Iterator e' o ficheiro que deve ser utilizado pela thread\n File = self.files[iterator]\n if os.path.isfile(File): # Ver se o ficheiro existe\n with ZipFile(File + '.zip', 'w') as zipfile:\n zipfile.write(File) # Zip\n self.totalFilesSem.acquire()\n totalFiles += 1\n self.totalFilesSem.release()\n else:\n print \"O ficheiro\", File, \"não existe.\" # Se nao existir, avisa o utilizador\n error_flag = True # Atualiza a sua propria flag", "def follow(zipfile, starting_nothing):\n filename = 
get_filename(starting_nothing)\n all_filenames = [filename]\n while True:\n contents = zip_file.read(filename).decode('utf-8')\n next_nothing = find_next_nothing(contents)\n if next_nothing is None:\n # Complete - return\n return all_filenames, contents\n else:\n # Go around for another loop\n filename = get_filename(next_nothing)\n all_filenames.append(filename)", "def unzip(self):\r\n\r\n zip_files_list = self.getfiles(\".zip\")\r\n\r\n for items in zip_files_list:\r\n zip_ref = zipfile.ZipFile(items, 'r')\r\n zip_ref.extractall(os.getcwd())\r\n zip_ref.close()\r\n\r\n return self", "def extract_zip_contents(zipfilepath):\n\n zipobj = zipfile.ZipFile(zipfilepath)\n output_folder = zipfilepath.replace(\".zip\", \"\")\n zipobj.extractall(output_folder)\n\n file_list = []\n for root, _, files in os.walk(output_folder):\n for currfile in files:\n file_list.append(os.path.join(root, currfile))\n\n return file_list", "def _data_files(self, zippath):\n duczip = '/keyphrase_data/DUC-2001.zip'\n z = extract_nested_zipfile(zippath.rstrip('\\/') + duczip)\n\n for filename in z.namelist():\n if filename.endswith('.txt'):\n with z.open(filename) as f:\n yield f", "def list_zip_files(path):\n\n file = ZipFile(path)\n all_files = file.infolist()\n\n return [x.filename for x in all_files]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an enumeration member with a value matching `value`.
def get_member(
    cls,
    value: str,
):
    if not value:
        return None
    members = [
        (member, member.value)
        for member in cls.__members__.values()
    ]
    for member, member_value in members:
        if member_value == value:
            return member
    return None
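A minimal usage sketch, assuming `get_member` is attached to an `enum.Enum` subclass as a classmethod (the `Color` enum below is made up for illustration):

import enum

class Color(enum.Enum):
    RED = "red"
    BLUE = "blue"

    get_member = classmethod(get_member)  # descriptor, so Enum treats it as a method

Color.get_member("red")    # -> Color.RED
Color.get_member("green")  # -> None (no member has that value)
Color.get_member("")       # -> None (falsy values short-circuit)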
[ "def enum_by_value(cls, enum_value):\n d = cls.enum_dict_by_value()\n return d[enum_value]", "def get_enum_by_value(enum_class, value):\n try:\n return enum_class(value)\n except ValueError:\n return None", "def get_value(name, value):\n try:\n return get_enum(name)[value]\n except KeyError:\n valid_values = \", \".join(v.name for v in get_enum(name))\n raise ValueError(\n f\"Value '{value}' is not valid for a {name}-enumeration. Valid values are {valid_values}.\"\n ) from None", "def Enum(enum, value, default=None):\n if value is None:\n return default\n\n for pair in enum:\n if pair.value == value:\n return pair\n\n raise KeyError(\"Value '{}' not contained in enum type\".format(value))", "def lookupByValue(cls, value):\n for constant in cls.iterconstants():\n if constant.value == value:\n return constant\n raise ValueError(value)", "def test_get_enum_by_value():\n assert BusinessType.get_enum_by_value('CP') == BusinessType.COOPERATIVE\n assert BusinessType.get_enum_by_value('FM') == BusinessType.PARTNERSHIP_AND_SOLE_PROP\n assert BusinessType.get_enum_by_value('NOT_FOUND') is None", "def __getitem__(self, key):\n try:\n if utils.is_str(key):\n key = utils.force_name_case(key)\n return next(enum for enum in self if enum.name == key)\n else:\n return self._enums[key]\n except (StopIteration, TypeError, KeyError, IndexError):\n raise KeyError(\"There is no enum with the name/index '%s' in the '%s' bitfield!\" % (key, self.name))", "def from_value(cls, value):\n value = value if value else 0\n try:\n flags = [flag.name for flag in cls.enum_class if flag.value & value]\n except TypeError:\n flags = [flag.name for flag in cls.enum_class if flag.name == value]\n\n return cls(*flags)", "def get_by_value(cls, value):\n for constant in cls.iterconstants():\n if constant.value == value:\n return constant\n\n raise CandvValueNotFoundError(\n \"constant with value \\\"{0}\\\" is not present in \\\"{1}\\\"\"\n .format(value, cls)\n )", "def get_name(self, value: Any) -> str:\n try:\n return self._reverse_values[value].name\n except KeyError:\n raise UnknownEnumValue(\n \"Invalid value %r for enum %s\" % (value, self.name)\n )", "def member_status(value):\n for status in models.MEMBER_STATUS:\n if status[0]==value:\n return status[1]\n\n return \"MEMBER STATUS NOT FOUND\"", "def get_enum_value_row(enum_field, enum_value):\n # Translate plural, if given\n enum_field = ENUM_PLURALS_TRANSLATE[enum_field] if enum_field in ENUM_PLURALS_TRANSLATE else enum_field\n return apps.get_model('ahj_app', enum_field).objects.get(Value=enum_value)", "def cast_value_to_enum(attribute: Any, widget_value: str):\n enum_class: MyEnum = attribute.__class__\n return (t for i, t in enumerate(enum_class)\n if t.value == widget_value).__next__()", "def parse_enum(value, enum_name, enum_list):\n \n for enum in enum_list:\n if enum.name == enum_name:\n if value not in enum.values:\n return enum.name + \"[\" + str(value) + \"]\"\n else:\n return enum.values[value]", "def get_by(cls, name, value):\n return cls.query(getattr(cls, name) == value).get()", "def from_value(value):\n result = TokenKind._value_map.get(value, None)\n\n if result is None:\n raise ValueError(f\"Unknown TokenKind: {value:d}\")\n\n return result", "def from_value(value):\r\n result = TokenKind._value_map.get(value, None)\r\n\r\n if result is None:\r\n raise ValueError('Unknown TokenKind: %d' % value)\r\n\r\n return result", "def check_enum(enumerator, value):\n is_valid = False\n for data in enumerator:\n if data == value:\n is_valid = True\n break\n\n if 
is_valid:\n return value\n else:\n my_banner(\"Value must be from enum \" + enumerator +\" Value has been set to N/A\")\n return \"na\"", "def enum_value(cls: Any, e: Any) -> Any:\n if is_enum(e):\n v = e.value\n # Recursively get value of Nested enum.\n if is_enum(v):\n return enum_value(v.__class__, v)\n else:\n return v\n else:\n return cls(e).value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
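For orientation, a minimal, self-contained sketch of how a lookup method like the one in this row could be used on a concrete enum; the `Color` enum, its values, and the `@classmethod` attachment are assumptions made purely for illustration:

import enum

class Color(enum.Enum):
    RED = "red"
    GREEN = "green"

    @classmethod
    def get_member(cls, value: str):
        # Same lookup idea as above: match a member by its value, else None.
        if not value:
            return None
        for member in cls.__members__.values():
            if member.value == value:
                return member
        return None

assert Color.get_member("red") is Color.RED
assert Color.get_member("blue") is None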
Decorator that can be used to return the first item of a callable's `list` return.
def return_first_item(func):
    # Define the wrapper function.
    def wrapper(self, *args, **kwargs):
        # Execute the decorated method with the provided arguments.
        result = func(self, *args, **kwargs)

        # If the function returned a result and that result is a list then
        # return the first item on that list.
        if result and isinstance(result, list):
            result = result[0]

        return result

    return wrapper
[ "def first(func, *args, **kwargs):\n item = next(iter(func(*args, **kwargs)), DEFAULT_SENTINEL)\n if item is DEFAULT_SENTINEL:\n raise IterableEmpty('Iterable did not yield any items')\n\n return item", "def filter_first(func: Callable, data: Iterable[Any]) -> Optional[Any]:\n return (list(filter(func, data)) or [None])[0]", "def first(func, iterable):\n\n for elem in iterable:\n if func(elem):\n return elem", "def _select_first_in_list(lst):\n return lst[0] if isinstance(lst, list) else lst", "def first(items):\n return next(iter(items or []), None)", "def first(items):\r\n return items[0]", "def first(iterable: Iterable):\n return next(iterable, None)", "def first(iterable):\n return iterable.next()", "def get_first(l, default=None):\n if l:\n for item in l:\n return item\n return default", "def first(iterable):\n return next(iter(iterable))", "def first(l):\n return next(iter(l), None)", "def first(items: Iterator) -> Any:\n return next(items, None)", "def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]", "def first(items: t.Iterable[T]) -> t.Optional[T]:\n just_first = tuple(take(1, items))\n return None if len(just_first) == 0 else just_first[0]", "def first(iterable, default=None):\n\n try:\n\n return iterable[0]\n\n except IndexError:\n\n return default\n\n except TypeError:\n\n return next(iterable, default)", "def first(iterable: t.Iterable[T]) -> T:\n return next(iter(iterable))", "def first(self, func: Callable[[T], bool], default=None, raise_exception: bool=True) -> Optional[T]:\n if raise_exception:\n return next(iter(filter(func, self.array)))\n return next(iter(filter(func, self.array)), default)", "def head(xs: Iterable[T]) -> T:\n return next(iter(xs))", "def first(items, default=NotImplemented):\n for item in items:\n return item\n if default is NotImplemented:\n raise TooFewItemsError(\"No items found in sequence.\")\n else:\n return default" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
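A short, hypothetical usage sketch for the `return_first_item` decorator defined in this row; the `Client` class, its `search` method, and the returned strings are invented for illustration:

class Client:
    @return_first_item
    def search(self, term):
        # Pretend this calls an API and returns a list of matches.
        return [f"{term}-match-1", f"{term}-match-2"]

client = Client()
assert client.search("foo") == "foo-match-1"  # the list return collapses to its first item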
Decorator that ensures all ``list`` objects in a method's arguments have the same length
def lists_equal_length(func):
    # Define the wrapper function.
    def wrapper(self, *args, **kwargs):
        # Collect all `list` objects from `args`.
        lists_args = [arg for arg in args if isinstance(arg, list)]
        # Collect all `list` objects from `kwargs`.
        lists_kwargs = [arg for arg in kwargs.values() if isinstance(arg, list)]
        # Concatenate the lists of `list` objects.
        lists = lists_args + lists_kwargs

        # Check whether all the `list` objects have the same length.
        do_have_same_length = len(set(map(len, lists))) == 1

        # Raise an `InvalidArgumentsError` exception if there's a length
        # mismatch.
        if not do_have_same_length:
            msg_fmt = "The argument lists must have the same length."
            raise InvalidArgumentsError(msg_fmt)

        # Simply execute the decorated method with the provided arguments
        # and return the result.
        return func(self, *args, **kwargs)

    return wrapper
[ "def list_check(default_return=nan):\n\n def decorate(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n for i in args:\n if not i and isinstance(i, list) and len(i) == 0:\n return default_return\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorate", "def arrayargs(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Verifica que todos los argumentos son arrays.\"\"\"\n return func(*[array(a) for a in args], **kwargs)\n return wrapper", "def validate_equal_length(*args):\n length = len(args[0])\n if any(len(lst) != length for lst in args):\n raise exceptions.PlotlyError(\"Oops! Your data lists or ndarrays \"\n \"should be the same length.\")", "def __assert_param_consistency(args, argx_list_):\n if util_arg.NO_ASSERTS:\n return\n if len(argx_list_) == 0:\n return True\n argx_flags = [util_iter.isiterable(args[argx]) for argx in argx_list_]\n try:\n assert all([argx_flags[0] == flag for flag in argx_flags]), (\n 'invalid mixing of iterable and scalar inputs')\n except AssertionError as ex:\n print('!!! ASSERTION ERROR IN UTIL_DECOR !!!')\n for argx in argx_list_:\n print('[util_decor] args[%d] = %r' % (argx, args[argx]))\n raise ex", "def _validate_list_of_items(items):\n assert isinstance(items, list) # Asserts that items object is list\n return len(items) # Defines length of item array", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def check_list_length(list1, list2, **kwargs):\n name1 = kwargs.get('name1')\n name2 = kwargs.get('name2')\n if len(list1) != len(list2):\n msg = f'{name1} list is not equal to {name2} list!'\n raise GraphQLError(msg)", "def squareList(lst):\n pass", "def _valueorlistmethod(method):\n\n def wrappedmethod(self, valueorlist, *args, **kwargs):\n try:\n for item in valueorlist:\n break\n except:\n return method(self, [valueorlist], *args, **kwargs)[0]\n return method(self, valueorlist, *args, **kwargs)\n return wrappedmethod", "def _listify_args(args):\n ret_args = []\n for elem in args:\n if elem != None:\n ret_args.append(elem)\n return ret_args", "def listify(arg):\n if not isinstance(arg, list):\n arg = [arg, ]\n return arg", "def assert_equal_size(self, *lists):\n\t\tlsize = len(lists[0])\n\t\tfor l in lists:\n\t\t\tif len(l) != lsize:\n\t\t\t\traise InvalidConfig(\"List wiht unequal length found in config.\")", "def doing_nothing(A: list):\n pass", "def validate_args( method_name, args, kw ):\n import syndicate.ms.api as api\n method = api.get_method( method_name )\n \n arg_len = len(method.argspec.args)\n def_len = 0\n \n if method.argspec.defaults is not None:\n def_len = len(method.argspec.defaults)\n \n if len(args) != arg_len - def_len:\n raise Exception(\"Method '%s' expects %s arguments; got %s (%s)\" % (method_name, arg_len - def_len, len(args), args))\n \n return True", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def list_check(max_length):\n def validate(input_data):\n assert type(input_data) == list\n if len(input_data) <= max_length:\n return input_data\n raise ValidationError(\"failed to satisfy max length of {}\".format(max_length))\n return validate", "def len_list(self) -> int:\n return 1", "def get_arg_list_size(arg: ArgSingle) -> int:\n\t\tif arg.data_type.is_scalar:\n\t\t\treturn -1\n\t\telse:\n\t\t\treturn len(arg.value)", "def test_args_count_equal(args: list, target: 
int) -> bool:\n\n\treturn (args_count(args) == target)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
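A brief, hypothetical illustration of the `lists_equal_length` decorator from this row in action; the `Dataset` class and its `add_columns` method are assumptions made for the example:

class Dataset:
    @lists_equal_length
    def add_columns(self, names, values):
        # Both arguments are lists, so the decorator checks their lengths match.
        return dict(zip(names, values))

ds = Dataset()
assert ds.add_columns(["a", "b"], [1, 2]) == {"a": 1, "b": 2}
# ds.add_columns(["a", "b"], [1, 2, 3]) would raise InvalidArgumentsError.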
Clear the screen and draw the alien.
def draw():
    screen.fill((0, 0, 0))
    alien.draw()
[ "def clear(self):\n self.virt_screen.fill((0, 0, 0))", "def clear(self):\n\n self.screen.fill((255, 255, 255))", "def redraw(self):\n self.main_loop.draw_screen()", "def clear_screen(self):\n self.__screen.fill(self.get_config([\"board_background_color\"]), (0, self.get_config([\"toolbar_y\"]) + 1,\n self.get_config([\"width\"]),\n self.get_config([\"length\"]) - self.get_config(\n [\"toolbar_y\"]) + 1))", "def clear_screen(screen):\n screen.fill([0, 0, 0])", "def draw(self):\r\n self.__screen.draw_asteroid(self, self.__x[0], self.__y[0])", "def draw(self):\r\n self.__screen.draw_asteroid(self, self.__x, self.__y)", "def _drawall( self ):\n self.screen.fill((135,206,250))\n self.sky.draw(self.screen)\n self.player.draw(self.screen)\n for enemy in self.enemies.enemyList:\n enemy.draw( self.screen)\n self.bulletMan.drawall( self.screen)\n pygame.display.flip()", "def dead_draw_background(self):\n self.draw()\n self.dim_screen = pg.Surface(self.screen.get_size()).convert_alpha()\n self.dim_screen.fill((0, 0, 0, 180))\n self.screen.blit(self.dim_screen, (0, 0))", "def clear(self):\n self._write_command(self.LCD_CLEARDISPLAY)\n self._write_command(self.LCD_RETURNHOME)\n self._x = self._y = 0", "def draw_screen(self):\n\t\tself.current_screen.draw_screen(self.master_screen)", "def _blank_screen(self):\n self._screen.fill(self._bgcolor)\n pygame.display.update()", "def redraw(self):\n self.undraw()\n self.draw()", "def draw_screen(self, screen_manager):\n\t\tscreen_manager.draw_screen()", "def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)", "def draw_elements(self):\n self.screen.keypad(True)\n\n self.screen.erase()\n self.screen_height, self.screen_width = self.screen.getmaxyx()\n\n self.title_bar = self.screen.subwin(3, self.screen_width, 0, 0)\n\n self.main_output = self.screen.subwin(self.screen_height - 7, self.screen_width, 3, 0)\n self.main_input = self.screen.subwin(4, self.screen_width, self.screen_height - 4, 0)\n\n if self.title:\n self.set_title(self.title)", "def clearScreen():\r\n\t#import OS library functions\r\n\timport os\r\n\t\r\n\t#now call OS function to clear the screen\r\n\tos.system('cls')", "def clear_buffer(self):\n pyray.begin_drawing()\n pyray.clear_background(pyray.BLACK)\n if self._debug == True:\n self._draw_grid()", "def _draw(self):\n display.draw_maze(self._screen, self._maze, self._settings)\n pygame.display.flip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move the alien around using the keyboard.
def update():
    if keyboard.left:
        alien.x -= 2
    elif keyboard.right:
        alien.x += 2
    if keyboard.space:
        alien.y = GROUND - 50
        animate(alien, y=GROUND, tween='bounce_end', duration=.5)

    # If the alien is off the screen,
    # move it back on screen
    if alien.right > WIDTH:
        alien.right = WIDTH
    elif alien.left < 0:
        alien.left = 0
[ "def joystick_move(self, emphasis=1):\n step = int(20*emphasis)\n self.display.ship.move_vertical(step=step)", "def joy_callback(self, msg):\n mappings = gamepad_mappings.set_gamepad_mappings(msg)\n self.move_vertical = mappings[\"button_vertical\"] # up: +1.0, down: -1.0\n self.move_horizontal = mappings[\"button_horizontal\"] # left: +1.0, right: -1.0", "def make_ai_move(self):\n col = self.ai.find_legal_move(self, self.make_ai_func())\n self.gameGui.simulate_press_by_ai(col)", "def keyboard(key, x, y):\n global actions\n global mesg1\n \n if key == chr(27) or key == \"q\":\n sys.exit()\n if key in [\"F\", \"f\", \"B\", \"b\", \"R\", \"r\", \"U\", \"u\", \"L\", \"l\", \"D\", \"d\" ]:\n actions = actions + [key]\n if key == \"a\":\n actions = actions + kb.rand_move(20)\n mesg1 = \"randomly moving....\"\n if key == \"s\":\n state = kb.move_list_to_state(actions)\n seq = kb.solve(state)\n actions = actions + seq\n mesg1 = \"I found the solution!...\"", "def East(self):\n self.xpos += 1\n self.runcommands()", "def North(self):\n self.ypos -= 1\n self.runcommands()", "def move_and_sew():\r\n pass", "def MoveCurrentSpace(self):\n if self.facing == 0:\n self.y -= 1\n elif self.facing == 1:\n self.x += 1\n elif self.facing == 2:\n self.y += 1\n elif self.facing == 3:\n self.x -= 1", "def move_east(self):\r\n self.move(dx=1, dy=0)", "def move (self, amt_x=0, amt_y=0):\n self.x += amt_x\n self.y += amt_y", "def move_north(self):\r\n self.move(dx=0, dy=-1)", "def _movePaddle(self):\n self._click()\n self._game.updatePaddle(self._touch)\n self._last = self._touch", "def move(self):\n if not self.in_board():\n raise InvalidMove('Not initialised')\n if self.facing == NORTH:\n if self.pos_y == MAX_Y:\n raise InvalidMove()\n self.pos_y += 1\n elif self.facing == SOUTH:\n if self.pos_y == 0:\n raise InvalidMove()\n self.pos_y -= 1\n elif self.facing == EAST:\n if self.pos_x == MAX_X:\n raise InvalidMove()\n self.pos_x += 1\n else:\n if self.pos_x == 0:\n raise InvalidMove()\n self.pos_x -= 1", "def move_foward(self):\n\n\t\tif self.direction == \"N\":\n\t\t\tself.y += 1\n\n\t\telif self.direction == \"E\":\n\t\t\tself.x += 1\n\n\t\telif self.direction == 'S':\n\t\t\tself.y -= 1\n\n\t\telse:\n\t\t\tself.x -= 1", "def check_keys(self):\n if self.holding_left:\n self.paddle.move_down()\n\n if self.holding_right:\n self.paddle.move_up()", "def PenMoveCenter():\n pass", "def move_player(self, pressed_keys):\n # Arrow-key movement\n if pressed_keys[K_UP]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_DOWN]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_LEFT]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_RIGHT]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n # WASD movement\n if pressed_keys[K_w]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_s]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_a]:\n 
self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_d]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n #Boundary\n if self.player.rect.left < 0:\n self.player.rect.left = 0\n if self.player.rect.right > self.board.screen_width:\n self.player.rect.right = self.board.screen_width\n if self.player.rect.top <= 0:\n self.player.rect.top = 0\n if self.player.rect.bottom >= self.board.screen_height:\n self.player.rect.bottom = self.board.screen_height", "def updatePaddle(self, selfinput):\n assert isinstance(selfinput,GInput)\n position = 0\n \n if selfinput.is_key_down('right'):\n position = 5\n if selfinput.is_key_down('left'):\n position = -5\n \n self._paddle.move(position)", "def move_tie_fighter(self):\n self.position_x += self.speed_x\n self.position_y += self.speed_y" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
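The `draw` and `update` handlers in the two rows above lean on Pygame Zero globals that are not part of either row; a rough sketch of the assumed scaffolding (the sprite name, window size, and GROUND value are guesses for illustration only):

# Run with: pgzrun alien_game.py
WIDTH = 800     # window width used by the boundary checks in update()
HEIGHT = 600
GROUND = 500    # y-coordinate the alien falls back to after a jump

alien = Actor('alien')            # Pygame Zero loads images/alien.png
alien.pos = (WIDTH // 2, GROUND)  # start the alien on the ground, mid-screen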
Test the Categorical feature class.
def test_categorical_feature():
    feature = Categorical("abc")

    for element in "abc":
        feature.set(element)
        feature.set("ignore this")
        feature.push()

    for element in "abc":
        getattr(feature, "set_" + element)()
        feature.push()

    array = feature.array()
    assert array.shape == (6, 3)
    for i, row in enumerate(array):
        assert sum(row) == 1.0 and row[i % 3] == 1.0
[ "def test_get_categorical_features_helper(self):\n (\n series,\n past_covariates,\n future_covariates,\n ) = self.inputs_for_tests_categorical_covariates\n (\n indices,\n column_names,\n ) = self.lgbm_w_categorical_covariates._get_categorical_features(\n series=series,\n past_covariates=past_covariates,\n future_covariates=future_covariates,\n )\n assert indices == [2, 3, 5]\n assert column_names == [\n \"past_cov_past_cov_cat_dummy_lag-1\",\n \"fut_cov_fut_cov_promo_mechanism_lag1\",\n \"product_id\",\n ]", "def test_categorical():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384", "def check_categorical(categories=['a', 'b', 'c', 'd']):\n check_low, check_high = check_dimension(categories)\n hyper_cat = HyperCategorical(categories)\n hyper_low, hyper_high = hyper_cat.get_hyperspace()\n\n assert_equal(check_low, hyper_low)\n assert_equal(check_high, hyper_high)", "def is_categorical(self):\n return(self.attribute_type == 'categorical')", "def test_classifier_alias(self):\n instance = LogisticRegression()\n self.assertEqual(is_classifier(instance), isclassifier(instance))", "def test_category(self):\n\n # Test empty categories\n self.assertFalse(self.colorspace.hasCategory('ocio'))\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n with self.assertRaises(IndexError):\n self.colorspace.getCategories()[0]\n\n # Test with defined TEST_CATEGORIES.\n for i, y in enumerate(TEST_CATEGORIES):\n self.assertEqual(len(self.colorspace.getCategories()), i)\n self.colorspace.addCategory(y)\n self.assertTrue(self.colorspace.hasCategory(y))\n\n # Test the output list is equal to TEST_CATEGORIES.\n self.assertListEqual(\n list(self.colorspace.getCategories()), TEST_CATEGORIES)\n\n # Test the length of list is equal to the length of TEST_CATEGORIES.\n self.assertEqual(len(self.colorspace.getCategories()),\n len(TEST_CATEGORIES))\n\n iterator = self.colorspace.getCategories()\n for a in TEST_CATEGORIES:\n self.assertEqual(a, next(iterator))\n\n # Test the length of categories is zero after clearCategories()\n self.colorspace.clearCategories()\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n\n # Testing individually adding and removing a category.\n self.colorspace.addCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 1)\n self.colorspace.removeCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 0)", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def is_categorical(x: Any) -> bool:\n return is_categorical_(x)", "def test_categorical_predicting(self):\n W = np.asarray([[4, -1, 2, 3], [0, 1, -2, 3], [-3, 0, 1, -2]],\n dtype=np.float32)\n b = np.asarray([1, -2, 3, 0], dtype=np.float32)\n X = np.asarray([[1, 2, -1], [-0.5, 1, 1], [0.5, 0, -1], [-2, 1, 0]],\n dtype=np.float32)\n y = np.asarray([3, 2, 0, 1], dtype=np.int32)\n lr = LogisticRegression(multi_class='multinomial', solver='lbfgs')\n 
self._do_test_predicting(4, lr, W, b, X, y)", "def test_unseen_categorical_feature():\n headers = [\"predictor 1\", \"predictor 2\", \"predictor3\", \"response\"]\n response = \"response\"\n random_features = ['H', 5.5, 'I']\n train_set = [[random_features[i%3],'B',1,'C'] for i in range(1,50)] + [['A','A',-1,'D'] for i in range(1,50)]\n train_set_copy = copy.copy(train_set)\n new_features = ['J',9]\n test_set = [[new_features[i%2],'B',1,'C'] for i in range(1,49)] + [['A','A',-1,'D'] for i in range(1,50)]\n all_data = train_set + test_set\n\n factory = ModelFactory(all_data, headers, response, name_prefix=\"test\")\n model = factory.build(train_set)\n print \"factory builds ModelVisitor? -- \", isinstance(model, ModelVisitor)\n\n predictions = model.test(test_set)\n print \"handling unseen categorical values correctly? -- \", all([pred[0] == pred[1] for pred in predictions])", "def data_categorical(df, cat_features = [], cont_features = []):\n subset_cat = []\n subset_dict={}\n # Add all the object type features to config.cat_features \n for col in df.columns:\n if df[col].dtype == 'object' and col not in cont_features:\n subset_cat.append(col)\n if col not in cat_features :\n cat_features.append(col)\n if cat_features !=[]:\n print('Categorical features : ', ' '.join(cat_features))\n printmd('**Number of unique values for every feature:**')\n print(pd.DataFrame(df[cat_features].nunique(), columns = ['Unique values']).sort_values(by = 'Unique values', ascending=False))\n printmd(\"**5 uniques samples of every Categorical Features :**\")\n for col in cat_features :\n subset_dict[col]= df[col].unique()[:5]\n print(pd.DataFrame.from_dict(subset_dict, orient='index').transpose())\n return (cat_features)", "def test_compare_categories_categorical_variables(self):\r\n for method in self.cat_methods:\r\n compare_categories(self.dm1_fp, self.map1_fp, method,\r\n self.cat_categories, self.num_perms, self.test_dir)\r\n results_fp = join(self.test_dir, '%s_results.txt' % method)\r\n self.files_to_remove.append(results_fp)\r\n results_f = open(results_fp, 'U')\r\n results = results_f.readlines()\r\n results_f.close()\r\n\r\n # Make sure the files aren't empty.\r\n self.assertTrue(len(results) > 0)", "def test_lgbm_categorical_features_passed_to_fit_correctly(self, lgb_fit_patch):\n (\n series,\n past_covariates,\n future_covariates,\n ) = self.inputs_for_tests_categorical_covariates\n self.lgbm_w_categorical_covariates.fit(\n series=series,\n past_covariates=past_covariates,\n future_covariates=future_covariates,\n )\n\n # Check that mocked super.fit() method was called with correct categorical_feature argument\n args, kwargs = lgb_fit_patch.call_args\n (\n cat_param_name,\n cat_param_default,\n ) = self.lgbm_w_categorical_covariates._categorical_fit_param\n assert kwargs[cat_param_name] == [2, 3, 5]", "def test_classifier_class(self):\n\n # Test classifiers are identified correctly\n classifiers = (\n RandomForestClassifier,\n LogisticRegression,\n )\n\n for klass in classifiers:\n self.assertTrue(inspect.isclass(klass))\n self.assertTrue(is_classifier(klass))\n\n # Test that non-regressors are identified correctly\n notclassifiers = (\n KMeans,\n PCA,\n LSHForest,\n RidgeCV,\n LassoCV,\n LinearRegression,\n )\n\n for klass in notclassifiers:\n self.assertTrue(inspect.isclass(klass))\n self.assertFalse(is_classifier(klass))", "def _check_category(self):\n if self.category_num < 0:\n raise ValueError(\"category_num should be more than 0\")\n\n for var in self.cat_features:\n value_counts = 
len(np.unique(self.X[:, var]))\n if value_counts > self.category_num:\n self.more_value_features.append(var)\n else:\n self.less_value_features.append(var)", "def add_cat_feature(self, operators):\n\n self.op_default_check(operators, \"categorical\")\n if operators:\n self.add_feature(operators)", "def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )", "def test_classify_cuisine(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the Hashed feature class.
def test_hashed_feature():
    def mock(c):
        return ord(c) - ord('a')

    group = Group({"a": Hashed(buckets=3, hash=mock),
                   "b": Hashed(buckets=5, hash=mock)})

    for i in range(10):
        group.set_a("abcde"[i % 3])
        group.set_b("abcde"[i % 5])
        group.push()

    array = group.array()
    assert array.shape == (10, 8)
    for i, row in enumerate(array):
        for column, value in zip(array.columns, row):
            feature, index = column.split("_")
            if feature == "a":
                assert value == float((i % 3) == int(index))
            else:
                assert value == float((i % 5) == int(index))
[ "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def testHasherSetupSingle(self):\r\n hasher_ = Hasher('md5')\r\n for algname in hashpy.algorithms:\r\n if algname == 'md5':\r\n self.assertTrue(algname in hasher_.algorithms)\r\n else:\r\n self.assertFalse(algname in hasher_.algorithms)", "def test_creating_simple_feature():\n # given & when\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n\n # then\n assert feature.id == 1\n assert feature.keyword == \"Feature\"\n assert feature.sentence == \"I am a feature\"\n assert feature.path == \"foo.feature\"\n assert feature.line == 1\n assert feature.tags == []", "def test_single_feature_label():\n pass", "def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')", "def test_hashalg_sha1(self):\n self.check_hash_algorithm(util.my_name(), 'sha1')", "def test_feature_a5(unleash_client):\n # Set up API\n responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202)\n responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200)\n responses.add(responses.POST, URL + METRICS_URL, json={}, status=202)\n\n # Tests\n unleash_client.initialize_client()\n assert unleash_client.is_enabled(\"Feature.A5\")", "def test_using_hash(self):\n\n self.assertTrue(using_hash(self.empty_string))\n self.assertTrue(using_hash(self.unique_string))\n self.assertFalse(using_hash(self.common_string))", "def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))", "def test_features(boston):\n assert boston.num_features == 13\n assert boston.feature_names == [\n \"CRIM\",\n \"ZN\",\n \"INDUS\",\n \"CHAS\",\n \"NOX\",\n \"RM\",\n \"AGE\",\n \"DIS\",\n \"RAD\",\n \"TAX\",\n \"PTRATIO\",\n \"B\",\n \"LSTAT\",\n ]", "def test_serialization(self):\n for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:\n self.do_test_serialization(hashtype)", 
"def test_is_active_of_homework_positive():\n assert oop_hw.is_active()", "def test_feature_b5(unleash_client):\n # Set up API\n responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202)\n responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200)\n responses.add(responses.POST, URL + METRICS_URL, json={}, status=202)\n\n # Tests\n unleash_client.initialize_client()\n assert not unleash_client.is_enabled(\"Feature.B5\")", "def test_feature_c5(unleash_client):\n # Set up API\n responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202)\n responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200)\n responses.add(responses.POST, URL + METRICS_URL, json={}, status=202)\n\n # Tests\n unleash_client.initialize_client()\n assert unleash_client.is_enabled(\"Feature.C5\")", "def test_features():\n with patch(\"salt.utils.fsutils._verify_run\", MagicMock(return_value=True)):\n mock = MagicMock(return_value={\"retcode\": 1, \"stderr\": \"\", \"stdout\": \"Salt\"})\n with patch.dict(btrfs.__salt__, {\"cmd.run_all\": mock}):\n assert btrfs.features() == {}", "def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]", "def test_hash_blake2b(self):\n pass", "def test_call():\n algorithm_component = _algorithm_component()\n algorithm_obj = algorithm_component()\n assert isinstance(algorithm_obj, MockEstimator)\n assert hasattr(algorithm_obj, \"hyperparameter1\")\n assert hasattr(algorithm_obj, \"hyperparameter2\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if array concatenation works.
def test_array_concat():
    array = Array(columns="abc")
    for i in range(10):
        array.append([1, 2, 3])

    # Any 2-dimensional array with the same number of rows should work.
    other = [[4, 5, 6]] * len(array)
    array.concat(other)
    assert array.shape == (10, 6)
    assert len(array.columns) == 6
    assert all(type(column) is str for column in array.columns)
    for row in array:
        assert tuple(row) == (1, 2, 3, 4, 5, 6)

    # Now this should fail since the columns have the same names.
    other = Array(columns="abc")
    for i in range(10):
        other.append([7, 8, 9])
    assert_raises(ValueError, array.concat, other)

    # Adding a prefix should make it work.
    array.concat(other, prefix="other")
    assert array.shape == (10, 9)
    assert len(array.columns) == 9
    for row in array:
        assert tuple(row) == (1, 2, 3, 4, 5, 6, 7, 8, 9)
[ "def main():\n assert concat([1, 2, 3], [4, 5], [6, 7]) == [1, 2, 3, 4, 5, 6, 7]\n assert concat([1], [2], [3], [4], [5], [6], [7]) == [1, 2, 3, 4, 5, 6, 7]\n assert concat([1, 2], [3, 4]) == [1, 2, 3, 4]\n assert concat([4, 4, 4, 4, 4]) == [4, 4, 4, 4, 4]\n assert concat(['a'], ['b', 'c']) == ['a', 'b', 'c']\n print('Passed.')", "def test_concat_mixed_types(key, arrays, expected, join):\n axis = 0 if key == \"obsm\" else 1\n\n to_concat = []\n cell_id, gene_id = 0, 0\n for a in arrays:\n shape = np.array([3, 3]) # default shape (in case of missing array)\n if a is not None:\n length = dim_len(a, 0)\n shape[axis] = length\n\n tmp_adata = gen_adata(\n tuple(shape), varm_types=(), obsm_types=(), layers_types=()\n )\n prev_cell_id, prev_gene_id = cell_id, gene_id\n cell_id, gene_id = cell_id + shape[0], gene_id + shape[1]\n tmp_adata.obs_names = pd.RangeIndex(prev_cell_id, cell_id).astype(str)\n tmp_adata.var_names = pd.RangeIndex(prev_gene_id, gene_id).astype(str)\n if a is not None:\n if isinstance(a, pd.DataFrame):\n a.set_index(\n tmp_adata.obs_names if key == \"obsm\" else tmp_adata.var_names,\n inplace=True,\n )\n getattr(tmp_adata, key)[\"test\"] = a\n\n to_concat.append(tmp_adata)\n\n if isinstance(expected, type) and issubclass(expected, Exception):\n with pytest.raises(expected):\n anndata.concat(to_concat, axis=axis, join=join)\n else:\n print(to_concat)\n result_adata = anndata.concat(to_concat, axis=axis, join=join)\n result = getattr(result_adata, key).get(\"test\", None)\n assert_equal(expected, result, exact=True)", "def concat(self):\n return True", "def concat_arr( *arrays ):\n # URL , Test if any in an iterable belongs to a certain class : https://stackoverflow.com/a/16705879\n if any( isinstance( arr , np.ndarray ) for arr in arrays ): # If any of the 'arrays' are Numpy , work for all cases , \n if len( arrays ) == 2: # Base case 1 , simple concat # but always returns np.ndarray\n return np.concatenate( ( arrays[0] , arrays[1] ) )\n elif len( arrays ) > 2: # If there are more than 2 , concat the first two and recur\n return concat_arr( \n np.concatenate( ( arrays[0] , arrays[1] ) ) , \n *arrays[2:] \n )\n else: # Base case 2 , there is only one arg , return it\n return arrays[0]\n if len( arrays ) > 1: # else no 'arrays' are Numpy \n rtnArr = arrays[0]\n for arr in arrays[1:]: # If there are more than one , just use addition operator in a line\n rtnArr += arr\n return rtnArr\n else: # else there was only one , return it\n return arrays[0]", "def test_pad_and_concatenate_with_1d(self):\n array1 = 1.0\n array2 = 2.0\n result = numpy_pad_and_concatenate(array1, array2)\n self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))\n\n tensor1 = torch.tensor(1.0)\n tensor2 = torch.tensor(2.0)\n result = torch_pad_and_concatenate(tensor1, tensor2)\n self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))", "def add_mismatched_arrays(array1, array2, truncate=False):\n # Cast these arrays to the largest common type\n array1 = np.array(array1, dtype=np.promote_types(array1.dtype, array2.dtype))\n array2 = np.array(array2, dtype=np.promote_types(array1.dtype, array2.dtype))\n\n # TODO: find a more elegant way to do this whole function\n\n if truncate:\n if len(array1) < len(array2):\n result = array1.copy()\n result += array2[:len(array1)]\n else:\n result = array2.copy()\n result += array1[:len(array2)]\n else:\n if len(array1) < len(array2):\n result = array2.copy()\n result[:len(array1)] += array1\n else:\n result = array1.copy()\n result[:len(array2)] += array2\n\n 
return result", "def test_concatenate_quaternions():\n # Until ea9adc5, this combination of a list and a numpy array raised\n # a ValueError:\n q1 = [1, 0, 0, 0]\n q2 = np.array([0, 0, 0, 1])\n q12 = pr.concatenate_quaternions(q1, q2)\n assert_array_almost_equal(q12, np.array([0, 0, 0, 1]))\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n q1 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n q2 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n\n R1 = pr.matrix_from_quaternion(q1)\n R2 = pr.matrix_from_quaternion(q2)\n\n q12 = pr.concatenate_quaternions(q1, q2)\n R12 = np.dot(R1, R2)\n q12R = pr.quaternion_from_matrix(R12)\n\n pr.assert_quaternion_equal(q12, q12R)", "def test_concat_use2() -> None:\n list_1: list[int] = [-1, -2, -3]\n list2: list[int] = [-4, -5, -6]\n assert concat(list_1, list2) == [-1, -2, -3, -4, -5, -6]", "def __array_append(self, in_a,in_b):\n in_b = np.array([in_b]) if isinstance(in_b,(int,float,long,complex)) else in_b\n return np.concatenate((in_a,in_b))", "def test_atom_collection_concatenate():\n atoms0 = [\n Atom(position=np.array([0.0, 0.0, 0.0])),\n Atom(position=np.array([1.0, 1.0, 1.0]))\n ]\n atoms1 = [\n Atom(position=np.array([0.0, 0.0, 0.0])),\n Atom(position=np.array([2.0, 2.0, 2.0]))\n ]\n collection0 = AtomCollection(atoms0)\n collection1 = AtomCollection(atoms1)\n with pytest.raises(ValueError):\n collection0.concatenate(collection1)\n offset = np.array([2.0, 2.0, 2.0])\n collection0.concatenate(collection1, offset)\n assert collection0.n_atoms == 4\n assert np.array_equal(collection1.atoms[0].position, np.array([0, 0, 0]))", "def add_arrays(arr1, arr2):\n add = []\n if len(arr1) == len(arr2):\n for i in range(len(arr1)):\n add.append(arr1[i] + arr2[i])\n return add\n else:\n return None", "def testConcatSourceMultipleButOneConcatable(self):\n env = self.env\n\n # Even if multiple input files, if only one is concat-able, won't concat.\n cs = env.ConcatSource('foo3.cc', ['a.cc', 'd.o'])\n self.assertEqual(map(str, cs), ['d.o', 'a.cc'])", "def _concatenate_virtual_arrays(arrs, cols=None, scaling=None):\n return None if not len(arrs) else ConcatenatedArrays(arrs, cols, scaling=scaling)", "def testConcatenation(self):\n\n ensemble = PDBENSEMBLE + PDBENSEMBLE\n assert_equal(ensemble.getCoordsets(arange(3)),\n PDBENSEMBLE.getCoordsets(),\n 'concatenation failed')\n assert_equal(ensemble.getCoordsets(arange(3,6)),\n PDBENSEMBLE.getCoordsets(),\n 'concatenation failed')\n assert_equal(ensemble.getCoords(), COORDS,\n 'concatenation failed')\n assert_equal(ensemble.getWeights()[arange(3)],\n PDBENSEMBLE.getWeights(),\n 'concatenation failed')\n assert_equal(ensemble.getWeights()[arange(3,6)],\n PDBENSEMBLE.getWeights(),\n 'concatenation failed')", "def test_op_add_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, 
old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_concat(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"a\", \"b\"],\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"a\", \"b\", \"c\", \"d\"],\n ),\n Case(\n description=\"missing argument\",\n val=[\"a\", \"b\"],\n args=[],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"too many arguments\",\n val=[\"a\", \"b\"],\n args=[[\"c\", \"d\"], \"\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"arguments not a list\",\n val=[\"a\", \"b\"],\n args=[5],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"not an array\",\n val=\"a, b\",\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"array contains non string\",\n val=[\"a\", \"b\", 5],\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"a\", \"b\", 5, \"c\", \"d\"],\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"c\", \"d\"],\n ),\n Case(\n description=\"undefined argument\",\n val=[\"a\", \"b\"],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=FilterArgumentError,\n ),\n ]\n\n self._test(Concat, test_cases)", "def test_concat_keys(self):\n keys_to_concat = [[123, 456], [789, 987], [654, 321]]\n correct_answer = [528280977864, 3388729197531, 2808908611905]\n self.assertEqual(helper.concat_keys(keys_to_concat), correct_answer)", "def test_append_concat_filter(self):\n test_data = (\n (\"a\", \"audio\"),\n (\"v\", \"video\"),\n (\"f\", \"frame (should be ignored)\"),\n )\n expected = []\n for frame_type, description in test_data:\n for num_segments in range(0, 3):\n segments = None\n if frame_type == \"a\":\n segments = num_segments * [AudioSegment(\n file=\"file.in\", punch_in=timedelta(),\n punch_out=timedelta(seconds=20), input_stream=0)]\n elif frame_type == \"v\":\n segments = num_segments * [VideoSegment(\n file=\"file.in\", punch_in=timedelta(),\n punch_out=timedelta(seconds=20), input_stream=0)]\n elif frame_type == \"f\":\n # Frame segments should be ignored by\n # append_concat_filter() regardless.\n segments = []\n else:\n raise TypeError\n self.command.append_concat_filter(\n frame_type=frame_type, segments=segments)\n if frame_type not in [\"a\", \"v\"]:\n # Ignore frame segments.\n pass\n elif num_segments > 1:\n expected.append(\n \"{inspecs} concat=n={n}:v={v}:a={a} [{t}conc]\".format(\n inspecs=\" \".join([s.output_stream_specifier()\n for s in segments]),\n n=num_segments, v=int(frame_type == \"v\"),\n a=int(frame_type == \"a\"), t=frame_type)\n )\n elif num_segments == 1:\n expected.append(\n \"{inspec} {a}null [{t}conc]\".format(\n inspec=segments[0].output_stream_specifier(),\n a=frame_type if frame_type == \"a\" else \"\",\n t=frame_type)\n )\n with self.subTest(\n msg=\"{d}: {n}\".format(d=description, n=num_segments)):\n self.assertEqual(self.command.filters, expected)", "def hpat_arrays_append_overload(A, B):\n\n use_A_array = isinstance(A, (RangeIndexType, Int64IndexType))\n use_B_array = isinstance(B, (RangeIndexType, Int64IndexType))\n if isinstance(A, (types.Array, RangeIndexType, Int64IndexType)):\n if isinstance(B, (types.Array, RangeIndexType, Int64IndexType)):\n def _append_single_numeric_impl(A, B):\n _A = A.values if use_A_array == True else A # noqa\n _B = B.values if use_B_array == True else B # noqa\n return 
numpy.concatenate((_A, _B,))\n\n return _append_single_numeric_impl\n\n elif (isinstance(B, (types.UniTuple, types.List))\n and isinstance(B.dtype, (types.Array, RangeIndexType, Int64IndexType))):\n B_dtype_is_index = isinstance(B.dtype, (RangeIndexType, Int64IndexType))\n numba_common_dtype = find_common_dtype_from_numpy_dtypes([A.dtype, B.dtype.dtype], [])\n\n # TODO: refactor to use numpy.concatenate when Numba supports building a tuple at runtime\n def _append_list_numeric_impl(A, B):\n\n total_length = len(A) + numpy.array([len(arr) for arr in B]).sum()\n new_data = numpy.empty(total_length, numba_common_dtype)\n\n stop = len(A)\n _A = numpy.array(A) if use_A_array == True else A # noqa\n new_data[:stop] = _A\n for arr in B:\n _arr = arr.values if B_dtype_is_index == True else arr # noqa\n start = stop\n stop = start + len(_arr)\n new_data[start:stop] = _arr\n return new_data\n\n return _append_list_numeric_impl\n\n elif A == string_array_type:\n if B == string_array_type:\n def _append_single_string_array_impl(A, B):\n total_size = len(A) + len(B)\n total_chars = num_total_chars(A) + num_total_chars(B)\n new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)\n\n pos = 0\n pos += append_string_array_to(new_data, pos, A)\n pos += append_string_array_to(new_data, pos, B)\n\n return new_data\n\n return _append_single_string_array_impl\n elif (isinstance(B, (types.UniTuple, types.List)) and B.dtype == string_array_type):\n def _append_list_string_array_impl(A, B):\n array_list = [A] + list(B)\n total_size = numpy.array([len(arr) for arr in array_list]).sum()\n total_chars = numpy.array([num_total_chars(arr) for arr in array_list]).sum()\n\n new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)\n\n pos = 0\n pos += append_string_array_to(new_data, pos, A)\n for arr in B:\n pos += append_string_array_to(new_data, pos, arr)\n\n return new_data\n\n return _append_list_string_array_impl" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
K-Nearest Neighbors classifier. Return the most frequent class among the k nearest points.
def knn(p, k, x, t):
    # Number of instances in the data set
    N = x.shape[0]

    Euclidean_Distance = numpy.square(x - p)     # squared differences per feature
    dis = numpy.sum(Euclidean_Distance, axis=1)  # squared Euclidean distance to each training point
    inds = numpy.argsort(dis)[:k]                # indices of the k nearest points
    tgt_cat = Counter([t[i] for i in inds])      # count the target labels of the k nearest points
    top_class = max(tgt_cat, key=tgt_cat.get)    # most frequent class among the k nearest points
    return top_class
[ "def knn_classify_point(point, data, k, distance_metric):\n k_closest_points = get_k_closest_points(point, data, k, distance_metric)\n classification_counts = {}\n for item in k_closest_points:\n classification_type = item.classification\n if classification_type not in classification_counts:\n classification_counts[classification_type] = 0\n else:\n classification_counts[classification_type] += 1\n classification_counts = sorted(classification_counts, key = classification_counts.get)\n return classification_counts[-1]", "def knn_classifier(k, labeled_points, new_point):\n \n def new_point_distance(labeled_point):\n \"\"\" helper function to calculate distance between a (point, label)\n tuple and new_point \"\"\"\n # get the location from the labeled_point\n point = labeled_point[0]\n # return distance\n return Ch4.distance(point, new_point)\n\n # order the points from nearest to farthest\n by_distance = sorted(labeled_points, key = new_point_distance)\n\n # get the labels for the k closest point\n k_nearest_labels = [label for _, label in by_distance[:k]]\n\n # now we call majority vote to get a unique nearest label\n return majority_label(k_nearest_labels)", "def k_nn(frame, newPoint, colClass, k): \n counts = []\n \n # find all distances wrt the newPoint\n dist = find_distances(frame, newPoint)\n\n # find the nearest k points, extract their labels and save them in a list\n labels = [label for distance,label in dist[:k]] \n \n # for each class label, count how many occurrencies have been found\n for label in frame[colClass].unique():\n # save the number of occurrencies in a list of tuples (number, label)\n counts.append((labels.count(label), label)) \n \n # sort the list in descending order, and use the first label of the tuples'\n # list to make the prediction \n counts.sort(reverse=True)\n prediction = counts[0][1] \n \n return prediction", "def classify(self, point, k=3):\n\n # compute distance to all training points\n dist = np.array([L2dist(point, s) for s in self.samples])\n\n # sort\n ndx = dist.argsort()\n\n # use dict to store nearest k points\n votes = {}\n for i in range(k):\n label = self.labels[ndx[i]]\n votes.setdefault(label, 0)\n votes[label] += 1\n\n return np.max(votes)", "def knn(X, z, k):\n def predict(x):\n # TODO: Calculate the distance of x to every point in the training set\n # X.\n\n # TODO: Pick the k points with the lowest distance.\n\n # TODO: Do a majority vote and return the class as an integer.\n pass\n\n return predict", "def knn_classification_numeric(training_set, test_set, k):\n test_set['predicted_class'] = np.nan\n for i in range(0, len(test_set)):\n training_set_np = np.array(training_set.loc[:, training_set.columns != 'class']).astype(float)\n test_set_np = np.array(test_set.drop(['class', 'predicted_class'], axis = 1).loc[i, :]).astype(float)\n sum_sqaured = euclidean_distance(training_set_np, test_set_np)\n label_df = training_set['class']\n\n # find the k neighbors based on the sum of squared distance\n neighbor_df = pd.DataFrame(pd.concat([label_df, sum_sqaured], axis = 1).nsmallest(k, 'sum_difference').groupby(['class'])['class'].count()).rename(columns = {'class': '# observations'}).reset_index()\n\n test_set.loc[i, 'predicted_class'] = neighbor_df.sort_values(by = '# observations', ascending = False).reset_index(drop = True).loc[0, 'class']\n\n \n return len(test_set[test_set['predicted_class'] != test_set['class']]) / len(test_set['class'])", "def k_nearest_neighbors(X, X_train, k):\n return np.argsort(euclidean_distance(X, X_train))[:k]", 
"def predict(X_test, X_train, y_train, k):\n y_preds = []\n for X in X_test: # For each test vector\n knn_indexes = k_nearest_neighbors(X, X_train, k) # Get k indexes for k nearest neighbors\n knn_classes = y_train[knn_indexes] # Get k corresponding classes of neighbors\n counts = np.bincount(knn_classes) # Count the number of labels in labels\n max_count = np.argmax(counts) # Get the maximum occurring label\n y_preds.append(max_count) # Append the maximum occurring label as prediction\n return np.array(y_preds)", "def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)", "def classify(df, example_row, k):\n new_k_df=find_k_closest(df,example_row,k)\n majority=new_k_df['class'].max()\n return majority", "def knn_classification_categorical(training_set, test_set, k):\n test_set['predicted_class'] = np.nan\n for i in range(0, len(test_set)):\n total_distance = {}\n label_df = training_set['class']\n for j in range(len(training_set)):\n distance = []\n for col in training_set.loc[:, training_set.columns != 'class'].columns.unique():\n train_val = training_set.loc[j, col]\n test_val = test_set.loc[i, col]\n distance.append(dict_df[col][train_val][test_val])\n total_distance[j] = np.sqrt(np.sum(distance))\n sum_squared = pd.DataFrame.from_dict([total_distance]).T\n sum_squared.columns = ['sum_difference']\n \n # find the k neighbors based on the sum of squared distance\n neighbor_df = pd.DataFrame(pd.concat([label_df, sum_squared], axis = 1).nsmallest(k, 'sum_difference').groupby(['class'])['class'].count()).rename(columns = {'class': '# observations'}).reset_index()\n test_set.loc[i, 'predicted_class'] = neighbor_df.sort_values(by = '# observations', ascending = False).reset_index(drop = True).loc[0, 'class']\n\n return len(test_set[test_set['predicted_class'] != test_set['class']]) / len(test_set['class'])", "def KNN(x_train, x_test, y_train, k=3):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x_train, y_train)\n y_pred = knn.predict(x_test)\n return y_pred", "def knn(train_data, train_labels, test_data, test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc", "def KNN(trainingSet, testSet, k): \n\n # num of neighbours\n k = int(k)\n # num of training samples\n z = len(trainingSet)\n # num of attributes in a sample including class\n n = len(trainingSet[0])\n\n def euclidian(trainingSample, testSample):\n \"\"\" returns euclidian distance between a single training sample and\n adn test sample based on attribute values. \"\"\"\n Dsqr = 0\n for i in range(n-1):\n a = float(trainingSample[i])\n b = float(testSample[i])\n Dsqr += (a - b)**2\n return math.sqrt(Dsqr)\n \n def byDiff(indexedDiff):\n \"\"\" provides key for list sorting order. \"\"\"\n return indexedDiff[0]\n\n\n results = []\n # start algorithm. 
Apply for each test sample provided\n for testSample in testSet:\n indexedDiffs = []\n # for each training sample provided\n for i in range(z):\n dist = euclidian(trainingSet[i], testSample)\n indexedDiffs.append((dist, i))\n \n indexedDiffs.sort(key=byDiff)\n yesCnt = 0\n for x in range(k):\n i = indexedDiffs[x][1]\n # inspect class value of corresponding training samples.\n if trainingSet[i][n-1] == 'yes':\n yesCnt += 1\n \n # categorize test sample\n if yesCnt >= math.ceil(k/2):\n results.append('yes')\n else:\n results.append('no')\n \n return results", "def find_best_k(X_train, y_train, X_test, y_test, min_k=1, max_k=25):\n best_k = 0\n best_score = 0.0\n for k in range(min_k, max_k+1, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n preds = knn.predict(X_test)\n f1 = f1_score(y_test, preds)\n if f1 > best_score:\n best_k = k\n best_score = f1\n print(\"Best Value for k: {}\".format(best_k))\n print(\"F1-Score: {}\".format(best_score))", "def predict(self, testData, k=1):\n distanceMatrix = self.computeDistanceMatrix(testData)\n closestIndices = np.argsort(distanceMatrix, axis=1)\n closestKIndices = closestIndices[:, :k]\n closestKLabels = self.trainLabel[closestKIndices]\n # NearestNeighbour only makes sense if using mode since labels may not be ordered\n from scipy.stats import mode\n majorityLabel, majorityCount = mode(closestKLabels, axis=1)\n majorityLabel = np.reshape(majorityLabel, (majorityLabel.size))\n return majorityLabel", "def predictClass(training_data, test_row, k):\n\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n \n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n\n v = [value for value in counts.values()]\n\n #Pick a class on random if ties occur\n prediction = choice([key for key in counts if counts[key] == max(v)])\n\n return prediction", "def classifier(n_neighbors=5):\n from sklearn.neighbors import KNeighborsClassifier\n return KNeighborsClassifier(n_neighbors=n_neighbors)", "def _predict(self, x):\n # Compute the distance between x and each data point in X_train\n distances = [self._get_distance(x, x_train) for x_train in self.X_train]\n # Get the labels of the k nearest samples to x based on the distances\n k_nearest_indices = np.argsort(distances)[:self.k]\n k_nearest_labels = [self.y_train[idx] for idx in k_nearest_indices]\n # Determine the most common of the k nearest labels\n most_common_class = Counter(k_nearest_labels).most_common(1)[0][0]\n\n return most_common_class" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
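A tiny smoke test of the `knn` helper from this row on a made-up two-cluster data set; the points and labels are fabricated purely to illustrate the call signature:

import numpy
from collections import Counter  # required by knn itself

x = numpy.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
t = numpy.array([0, 0, 1, 1])

# A query point near the second cluster is assigned label 1 by a 3-NN vote.
assert knn(numpy.array([5.1, 5.0]), 3, x, t) == 1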
Given data (observed x and labels t) and choice k of nearest neighbors, plots the decision boundary based on a grid of classifications over the feature space.
def plot_decision_boundary(k, x, t, granularity=100, figures_root='../figures', data_name=None):
    print(f'KNN for K={k}')

    # Initialize meshgrid to be used to store the class prediction values;
    # this is used for computing and plotting the decision boundary contour.
    pointsX = numpy.linspace(numpy.min(x[:, 0]) - 0.1, numpy.max(x[:, 0]) + 0.1, granularity)
    pointsY = numpy.linspace(numpy.min(x[:, 1]) - 0.1, numpy.max(x[:, 1]) + 0.1, granularity)
    Xv, Yv = numpy.meshgrid(pointsX, pointsY)

    # Calculate the KNN classification for every point in the meshgrid.
    classes = numpy.zeros(shape=(Xv.shape[0], Xv.shape[1]))
    for i in range(Xv.shape[0]):
        for j in range(Xv.shape[1]):
            c = knn(numpy.array([Xv[i][j], Yv[i][j]]), k, x, t)
            classes[i][j] = c

    # Plot the decision boundary contour.
    plt.figure()
    plt.pcolormesh(Xv, Yv, classes, cmap=CMAP_LIGHT)
    ti = f'KNN with K = {k}'
    plt.title(ti)
    plt.draw()

    save_path = None
    if data_name is not None:
        save_path = os.path.join(figures_root, f'knn_{data_name}_k={k}')

    # Plot the data (on top of the decision boundary color mesh).
    plot_data(x, t, new_figure=False, save_path=save_path)
    return classes
[ "def draw_knn_boundaries(knn, h=0.02): # h = Step size in the mesh\n ax = plt.gca()\n [xmin, xmax] = ax.get_xlim()\n [ymin, ymax] = ax.get_ylim()\n # Generate the axis associated to the first feature: \n x_axis = np.arange(xmin, xmax, h)\n # Generate the axis associated to the 2nd feature: \n y_axis = np.arange(ymin, ymax, h)\n # Generate a meshgrid (2D grid) from the 2 axis:\n x_grid, y_grid = np.meshgrid(x_axis, y_axis)\n # Vectorize the grids into column vectors:\n x_grid_vectorized = x_grid.flatten()\n x_grid_vectorized = np.expand_dims(x_grid_vectorized, axis=1)\n y_grid_vectorized = y_grid.flatten()\n y_grid_vectorized = np.expand_dims(y_grid_vectorized, axis=1)\n # Concatenate the vectorized grids\n grid = np.concatenate((x_grid_vectorized, y_grid_vectorized), axis=1)\n # Now you can use 'grid' as data to classify by the knn \n\n # Predict concatenated features to get the decision boundaries:\n decision_boundaries = ... #TODO!\n\n # Reshape the decision boundaries into a 2D matrix:\n decision_boundaries = decision_boundaries.reshape(x_grid.shape)\n plt.pcolormesh(x_grid, y_grid, decision_boundaries, cmap=cmap_light, zorder=1)\n return ax", "def plot_decision_boundary(predict,\n classes=2,\n x_range=[0, 1],\n y_range=[0, 1],\n th=0.5):\n\n # Generate a grid with 200 points in each dimension\n hx = (x_range[1]-x_range[0])/200\n xx, yy = np.meshgrid(np.linspace(x_range[0], x_range[1], 200),\n np.linspace(y_range[0], y_range[1], 200))\n\n # Compute decision for meshgrid\n Z = predict(np.hstack((xx.reshape([-1, 1]), yy.reshape([-1, 1]))))\n\n if classes < 3:\n Z = Z > th\n # Plot the contour and training examples\n Z = np.reshape(Z, xx.shape)\n plt.contourf(xx, yy, Z,\n levels=[0, 0.5, 1],\n cmap='jet',\n alpha=0.6)\n\n else:\n if Z.ndim > 1:\n Z = np.argmax(Z, axis=1)\n # Plot the contour and training examples\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z,\n levels=np.arange(classes*2)/2,\n cmap='jet',\n alpha=0.6)\n\n return plt.gca()", "def plot_decision_boundaries(X, y, model_class, **model_params):\n try:\n X = np.array(X)\n y = np.array(y).flatten()\n except:\n print(\"Coercing input data to NumPy arrays failed\")\n # Reduces to the first two columns of data\n reduced_data = X[:, :2]\n # Instantiate the model object\n model = model_class(**model_params)\n # Fits the model with the reduced data\n model.fit(reduced_data, y)\n\n # Step size of the mesh. Decrease to increase the quality of the VQ.\n h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max]. \n\n # Plot the decision boundary. 
For that, we will assign a color to each\n x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1\n y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1\n # Meshgrid creation\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n # Obtain labels for each point in mesh using the model.\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) \n\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),\n np.arange(y_min, y_max, 0.1))\n\n # Predictions to obtain the classification results\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)\n\n # Plotting\n plt.contourf(xx, yy, Z, alpha=0.4)\n plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)\n plt.xlabel(\"Feature-1\",fontsize=14)\n plt.ylabel(\"Feature-2\",fontsize=14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n return plt", "def decision_surface(var1, var2, clf, features, target, npts=200):\n print 'Training classifier...'\n clf.fit(features[[var1, var2]].values, target)\n print 'Training done.'\n xmin, xmax = features[var1].min()-1, features[var1].max()+1\n ymin, ymax = features[var2].min()-1, features[var2].max()+1\n\n # Make mesh grid for decision contour plot\n xx, yy = np.meshgrid(np.linspace(xmin, xmax, num=npts),\n np.linspace(ymin, ymax, num=npts))\n print 'Building decision contours...'\n z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n print 'Contours done.'\n z = z.reshape(xx.shape)\n\n # Plot the decision contours\n fig, (ax1,ax2) = plt.subplots(1,2)\n plt.subplots_adjust(left=0.1, right=0.68, top=0.92, bottom=0.08)\n\n ax1.contourf(xx, yy, z, cmap=plt.cm.Paired, alpha=0.8)\n ax1.set_xlabel(var1.replace('_',' ')) # Calls to replace get rid of ugly\n # underscores in the axes titles.\n ax1.set_ylabel(var2.replace('_',' '))\n ax1.set_xlim(xx.min(), xx.max())\n ax1.set_ylim(yy.min(), yy.max())\n #ax1.set_xticks(())\n #ax1.set_yticks(())\n ax1.set_title('Classifier Contours')\n\n # Plot the training points\n ax2.set_axis_bgcolor('white')\n ax2.scatter(features[var1], features[var2], c=target, cmap=plt.cm.Paired)\n ax2.set_xlabel(var1.replace('_',' '))\n ax2.set_ylabel(var2.replace('_',' '))\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n ax2.set_frame_on(True)\n ax2.yaxis.tick_right()\n ax2.yaxis.set_label_position('right')\n #ax2.set_xticks(())\n #ax2.set_yticks(())\n ax2.set_title('Training Data Scatter Plot')\n\n plt.show()", "def plotDecisionBoundary(x, y):\r\n markers = ('+', '.', 'x')\r\n colors = ('blue', 'dimgrey', 'maroon')\r\n cmap = ListedColormap(colors[:len(np.unique(y))])\r\n xx, yy = makeMeshGrid(x, y)\r\n plotContours(plt, sm, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)\r\n # Plot also the training points\r\n for idx, cl in enumerate(np.unique(y)):\r\n xBasedOnLabel = x[np.where(y[:,0] == cl)]\r\n plt.scatter(x=xBasedOnLabel[:, 0], y=xBasedOnLabel[:, 1], c=cmap(idx),\r\n cmap=plt.cm.coolwarm, marker=markers[idx], label=cl)\r\n plt.xlim(xx.min(), xx.max())\r\n plt.ylim(yy.min(), yy.max())\r\n plt.xlabel(\"Feature X1\")\r\n plt.ylabel(\"Feature X2\")\r\n plt.title(\"Softmax Classifier on Iris Dataset(Decision Boundary)\")\r\n plt.xticks()\r\n plt.yticks()\r\n plt.legend(loc='upper left')\r\n plt.show()", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ 
+ 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def boundary(svdd, data, label, r=0.3, nn=2):\n\n dim = data.shape[1]\n if dim != 2:\n raise SyntaxError(\"Visualization of decision boundary only supports for 2D data\")\n\n # compute the range of grid\n numGrids = np.rint(data.shape[0] / nn).astype(int) # number of grids\n x_range = np.zeros(shape=(numGrids, 2))\n for i in range(2):\n _tmp_ = (np.max(data[:, i]) - np.min(data[:, i])) * r\n xlim_1 = np.min(data[:, i]) - _tmp_\n xlim_2 = np.max(data[:, i]) + _tmp_\n x_range[:, i] = np.linspace(xlim_1, xlim_2, numGrids)\n\n # grid\n xv, yv = np.meshgrid(x_range[:, 0], x_range[:, 1])\n\n num1 = xv.shape[0]\n num2 = yv.shape[0]\n distance = np.zeros(shape=(num1, num1))\n\n # calculate the grid scores\n print(\"Calculating the grid (%04d*%04d) scores...\\n\" % (num1, num2))\n\n display_ = svdd.parameters[\"option\"][\"display\"]\n svdd.parameters[\"option\"][\"display\"] = \"off\"\n start_time = time.time()\n for i in range(num1):\n for j in range(num2):\n tmp = np.mat([xv[i, j], yv[i, j]])\n distance[i, j], _ = svdd.test(tmp, 1)\n # print('[feature 1: %06d] [feature 2: %06d] \\n' % (i+1,j+1))\n end_time = time.time()\n print(\"Grid scores completed. 
Time cost %.4f s\\n\" % (end_time - start_time))\n svdd.parameters[\"option\"][\"display\"] = display_\n\n # plot the contour (3D)\n fig = plt.figure(figsize=(20, 6))\n\n ax3 = fig.add_subplot(1, 3, 1, projection=\"3d\")\n # ax3 = ax3.axes(projection='3d')\n ada = ax3.plot_surface(xv, yv, distance, cmap=plt.cm.jet)\n ax3.contourf(xv, yv, distance, zdir=\"z\", offset=np.min(distance) * 0.9, cmap=plt.cm.coolwarm)\n ax3.set_zlim(np.min(distance) * 0.9, np.max(distance) * 1.05)\n # plt.colorbar(ada)\n\n # plot the contour (2D)\n # fig = plt.figure(figsize = (10, 8))\n ax1 = fig.add_subplot(1, 3, 2)\n\n ctf1 = ax1.contourf(xv, yv, distance, alpha=0.8, cmap=plt.cm.jet)\n ctf2 = ax1.contour(xv, yv, distance, colors=\"black\", linewidths=1)\n plt.clabel(ctf2, inline=True)\n # plt.colorbar(ctf1)\n\n # plot the boundary\n # fig = plt.figure(figsize = (10, 8))\n ax2 = fig.add_subplot(1, 3, 3)\n\n if svdd.labeltype == \"single\":\n\n ax2.scatter(data[:, 0], data[:, 1], color=\"green\", marker=\"o\", edgecolor=\"black\", alpha=0.5, zorder=2)\n ax2.scatter(\n data[svdd.model[\"sv_index\"], 0],\n data[svdd.model[\"sv_index\"], 1],\n facecolor=\"C2\",\n marker=\"o\",\n s=144,\n linewidths=2,\n edgecolor=\"black\",\n zorder=2,\n )\n\n ax2.contour(xv, yv, distance, [svdd.model[\"radius\"]], colors=\"C3\", linewidths=5, zorder=1)\n\n ax2.legend(\n [\"Training data\", \"Support vectors\"], ncol=1, loc=0, edgecolor=\"black\", markerscale=1.2, fancybox=True\n )\n\n else:\n ax2.scatter(\n data[svdd.model[\"pIndex\"], 0],\n data[svdd.model[\"pIndex\"], 1],\n facecolor=\"C0\",\n marker=\"o\",\n s=100,\n linewidths=2,\n edgecolor=\"black\",\n zorder=2,\n )\n ax2.scatter(\n data[svdd.model[\"nIndex\"], 0],\n data[svdd.model[\"nIndex\"], 1],\n facecolor=\"C4\",\n marker=\"s\",\n s=100,\n linewidths=2,\n edgecolor=\"black\",\n zorder=2,\n )\n\n ax2.scatter(\n data[svdd.model[\"sv_index\"], 0],\n data[svdd.model[\"sv_index\"], 1],\n facecolor=\"C2\",\n marker=\"o\",\n s=144,\n linewidths=2,\n edgecolor=\"black\",\n zorder=2,\n )\n\n ax2.contour(xv, yv, distance, [svdd.model[\"radius\"]], colors=\"C3\", linewidths=5, zorder=1)\n\n ax2.legend(\n [\"Training data (+)\", \"Training data (-)\", \"Support vectors\"],\n ncol=1,\n loc=0,\n edgecolor=\"black\",\n markerscale=1.2,\n fancybox=True,\n )\n\n plt.show()", "def plot_tree_clarans(data, k):\n\n n = len(data)\n num_points = int(scipy.special.binom(n, k))\n num_neigh = k * (n - k)\n\n if (num_points > 50) or (num_neigh > 10):\n print(\n \"Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big\"\n )\n return\n\n # all possibile combinations of k elements from input data\n name_nodes = list(itertools.combinations(list(data.index), k))\n\n dot = graphviz.Digraph(comment=\"Clustering\")\n\n # draw nodes, also adding the configuration cost\n for i in range(num_points):\n tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i]))\n tc = round(tot_cost, 3)\n\n dot.node(str(name_nodes[i]), str(name_nodes[i]) + \": \" + str(tc))\n\n # only connect nodes if they have k-1 common elements\n for i in range(num_points):\n for j in range(num_points):\n if i != j:\n if (\n len(set(list(name_nodes[i])) & set(list(name_nodes[j])))\n == k - 1\n ):\n dot.edge(str(name_nodes[i]), str(name_nodes[j]))\n\n graph = graphviz.Source(dot) # .view()\n display(graph)", "def plot_decision_boundary(model, X, y, title=\"\"):\n # Set min and max values and give it some padding\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() 
- 1, X[:, 1].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n print(Z)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.title(title)\n plt.show()", "def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)", "def plot_2D_boundary(plot_range, points, decisionfcn, labels, values=[0]):\n\n clist = ['b', 'r', 'g', 'k', 'm', 'y'] # colors for the classes\n\n # evaluate on a grid and plot contour of decision function\n x = np.arange(plot_range[0], plot_range[1], .1)\n y = np.arange(plot_range[2], plot_range[3], .1)\n xx, yy = np.meshgrid(x, y)\n xxx, yyy = xx.flatten(), yy.flatten() # lists of x,y in grid\n zz = np.array(decisionfcn(xxx, yyy))\n zz = zz.reshape(xx.shape)\n\n # plot contour(s) at values\n plt.contour(xx, yy, zz, values)\n\n # for each class, plot the points with ’*’ for correct, ’o’ for incorrect\n for i in range(len(points)):\n d = decisionfcn(points[i][:, 0], points[i][:, 1])\n correct_ndx = labels[i] == d\n incorrect_ndx = labels[i] != d\n plt.plot(\n points[i][correct_ndx, 0],\n points[i][correct_ndx, 1],\n '*',\n color=clist[i])\n plt.plot(\n points[i][incorrect_ndx, 0],\n points[i][incorrect_ndx, 1],\n 'o',\n color=clist[i])\n plt.axis('equal')\n plt.show()", "def optimal_neighbors(X_data,\n y_data,\n standardize = True,\n pct_test=0.25,\n seed=802,\n response_type='reg',\n max_neighbors=20,\n show_viz=True): \n \n \n if standardize == True:\n # optionally standardizing X_data\n scaler = StandardScaler()\n scaler.fit(X_data)\n X_scaled = scaler.transform(X_data)\n X_scaled_df = pd.DataFrame(X_scaled)\n X_data = X_scaled_df\n\n\n\n # train-test split\n X_train, X_test, y_train, y_test = train_test_split(X_data,\n y_data,\n test_size = pct_test,\n random_state = seed)\n\n\n # creating lists for training set accuracy and test set accuracy\n training_accuracy = []\n test_accuracy = []\n \n \n # setting neighbor range\n neighbors_settings = range(1, max_neighbors + 1)\n\n\n for n_neighbors in neighbors_settings:\n # building the model based on response variable type\n if response_type == 'reg':\n clf = KNeighborsRegressor(n_neighbors = n_neighbors)\n clf.fit(X_train, y_train)\n \n elif response_type == 'class':\n clf = KNeighborsClassifier(n_neighbors = n_neighbors)\n clf.fit(X_train, y_train) \n \n else:\n print(\"Error: response_type must be 'reg' or 'class'\")\n \n \n # recording the training set accuracy\n training_accuracy.append(clf.score(X_train, y_train))\n \n # recording the generalization accuracy\n test_accuracy.append(clf.score(X_test, y_test))\n\n\n # optionally displaying visualization\n if show_viz == True:\n # plotting the visualization\n fig, ax = plt.subplots(figsize=(12,8))\n plt.plot(neighbors_settings, training_accuracy, label = \"training accuracy\")\n plt.plot(neighbors_settings, test_accuracy, label = \"test accuracy\")\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"n_neighbors\")\n plt.legend()\n plt.show()\n \n \n # returning optimal number of neighbors\n print(f\"The optimal number of neighbors is: {test_accuracy.index(max(test_accuracy))+1}\")\n return test_accuracy.index(max(test_accuracy))+1", "def 
plot_2D_decision_boundary(\n X,\n y=None,\n trained_classifier=None,\n margin_size=0.1,\n steps=100,\n figsize=(13, 10),\n title=\"\",\n):\n\n # range\n x_min, x_max = X[:, 0].min(), X[:, 0].max()\n y_min, y_max = X[:, 1].min(), X[:, 1].max()\n x_range = abs(x_max - x_min)\n y_range = abs(y_max - y_min)\n\n xmin, xmax = x_min - margin_size * x_range, x_max + margin_size * x_range\n ymin, ymax = y_min - margin_size * y_range, y_max + margin_size * y_range\n\n # plot points\n plt.figure(figsize=figsize)\n if y is not None:\n for l in [0, -1, 1]:\n ixl = np.where(y == l)[0]\n plt.scatter(\n X[ixl, 0], X[ixl, 1], color=PLOT_COLORS[l], marker=PLOT_MARKERS[l]\n )\n else:\n plt.scatter(X[:, 0], X[:, 1], edgecolors=\"k\")\n\n # plot decision boundary\n if trained_classifier is not None:\n xx, yy = np.meshgrid(\n np.linspace(xmin, xmax, int(steps)), np.linspace(ymin, ymax, int(steps))\n )\n X_mesh = np.c_[xx.ravel(), yy.ravel()]\n\n # predict for the mesh\n Z = trained_classifier.predict_proba(X_mesh)[:, 1] # anom prob\n Z = Z.reshape(xx.shape)\n\n # plot figure\n plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, vmin=0.0, vmax=1.0, alpha=0.8)\n plt.colorbar()\n\n # clean plot\n plt.grid(alpha=0.25)\n plt.title(title)\n plt.show()", "def plot(self):\n # print(\"Printing decision trees\")\n # gs = gridspec.GridSpec(1, self.n_estimators)\n # plt.figure()\n\n # for i in range(self.n_estimators):\n # ax = plt.subplot(gs[0, i]) # row 0, col 0\n # clf = self.estimators_list[i].fit(self.split_x[i],self.split_y[i])\n # plot_tree(clf, filled=True)\n # plt.show()\n\n print(\"Printing decision surfaces \")\n plot_colors = \"rg\"\n plot_step = 0.02\n n_classes = 2\n for _ in range (self.n_estimators):\n plt.subplot(2, 5, _+1 )\n x_min, x_max = self.split_x[_].iloc[:, 0].min() - 1, self.split_x[_].iloc[:, 0].max() + 1\n y_min, y_max = self.split_x[_].iloc[:, 1].min() - 1, self.split_x[_].iloc[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n Z = self.estimators_list[_].predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.PiYG)\n for i, color in zip(range(n_classes), plot_colors):\n idx = np.where(self.split_y[_] == i)\n for i in range (len(idx[0])):\n plt.scatter(self.split_x[_].loc[idx[0][i]][0], self.split_x[_].loc[idx[0][i]][1],c=color,cmap=plt.cm.PiYG, edgecolor='black', s=15)\n plt.suptitle(\"RandomForestClassifier:Decision surface of a decision tree using two features\")\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\n plt.axis(\"tight\")\n\n plt.show()\n fig1 = plt\n\n # Figure 2\n print(\"Printing combining decision surface \")\n plot_colors = \"rg\"\n plot_step = 0.02\n n_classes = 2\n x_min, x_max = self.data.iloc[:, 0].min() - 1, self.data.iloc[:, 0].max() + 1\n y_min, y_max = self.data.iloc[:, 1].min() - 1, self.data.iloc[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n Z = self.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = np.array(Z)\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.PiYG)\n for i, color in zip(range(n_classes), plot_colors):\n idx = np.where(self.labels == i)\n for i in range (len(idx[0])):\n plt.scatter(self.data.loc[idx[0][i]][0], self.data.loc[idx[0][i]][1],c=color,cmap=plt.cm.PiYG, edgecolor='black', s=15)\n plt.suptitle(\"RandomForestClassifier:Decision surface by 
combining all the estimators\")\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\n plt.axis(\"tight\")\n\n plt.show()\n fig2 = plt\n\n return [fig1,fig2]", "def plot_decision_boundary(resolution=100, colors=('b', 'k', 'r'), levels=(-1, 0, 1)):\n\n # Generate coordinate grid of shape [resolution x resolution]\n # and evaluate the model over the entire space\n xrange = np.linspace(x_train[:,0].min(), x_train[:,0].max(), resolution)\n yrange = np.linspace(x_train[:,1].min(), x_train[:,1].max(), resolution)\n grid = [[decision_function(alpha, y_train,\n Kernel1, x_train,\n np.array([xr, yr]), b) for xr in xrange] for yr in yrange]\n grid = np.array(grid).reshape(len(xrange), len(yrange))\n\n # Plot decision contours using grid and\n # make a scatter plot of training data\n ax.contour(xrange, yrange, grid, levels=levels, linewidths=(1, 1, 1),\n linestyles=('--', '-', '--'), colors=colors)\n ax.scatter(x_train[:,0], x_train[:,1],\n c=y_train, cmap=plt.cm.viridis, lw=0, alpha=0.25)\n\n # Plot support vectors (non-zero alphas)\n # as circled points (linewidth > 0)\n mask = np.round(alpha, decimals=2) != 0.0\n ax.scatter(x_train[mask,0], x_train[mask,1],\n c=y_train[mask], cmap=plt.cm.viridis, lw=1, edgecolors='k')\n\n return grid, ax", "def train_classification_k_neighbors(p_X_train_scaled, p_y_train, p_weather):\n X_train, X_val, y_train, y_val = train_test_split(p_X_train_scaled, p_y_train, random_state=42, test_size=0.2 / 0.7)\n clf = KNeighborsClassifier(n_neighbors=10, weights=\"distance\")\n clf.fit(X_train, y_train)\n y_prediction_train = clf.predict(X_train)\n y_prediction_val = clf.predict(X_val)\n io.save_object(clf, \"KNearestNeighbours_classifier_model\"+p_weather+\".pkl\")\n kn_classification_sets = [y_train, y_val, y_prediction_train, y_prediction_val]\n return kn_classification_sets", "def fit(self, data, labels, labels_pred):\n self.n_samples, dim = data.shape\n self.labels_unique = np.unique(labels)\n self.n_classes = len(self.labels_unique)\n if self.n_neighbors is None:\n # Set number of nearest neighbors based on the maximum number of samples per class and the neighborhood\n # constant\n num = 0\n for c in self.labels_unique:\n ind = np.where(labels == c)[0]\n if ind.shape[0] > num:\n num = ind.shape[0]\n\n self.n_neighbors = int(np.ceil(num ** self.neighborhood_constant))\n\n logger.info(\"Number of samples: {:d}. Data dimension = {:d}.\".format(self.n_samples, dim))\n logger.info(\"Number of classes: {:d}.\".format(self.n_classes))\n logger.info(\"Number of neighbors (k): {:d}.\".format(self.n_neighbors))\n logger.info(\"Fraction of outliers (alpha): {:.4f}.\".format(self.alpha))\n if self.model_dim_reduction:\n data = transform_data_from_model(data, self.model_dim_reduction)\n dim = data.shape[1]\n logger.info(\"Applying dimension reduction to the data. 
Projected dimension = {:d}.\".format(dim))\n\n # Distance from each sample in `data` to the `1 - alpha` level sets corresponding to each class\n distance_level_sets = np.zeros((self.n_samples, self.n_classes))\n self.index_knn = dict()\n self.epsilon = dict()\n indices_sub = dict()\n for j, c in enumerate(self.labels_unique):\n logger.info(\"Processing data from class '{}':\".format(c))\n logger.info(\"Building a KNN index for all the samples from class '{}'.\".format(c))\n indices_sub[c] = np.where(labels == c)[0]\n data_sub = data[indices_sub[c], :]\n self.index_knn[c] = KNNIndex(\n data_sub, n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distances to the k nearest neighbors of each sample\n _, nn_distances = self.index_knn[c].query_self(k=self.n_neighbors)\n # Radius or distance to the k-th nearest neighbor for each sample\n radius_arr = nn_distances[:, self.n_neighbors - 1]\n\n # Smallest radius `epsilon` such that only `alpha` fraction of the samples from class `c` have radius\n # greater than `epsilon`\n if self.alpha > 0.:\n self.epsilon[c] = np.percentile(radius_arr, 100 * (1 - self.alpha), interpolation='midpoint')\n\n # Exclude the outliers and build a KNN index with the remaining samples\n mask_incl = radius_arr <= self.epsilon[c]\n mask_excl = np.logical_not(mask_incl)\n num_excl = mask_excl[mask_excl].shape[0]\n else:\n # Slightly larger value than the largest radius\n self.epsilon[c] = 1.0001 * np.max(radius_arr)\n\n # All samples are included in the density level set\n mask_incl = np.ones(indices_sub[c].shape[0], dtype=np.bool)\n mask_excl = np.logical_not(mask_incl)\n num_excl = 0\n\n if num_excl:\n logger.info(\"Excluding {:d} samples with radius larger than {:.6f} and building a KNN index with \"\n \"the remaining samples.\".format(num_excl, self.epsilon[c]))\n self.index_knn[c] = KNNIndex(\n data_sub[mask_incl, :], n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distance to the nearest neighbor of each sample that is part of the KNN index\n _, dist_temp = self.index_knn[c].query_self(k=1)\n ind = indices_sub[c][mask_incl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n\n # Distance to the nearest neighbor of each sample that is not a part of the KNN index (outliers)\n _, dist_temp = self.index_knn[c].query(data_sub[mask_excl, :], k=1)\n ind = indices_sub[c][mask_excl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n else:\n # No need to rebuild the KNN index because no samples are excluded.\n # Distance to the nearest neighbor of each sample\n distance_level_sets[indices_sub[c], j] = nn_distances[:, 0]\n\n logger.info(\"Calculating the trust score for the estimation data.\")\n for c in self.labels_unique:\n # Compute the distance from each sample from class `c` to the level sets from the remaining classes\n data_sub = data[indices_sub[c], :]\n for j, c_hat in enumerate(self.labels_unique):\n if c_hat == c:\n continue\n\n _, dist_temp = self.index_knn[c_hat].query(data_sub, k=1)\n distance_level_sets[indices_sub[c], j] = dist_temp[:, 0]\n\n self.scores_estim = self._score_helper(distance_level_sets, labels_pred)\n return self", "def find_knn_hyperparams():\n n_neighbors = np.arange(5, 10)\n ps = np.arange(1, 
10)\n results = []\n\n for p in ps:\n result = []\n for _ in range(10):\n data = FaceDataset(\"embeddings/known\", n=50)\n train_data, train_labels = data.train()\n test_data, test_labels = data.test()\n accs = []\n for n in n_neighbors:\n clf = KNeighborsClassifier(n_neighbors=n, weights=\"distance\", p=p)\n clf, _ = train(clf, train_data, train_labels)\n acc, _ = test(clf, test_data, test_labels)\n accs.append(acc)\n result.append(accs)\n result = np.mean(result, axis=0)\n results.append(result)\n\n plots = []\n for i in range(len(ps)):\n p = plotly.graph_objs.Scatter(x=n_neighbors, y=results[i], name=\"p={}\".format(ps[i]))\n plots.append(p)\n\n plotly.offline.plot(plots, filename=\"knn.html\")\n print(\"C={}\".format(n_neighbors[np.argmax(results)]))", "def plot_decision_boundary(model: torch.nn.Module, X: torch.Tensor, y: torch.Tensor):\n # Put everything to CPU (works better with NumPy + Matplotlib)\n model.to(\"cpu\")\n X, y = X.to(\"cpu\"), y.to(\"cpu\")\n\n # Setup prediction boundaries and grid\n x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1\n y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))\n\n # Make features\n X_to_pred_on = torch.from_numpy(np.column_stack((xx.ravel(), yy.ravel()))).float()\n\n # Make predictions\n model.eval()\n with torch.inference_mode():\n y_logits = model(X_to_pred_on)\n\n # Test for multi-class or binary and adjust logits to prediction labels\n if len(torch.unique(y)) > 2:\n y_pred = torch.softmax(y_logits, dim=1).argmax(dim=1) # mutli-class\n else:\n y_pred = torch.round(torch.sigmoid(y_logits)) # binary\n\n # Reshape preds and plot\n y_pred = y_pred.reshape(xx.shape).detach().numpy()\n plt.contourf(xx, yy, y_pred, cmap=plt.cm.RdYlBu, alpha=0.7)\n plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a nova client instance.
def get_nova(self, version='2.1'):
    if self.nova is None:
        self.nova = novaclient.Client(version, session=self.get_session())
    return self.nova
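A short usage sketch for the getter above. It is hypothetical: `clients` stands in for an instance of whatever helper class defines `get_nova()` and `get_session()` (that class is not shown here), and only standard novaclient calls are used.

# Hypothetical: `clients` is an instance of the helper class these getters live on.
nova = clients.get_nova()                # first call builds the client; later calls reuse it
for server in nova.servers.list():       # standard novaclient servers API
    print(server.id, server.name, server.status)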
[ "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def get_nova_v2_client(self):\n from novaclient import client as nova\n return nova.Client(NOVA_API_VERSION, session=self._session())", "def get_novaclient(self, env, client_version=3):\r\n self.nova_env = env\r\n assert utils.is_valid_environment(env), \"Env %s not found in \"\\\r\n \"supernova configuration file.\" % env\r\n print(\"Getting novaclient!\")\r\n return novaclient.Client(client_version, **self.prep_python_creds())", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')", "def get_client_instance(opts={}, api_version=None):\n return get_client_class(api_version)(**opts)", "def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl", "def get_client() -> 'NM.Client':\n return NM.Client.new(None)", "def neutron_client(get_neutron_client):\n return get_neutron_client()", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "def get_neutron_v2_client(self):\n from neutronclient.v2_0 import client as neutron\n return neutron.Client(session=self._session())", "def nova(self, obj):\n\n if self._novaclient is not None:\n return self._novaclient\n params = self._build_conn_params(obj.user, obj.project)\n self._novaclient = driver_base.SenlinDriver().compute(params)\n return self._novaclient", "def initialize_client_hook(self):\n creds = self.conf.service_credentials\n logger = None\n if hasattr(self.conf, 'nova_http_log_debug') and getattr(\n self.conf, 'nova_http_log_debug'):\n logger = log.getLogger(\"novaclient-debug\")\n logger.logger.setLevel(log.DEBUG)\n ks_session = keystone_client.get_session(self.conf)\n return nova_client.Client(\n version=api_versions.APIVersion('2.1'),\n session=ks_session,\n # nova adapter options\n region_name=creds.region_name,\n endpoint_type=creds.interface,\n service_type=self.conf.service_types.nova,\n logger=logger)", "def get_servicediscovery_client() -> Any:\n client_name = 'servicediscovery'\n if client_name not in CLIENTS:\n region = CONFIG.aws_region if CONFIG else None\n profile = CONFIG.aws_profile if CONFIG else None\n session = boto3.session.Session(\n region_name=region, # type: ignore\n profile_name=profile, # type: ignore\n )\n CLIENTS[client_name] = session.client(client_name, config=Config(\n max_pool_connections=1,\n retries=dict(max_attempts=2)\n ))\n return CLIENTS[client_name]", "def _client():\n global 
_ndb_client\n global _initial_pid\n\n if not _ndb_client:\n with _ndb_client_lock:\n if not _ndb_client:\n _ndb_client = ndb.Client(project=utils.get_application_id())\n _initial_pid = os.getpid()\n\n return _ndb_client", "def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)", "def Client(self):\n if self._client_creator:\n return self._client_creator()", "def make_client(instance):\n return None", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a glance client instance.
def get_glance(self, version='2'):
    if self.glance is None:
        self.glance = glanceclient(version, session=self.get_session())
    return self.glance
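An analogous, hypothetical usage sketch; `clients` is again a stand-in for the surrounding helper class, and the loop uses the standard glanceclient v2 image-listing call (images are dict-like records).

glance = clients.get_glance()
for image in glance.images.list():       # glanceclient v2 images API
    print(image['id'], image.get('name'))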
[ "def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)", "def get_client_instance(opts={}, api_version=None):\n return get_client_class(api_version)(**opts)", "def _get_client(cls):\n\n try:\n key = os.environ[GDAXTrader.GDAX_KEY_ENV]\n secret = os.environ[GDAXTrader.GDAX_SECRET_ENV]\n passphrase = os.environ[GDAXTrader.GDAX_PASSPHRASE_ENV]\n except KeyError as error:\n raise KeyError('Missing environment variable for GDAX: '.format(error))\n\n try:\n api_url = os.environ[GDAXTrader.GDAX_API_URL_ENV]\n except KeyError:\n client = gdax.AuthenticatedClient(key, secret, passphrase)\n else:\n client = gdax.AuthenticatedClient(key, secret, passphrase,\n api_url=api_url)\n\n return client", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def _get_girder_client(cls):\n if cls.instance is None:\n raise GaiaException('GirderInterface not initialized')\n\n if cls.instance.gc is None:\n raise GaiaException('GirderClient not initialized')\n\n return cls.instance.gc", "def get_unibox_client(self):\n if self._gls_unibox_client is None:\n client = Client(\n self.gls_server,\n self.gls_port\n )\n client.test = self.gls_is_test\n self._gls_unibox_client = client\n\n return self._gls_unibox_client", "def _get_client():\n\n return datastore.Client()", "async def get_client(self) -> GeWebsocketClient:\n if self.client is not None:\n await self.client.disconnect()\n\n loop = self._hass.loop\n self.client = self.create_ge_client(event_loop=loop)\n return self.client", "def get_client():\n\n # Initialize stub wrapper\n client_stub = StubWrapper(6)\n\n # Pass back client and stubs\n return pydgraph.DgraphClient(*client_stub.stubs), client_stub", "def get_client() -> 'NM.Client':\n return NM.Client.new(None)", "def get_client(request):\n client = raven.Client(release=raven.fetch_package_version('h'),\n transport=GeventedHTTPTransport)\n client.http_context(http_context_data(request))\n client.user_context(user_context_data(request))\n return client", "def get_client():\n return new_client(ONEPASSWORD_CONNECT_HOST, ONEPASSWORD_CONNECT_TOKEN)", "def Client(self):\n if self._client_creator:\n return self._client_creator()", "def client(cls):\n # use the latest token\n app = cls.objects.order_by('-created_at').first()\n if settings.DEBUG:\n logging.getLogger(__name__).debug(\n f\"Bot Access Token:{app.bot_access_token}\"\n )\n\n return WebClient(token=app.bot_access_token)", "def get_client(self, type, region=None, session_name=None):\n if 'creds' not in self.__dict__:\n self.creds = self.get_creds(session_name=session_name)\n client = boto3.client(type,\n aws_access_key_id = self.creds['AccessKeyId'],\n aws_secret_access_key = self.creds['SecretAccessKey'],\n aws_session_token = self.creds['SessionToken'],\n region_name = region)\n return(client)", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def _client():\n global _ndb_client\n global _initial_pid\n\n if not _ndb_client:\n with _ndb_client_lock:\n if not _ndb_client:\n _ndb_client = ndb.Client(project=utils.get_application_id())\n _initial_pid = os.getpid()\n\n return _ndb_client", "def get_google_client(self, cred_file='./google.json', ):\n logging.debug(\"Creating Google client\")\n credentials = Storage(cred_file).get()\n http = credentials.authorize(httplib2.Http())\n client = build('fitness', 
'v1', http=http)\n logging.debug(\"Google client created\")\n return client", "def get(self, id: int) -> Client:\n\n return self.__clients[id]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a swift client.Connection instance.
def get_swift(self):
    if self.swift is None:
        self.swift = swiftclient.Connection(
            auth_version='3',
            authurl=self.auth_kwargs["auth_url"],
            user=self.auth_kwargs["username"],
            key=self.auth_kwargs["password"],
            tenant_name=self.auth_kwargs["project_name"]
        )
    return self.swift
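A hypothetical usage sketch, again assuming the same helper-class instance (`clients`) and using only documented swiftclient.Connection methods; container and object names are placeholders.

swift = clients.get_swift()
headers, containers = swift.get_account()           # account metadata + container listing
swift.put_container('backups')                      # create (or reuse) a container
swift.put_object('backups', 'hello.txt', contents=b'hello world')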
[ "def get_connection(self):\n try:\n return swift_client.Connection(\n preauthurl=self.swift_url,\n preauthtoken=self.auth_token,\n retries=5,\n auth_version='1',\n insecure=True)\n except Exception as e:\n err_message = \"Exception raised initiating a swift connection.\"\n LOGGER.exception(err_message)\n raise", "def get_connection(self):\n c = httplib.HTTPConnection(self.server)\n return c", "def get_connection(self):\n return self._run_by_retry(self._get_connection)", "def get_connection(self):\n if self.allow_reauth:\n # we are refreshing token only and if only connection manager\n # re-authentication is allowed. Token refreshing is setup by\n # connection manager users. Also we disable re-authentication\n # if there is not way to execute it (cannot initialize trusts for\n # multi-tenant or auth_version is not 3)\n auth_ref = self.client.session.auth.auth_ref\n # if connection token is going to expire soon (keystone checks\n # is token is going to expire or expired already)\n if self.store.backend_group:\n interval = getattr(\n self.store.conf, self.store.backend_group\n ).swift_store_expire_soon_interval\n else:\n store_conf = self.store.conf.glance_store\n interval = store_conf.swift_store_expire_soon_interval\n\n if auth_ref.will_expire_soon(interval):\n LOG.info(_LI(\"Requesting new token for swift connection.\"))\n # request new token with session and client provided by store\n auth_token = self.client.session.get_auth_headers().get(\n self.AUTH_HEADER_NAME)\n LOG.info(_LI(\"Token has been successfully requested. \"\n \"Refreshing swift connection.\"))\n # initialize new switclient connection with fresh token\n self.connection = self.store.get_store_connection(\n auth_token, self.storage_url)\n return self.connection", "def _make_swift_connection(self, auth_url, user, key):\n snet = self.snet\n logger.debug(_(\"Creating Swift connection with \"\n \"(auth_address=%(auth_url)s, user=%(user)s, \"\n \"snet=%(snet)s)\") % locals())\n return swift_client.Connection(\n authurl=auth_url, user=user, key=key, snet=snet)", "def conn(self):\n return os_conn.Connection(**self.os_auth_args)", "def __GetConnection(self):\n\n self.conn = httplib.HTTPConnection(BLIP_API_URL)\n return self.conn", "def get_client():\n return new_client(ONEPASSWORD_CONNECT_HOST, ONEPASSWORD_CONNECT_TOKEN)", "def get_connection(self) -> Try[asyncpg.PostgresError, asyncpg.Connection]:\n return self.connection.get().map(lambda c: c.connection)", "def get(self, conn_alias: str) -> \"BaseDBAsyncClient\":\n storage: Dict[str, \"BaseDBAsyncClient\"] = self._get_storage()\n try:\n return storage[conn_alias]\n except KeyError:\n connection: BaseDBAsyncClient = self._create_connection(conn_alias)\n storage[conn_alias] = connection\n return connection", "def get_connection(self):\n return self.conn", "def getmake_connection(self):\n if (self.connection == None):\n self.connection = self.engine.connect()\n return self.connection", "def conn(self) -> connection.Connection:\n return self._conn", "def get_conn(self) -> ServiceBusClient:\n conn = self.get_connection(self.conn_id)\n connection_string: str = str(conn.schema)\n if connection_string:\n client = ServiceBusClient.from_connection_string(connection_string, logging_enable=True)\n else:\n extras = conn.extra_dejson\n credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name=\"credential\")\n fully_qualified_namespace = self._get_field(extras=extras, field_name=\"fully_qualified_namespace\")\n if not credential:\n credential = 
DefaultAzureCredential()\n client = ServiceBusClient(\n fully_qualified_namespace=fully_qualified_namespace,\n credential=credential, # type: ignore[arg-type]\n )\n\n self.log.info(\"Create and returns ServiceBusClient\")\n return client", "def get_connection(self):\n return self._connection", "def connection():\n return _MockConnection()", "def get_conn(self) -> WebClient:\n return self.client", "def get_connection(hostname, logger):\n return Connection(\n hostname,\n logger=logger,\n sudo=needs_sudo(),\n )", "def get_conn(self):\n conn = self.get_connection(self.conn_id)\n service_options = conn.extra_dejson\n return BlockBlobService(account_name=conn.login,\n account_key=conn.password, **service_options)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
'voltage' should be a dict of numpy arrays of floating-point numbers. The keys of 'voltage' are integers, 0-3. Each element of 'voltage' should start and end near zero. 'repetitions' and 'rate' should be integers.
def __init__(
    self,
    voltage={0:(0, 0)},
    rate=500,
    repetitions=1,
    board_name='cDAQ1Mod1',
    voltage_limits=None,
    num_channels=7):
    self.board_name = board_name #Check Measurement and Automation Explorer
    self._taskHandle = ctypes.c_void_p(0)
    self.num_channels = num_channels
    DAQmxErrChk(api.DAQmxCreateTask("", ctypes.byref(self._taskHandle)))
    DAQmxErrChk(api.DAQmxCreateAOVoltageChan(
        self._taskHandle,
        self.board_name + "/ao0:%i"%(num_channels - 1),
        "",
        ctypes.c_double(-10.0), #Minimum voltage
        ctypes.c_double(10.0), #Maximum voltage
        10348, #DAQmx_Val_Volts; don't question it!
        ctypes.c_void_p(0), #NULL
        ))
    self.num_points_written = ctypes.c_long(0)
    self._unwritten_voltages = False
    self._unplayed_voltages = False
    self.set_voltage_and_timing(voltage, rate, repetitions, voltage_limits)
    return None
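A small, hypothetical example of an input dict that satisfies the docstring above: channel keys mapped to numpy arrays that start and end near zero. The class name in the commented construction line is a made-up stand-in, since the class that defines this __init__ is not shown.

import numpy as np

ramp = np.concatenate([np.linspace(0, 5, 250),   # rise to 5 V
                       np.linspace(5, 0, 250)])  # return to ~0 V
voltage = {0: ramp,
           1: 0.5 * ramp,
           2: np.zeros(500),
           3: np.sin(np.linspace(0, 2 * np.pi, 500))}  # starts and ends at 0

# Hypothetical construction; 'AnalogOutPlayer' is a placeholder name for the
# class that defines the __init__ above.
# ao = AnalogOutPlayer(voltage=voltage, rate=500, repetitions=2)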
[ "def update_voltages(self):\n for i in range(7):\n j = self.voltChannelLookup[0][i]\n self.voltages_raw[i] = int(self.ad7998[1].read_input_raw(j) & 0xfff)\n self.voltages[i] = self.voltages_raw[i] * 3 / 4095.0\n for i in range(6):\n j = self.voltChannelLookup[1][i]\n self.voltages_raw[i + 7] = int(self.ad7998[3].read_input_raw(j) & 0xfff)\n self.voltages[i + 7] = self.voltages_raw[i + 7] * 5 / 4095.0", "def pfeiffer_single_gauge(voltage):\n # V → Torr\n exponent = 1.667 * voltage - 11.46\n pres = 10 ** exponent\n return pres", "def render(sequence: AbstractInstructionBlock,\n sample_rate: Real=10.0,\n render_measurements=False) -> Union[Tuple[np.ndarray, Dict[ChannelID, np.ndarray]],\n Tuple[np.ndarray, Dict[ChannelID, np.ndarray], List[MeasurementWindow]]]:\n waveforms, measurements, total_time = iter_instruction_block(sequence, render_measurements)\n if not waveforms:\n return np.empty(0), dict()\n\n channels = waveforms[0].defined_channels\n\n # add one sample to see the end of the waveform\n sample_count = total_time * sample_rate + 1\n if not float(sample_count).is_integer():\n warnings.warn('Sample count not whole number. Casted to integer.')\n times = np.linspace(0, total_time, num=sample_count, dtype=float)\n # move the last sample inside the waveform\n times[-1] = np.nextafter(times[-1], times[-2])\n\n voltages = dict((ch, np.empty(len(times))) for ch in channels)\n offset = 0\n for waveform in waveforms:\n wf_end = offset + waveform.duration\n indices = slice(*np.searchsorted(times, (offset, wf_end)))\n sample_times = times[indices] - float(offset)\n for channel in channels:\n output_array = voltages[channel][indices]\n waveform.get_sampled(channel=channel,\n sample_times=sample_times,\n output_array=output_array)\n offset = wf_end\n if render_measurements:\n return times, voltages, measurements\n else:\n return times, voltages", "def set_voltage_data(self):\n self.voltage_record = {}\n self.threshold_value = {}\n for l in self.network.layers:\n if 'v' in self.network.layers[l].__dict__:\n self.voltage_record[l] = self.network.monitors['{:}_voltages'.format(l)].get('v')\n if 'thresh' in self.network.layers[l].__dict__:\n self.threshold_value[l] = self.network.layers[l].thresh", "def str_voltages(self, key, bypass_voltage):\r\n # If we already have the key, we're done\r\n if key in self.string_keys:\r\n return self.string_keys[key]\r\n model = key[0] # unpack the key\r\n pattern = key[1:]\r\n index = len(self.string_voltages)\r\n self.string_keys[key] = index\r\n # compute the combined voltage array\r\n try:\r\n cindex, multiple = pattern[0]\r\n svoltages = self.cell_voltages[cindex] * multiple\r\n for cindex, multiple in pattern[1:]:\r\n svoltages += self.cell_voltages[cindex] * multiple\r\n except:\r\n svoltages = self.cell_voltages[pattern[0]] * pattern[1]\r\n\r\n if bypass_voltage > 0:\r\n bypassed = svoltages < -bypass_voltage\r\n svoltages[bypassed] = -bypass_voltage\r\n self.string_voltages.append({\r\n 'voltages': svoltages,\r\n 'bypass': bypassed,\r\n })\r\n else:\r\n self.string_voltages.append({\r\n 'voltages': svoltages,\r\n 'bypass': None,\r\n })\r\n logger.debug(f'[{index:04d}] SV {pattern}')\r\n return index", "def voltage_conversion(self):\r\n\t\tvoltage = ((self.data[0] * 256 + self.data[1]) / 65536.0) * 5.0\r\n\t\t\r\n\t\treturn {'v' : voltage}", "def power2Sv(power_data_dict,cal_params):\n # set params\n tvgCorrectionFactor = 2.0 # default is to apply TVG correction with offset of 2\n\n # # transpose and flip power_data_dict\n # for channel in 
power_data_dict:\n # # Transpose array data so we have time on the x-axis and depth on the y-axis\n # power_data_dict[channel] = power_data_dict[channel].transpose()\n # # reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom)\n # power_data_dict[channel] = power_data_dict[channel][::-1]\n\n # Step through each frequency\n Sv = {}\n for n in range(len(power_data_dict)):\n # extract cal params\n f = cal_params[n]['frequency']\n c = cal_params[n]['soundvelocity']\n t = cal_params[n]['sampleinterval']\n alpha = cal_params[n]['absorptioncoefficient']\n G = cal_params[n]['gain']\n phi = cal_params[n]['equivalentbeamangle']\n pt = cal_params[n]['transmitpower']\n tau = cal_params[n]['pulselength']\n\n dR = c*t/2 # sample thickness\n wvlen = c/f # wavelength\n\n # Calc gains\n CSv = 10 * np.log10((pt * (10**(G/10))**2 * wvlen**2 * c * tau * 10**(phi/10)) / (32 * np.pi**2))\n CSp = 10 * np.log10((pt * (10**(G/10))**2 * wvlen**2) / (16 * np.pi**2))\n\n # calculate Sa Correction\n idx = [i for i,dd in enumerate(cal_params[n]['pulselengthtable']) if dd==tau]\n Sac = 2 * cal_params[n]['sacorrectiontable'][idx]\n\n # determine number of samples in array\n pSize = power_data_dict[n+1].shape # size(data.pings(n).power);\n\n # create range vector (in m)\n range_vec = np.arange(pSize[0]) * dR\n # data.pings(n).range = double((0:pSize(1) - 1) + ...\n # double(data.pings(n).samplerange(1)) - 1)' * dR;\n\n # apply TVG Range correction\n rangeCorrected = range_vec - (tvgCorrectionFactor * dR)\n rangeCorrected[rangeCorrected<0] = 0\n\n # get TVG\n TVG = np.empty(rangeCorrected.shape)\n TVG[rangeCorrected!=0] = np.real( 20*np.log10(rangeCorrected[rangeCorrected!=0]) ) # TVG = real(20 * log10(rangeCorrected));\n TVG[rangeCorrected==0] = 0\n\n Sv[n+1] = (power_data_dict[n+1].T \\\n +TVG +2*cal_params[n]['absorptioncoefficient']*rangeCorrected\\\n -CSv -Sac).T\n Sv[n+1] = Sv[n+1][::-1]\n\n # Sv[n+1] = power_data_dict[n+1][::-1] + np.transpose(np.matlib.repmat(TVG,pSize[1],1)) +\\\n # 2*cal_params[n]['absorptioncoefficient']*np.transpose(np.matlib.repmat(rangeCorrected,pSize[1],1)) - CSv - Sac\n # data.pings(n).Sv = data.pings(n).power + ...\n # repmat(TVG, 1, pSize(2)) + (2 * alpha * ...\n # repmat(rangeCorrected, 1, pSize(2))) - CSv - Sac;\n\n return Sv", "def get_voltage(self):\n result=self.asker('OD')\n if result[0] not in ('N', 'E'):\n header=0\n else:\n header=1\n if result[0]=='E':\n overload=True\n else:\n overload=False\n mode='V'\n if header==1:\n mode=result[3]\n result=result[4:]\n voltage=float(result)\n pt_idx=result.find('.')\n if result[-4:-2]=='-3':\n if pt_idx==3:\n V_range=2 #10 mV\n else:\n V_range=3 #100 mV\n else:\n if pt_idx==2:\n V_range=4 #1 V \n elif pt_idx==3:\n V_range=5 #10 V\n else:\n V_range=6 #30 V\n return dict(voltage=voltage, header=header, overload=overload, mode=mode, V_range=V_range)", "def get_voltage_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? (.*?) .*? .*? .*? 
\\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating", "def ProbeVoltage(self, channel, multiplier):\n self.write(\"ch{}:probe {}\".format(channel, multiplier))", "def get_ratio_metrics(\n ratio_metric_specs: Dict[iter8id, RatioMetricSpec], \n counter_metric_specs: Dict[iter8id, CounterMetricSpec], \n counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]], \n versions: Iterable[Version],\n start_time: datetime) -> Dict[iter8id, Dict[iter8id, RatioDataPoint]]:\n rmd = {version.id: {} for version in versions} # initialize rmd\n\n # populate rmd\n for ratio_metric_spec in ratio_metric_specs.values():\n query_spec = RatioQuerySpec(\n version_label_keys = versions[0].version_labels.keys(),\n numerator_template = counter_metric_specs[ratio_metric_spec.numerator].query_template,\n denominator_template = counter_metric_specs[ratio_metric_spec.denominator].query_template,\n start_time = start_time\n )\n prmq = PrometheusRatioMetricQuery(query_spec, versions)\n current_time = datetime.now(timezone.utc)\n rmd_from_prom = prmq.query_from_spec(current_time)\n\n for version in versions:\n if version.id in rmd_from_prom:\n rmd[version.id][ratio_metric_spec.id] = rmd_from_prom[version.id]\n else:\n if version.id in counter_metrics and counter_metrics[version.id][ratio_metric_spec.denominator].value:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = 0,\n timestamp = current_time,\n status = StatusEnum.zeroed_ratio\n )\n else:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = None,\n timestamp = current_time,\n status = StatusEnum.absent_version_in_prom_response\n )\n \"\"\"if a version cannot be found in the list of ratio metrics returned by prometheus, then the value of the ratio is set to zero if denominator is non-zero, and is set to None otherwise.\n \"\"\"\n\n return rmd", "def render(sequence: InstructionSequence, sample_rate: int=10) -> Tuple[np.ndarray, Dict[ChannelID, np.ndarray]]:\n if not all(isinstance(x, (EXECInstruction, STOPInstruction, REPJInstruction)) for x in sequence):\n raise NotImplementedError('Can only plot waveforms without branching so far.')\n\n def get_waveform_generator(instruction_block):\n for instruction in instruction_block:\n if isinstance(instruction, EXECInstruction):\n yield instruction.waveform\n elif isinstance(instruction, REPJInstruction):\n for _ in range(instruction.count):\n yield from get_waveform_generator(instruction.target.block[instruction.target.offset:])\n else:\n return\n\n waveforms = [wf for wf in get_waveform_generator(sequence)]\n if not waveforms:\n return [], []\n\n total_time = sum(waveform.duration for waveform in waveforms)\n\n channels = waveforms[0].defined_channels\n\n # add one sample to see the end of the waveform\n sample_count = total_time * sample_rate + 1\n times = np.linspace(0, total_time, num=sample_count)\n # move the last sample inside the waveform\n times[-1] = np.nextafter(times[-1], times[-2])\n\n voltages = dict((ch, np.empty(len(times))) for ch in channels)\n offsets = {ch: 0 for ch in channels}\n for waveform in waveforms:\n for channel in channels:\n offset = offsets[channel]\n indices = slice(*np.searchsorted(times, (offset, offset+waveform.duration)))\n sample_times = times[indices] - offset\n output_array = voltages[channel][indices]\n waveform.get_sampled(channel=channel,\n sample_times=sample_times,\n output_array=output_array)\n offsets[channel] += waveform.duration\n return times, voltages", "def test_get_voltage_maps(self):\n pass", "def 
test_bus_voltage(test_bus):\n assert 0.99 == pytest.approx(test_bus.u, 0.01)", "def populate_metric_values(self):\n self.new_counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]] = get_counter_metrics(\n self.counter_metric_specs, \n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n \n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_counter_metrics(self.new_counter_metrics[detailed_version.id])\n\n self.aggregated_counter_metrics = self.get_aggregated_counter_metrics()\n\n self.new_ratio_metrics: Dict[iter8id, Dict[iter8id, RatioDataPoint]] = get_ratio_metrics(\n self.ratio_metric_specs, \n self.counter_metric_specs, \n self.aggregated_counter_metrics,\n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n\n # This is in the shape of a Dict[str, RatioMaxMin], where the keys are ratio metric ids\n # and values are their max mins. \n\n self.ratio_max_mins = self.get_ratio_max_mins()\n\n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_ratio_metrics(\n self.new_ratio_metrics[detailed_version.id]\n )", "def addRateParams(spec, data_card, channels, modifiers):\n measurements = [\n measurement[\"config\"][\"poi\"] for measurement in spec[\"measurements\"]\n ]\n signal_mods = [modifier[0] for modifier in modifiers if modifier[0] in measurements]\n\n for idxc, channel in enumerate(channels):\n for idxs, sample in enumerate((spec[\"channels\"][idxc][\"samples\"])):\n is_signal = any(mod[\"name\"] in signal_mods for mod in sample[\"modifiers\"])\n if not is_signal:\n for mod in spec[\"channels\"][idxc][\"samples\"][idxs][\"modifiers\"]:\n # normfactor or shapefactor\n if \"normfactor\" in mod[\"type\"] or \"shapefactor\" in mod[\"type\"]:\n for measurement in spec[\"measurements\"]:\n for param in measurement[\"config\"][\"parameters\"]:\n data_card.rateParams.update(\n {f\"{channel}AND\" + sample[\"name\"]: []}\n )\n if mod[\"name\"] == param[\"name\"]:\n data_card.rateParams[\n f\"{channel}AND\" + sample[\"name\"]\n ].append([[mod[\"name\"], 1, 0, param[\"bounds\"]], \"\"])\n else:\n data_card.rateParams[\n f\"{channel}AND\" + sample[\"name\"]\n ].append([[mod[\"name\"], 1, 0], \"\"])", "def __init__(self, sample_rate):", "def increaseFreq(self, desHz):\n from scipy.interpolate import interp1d\n import time\n from numpy import linspace, floor\n from decimal import getcontext, Decimal\n\n if desHz > 1000: # set max freq here \n raise ValueError('Max Frequency is 1000 (3 decimal places)')\n now = time.asctime(time.localtime(time.time())) \n stamp = ''.join(['%% The following created by alog_manip.MOOSalog.MOOSalog.increaseFreq\\n%% ', now])\n increase_msg = ''.join(['%% Resultant Frequency: ',str(desHz),' Hz'])\n # hiHz = {}\n self.outData = {} # erase pre-existing dict\n self.outData['header'] = [stamp,increase_msg,'%%%%'] + self.srcData['header']\n\n def create_msgs():\n \"\"\" Puts interpolated data into dict outData\n Primary interpolation function for increaseFreq\n Consider using uniaxial spline --> would have one function for all of dictionary dat\n \"\"\"\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n 
else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n self.outData[sens][meas][rounded_times[m]] = new_values[m]\n\n ## go thru and pull out dictionaries {time: value} then send to interpolation func\n for sens in self.srcData:\n if sens is not 'header':\n self.outData[sens] = {}\n for meas in self.srcData[sens]:\n self.outData[sens][meas] = {}\n dat = self.srcData[sens][meas]\n if len(dat) == 1:\n self.outData[sens][meas] = dat # only 1 data point, no interp\n else:\n create_msgs()", "def createMultipleResonanceMeasurement(self,resonances,rawMeasurement):\n \n keywds = {}\n \n for (attrName,keywd) in (('error','valueError'),):\n if hasattr(rawMeasurement,keywd):\n keywds[attrName] = getattr(rawMeasurement,keywd)\n\n self.convertCount[self.mainCode][1][self.valueCode] += 1\n\n if not hasattr(rawMeasurement,'value') or rawMeasurement.value == None:\n \n \"\"\"\n TODO: below bit has to become measurement dependent function!!\n \"\"\"\n if hasattr(rawMeasurement,'valueMin') and rawMeasurement.valueMin != None:\n minValue = rawMeasurement.valueMin\n else:\n # TODO this always valid?\n minValue = 0.0\n\n if hasattr(rawMeasurement,'valueMax'):\n maxValue = rawMeasurement.valueMax\n \n if maxValue == None and minValue:\n maxValue = minValue + 4.0 # This is not so great! Need some kind of default error?\n \n value = (minValue + maxValue) / 2.0\n keywds['error'] = maxValue - value\n \n else:\n # Meaningless\n value = None\n \n else:\n value = rawMeasurement.value\n \n return self.ApiMeasurement(self.measurementList, value = value, resonances = resonances, **keywds)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given rows of normal vectors to line L, return points (rows) that are somewhere on each line. Just find intersection with some basis line.
def points_on_lines(hyperplanes):
    intersections = []
    for row in hyperplanes:
        intersections.append(an_intersection(row[:-1], -row[-1]))
    return np.array(intersections)
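The helper `an_intersection` is not shown; the sketch below is an assumption about how it could behave (solve normal . x = rhs by moving along one basis direction), together with a worked call. The row format [a1, a2, b] for the line a1*x + a2*y + b = 0 is inferred from the code above.

import numpy as np

def an_intersection(normal, rhs):
    # Hypothetical stand-in: return some x with normal . x = rhs by moving
    # along the basis direction with the largest coefficient.
    i = int(np.argmax(np.abs(normal)))
    x = np.zeros(len(normal), dtype=float)
    x[i] = rhs / normal[i]
    return x

# rows are [a1, a2, b] for lines a1*x + a2*y + b = 0
hyperplanes = np.array([[1.0, 0.0, -2.0],   # the line x = 2
                        [0.0, 1.0, 3.0]])   # the line y = -3
print(points_on_lines(hyperplanes))         # [[ 2.  0.] [ 0. -3.]]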
[ "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def find_line_intersection(vm, iref, l1, l2):\n # grid between the points\n xi = np.sort(vm.x2x([l1[0] - vm.dx, l2[0] + vm.dx]))\n yi = np.sort(vm.y2y([l1[1] - vm.dx, l2[1] + vm.dy]))\n ix = vm.xrange2i(*xi)\n iy = vm.yrange2i(*yi)\n xx, yy = np.meshgrid(vm.x[ix], vm.y[iy], indexing='ij')\n ixx, iyy = np.meshgrid(ix, iy, indexing='ij')\n zz = vm.rf[iref][ixx, iyy]\n\n # search all polygons\n isect = None\n for i in range(len(ix) - 1):\n for j in range(len(iy) - 1):\n p1 = (xx[i, j], yy[i, j], zz[i, j])\n p2_0 = (xx[i + 1, j], yy[i + 1, j], zz[i + 1, j])\n p2_1 = (xx[i, j + 1], yy[i, j + 1], zz[i, j + 1])\n p3 = (xx[i + 1, j + 1], yy[i + 1, j + 1], zz[i + 1, j + 1])\n\n for p2 in [p2_0, p2_1]:\n x, y, z = line_plane_isect(p1, p2, p3, l1, l2)\n if point_in_poly(p1, p2, p3, x, y, z):\n return x, y, z\n \n return", "def lineIntersection(P1,D1,P2,D2):\n P1 = asarray(P1).reshape((-1,3)).astype(float64)\n D1 = asarray(D1).reshape((-1,3)).astype(float64)\n P2 = asarray(P2).reshape((-1,3)).astype(float64)\n D2 = asarray(D2).reshape((-1,3)).astype(float64)\n N = P1.shape[0]\n # a,b,c,d\n la,a = vectorNormalize(D1)\n lb,b = vectorNormalize(D2)\n c = (P2-P1)\n d = cross(a,b)\n ld,d = vectorNormalize(d)\n # sa,sb\n a = a.reshape((-1,1,3))\n b = b.reshape((-1,1,3))\n c = c.reshape((-1,1,3))\n d = d.reshape((-1,1,3))\n m1 = concatenate([c,b,d],axis=-2)\n m2 = concatenate([c,a,d],axis=-2)\n # This may still be optimized\n sa = zeros((N,1))\n sb = zeros((N,1))\n for i in range(P1.shape[0]):\n sa[i] = linalg.det(m1[i]) / ld[i]\n sb[i] = linalg.det(m2[i]) / ld[i]\n # X\n a = a.reshape((-1,3))\n b = b.reshape((-1,3))\n X = 0.5 * ( P1 + sa*a + P2 + sb*b )\n return Coords(X)", "def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]", "def line_line_intersection_3d(uvw_vectors, origins):\n C = []\n S = []\n for n, pos in zip(uvw_vectors, origins):\n n = n.reshape((3, 1))\n norm_matrix = (n @ n.T) - np.eye(3)\n C.append(norm_matrix @ pos)\n S.append(norm_matrix)\n\n S = np.array(S).sum(axis=0)\n C = np.array(C).sum(axis=0)\n return np.linalg.inv(S) @ C", "def parallelogram_vertices_from_grouped_lines(lines):\n if len(lines) > 2:\n raise Exception(\"parallelogram finder \\\n called with too many lines\")\n c_1 = lines[0]\n c_2 = lines[1]\n intercepts = None\n for l1, l2 in list(zip(c_1, c_2)) + list(zip(c_1, c_2[::-1])):\n x = solve_for_intersection(np.array([l1, l2]))\n if intercepts is None:\n intercepts = np.array([x])\n else:\n intercepts = np.vstack((intercepts, x))\n return intercepts", "def _cuda_line_ellipsoid_intersection(r0, step, semiaxes):\n a = semiaxes[0]\n b = semiaxes[1]\n c = semiaxes[2]\n A = (step[0] / a) ** 2 + (step[1] / b) ** 2 + (step[2] / c) ** 2\n B = 2 * (\n a ** (-2) * step[0] * r0[0]\n + b ** (-2) * step[1] * r0[1]\n + c ** (-2) * step[2] * r0[2]\n )\n C = (r0[0] / a) ** 2 + (r0[1] / b) ** 2 + (r0[2] / c) ** 2 - 1\n d = (-B + math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)\n return d", "def find_line_intersection(self, point, vector, 
Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def intersect_lines(self, p_beg, p_end):\n # generate all line direction vectors\n n = (p_end - p_beg) / np.linalg.norm(p_end - p_beg, axis=1)[:, np.newaxis] # normalized\n\n # generate the array of all projectors\n projectors = np.eye(n.shape[1]) - n[:, :, np.newaxis] * n[:, np.newaxis] # I - n*n.T\n # see fig. 1\n\n # generate R matrix and q vector\n R = projectors.sum(axis=0)\n q = (np.dot(projectors, p_beg[:, :, np.newaxis])).sum(axis=0)\n # q = (projs @ P0[:, :, np.newaxis]).sum(axis=0) # python 3\n\n # solve the least squares problem for the\n # intersection point p: Rp = q\n intersection = np.linalg.lstsq(R, q, rcond=None)[0]\n return intersection", "def intersection(line1, line2):\r\n rho1, theta1 = line1\r\n rho2, theta2 = line2\r\n A = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.lstsq(A, b)[0]\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [x0, y0]", "def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]", "def lineLineIntersection(p0, p1, p2, p3):\n A1 = p1[1] - p0[1]\n B1 = p0[0] - p1[0]\n C1 = A1*p0[0] + B1*p0[1]\n\n A2 = p3[1] - p2[1]\n B2 = p2[0] - p3[0]\n C2 = A2 * p2[0] + B2 * p2[1]\n\n det = A1*B2 - A2*B1\n if det == 0:\n return p0\n return [(B2*C1 - B1*C2)/det, (A1 * C2 - A2 * C1) / det]", "def find_all_intersections(lines):\n intersections = []\n for i, line_1 in enumerate(lines):\n for line_2 in lines[i + 1:]:\n if not line_1 == line_2:\n intersection = find_intersection_point(line_1, line_2)\n if intersection:\n intersections.append(intersection)\n\n return intersections", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def 
intersectionPointsLWL(q1,m1,q2,m2,mode='all'):\n t1,t2 = intersectionTimesLWL(q1,m1,q2,m2,mode)\n if mode == 'all':\n q1 = q1[:,newaxis]\n m1 = m1[:,newaxis]\n return pointsAtLines(q1,m1,t1),pointsAtLines(q2,m2,t2)", "def _compute_lineline_intersection(line1_pt1, line1_pt2,\n line2_pt1, line2_pt2):\n (x1, y1) = line1_pt1\n (x2, y2) = line1_pt2\n (x3, y3) = line2_pt1\n (x4, y4) = line2_pt2\n\n # Check for parallel lines\n denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)\n\n if denominator == 0:\n return None\n\n p_x = ((x1*y2 - y1*x2) * (x3 - x4) - (x1 - x2) * (x3*y4 - y3*x4)) \\\n / denominator\n p_y = ((x1*y2 - y1*x2) * (y3 - y4) - (y1 - y2) * (x3*y4 - y3*x4)) \\\n / denominator\n return (p_x, p_y)", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def intersects_indices(single_line_intersects):\n\n return list(np.where(np.logical_not(np.isnan(single_line_intersects[..., 0])))[0])", "def lloyd_relaxation(points):\n center = np.array([0, 0, 0])\n radius = 1\n n_it = 100\n\n sv = None\n for i in range(n_it):\n sv = SphericalVoronoi(np.array(points), radius, center)\n points = []\n for region in sv.regions:\n polygon = sv.vertices[region]\n # pol_tup = [(vert[0], vert[1], vert[2]) for vert in polygon]\n centroid = get_centroid(polygon)\n points.append(centroid)\n return sv" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the scheduler to poll every five minutes and starts it
def _init_scheduler(self): self._sched = BackgroundScheduler() self._sched.add_job(self._check_rain, trigger='cron', minute='*/5') self._sched.start()
[ "def initialize_scheduler(self):\n scheduler = BackgroundScheduler()\n scheduler.add_job(self.do, 'interval', minutes=1)\n scheduler.start()\n self.do()", "def init_scheduler():\n sched = BackgroundScheduler(daemon=True)\n sched.add_job(populate_stats,\n 'interval',\n seconds=app_config['scheduler']['period_sec'])\n sched.start()", "def run(self):\r\n print(\"scheduler start\")\r\n self._create_tasks()\r\n # TODO detect changes in file and update tasks\r\n # schedule doc https://schedule.readthedocs.io/en/stable/faq.html#how-can-i-run-a-job-only-once\r\n\r\n while True:\r\n # print('.')\r\n schedule.run_pending()\r\n time.sleep(self.interval)", "def test_example_scheduler_(self):\n rospy.init_node(\"test_example_scheduler\")\n self.rqr = Requester(self.feedback)\n self.actions = collections.deque([self.step1, self.step2, self.step3])\n self.timer = rospy.Timer(rospy.Duration(2.0), self.periodic_update)\n rospy.spin()", "def run_cronjob(self):\n self.gpio_pins = self.lese_config()\n self.read_init_pins()\n self.sun = SunRise()\n for gpio in self.gpio_pins.keys():\n self.time_trigger(gpio)", "async def initialize_scheduler(_app, _loop): # pylint: disable=unused-variable\n ax_scheduler.init_scheduler()", "def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")", "def init():\n scheduler.start()\n scheduler.add_job(\n func=refresh_data,\n trigger=IntervalTrigger(days=1),\n id=\"refresh_data_job\",\n name=\"pull data from course academic timetable\",\n replace_existing=True,\n )\n atexit.register(lambda: scheduler.shutdown())\n # delay launching the app until there is some data available\n refresh_data()", "def AutonomousPeriodic(self):\n Scheduler.GetInstance().Run()", "def setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(60, scheduled_task.s(), name='A scheduled task')", "def start(self) -> None:\n self.bus.subscribe(\"cache:ready\", self.revive)\n self.bus.subscribe(\"scheduler:add\", self.add)\n self.bus.subscribe(\"scheduler:persist\", self.persist)\n self.bus.subscribe(\"scheduler:remove\", self.remove)\n self.bus.subscribe(\"scheduler:upcoming\", self.upcoming)\n self.scheduler = sched.scheduler(time.time, time.sleep)\n cherrypy.process.plugins.Monitor.start(self)", "def scheduler():\n while True:\n date = datetime.datetime.now()\n hour = date.hour\n if S.this_hour is None:\n logging.debug(\"First time you use scheduler?\")\n logging.debug(\"Saving the 'this_hour' variable.\")\n S.save(\"this_hour\", 0)\n S.save(\"last_sched_run\", date)\n\n # working hours, between 07:00 and 10:00\n if hour >= 5 and hour < 10:\n # sleep time: random period between 45 sec and 1 min\n sleep_time = random.randrange(45, 60)\n # working hours, between 10:00 and 18:00\n elif hour >= 10 and hour < 18:\n # sleep time: random period between 1 - 3 minutes\n sleep_time = random.randrange(60, 180)\n else:\n # sleep time: random period between 30 - 60 minutes\n sleep_time = random.randrange(1800, 3600)\n logging.debug(\"This hour: %s\", S.this_hour)\n\n if S.last_sched_run.hour != hour:\n logging.debug(\"New hour. 
Resetting hourly run counter.\")\n S.save(\"this_hour\", 0)\n else:\n # this is not the first time we run this hour,\n # check that max_runs_per hour is not reached\n if int(S.this_hour) < max_runs_per_hour:\n auto()\n logging.debug(\"Incrementing hourly run counter.\")\n S.save(\"this_hour\", S.this_hour + 1)\n else:\n logging.debug(\"Max runs per hour reached\")\n S.save(\"last_sched_run\", date)\n\n logging.info(\"Sleeping for %s seconds\", sleep_time)\n time.sleep(sleep_time)", "def schedule(self):\n\n task_scheduler.schedule_task(self)", "def __init__(self):\n self.jobs = {}\n self.scheduler = BackgroundScheduler(timezone=settings.TIME_ZONE)\n self.scheduler.start()", "def start(self) -> None:\n self._logger.info('Starting scheduler')\n self.pool.start()", "def start_scheduler():\n from security_monkey import scheduler\n scheduler.setup_scheduler()\n scheduler.scheduler.start()", "def scheduler(self):\n while True:\n if self.sch.empty():\n self.log.info(\"No scheduled jobs detected. Entering idle state\")\n bits = bitarray()\n # generate random 7B bitarrays\n for _ in range(pow(self.cube_dim,3)):\n bits.append(bool(random.getrandbits(1)))\n self.sch.enter(self.transmit_freq, 4, self.transmit, argument=(0, bits), kwargs={})\n else:\n try:\n self.log.info(\"Scheduled jobs detected. Serving through scheduler runner\")\n self.sch.run()\n except IOError as exc:\n self.log.exception(\"\"\"Scheduler runner encountered an error while executing the \n top level event: %s\"\"\", exc)\n sys.exit(1) # exit with status code 1", "async def start_periodically_refresh_appointments(): # pylint: disable=invalid-name\n await asyncio.sleep(60)\n await app[\"snct_scrapper\"].refresh_appointments_every_minutes()", "def scheduler_loop(self):\n while True:\n self.push_scheduler.run()\n time.sleep(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the maximum amount of rain between now and now+minute. Remote procedure to be called by the core of Domos
def rain_max(self, key=None, name=None, lat=None, lon=None, minute=0): self.logger.info("added sensor for rain max %s : %s for %s minutes" % (lat, lon, minute)) if key and lat and lon and minute: try: minute = int(minute) except: return False new_rain = Rain(key, lat, lon, minute, self._max) self._rain.append(new_rain) return True else: return False
[ "async def rain_rate(self, value):\n if not value:\n return 0\n return await self.rain(value * 60)", "def max_humidity(self):\n return 60", "def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max", "def evaluate(self, time) -> float:\n ...", "def ontime_max(self):\n\n return min(self._settings['ontime']['maximum'], self.pwm_period - self.offtime_minimum)", "def minutes(self) -> pli.Series:", "def wall_time(self):", "def kernel_max(self, time_start, time_end):\n pass", "def normalized_total_time(p, max_time=3600000):\n if \"cdgp.wasTimeout\" in p and p[\"cdgp.wasTimeout\"] == \"true\":\n v = 3600000\n else:\n v = int(float(p[\"result.totalTimeSystem\"]))\n return max_time if v > max_time else v", "def remaining_visnode_walltime(launcherMainFrame):\n\n try:\n\n job_id = int(launcherMainFrame.loginThread.massiveJobNumber)\n\n if job_has_been_canceled(sshCmd.format(username=launcherMainFrame.massiveUsername,host=launcherMainFrame.massiveLoginHost), launcherMainFrame,job_id):\n return\n else:\n return seconds_to_hours_minutes(float(run_ssh_command(sshCmd.format(username=launcherMainFrame.massiveUsername,host=launcherMainFrame.massiveLoginHost), 'qstat -f %d | grep Remaining' % (job_id,), launcherMainFrame,ignore_errors=True)[0].split()[-1]))\n except:\n return", "def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"traffic_restoration_time_to_healed_or_new_endpoints_in_minutes\")", "def event_based_r_factor(self):\n # assign variables\n rain_energy = 'rain_energy'\n rain_volume = 'rain_volume'\n erosivity = 'erosivity'\n r_factor = 'r_factor'\n\n # derive rainfall energy (MJ ha^-1 mm^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_energy}\"\n \"=0.29*(1.-(0.72*exp(-0.05*{rain_intensity})))\".format(\n rain_energy=rain_energy,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive rainfall volume\n \"\"\"\n rainfall volume (mm)\n = rainfall intensity (mm/hr)\n * (rainfall interval (min)\n * (1 hr / 60 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_volume}\"\n \"= {rain_intensity}\"\n \"*({rain_interval}\"\n \"/60.)\".format(\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive event erosivity index (MJ mm ha^-1 hr^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosivity}\"\n \"=({rain_energy}\"\n \"*{rain_volume})\"\n \"*{rain_intensity}\"\n \"*1.\".format(\n erosivity=erosivity,\n rain_energy=rain_energy,\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive R factor (MJ mm ha^-1 hr^-1 yr^1)\n \"\"\"\n R factor (MJ mm ha^-1 hr^-1 yr^1)\n = EI (MJ mm ha^-1 hr^-1)\n / (rainfall interval (min)\n * (1 yr / 525600 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{r_factor}\"\n \"={erosivity}\"\n \"/({rain_interval}\"\n \"/525600.)\".format(\n r_factor=r_factor,\n erosivity=erosivity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_energy',\n 'rain_volume',\n 'erosivity'],\n 
flags='f')\n\n return r_factor", "def nextMinutes(self):\n self.connection.ping(reconnect=True) \n cursor = self.connection.cursor()\n cursor.execute(\"select minutesDuty_order from members where onDuty_minutes = TRUE\")\n result = cursor.fetchone()\n order = int(result[0])\n\n cursor.execute(\"select count(*) from members where minutesDuty_order is not null\")\n rs = cursor.fetchone()\n mem = int(rs[0])\n try:\n cursor.execute(\"update members set onDuty_minutes = FALSE where minutesDuty_order = '%s'\" % order)\n cursor.execute(\"update members set onDuty_minutes= TRUE where minutesDuty_order = '%s'\" % mod(order + 1, mem))\n connection.commit()\n return presentMinutes()\n except Exception as e:\n connection.rollback()\n #logs.logException(e)\n finally:\n cursor.close()", "def evaluate(self, alarm):", "def exit_time(position, full_scint_dimensions):\r\n light = random_light_direction()\r\n time_accumulated = 1.4e-9\r\n num = 0\r\n while(position[2]>0):\r\n time = first_border_time(position, light, full_scint_dimensions)\r\n time_accumulated+=time\r\n position = position+time*light\r\n light = reflect(position,light,full_scint_dimensions)\r\n num+=1\r\n if num >100:\r\n return -1\r\n return time_accumulated", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 
'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n 
increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def getMaxTime(self):\r\n maxTime, nonSlip, onlyStaticComps = self.routine.getMaxTime()\r\n if onlyStaticComps:\r\n maxTime= maxTime+0.5\r\n return maxTime", "def getEndMomentum(self) -> int:\n ...", "def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"traffic_restoration_time_to_healed_or_new_endpoints_in_minutes\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all the session names for a participant
def filter_by_participant (self, participant): sparql_results = self.query (""" select distinct ?rs ?session ?name ?number ?pid ?sitename where { BIND (<%s> AS ?participant) ?rs rdf:type austalk:RecordedSession . ?rs olac:speaker ?participant . ?participant austalk:id ?pid . ?participant austalk:recording_site ?site . ?site rdfs:label ?sitename . ?rs austalk:prototype ?session . ?session austalk:name ?name . ?session austalk:id ?number . } ORDER BY ?name""" % participant.identifier) results = [] for result in sparql_results["results"]["bindings"]: results.append (Session ( client = self.client, identifier = result["rs"]["value"], prototype = result["session"]["value"], name = result["name"]["value"], number = result["number"]["value"], site = result["sitename"]["value"], participantId = result["pid"]["value"])) return results
[ "def filtered_session_names(self):\n return list(self.stage.filtered_sessions.keys())", "def search_sessions(name: str, provider: Optional[str] = None) -> List[str]:\n sessions = session_list(provider=provider).sessions\n name = name.lower()\n return [s.id for s in sessions if s.id.lower().startswith(name)]", "def teammates_player_names(self):\n return [p.name for p in self.teammates]", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names", "def get_participants(self, tag):\n\n parsed_strings = tag.find(class_=\"table-participant\").text.split(\" - \")\n participants = []\n participants.append(parsed_strings[0])\n participants.append(parsed_strings[-1])\n return participants", "def getSessions(self, transaction):\n msg = transaction.asyncRead()\n if \"pid\" in msg:\n ret = []\n uids = self.findSessionIDsByProjectId(msg[\"pid\"])\n for uid in uids:\n session = self.findSessionBySessionUid(uid)\n name = session.directory\n name = os.path.basename(name)\n if session.isConnected:\n name = \"(connected) \" + name\n state = session.state\n if state is State.FINISHED:\n name += \" [FINISHED]\"\n if state is State.WAITING:\n name += \" [READY]\"\n if state is State.RUNNING:\n name += \" [RUNNING]\"\n if state is State.PAUSED:\n name += \" [PAUSED]\"\n if state is State.FAILED:\n name += \" [FAILED]\"\n ret.append((uid, name))\n ret.sort(key=lambda s: s[1], reverse=True)\n msg[\"sessions\"] = ret\n msg[\"status\"] = True\n logging.debug(\"Found %s Session/s\", str(len(ret)))\n else:\n msg[\"status\"] = False\n msg[\"error\"] = [\"GetSessions: No PID provided\"]\n logging.error(\"Sessions not found. No PID provided.\")\n transaction.send(msg)", "def get_session_names(self, directory):\n\t\tfileList = []\n\t\tfor f in os.listdir(directory):\n\t\t\tif f.endswith(\".pid\"):\n\t\t\t\tsession_name = f.replace(\".pid\", \"\")\n\t\t\t\tfileList.append(session_name)\n\t\treturn fileList", "def get_session_names(self):\n serializer = Serializer.get_serializer(\"json\")\n state_file = os.path.join(self.model_dir,\n \"{}_state.{}\".format(self.model_name, serializer.ext))\n with open(state_file, \"rb\") as inp:\n state = serializer.unmarshal(inp.read().decode(\"utf-8\"))\n session_names = [\"session_{}\".format(key)\n for key in state[\"sessions\"].keys()]\n logger.debug(\"Session to restore: %s\", session_names)\n return session_names", "def sessions(self, event_id=None):\n return [1, 2, 3]", "def get_participants(summoner_name):\n summoner_id = get_summoner_id(summoner_name)\n r = requests.get(API_URL + \"/lol/spectator/v4/active-games/by-summoner/\" + summoner_id, headers=REQUEST_HEADERS)\n if r.status_code != 200:\n return []\n else:\n data = r.json()\n return [p[\"summonerName\"] for p in data[\"participants\"]]", "def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match", "def sessions(event_id):\n return [1, 2, 3]", "def list_sessions(self):\n return self.sessions.list_sessions()", "def get_speaker_sessions(self, request):\n return self.session_service.get_speaker_sessions(\n request.websafeSpeakerKey)", "def get_participants(client, chat):\n participants = {}\n for p in client.iter_participants(chat):\n participants[p.id] = p\n if p.first_name == \"Bernat\":\n # Bernat switched phone numbers, so let's hack the old one in\n participants[2390325] = p\n return participants", "def participants(self):\n return 
self.__get_entities(\"participant\")", "def _getSessionsBySpeaker(self, request):\n # Ensure that the speaker key is valid and that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Return all of the speaker's sessions\n return ndb.get_multi(speaker.sessions)", "def get(self):\n all_players = _get_all_players()\n return [p.name for p in all_players]", "def speaker_list(self):\n return \", \".join(str(speaker.person) for speaker in self.speakers.all())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all the session names for a site identified by site label
def filter_by_site (self, label): sparql_results = self.query (""" select distinct ?rs ?session ?name ?number ?pid WHERE { ?rs rdf:type austalk:RecordedSession . ?rs olac:speaker ?participant . ?participant austalk:id ?pid . ?participant austalk:recording_site ?site . ?site rdfs:label "%s" . ?rs austalk:prototype ?session . ?session austalk:name ?name . ?session austalk:id ?number . } ORDER BY ?name""" % label) results = [] for result in sparql_results["results"]["bindings"]: results.append (Session ( client = self.client, identifier = result["rs"]["value"], prototype = result["session"]["value"], name = result["name"]["value"], number = result["number"]["value"], # site = result["sitename"]["value"], participantId = result["pid"]["value"])) return results
[ "def siteNames(self):\n sqlStr = \"\"\" SELECT site_name FROM rc_site;\"\"\"\n Session.execute(sqlStr)\n results = Session.fetchall()\n result = [ x[0] for x in results]\n return result", "def filtered_session_names(self):\n return list(self.stage.filtered_sessions.keys())", "def search_sessions(name: str, provider: Optional[str] = None) -> List[str]:\n sessions = session_list(provider=provider).sessions\n name = name.lower()\n return [s.id for s in sessions if s.id.lower().startswith(name)]", "def sessions(self):\n return utils.listItems(self, '/status/sessions')", "def get_session_names(self):\n serializer = Serializer.get_serializer(\"json\")\n state_file = os.path.join(self.model_dir,\n \"{}_state.{}\".format(self.model_name, serializer.ext))\n with open(state_file, \"rb\") as inp:\n state = serializer.unmarshal(inp.read().decode(\"utf-8\"))\n session_names = [\"session_{}\".format(key)\n for key in state[\"sessions\"].keys()]\n logger.debug(\"Session to restore: %s\", session_names)\n return session_names", "def list_sessions(self):\n return self.store.list()", "def get_sessions_list(xnat, project_id, slocal):\n list_sessions = XnatUtils.list_sessions(xnat, project_id)\n if slocal and slocal.lower() != 'all':\n #filter the list and keep the match between both list:\n list_sessions = filter(lambda x: x['label'] in slocal.split(','), list_sessions)\n if not list_sessions:\n LOGGER.warn('No session from XNAT matched the sessions given: '+slocal+' .')\n\n #Sort sessions: first the new sessions that have never been updated\n sorted_list = [sess for sess in list_sessions if not sess['last_updated']]\n new_sessions_label = [sess['label'] for sess in sorted_list]\n for session in list_sessions:\n if not session['label'] in new_sessions_label:\n sorted_list.append(session)\n\n return sorted_list", "def get_session_list(key):\n return session.get(key, [])", "def list_sessions(self):\n return self.sessions.list_sessions()", "def session_list_lgr(request):\n known_lgrs = request.session.get(LGRS_SESSION_KEY, {})\n return sorted(known_lgrs.values(), key=lambda x: x['name'])", "def sessions(event_id):\n return [1, 2, 3]", "def subworld_names(self, protocol=None):\n return self.m_subworld_counts.keys()", "def get_site_names(self) -> List[str]:\n return list(\n self.project_config_with_variables_substituted.data_docs_sites.keys()\n )", "def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match", "def list_sites():\n result = []\n querystring = 'select sitename from {};'.format(TABLES[0]))\n res = execute_query(querystring)\n if res:\n result = [x[0] for x in res]\n return result", "def _get_usernamesFromMyInst(self):\n institutionid = self._get_userInstitutionId(session.user.username)\n return self._get_usernamesByInst(institutionid)", "def describe_sessions(StackName=None, FleetName=None, UserId=None, NextToken=None, Limit=None, AuthenticationType=None):\n pass", "def session_list( context ):\n return context", "def get_site_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_site_names')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete one or more keys specified by ``keys``
async def delete(self, *keys, **kwargs): def gen_keys(keys): all_keys = [] for key in keys: if isinstance(key, list): all_keys += gen_keys(keys=key) else: all_keys.append(key) return all_keys all_keys = gen_keys(keys) for key in all_keys: await self._client_conn.hdel(key=self.name, field=key)
[ "def delete_many(self, keys):\n raise NotImplementedError()", "def delete_multi(self, keys):\n return self.run(\n lambda txn: {key: txn.delete(key) for key in keys},\n write=True)", "def delete_many(self, keys):\n return self.delete_many_values(keys)", "def delete_keys(self, keys: boto.s3.key.Key) -> boto.s3.multidelete.MultiDeleteResult:\n if self.dry_run:\n keys = []\n\n return self.get_connection().delete_keys(keys)", "def removekeys(self, *keys) -> None:\n for k in keys:\n for i in self.list:\n i.pop(k)", "def BatchDelete(self, keys):\n def _ShardDelete(shard, keys, vkeys, values):\n pipe = shard.pipeline()\n map(pipe.delete, vkeys)\n successes = pipe.execute()\n return Operation(success=True, response_value=successes)\n\n return self._ShardedOp([(key, None) for key in keys], _ShardDelete)", "def Delete(keys):\n keys, multiple = NormalizeAndTypeCheckKeys(keys)\n\n if multiple and not keys:\n return\n\n req = datastore_pb.DeleteRequest()\n req.key_list().extend([key._Key__reference for key in keys])\n\n tx = _MaybeSetupTransaction(req, keys)\n\n resp = datastore_pb.DeleteResponse()\n try:\n apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Delete', req, resp)\n except apiproxy_errors.ApplicationError, err:\n raise _ToDatastoreError(err)", "def remove(self, keys, *args):\n dict_util.remove(self, keys, *args)", "def delitems(self, keys):\n # convert generator to a set, because the content of the\n # tree will be modified!\n for key in frozenset(keys):\n self.remove(key)", "def deleteKey(self, *args):\n\n delKeys = args\n keys = self.keys()\n for key in delKeys:\n if key in keys:\n self.pop(key)\n\n for key in self.subDictKeys():\n self[key].deleteKey(*delKeys)", "def delete(self, *keys: KeyT) -> ResponseT:\n return self._split_command_across_slots(\"DEL\", *keys)", "def del_quiet(dic, *keys):\n for key in keys:\n try:\n del dic[key]\n except KeyError:\n pass", "def keepkeys(d, keys):\n\n ks = set(list(keys))\n to_rm = [k for k in d.keys() if k not in ks]\n for k in to_rm:\n del d[k]\n return d", "def batch_delete(self, keys=[]):\n\n\t\tpipe = self.cache.r.pipeline()\n\n\t\tfor key in keys:\n\t\t\tpipe.delete(key)\n\n\t\tdelete_statuses = pipe.execute()\n\n\t\tresult = {}\n\t\tfor i, delete_status in enumerate(delete_statuses):\n\t\t\tresult[keys[i]] = delete_status\n\n\t\treturn result", "def deletekeys(self, keys):\n with self.lock.acquire(): \n data = self.read()\n _old = {}\n for key in keys:\n try:\n _old[key] = data.pop(key)\n except KeyError:\n pass\n self.write(data)\n return _old", "def unchecked_del(obj, *keys):\n for k in keys:\n try:\n del obj[k]\n except KeyError:\n pass", "def delete_keys_from_dict(d, keys):\n if isinstance(d, dict):\n for field in d.keys():\n if field in keys:\n del d[field]\n elif isinstance(d[field], dict) or isinstance(d[field], list) or isinstance(d[field], set):\n delete_keys_from_dict(d[field], keys)\n elif isinstance(d, dict) or isinstance(d, list) or isinstance(d, set):\n for i in d:\n delete_keys_from_dict(i, keys)", "def del_seqs(self, keys):\n for j in range(len(keys)):\n del self._d_seqs[keys[j]]\n self._num_seqs = int(len(self._d_seqs))\n self._d_seqs = self._d_seqs\n self._seqs = list(self._d_seqs)", "def batch_delete(self, keys):\n start = 0\n while start < len(keys):\n res = self.oss.batch_delete_objects(\n self.bucket, keys[start: start + self.batch_keys_max])\n if not res:\n return False\n start += self.batch_keys_max\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates data_points random points between 0 and max_value and dumps them to filename. Crashes my MacBook Air at the moment; only runs on my Linux desktop 😅
def create_random_dataset( # Starting with 1 billion data points data_points: int = 1_000_000_000, # ms-accuracy timestamp over a month max_value: int = 1000 * 60 * 60 * 24 * 31, # default filename: str = os.path.join("data", "random_points.json"), ): print("Starting random dataset generation") random_points = [] for i in range(data_points): if i * 100 % data_points == 0: print(f"{int(i/data_points*100)}% done") random_points.append(random.randint(0, max_value)) print("Dumping data") with open(filename, "w+") as file: json.dump(random_points, file)
[ "def construct_data(model,num_pts,seed=None):\n num_dims = model.num_dims\n rv_trans = define_random_variable_transformation_hydromad(model)\n rng = numpy.random.RandomState( seed )\n pts = rng.uniform( -1., 1., ( num_dims, num_pts ) )\n pts = rv_trans.map_from_canonical_distributions( pts )\n vals = model.evaluate_set( pts )\n numpy.savetxt( 'pts.txt', pts, delimiter = ',' )\n numpy.savetxt( 'vals.txt', vals, delimiter = ',' )", "def genpoints(n, f, path):\n try:\n os.makedirs(path)\n except OSError:\n pass\n for i in range(f):\n out = open('{}/out_{}'.format(path,i), 'w')\n for j in range(n):\n x = random.random()\n y = random.random()\n out.write(str(x) + ',' + str(y) + '\\n')\n out.close()", "def generer(nombre, distance):\n with open(\"test/{}.pts\".format(nombre), \"w\") as file:\n file.write(\"{}\\n\".format(distance))\n for _ in range(nombre):\n point = random(), random()\n file.write(\"{}, {}\\n\".format(point[0], point[1]))\n file.close()", "def genAndSaveMoving1DMassData(saveName='movingPointMassData/testPointMassData000.pkl',Iterations=10):\n #How many iterations we want to include\n #Iterations = 10 # No more hard coding! This is now an input into the function.\n xmin = 0.0\n xmax = 20.0\n vmin = 1.0\n vmax = 5.0\n amin = 1.0\n amax = 2.0\n dt = .001\n tmax = 20.0\n dataOut = []\n for i in range(Iterations):\n funcOuts = movingPointMass1D.randomMassMovement(xmin, xmax, vmin, vmax, amin, amax, dt, tmax)\n dataOut.append(funcOuts)\n toSave = [dataOut, xmin, xmax, vmin, vmax, amin, amax, dt, tmax]\n outputFile = open(saveName, \"wb\")\n pickle.dump(toSave,outputFile)\n outputFile.close()", "def generate_points(num_points):\n for i in xrange(0, num_points):\n pass", "def generate_triangle(seed, num_points=200):\n points = {\n 0: 750,\n 750: 0,\n 1500: 751\n }\n random.seed(seed)\n while len(points) < num_points:\n y_coord = (random.randrange(500) or 1) + 200\n x_coord = random.randrange(round(y_coord*4/3)) + round((500 - y_coord)*(3/4)) + 400\n if (not points.get(x_coord)) and (x_coord != 750):\n points[x_coord] = y_coord\n\n os.makedirs(os.path.join(DATA_DIR, seed), exist_ok=True)\n filepath = os.path.join(DATA_DIR, '{}/triangle.node'.format(seed))\n\n # creates the input nodes used by triangle to create delauney graph\n with open(filepath, 'w') as node_file:\n header = \"{} 2 0 0\\n\".format(len(points))\n node_file.write(header)\n i = 1\n for x_coord, y_coord in points.items():\n node_file.write(\" {} {} {}\\n\".format(i, x_coord, y_coord))\n i += 1\n node_file.close()\n\n call(['triangle', '-e', filepath])", "def generate_data(node_num, x_max, y_max):\n file_name = \"data/data_{}.csv\".format(num_nodes)\n graph = make_graph(num_nodes, x_max, y_max)\n make_csv(graph, file_name)", "def write_points_file(path, x, y, z):\r\n out_data = []\r\n out_data.append(\"x y z temp\")\r\n for i, px in enumerate(x):\r\n py = y[i]\r\n pz = z[i]\r\n out_data.append(str(px) + \" \" + str(py) + \" \" + str(pz) + \" 1\")\r\n\r\n write_lines(path, out_data)", "def write_starting_points(self):\n num_params = self.f['/parameters/parameterNames'].shape[0]\n num_starting_points = 100\n np.random.seed(0)\n starting_points = self.f.require_dataset(\n '/optimizationOptions/randomStarts',\n [num_params, num_starting_points], 'f8')\n lower = self.f['/parameters/lowerBound'][:]\n upper = self.f['/parameters/upperBound'][:]\n starting_points[:] = np.transpose(\n np.random.rand(num_starting_points, num_params) * (\n upper - lower) + lower)\n\n if 'nominalValue' in self.parameter_df:\n 
self.f['/parameters/nominalValues'] = \\\n self.parameter_df.nominalValue[\n self.parameter_df.estimate == 1]", "def get_test_data(self):\n # self.data, target = make_blobs(n_samples=self.n_points, n_features=2, centers=self.n_points)\n # print(self.data)\n np.put(self.data, [self.n_points, 0], 500, mode='clip')\n np.put(self.data, [self.n_points, 1], 500, mode='clip')\n pyplot.scatter(self.data[:, 0], self.data[:, 1], c='blue')\n # 画图\n pyplot.show()", "def create_mockdata(output_path, electrode):\n\n uv_data = {}\n time_array = {}\n channelids = {}\n\n channels = list()\n channels.append(str(electrode))\n\n fullname = output_path + \"Mock_\" + str(electrode) + \".txt\"\n file = open(fullname, \"w+\")\n\n for electrode in channels:\n\n channelids[electrode] = list()\n\n channel_noise = noise() # Creates noise for the channel\n\n Threshold_noise = \"Noise threshold for\" + str(electrode) + \"=\" + str(channel_noise[2])\n\n file.write(Threshold_noise)\n\n spikes = exponential_spike_generator() # Randomly generated spike times\n\n time_array['mock_spiketimes'] = spikes\n\n Mock_electrode_spiketimes = \"Spike times for\" + str(electrode) + \"=\" + str(spikes[0][0])\n\n file.write(Mock_electrode_spiketimes)\n\n Mock_electrode_detections = \"Spike number for\" + str(electrode) + \"=\" + str(spikes[1])\n\n file.write(Mock_electrode_detections)\n\n file.close()\n\n template = [1.77083333333333, 1.04166666666667, 1.97916666666667, 2.81250000000000, 1.87500000000000, 1.56250000000000,\n 2.70833333333333, 2.81250000000000, 3.12500000000000, 3.33333333333333, 2.50000000000000, 1.14583333333333,\n 1.77083333333333, 2.50000000000000, 0.937500000000000, 0.729166666666667, 1.66666666666667, 2.500000000000,\n 1.35416666666667, 1.97916666666667, 5.93750000000000, 12.5000000000000, 18.5416666666667, 18.4375000000000,\n 4.16666666666667, -21.6666666666667, -39.7916666666667, -40, -31.4583333333333, -22.5000000000000,\n -17.2916666666667, -14.5833333333333, -10.8333333333333, -8.22916666666667, -6.77083333333333, -3.12500000,\n -2.18750000000000, -1.45833333333333, -0.937500000000000, 0.104166666666667, 3.02083333333333, 2.2916666667,\n 0.416666666666667, 0.625000000000000, 2.91666666666667, 3.22916666666667, 3.75000000000000, 6.1458333333333,\n 6.56250000000000, 6.97916666666667, 6.04166666666667, 5.41666666666667, 5.10416666666667, 5.10416666666667,\n 6.66666666666667, 5.52083333333333, 3.54166666666667, 4.58333333333333, 3.43750000000000, 0.625000000000000,\n 0.312500000000000, 2.29166666666667, 4.79166666666667, 2.81250000000000, 0.104166666666667, -0.208333333333,\n -0.833333333333333, -0.416666666666667, 2.08333333333333, 5.31250000000000, 5.10416666666667, 1.04166666667,\n -0.625000000000000, -0.104166666666667, 0.312500000000000] # Spike shapes to be imposed\n\n mock_data = impose_template(noise=channel_noise, spikes=spikes, template=template)\n\n time_array[electrode] = mock_data[0]\n uv_data[electrode] = mock_data[1]\n\n raw_data = RAW_NeuronalData(uv_data=uv_data, input='RAWdata', time_array=time_array, channelids=channelids)\n\n return raw_data", "def generate_data(points_number=1000, disk_radius=1.0 / (math.sqrt(2.0 * math.pi))):\n examples = FloatTensor(points_number, 2).uniform_(-0.5, 0.5)\n targets = LongTensor(points_number, 2).zero_()\n for i, ex in enumerate(examples):\n dist = math.sqrt(ex[0] ** 2 + ex[1] ** 2)\n if dist > disk_radius: # or ex[0] < 0:\n targets[i, 0] = 1\n else:\n targets[i, 1] = 1\n ex[0] += 0.5\n ex[1] += 0.5\n return examples, targets", "def 
create_log_maxDataPoints(uidWell, uidWellbore, uidLog, nameWell, nameWellbore, nameLog, indexType, maxDataPoints ):\n \n # set variables based on log indexType\n if (indexType == \"measured depth\"):\n indexMnemonic = \"DEPTH\"\n indexUnit = \"m\"\n indexDataType = \"double\"\n elif (indexType == \"date time\"):\n indexMnemonic = \"TIME\"\n indexUnit = \"unitless\"\n indexDataType = \"date time\"\n else:\n return False\n \n \n # set maximum number of rows per request to lesser of UpdateInStore or AddToStore\n if (maxDataPoints['WMLS_AddToStore'] < maxDataPoints['WMLS_UpdateInStore']):\n maximum_rows_per_request = maxDataPoints['WMLS_AddToStore']\n else:\n maximum_rows_per_request = maxDataPoints['WMLS_UpdateInStore']\n \n maxPointsToExceed = maxDataPoints['WMLS_GetFromStore'] \n print (\"uploading log that exceeds maxDataPoints of \" + str(maxPointsToExceed));\n \n # set number of curves to write (capped at 500)\n curve_count = maxPointsToExceed / 1000;\n if (curve_count < 1):\n curve_count = 1\n if (curve_count > 500):\n curve_count = 500\n \n row_count = maxPointsToExceed / curve_count;\n curves = [];\n depth_index_curve_name = indexMnemonic\n rows_per_request = row_count / 10\n uploaded_points = 0;\n \n ### generating curve names\n for i in range(curve_count):\n number_as_str = str(i);\n while len(number_as_str) < 4:\n number_as_str = \"0\"+number_as_str;\n curves.append( \"CURVE_\"+number_as_str );\n \n current_row_index = 0;\n uploaded_nodes = 0;\n \n if ( indexType == \"measured depth\"):\n virtual_index = 0.0;\n else:\n virtual_index = subtract_seconds_to_timestamp(now(), 24*60*60)\n \n first_upload = True;\n while 1:\n if (uploaded_points > maxPointsToExceed):\n break;\n empty_data_template_xml = string.Template(\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <logs xmlns=\"http://www.witsml.org/schemas/1series\" version=\"$ver\">\n <log uidWell=\"$uidWell\" uidWellbore=\"$uidWellbore\" uid=\"$uid\">\n <nameWell>$nameWell</nameWell>\n <nameWellbore>$nameWellbore</nameWellbore>\n <name>$name</name>\n <indexType>$indexType</indexType>\n <direction>increasing</direction>\n <indexCurve>$indexMnemonic</indexCurve>\n \"\"\").substitute(ver = get(\"server_schema_version\"),\n uidWell = uidWell,\n uidWellbore = uidWellbore,\n uid = uidLog,\n nameWell = nameWell,\n nameWellbore = nameWellbore,\n name = nameLog,\n indexType = indexType,\n indexMnemonic = indexMnemonic);\n ns = \"http://www.witsml.org/schemas/1series\";\n EF = objectify.ElementMaker(annotate=False, namespace=ns, nsmap={None : ns})\n ###\n update_xml = empty_data_template_xml;\n header_mnemonics = [indexMnemonic]\n header_uoms = [indexUnit]\n ###\n crv = string.Template('''\n <logCurveInfo uid=\"$uid\">\n <mnemonic>$mnemonic</mnemonic>\n <unit>$uom</unit>\n <typeLogData>$typeLogData</typeLogData>\n </logCurveInfo>\n ''').substitute(uid = indexMnemonic,\n mnemonic = indexMnemonic,\n uom = indexUnit,\n typeLogData = indexDataType\n );\n if (first_upload):\n update_xml += crv; \n ###\n for crv_index in range(curve_count):\n curve_mnemonic = curves[crv_index];\n curve_uom = 'm'\n curve_type = \"int\"\n \n ###\n header_mnemonics.append(curve_mnemonic)\n header_uoms.append(curve_uom)\n ###\n \n crv = string.Template('''\n <logCurveInfo uid=\"$uid\">\n <mnemonic>$mnemonic</mnemonic>\n <unit>$uom</unit>\n <typeLogData>$typeLogData</typeLogData>\n </logCurveInfo>\n ''').substitute(uid = curve_mnemonic,\n mnemonic = curve_mnemonic,\n uom = curve_uom,\n typeLogData = curve_type\n );\n if (first_upload): \n update_xml += crv; \n ###\n 
update_xml += \"<logData>\\n\"\n ###\n update_xml+=\"<mnemonicList>\"+(\",\".join(header_mnemonics))+\"</mnemonicList>\\n\"\n update_xml+=\"<unitList>\"+(\",\".join(header_uoms))+\"</unitList>\\n\" \n ###\n collected_nodes = 0;\n collected_points = 0;\n for rwi in range(rows_per_request):\n \n if (indexType == \"measured depth\"):\n virtual_index += 0.1;\n else:\n virtual_index = add_seconds_to_timestamp(virtual_index, 1)\n \n data_row = [str(virtual_index)]; \n \n for crv_i in range(curve_count):\n data_value = int(random.random() * 9 )\n data_row.append( str( data_value ) ); \n collected_points+=1;\n collected_nodes+=1;\n \n data_row_str = \",\".join(data_row);\n update_xml+=\"<data>\"+data_row_str+\"</data>\\n\"\n \n update_xml += \"</logData>\\n</log>\\n</logs>\" \n update_query = update_xml\n ###\n if (first_upload):\n WMLS_AddToStore(WMLTYPEIN_LOG, update_query );\n check_ReturnValue_Success()\n else:\n WMLS_UpdateInStore(WMLTYPEIN_LOG, update_query );\n check_ReturnValue_Success()\n ###\n uploaded_points += collected_points;\n uploaded_nodes += collected_nodes\n print \"uploaded \"+str(uploaded_points)+\" points to witsml store\"\n print \"node count: \"+str(uploaded_nodes)+\" \"\n first_upload = False;\n return True", "def save_points(filename=None):\n\n if filename is None:\n filetypes = '*.txt'\n prompt = 'Save Points'\n filename = save_file_gui(prompt, filetypes=filetypes)\n if filename is None:\n return None\n g.m.statusBar().showMessage(f'Saving Points in {os.path.basename(filename)}')\n p_out = []\n p_in = g.win.scatterPoints\n for t in np.arange(len(p_in)):\n for p in p_in[t]:\n p_out.append(np.array([t, p[0], p[1]]))\n p_out = np.array(p_out)\n np.savetxt(filename, p_out)\n g.m.statusBar().showMessage(f'Successfully saved {os.path.basename(filename)}')\n return filename", "def gen_data(min_coord, max_coord, size):\r\n data = np.random.randint(min_coord, max_coord, size)\r\n return data", "def generate_random_data():\n size = convert_file_size(request.args.get(\"size\"))\n datagen, cache, fwriter, status = get_dependencies(\"dg\", \"ch\", \"fw\", \"st\")\n path = make_file_path()\n report = DataReport()\n data_stream = datagen.generate_randoms(hook=report.update, suffix=\", \")\n\n def write_file_async():\n nonlocal fwriter, path, data_stream, size, report, status\n file_name = get_file_name(path)\n status.update_status(file_name, \"WAITING\")\n fwriter.write(path, data_stream, max_size=size)\n cache.save_data(file_name, report.dict())\n status.update_status(file_name, \"FINISH\")\n\n Thread(target=write_file_async).start()\n return GenDataAPIResponse(path=path, size=size).json()", "def generate_test_set(data, pts): \n test_set = np.asarray(random.sample(data, pts))\n \n return test_set", "def generate_data(func, points, seed=0):\n np.random.seed(seed)\n\n data = []\n for segment in points:\n x = np.linspace(*segment[\"xlim\"], num=segment[\"n_points\"])\n distribution = func(x)\n # Generate observations\n y = distribution.rvs()\n df = pd.DataFrame({\"x\": x, \"y\": y})\n data.append(df)\n\n return pd.concat(data, ignore_index=True)", "def save_points(conn, points, location_id):\n time_str = strftime(\"%a, %d %b %Y %H:%M:%S\", gmtime())\n m_id = create_measurement(conn, (location_id, time_str))\n for p in points:\n create_datapoint(conn, (p, m_id))\n\n set_location_trained(conn, location_id, \"TRUE\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Descarga la base de datos, del día de hoy, sobre covid 19 y la descomprime
def descarga_base_covid(fecha): zipname = "data/"+fecha+"COVID19MEXICO.zip" filename = fecha+"COVID19MEXICO.csv" if os.path.exists(zipname): print("La base de datos se encuentra en la carpeta") else: start_time = time() url = "http://datosabiertos.salud.gob.mx/gobmx/salud/datos_abiertos/" url += "datos_abiertos_covid19.zip" r = requests.get(url) with open(zipname, "wb") as code: code.write(r.content) end_time = time() total_time = end_time - start_time print(r.status_code) if r.status_code == 200: print("Descarga exitosa") print(f"La descarga tomó {total_time} segundos")
[ "def descarga_base_covid_antigua(fecha):\n zipname = \"data/\"+fecha+\"COVID19MEXICO.zip\"\n filename = fecha+\"COVID19MEXICO.csv\"\n if os.path.exists(zipname):\n print(\"La base de datos se encuentra en la carpeta\")\n else:\n start_time = time()\n url = \"http://datosabiertos.salud.gob.mx/gobmx/salud/datos_abiertos/\"\n url += \"historicos/20\"+fecha[:2]+\"/\"+fecha[2:4]\n url += \"/datos_abiertos_covid19_\"+fecha[4:6]+\".\"+fecha[2:4]+\".\"\n url += \"20\"+fecha[:2]+\".zip\"\n r = requests.get(url)\n with open(zipname, \"wb\") as code:\n code.write(r.content)\n end_time = time()\n total_time = end_time - start_time\n print(r.status_code)\n if r.status_code == 200:\n print(\"Descarga exitosa\")\n print(f\"La descarga tomó {total_time} segundos\")", "def getIndiceDeConfianzaDelConsumidor(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-confianza-consumidor\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n longitudResultado = len(resultado)-1\n ultimoResultado = resultado[longitudResultado]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def getIndicePreciosAlConsumidorCordobaBaseJulio2012(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-precios-al-consumidor-provincia-cordoba-base-2014-100\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def obtener_descripcion(id_asignatura):\n descripcion = \"\"\n CURSOR.execute(\"SELECT DISTINCT descripcion_silabo FROM silabo \" +\n \"WHERE id_silabo = '\" + id_asignatura + \"'\")\n rows = CURSOR.fetchall()\n descripcion = rows[0][0]\n return descripcion", "def fetch_data_secretarias():\n for e in UF:\n url = f'https://brasil.io/dataset/covid19/caso?state={e}&place_type=state&format=csv'\n df = pd.read_csv(url)\n df['obitos_secretaria'] = df['deaths'].diff(periods=-1).fillna(0).astype(int)\n df.rename(columns={\n 'date': 'data',\n 'state': 'uf'\n }, inplace=True)\n df = df[['uf', 'data', 'obitos_secretaria']]\n df.to_csv(f'raw_obitos_secretaria_{e}.csv', index=False)\n 
final = pd.concat(\n [pd.read_csv(f) for f in glob.glob(f'raw_obitos_secretaria_*.csv')]\n )\n final.to_csv(f'obitos_secretaria.csv', index=False)", "def buscarAlumnosCurso(self, curso):", "def write_base_data(self, daty):\r\n self.liczba_pelnych_linii = len(self.output_lista_cegiel) / 5\r\n if len(self.output_lista_cegiel)-self.liczba_pelnych_linii * 5 != 0:\r\n self.niepelna_liniia = True\r\n else:\r\n self.niepelna_liniia = False\r\n\r\n self.write_drugs_name()\r\n self.write_cegla_name()\r\n self.write_date(daty)", "def descritivo_bateria(self) -> None:\n print('Este carro tem bateria de ' + str(self.tamanho_bateria) + '-KWh.')", "def diplayDepartementEco (cur, numDep) :\n # info table Region\n command = (\"\"\"\n SELECT libelle, taux_activite_2017, part_diplomes_2017, poids_economie_2015 \n FROM Departements WHERE num_dep = '%s';\n \"\"\" % (numDep))\n cur.execute(command)\n res = cur.fetchall()\n print(res)\n res = res[0]\n libelle = res[0]\n taux_act = res[1]\n part_dip = res[2]\n p_eco = res[3]\n # info table Emploi_diplomes\n command = (\"\"\"\n SELECT taux_emploi, part_jeunes_diplomes_18_25_ans, annee \n FROM Emploi_diplomes WHERE libelle = '%s';\n \"\"\" % libelle)\n cur.execute(command)\n emp_dip = cur.fetchall()\n # info table Transport\n command = (\"\"\"\n SELECT pourcentage_voiture, pourcentage_transport_commun, pourcentage_autre, annee \n FROM Transport WHERE libelle = '%s';\n \"\"\" % libelle)\n cur.execute(command)\n Transport = cur.fetchall()\n ## Affichage : \n print(\"\\n*** Departement : \", libelle, \"\\n** Informations Economiques\")\n print(\"* taux d'activité en 2017 : \", taux_act)\n print(\"* part des diplomes en 2017 : \", part_dip)\n print(\"* poids de l'économie en 2015 : \", p_eco)\n for i in emp_dip :\n print(\"* taux d'emploi en \", i[2], \" : \", i[0], \"%\")\n print(\"* part des jeunes diplomés de 18 à 25 ans en \", i[2], \" : \", i[1], \"%\")\n for j in Transport : \n print(\"* part des types de transport pour se rendre au travail en \", j[3], \" : \")\n print(\"\\t** \", j[0], \"% voiture\\n\\t** \", j[1], \"% tansport en commun\\n\\t** \", j[2], \"% autre\")\n print(\"\\n\")", "def diplayDepartementSocial (cur, numDep) :\n # info table Region\n command = (\"\"\"\n SELECT libelle, disparite_niveau_vie_2014, eloignement_service_sante_2016 \n FROM Departements WHERE num_dep = '%s';\n \"\"\" % (numDep))\n cur.execute(command)\n res = cur.fetchall()\n res = res[0]\n libelle = res[0]\n diparite = res[1]\n eloignement = res[2]\n # info table Esperances_vie\n command = (\"\"\"\n SELECT esperance_vie_hommes, esperance_vie_femmes, annee \n FROM Esperances_vie WHERE libelle = '%s';\n \"\"\" % libelle)\n cur.execute(command)\n esperance_vie = cur.fetchall()\n # info table Taux_pauvrete\n command = (\"\"\"\n SELECT pourcentage, annee \n FROM Taux_pauvrete WHERE libelle = '%s';\n \"\"\" % libelle)\n cur.execute(command)\n Taux_pauvrete = cur.fetchall()\n # info table Insertion_jeunes\n command = (\"\"\"\n SELECT pourcentage_jeunes_non_inseres, annee \n FROM Insertion_jeunes WHERE libelle = '%s';\n \"\"\" % libelle)\n cur.execute(command)\n non_insertion = cur.fetchall()\n # info table Zones_inondables\n command = (\"\"\"\n SELECT pourcentage_pop, annee \n FROM Zones_inondables WHERE libelle = '%s';\n \"\"\" % libelle)\n cur.execute(command)\n part_z_inond = cur.fetchall()\n ## Affichage : \n print(\"\\n*** Departement : \", libelle, \"\\n** Informations sociales\")\n print(\"* disparite du niveau de vie en 2014 : \", diparite)\n print(\"* part de la population 
eloignées des services de santé en 2016 : \", eloignement)\n for i in esperance_vie :\n print(\"* esperance de vie en \", i[2], \" : hommes \", i[0], \" / femmes \", i[1], \" ans\")\n for j in Taux_pauvrete : \n print(\"* taux de pauvreté en \", j[1], \" : \", j[0], \"%\")\n for k in non_insertion :\n print(\"* part des jeunes non insérés en \", k[1], \" : \", k[0], \"%\")\n for l in part_z_inond :\n print(\"* part des habitants en zones inondables en \", l[1], \" : \", l[0], \"%\")\n print(\"\\n\")", "def fetch_death_registrocivil():\n for e in UF:\n url_base = 'https://transparencia.registrocivil.org.br/api/covid?data_type=data_ocorrido'\n url_search = f'&search=death-covid&state={e}&start_date=2020-01-01&end_date={TODAY}'\n url = url_base + url_search\n data = requests.get(url)\n raw_response = data.json()\n response = raw_response['chart']\n df = pd.DataFrame.from_dict(\n response,\n orient='index',\n columns=['obitos_cartorio']\n ).reset_index().rename(columns={'index': 'data'})\n df['uf'] = f\"{e}\"\n df = df[['uf', 'data', 'obitos_cartorio']]\n df['data'] = df['data'] + '/2020'\n df['data'] = pd.to_datetime(df['data'], dayfirst=True)\n df.to_csv(f'raw_obitos_cartorios_{e}.csv', index=False)\n final = pd.concat(\n [pd.read_csv(f) for f in glob.glob(f'raw_obitos_cartorios_*.csv')]\n )\n final.to_csv('obitos_cartorio.csv', index=False)", "def download_france_data():\n start = time.time()\n oc19_file = \"opencovid19-fr-chiffres-cles.csv\"\n gouv_file = \"data-gouv-fr-chiffres-cles.csv\"\n oc19_url = \"https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv\"\n gouv_url = \"https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617\"\n # run requests to download and save the data\n myfile = requests.get(oc19_url)\n with open(oc19_file, \"wb\") as f:\n f.write(myfile.content)\n file = requests.get(gouv_url)\n with open(gouv_file, \"wb\") as f:\n f.write(file.content)\n # Load both csv into pandas\n data = pd.read_csv(oc19_file)\n data_gouv = pd.read_csv(gouv_file)\n # Fill in some of the metadata that is not present in the government data\n data_gouv[\"granularite\"] = \"pays\"\n data_gouv[\"maille_code\"] = \"FRA\"\n data_gouv[\"maille_nom\"] = \"France\"\n data[\"source_nom\"] = \"Santé publique France Data\"\n data_gouv[\"source_url\"] = \"https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617\"\n data_gouv.rename(DATA_GOUV_2_OPEN, axis=\"columns\", inplace=True)\n end = time.time()\n print(\"Time spent on download_france_data: {0:.5f} s.\".format(end - start)) \n return pd.concat((data, data_gouv), join=\"outer\")", "def carga_db(arquivo,user,password,database,host):\n df = pd.read_csv(arquivo,sep=\";\")\n if len(df) == 0:\n exit(\"arquivo vazio, não tem dados novos para carregar !\")\n cursor = bd.bdados(database,user,password,host)\n cursor.escrever(df,\"book\")", "def get_doctor_details(medico, cookie):\n detail_link = medico.find('a').get('href')\n detail_link = status_url_base % (cookie, detail_link)\n response = requests.get(detail_link)\n soup = BeautifulSoup(response.text)\n table = soup.find('table')\n\n hours = [td for td in table.findAll('td')]\n\n i=0\n for th in table.findAll('th'):\n print th.string + \" %s\" % hours[i]\n i+=1", "def crear_dicionarios():\r\n valor_alfanumerico = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, 'l': 12,\r\n 'm': 13, 'n': 14, 'ñ': 15, 'o': 16, 'p': 17, 'q': 18, 'r': 19, 's': 20, 't': 21, 'u': 22, 'v': 23, 'w': 24, 'x': 25, 'y': 
26,\r\n 'z': 27, 'A': 28, 'B': 29, 'C': 30, 'D': 31, 'E': 32, 'F': 33, 'G': 34, 'H': 35, 'I': 36, 'J': 37, 'K': 38, 'L': 39, 'M': 40,\r\n 'N': 41, 'Ñ': 42, 'O': 43, 'P': 44, 'Q': 45, 'R': 46, 'S': 47, 'T': 48, 'U': 49, 'V': 50, 'W': 51, 'X': 52, 'Y': 53, 'Z': 54,\r\n 'á': 55, 'Á': 56, 'é': 57, 'É': 58, 'í': 59, 'Í': 60, 'ó': 61, 'Ó': 62, 'ú': 63, 'Ú': 64, '/': 65, '(': 66, ')': 67, '\"': 68,\r\n '=': 69, '&': 70, '%': 71, '$': 72, '#': 73, '!': 74, '¡': 75, '¿': 76, '?': 77, '*': 78, '-': 79, '+': 80, \"'\": 81, '0': 82,\r\n '1': 83, '2': 84, '3': 85, '4': 86, '5': 87, '6': 88, '7': 89, '8': 90, '9': 91, '|': 92, '°': 93, '<': 94, '>': 95, '{': 96,\r\n '}': 97, '[': 98, ']': 99, ',': 100, '.': 101, ':': 102, ';': 103, '_': 104, '^': 105, '`': 106, '~': 107, '¬': 108, ' ': 109}\r\n return valor_alfanumerico", "def dataset_to_bd(dataframe):\n\n tabla = dataframe.to_dict('index')\n creado = False\n linea = 1\n\n for registro in tabla:\n\n linea = linea + 1\n\n RESULTADO_SUBIDA['n_total_registros_analizados'] = (\n RESULTADO_SUBIDA['n_total_registros_analizados'] + 1\n )\n\n nombres_correctos = True\n\n # Se validan los datos. Si falla salta el registro:\n\n #============# #============#\n\n nombres_correctos = (nombres_correctos\n * check_provincia(tabla[registro]['Provincia'])\n * check_tecnico(tabla[registro]['Técnico'])\n * check_fabrica(tabla[registro]['Fab. Pienso'])\n * check_poblacion(tabla[registro]['Población'])\n )\n\n # Los dos errores no son incompatibles\n if not nombres_correctos:\n RESULTADO_SUBIDA['lineas_error'].append(\n {str(linea) : \"Nombre incorrecto o no registrado.\"})\n\n #============# #============#\n\n fecha_valida = check_fecha(tabla[registro]['FECHA'])\n\n if not fecha_valida:\n RESULTADO_SUBIDA['lineas_error'].append(\n {str(linea) : \"Fecha no válida.\"})\n\n # ============# #============#\n\n # Trata de introducir el registro en bbdd\n if nombres_correctos & fecha_valida:\n try:\n # Cojo la provincia que ya está en bbdd\n provincia = models.Provincia.get(\n models.Provincia.nombre_provincia **\n (tabla[registro]['Provincia'].strip().title()))\n\n tecnico = models.Tecnico.get(\n models.Tecnico.nombre_tecnico **\n (tabla[registro]['Técnico'].strip().title()))\n\n fabrica = models.Fabrica.get(\n models.Fabrica.nombre_fabrica **\n (tabla[registro]['Fab. 
Pienso'].strip().title()))\n\n poblacion = models.Poblacion.get(\n models.Poblacion.nombre_poblacion **\n (tabla[registro]['Población'].strip().title()))\n\n # Se crea el integrado\n creado = models.Integrado.create_integrado(\n user=g.user._get_current_object(),\n tecnico=tecnico,\n fabrica=fabrica,\n codigo=tabla[registro]['Código'],\n nombre_integrado=tabla[registro]['Avicultor'].strip().title(),\n poblacion=poblacion,\n provincia=provincia,\n ditancia=tabla[registro]['Distancia a Matadero Purullena'],\n metros_cuadrados=tabla[registro]['Mts Cuadrados'],\n )\n\n # Se apunta el número de integrados creados en el proceso\n if creado == \"ok\":\n RESULTADO_SUBIDA['n_integrados_subidos'] = (\n RESULTADO_SUBIDA['n_integrados_subidos'] + 1)\n elif creado == \"existe\":\n # print(\"existe\")\n # No se considera tan relevante porque es repetitivo\n pass\n elif creado == \"error\":\n # print(\"error\")\n RESULTADO_SUBIDA['n_errores_integrado'] = (\n RESULTADO_SUBIDA['n_errores_integrado'] + 1)\n pass\n\n # ==================================================================\n\n # Cojo el integrado que ya está en bbdd\n integrado = models.Integrado.get(\n models.Integrado.nombre_integrado **\n tabla[registro]['Avicultor'].strip().title())\n\n # Se crea la camada\n\n creado = models.Camada.create_camada(\n integrado=integrado,\n fecha=tabla[registro]['FECHA'],\n codigo_camada=tabla[registro]['Código Camada'],\n\n pollos_entrados=float(tabla[registro]['Pollos Entrados']),\n pollos_salidos=float(tabla[registro]['Pollos Salidos']),\n porcentaje_bajas=float(tabla[registro]['% Bajas']),\n kilos_carne=float(tabla[registro]['Kilos Carne']),\n kilos_pienso=float(tabla[registro]['Kilos Pienso']),\n peso_medio=float(tabla[registro]['Peso Medio']),\n indice_transformacion=float(tabla[registro]['I.Transform']),\n retribucion=float(tabla[registro]['Retribución Pollo']),\n medicamentos_por_pollo=float(tabla[registro]['Medic/Pollo']),\n ganancia_media_diaria=float(tabla[registro]['Ganancia Media Diaria']),\n dias_media_retirada=float(tabla[registro]['Dias Media Retirada sin Asador']),\n\n medicamentos=tabla[registro]['MEDICAMENTOS'],\n liquidacion=tabla[registro]['LIQUIDACIÓN'],\n bajas_primera_semana=tabla[registro]['BAJAS 1a. SEMANA'],\n porcentaje_bajas_primera_semana=tabla[registro]['%BAJAS 1a. 
Semana'],\n rendimiento_metro_cuadrado=tabla[registro]['Rdto/M2'],\n pollo_metro_cuadrado=tabla[registro]['Pollo/Mt2'],\n kilos_consumidos_por_pollo_salido=tabla[registro]['Kilos Consumidos por Pollo Salido'],\n dias_primer_camion=tabla[registro]['Días Primer Camión'],\n peso_primer_dia=tabla[registro]['Peso 1 Día'],\n peso_semana_1=tabla[registro]['Peso 1 Semana'],\n peso_semana_2=tabla[registro]['peso 2 semana'],\n peso_semana_3=tabla[registro]['peso 3 semana'],\n peso_semana_4=tabla[registro]['peso 4 semana'],\n peso_semana_5=tabla[registro]['peso 5 semana'],\n peso_semana_6=tabla[registro]['peso 6 semana'],\n peso_semana_7=tabla[registro]['peso 7 semana'],\n rendimiento=tabla[registro]['Rendimiento'],\n FP=tabla[registro]['% FP'],\n bajas_matadero=tabla[registro]['Bajas'],\n decomisos_matadero=tabla[registro]['Decomisos'],\n porcentaje_bajas_matadero=tabla[registro]['% Bajas'],\n porcentaje_decomisos=tabla[registro]['% Decomisos'],\n )\n\n # Se apunta el numero de camadas creadas en el proceso\n if creado == \"ok\":\n RESULTADO_SUBIDA['n_camadas_subidas'] = (\n RESULTADO_SUBIDA['n_camadas_subidas'] + 1)\n elif creado == \"existe\":\n # print(\"existe\")\n RESULTADO_SUBIDA['n_errores_camada'] = (\n RESULTADO_SUBIDA['n_errores_camada'] + 1\n )\n RESULTADO_SUBIDA['lineas_error'].append(\n {str(linea): \"Esta camada ya existe.\"})\n pass\n elif creado == \"error\":\n # print(\"error\")\n RESULTADO_SUBIDA['n_errores_camada'] = (\n RESULTADO_SUBIDA['n_errores_camada'] + 1\n )\n RESULTADO_SUBIDA['lineas_error'].append({str(linea) : \"Error en el valor de los datos\"})\n pass\n except:\n RESULTADO_SUBIDA['lineas_error'].append({str(linea) : \"Error desconocido.\"})\n pass\n\n # if len(RESULTADO_SUBIDA['lineas_error']) > 0:\n # for linea in RESULTADO_SUBIDA['lineas_error']:\n # print(linea)\n\n # Esto determina si hubo registros analizados\n if RESULTADO_SUBIDA['n_total_registros_analizados']:\n return True\n else:\n return False", "def gerar_data():\n \n ano = random.randint(1980, 2018)\n mes = random.randint(1, 12)\n dia = random.randint(1, 28)\n hora = random.randint(1, 23)\n minuto = random.randint(1, 59)\n segundo = random.randint(1, 59)\n microsegundos = random.randint(1, 999999)\n data = datetime.datetime(\n ano, mes, dia, hora, minuto, segundo, microsegundo\n ).isoformat(\" \")\n \n return data", "def refondreDonnees(donneesBrutes):\n\tprint(\"REFONTE DES DONNÉES DANS STRUCTURE UNIFIÉE\")\n\n\t# initialiser dictionnaire unifié des valeurs\n\tdicoDesValeurs = {}\n\n\t# pour chaque fichier de données chargé dans une DataFrame\n\tnbDataFrames = len(donneesBrutes)\n\tnbDigits = len(str(nbDataFrames))\n\tcompteurFichiers=0\n\tidDonnee=0\n\tprint()\n\tfor attributs,df in donneesBrutes:\n\t\tcompteurFichiers+=1\n\t\t# pour chaque rangée de la DataFrame\n\t\tprint(\"---> depuis fichier {0:{n}d}/{1}\".format(\n\t\t\t\tcompteurFichiers,nbDataFrames,n=nbDigits))\n\t\tfor i in numpy.arange(len(df)):\n\t\t\t# récupérer cette rangée (ligne) de données brutes\n\t\t\tligne = df.iloc[i]\n\t\t\t# créer un dictionnaire ayant les mêmes clés principales\n\t\t\t# que dicoDesAttributs mais un seul champ numérique par attribut\n\t\t\tdicoValeursPourUneDonnee = {}\n\t\t\t# le compléter avec les valeurs associées à chaque attribut\n\t\t\tfor k in attributs:\n\t\t\t\t# retenir le domaine de la valeur de cet attribut\n\t\t\t\tdomaine = dicoDesAttributs[k]['domaine']\n\t\t\t\t# calculer le décalage en colonnes des valeurs\n\t\t\t\tdecalage = decalageEnColonnes(k,attributs)\n\t\t\t\t# retenir le nombre de valeurs à 
charger\n\t\t\t\tdimensions = dicoDesAttributs[k]['dimensions']\n\t\t\t\t# si plus d'une dimension, employer un NumPy array\n\t\t\t\t# sinon, enregistrer la valeur directement\n\t\t\t\tif dimensions>1:\n\t\t\t\t\t# préparer un NumPy array\n\t\t\t\t\tplusieursValeurs = numpy.array([])\n\t\t\t\t\t# configurer son type numérique\n\t\t\t\t\tif 'REEL' in domaine:\n\t\t\t\t\t\tplusieursValeurs.astype(float)\n\t\t\t\t\telif 'NAT' in domaine:\n\t\t\t\t\t\tplusieursValeurs.astype(int)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\n\t\t\t\t\tfor j in numpy.arange(dimensions):\n\t\t\t\t\t\t# extraire la valeur\n\t\t\t\t\t\tvaleur = ligne[j+decalage]\n\t\t\t\t\t\t# valider le domaine de la valeur\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tassert(domaineValide(valeur,domaine))\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint(\n\t\t\t\t\t\t\t\t\"!!! domaineValide_plusieursValeurs\"\n\t\t\t\t\t\t\t\t\"(i,k,j,decalage) = (%d,%d,%d,%d)\"\n\t\t\t\t\t\t\t\t%(i,k,j,decalage))\n\t\t\t\t\t\t# ajouter cette valeur au NumPy array\n\t\t\t\t\t\tplusieursValeurs = numpy.append(\n\t\t\t\t\t\t\t\tplusieursValeurs,valeur)\n\n\t\t\t\t\t# ajouter une valeur correspondant à cet attribut\n\t\t\t\t\tdicoValeursPourUneDonnee.update({k:plusieursValeurs})\n\n\t\t\t\telse:\n\t\t\t\t\tassert(dimensions==1) # check\n\t\t\t\t\t# extraire la valeur\n\t\t\t\t\tuneValeur = ligne[decalage]\n\t\t\t\t\t# valider le domaine de la valeur\n\t\t\t\t\ttry:\n\t\t\t\t\t\tassert(domaineValide(uneValeur,domaine))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\n\t\t\t\t\t\t\t\"!!! domaineValide_uneValeur\"\n\t\t\t\t\t\t\t\"(i,k,decalage) = (%d,%d,%d)\"\n\t\t\t\t\t\t\t%(i,k,decalage))\n\t\t\t\t\t# forcer l'emploi d'un entier si approprié\n\t\t\t\t\tif domaine == 'NAT' or domaine == 'NATPOS':\n\t\t\t\t\t\tuneValeur = int(uneValeur)\n\t\t\t\t\t# ajouter une valeur correspondant à cet attribut\n\t\t\t\t\tdicoValeursPourUneDonnee.update({k:uneValeur})\n\n\t\t\t# ajouter le dictionnaire rempli pour cette donnée\n\t\t\t# au dictionnaire unifié des valeurs\n\t\t\tdicoDesValeurs.update({idDonnee:dicoValeursPourUneDonnee})\n\t\t\tidDonnee+=1\n\n\tprint()\n\t# retourner dictionnaire unifié des valeurs\n\treturn dicoDesValeurs", "def get_dict_fantoir(code_departement, code_commune):\n code_insee = cadastre.code_insee(code_departement, code_commune)\n dict_fantoir = {}\n try:\n db_cursor = addr_fantoir_building.get_pgc().cursor()\n sql_query = ''' SELECT code_insee||id_voie||cle_rivoli,\n nature_voie||' '||libelle_voie,\n type_voie, ld_bati\n FROM fantoir_voie\n WHERE code_insee = \\'''' + code_insee + '''\\' \n AND caractere_annul NOT IN ('O','Q');'''\n db_cursor.execute(sql_query)\n for result in db_cursor:\n code_fantoir = result[0]\n nom_fantoir = ' '.join(result[1].replace('-',' ').split())\n #lieu_dit_non_bati = (result[2] == '3') and (result[3] == '0')\n highway = result[2] in ['1', '4', '5']\n dict_fantoir[normalize(nom_fantoir)] = (code_fantoir, highway)\n assert(len(dict_fantoir) > 0)\n return dict_fantoir\n except:\n # La connexion avec la base SQL a du échouer, on \n # charge les fichiers zip fantoir manuellement:\n filename = FANTOIR_ZIP\n ok_filename = filename + \".ok\"\n if not (os.path.exists(filename) and os.path.exists(ok_filename)):\n sys.stdout.write(\"Téléchargement du fichier Fantoir \" + FANTOIR_URL + \"\\n\")\n if os.path.exists(filename): os.remove(filename)\n if os.path.exists(ok_filename): os.remove(ok_filename)\n write_stream_to_file(urllib2.urlopen(FANTOIR_URL), filename)\n open(ok_filename, \"a\").close()\n else:\n sys.stdout.write(\"Lecture du fichier 
FANTOIR.zip\")\n sys.stdout.flush()\n num_commune = code_insee[2:5]\n debut = get_fantoir_code_departement(code_departement) + num_commune\n for line in ZipFile(filename, \"r\").open(\"FANTOIR.txt\"):\n if line.startswith(debut):\n if line[108:109] != ' ':\n # C'est un unregistrement de voie\n if line[73] == ' ':\n # la voie n'est pas annulée\n assert(code_insee == line[0:2] + line[3:6])\n id_voie = line[6:10]\n cle_rivoli = line[10]\n nature_voie = line[11:15].strip()\n libele_voie = line[15:41].strip()\n code_fantoir = code_insee + id_voie + cle_rivoli\n nom_fantoir = nature_voie + \" \" + libele_voie\n #lieu_dit_non_bati = line[108:110] == '30'\n highway = line[108:109] in ['1', '4', '5']\n dict_fantoir[normalize(nom_fantoir)] = \\\n (code_fantoir, highway)\n return dict_fantoir" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Descarga la base de datos de una fecha anterior a la del día actual
def descarga_base_covid_antigua(fecha):
    zipname = "data/"+fecha+"COVID19MEXICO.zip"
    filename = fecha+"COVID19MEXICO.csv"
    if os.path.exists(zipname):
        print("La base de datos se encuentra en la carpeta")
    else:
        start_time = time()
        url = "http://datosabiertos.salud.gob.mx/gobmx/salud/datos_abiertos/"
        url += "historicos/20"+fecha[:2]+"/"+fecha[2:4]
        url += "/datos_abiertos_covid19_"+fecha[4:6]+"."+fecha[2:4]+"."
        url += "20"+fecha[:2]+".zip"
        r = requests.get(url)
        with open(zipname, "wb") as code:
            code.write(r.content)
        end_time = time()
        total_time = end_time - start_time
        print(r.status_code)
        if r.status_code == 200:
            print("Descarga exitosa")
            print(f"La descarga tomó {total_time} segundos")
[ "def descarga_base_covid(fecha):\n zipname = \"data/\"+fecha+\"COVID19MEXICO.zip\"\n filename = fecha+\"COVID19MEXICO.csv\"\n if os.path.exists(zipname):\n print(\"La base de datos se encuentra en la carpeta\")\n else:\n start_time = time()\n url = \"http://datosabiertos.salud.gob.mx/gobmx/salud/datos_abiertos/\"\n url += \"datos_abiertos_covid19.zip\"\n r = requests.get(url)\n with open(zipname, \"wb\") as code:\n code.write(r.content)\n end_time = time()\n total_time = end_time - start_time\n print(r.status_code)\n if r.status_code == 200:\n print(\"Descarga exitosa\")\n print(f\"La descarga tomó {total_time} segundos\")", "def execute_daily(self):\n self.update_comp_info()\n\n\n \"\"\"\n 새로운 구상, 일단 처음에 데이터베이스 MAX날짜 확인하고 NONE이면 PAGES TO FETCH에 100, 아니면 비교해서 10나누고 몫\n 구해서 변수에 넣기\n \n \"\"\"\n curs = self.conn.cursor()\n\n sql = \"SELECT max(date) FROM daily_price\"\n curs.execute(sql)\n rs = curs.fetchone()\n\n if rs[0] is None:\n pages_to_fetch =200\n else:\n lastday = datetime.strptime(str(rs[0]).replace('-',''),\"%Y%m%d\")\n today = datetime.now()\n diff = today - lastday\n diff = diff.days\n pages_to_fetch = diff//10\n if pages_to_fetch==0:\n pages_to_fetch=1\n \n self.update_daily_price(pages_to_fetch)\n\n tmnow = datetime.now()\n lastday = calendar.monthrange(tmnow.year, tmnow.month)[1]\n\n # 1년의 마지막 날인 경우\n if tmnow.month == 12 and tmnow.day == lastday:\n tmnext = tmnow.replace(year=tmnow.year + 1, month=1, day=1, hour=17, minute=0, second=0)\n\n # 한 달의 마지막 날인 경우\n elif tmnow.day == lastday:\n tmnext = tmnow.replace(month=tmnow.month + 1, day=1, hour=17, minute=0, second=0)\n\n # 그 나머지\n else:\n tmnext = tmnow.replace(day=tmnow.day + 1, hour=17, minute=0, second=0)\n\n # 다음 업데이트까지 남은 초 구하기 - 타이머에 활용할 것\n tmdiff = tmnext - tmnow\n secs = tmdiff.seconds\n\n t = Timer(secs, self.execute_daily)\n print(\"Waiting for next update ({}) ...\".format(tmnext.strftime('%Y-%m-%d %H:%M')))\n t.start()", "def date_get_last(self):\n if not self.date:\n self.date = get_date()", "def set_fecha_inicial(self, fecha):\n self.input.fill(vacaciones_crear_catalog.INPUT_FECHA_INICIAL, fecha)", "def diferenciaDeDias(self):\n\n dicFechas = self.anidarCompras()\n for dicFecha in dicFechas:\n if len(dicFecha[\"fechas\"]) > 1:\n guardarPrimerFecha = True\n diferenciasDeDias = []\n\n\n for fecha in dicFecha[\"fechas\"]: \n fecha = datetime.strptime(fecha, '%Y-%m-%d')\n\n if guardarPrimerFecha:\n fechaAComprar = fecha\n guardarPrimerFecha = False\n else:\n diferenciaDeDias = (fecha - fechaAComprar).days\n fechaAComprar = fecha\n diferenciasDeDias.append(int(diferenciaDeDias))\n\n DiasDeDiferencia = sum(diferenciasDeDias)/len(diferenciasDeDias)\n proximaCompra = fecha + timedelta(days=DiasDeDiferencia)\n\n print(\"su ultima compra de \", dicFecha[\"producto\"], \" fue el \", fecha, \"la próxima podría ser \", proximaCompra)", "def espera_fecha_retorno_actualizar(self):\n while self.input.get_text(vacaciones_crear_catalog.INPUT_FECHA_RETORNO) == \"\":\n pass", "def get_fecha_final(self):\n return self.input.get_text(vacaciones_crear_catalog.TB_FECHA_FINAL)", "def cargarFecha(qDate):\n try:\n data = ('{0}/{1}/{2}'.format(qDate.day(), qDate.month(), qDate.year()))\n var.ui.editClialta.setText(str(data))\n var.dlgcalendar.hide()\n except Exception as error:\n print('Error cargar fecha: %s ' % str(error))", "def get_fecha_inicial(self):\n return self.input.get_text(vacaciones_crear_catalog.INPUT_FECHA_INICIAL)", "def extraer():\n print \"extrayendo informacion...\"\n # nota: poner full path al archivo!\n archivo= 
open('/home/pi/django_projects/tango_project/datos_temp/datos.txt','r')\n f_raw = archivo.readline().split() #lista con los datos de fecha y hora\n print f_raw\n corr= timedelta(hours=4) #correccion para la hora de la base de datos - resolver\n fecha = datetime(int(f_raw[0]),int(f_raw[1]),int(f_raw[2]),int(f_raw[3]),int(f_raw[4]),)\n print fecha\n fecha = fecha - corr\n datos = archivo.readline().split() # lista de valores de las lecturas\n archivo.close()\t\t\t#cerramos archivo\n \n return datos,fecha\t\t#devuelve una tupla con la lista de los valores y la fecha", "def _choose_end_date_download(db: DbTicks, contract: Contract) -> datetime.datetime:\n _d: Optional[datetime.datetime] = db.get_oldest_timestamp()\n if _d is None:\n _d: datetime.datetime = _estimate_most_recent_tick_datetime(contract)\n return _d", "def update_disaffiliated(con):\n cursorObj = con.cursor()\n desafiliado=input(\"Identificacion del afiliado a desafiliar: \")\n fecha = date_to_string(date.today()) \n print(fecha) \n actualizar='update afiliados SET desafiliacion = \"'+fecha+'\" where id ='+desafiliado\n cursorObj.execute(actualizar)\n con.commit()", "def reset_last_dates(db):\n\tlast_date_params = {'table': 'regions',\n\t\t\t\t\t\t'destinations': ['last_date'],\n\t\t\t\t\t\t'data': (0,),\n\t\t\t\t\t\t'conditions': [],\n\t\t\t\t\t\t'condition_data': ()}\n\tdb.update(**last_date_params)", "def getIndicePreciosAlConsumidorCordobaBaseJulio2012(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-precios-al-consumidor-provincia-cordoba-base-2014-100\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def consultarInformes(self):\n try:\n cursor = self.__conexion.cursor()\n # Se le suma 1 hora para que este en nuestra franja horaria\n cursor.execute(\n \"SELECT datetime(fecha + 3600, 'unixepoch'), datos, sesion FROM Informes ORDER BY fecha DESC\")\n self.__conexion.commit()\n datos = cursor.fetchall()\n cursor.close()\n\n return datos\n\n except sqlite3.Error as error:\n print(\"Error al consultar la base de datos: \", error)", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def get_fecha_actual():\n hoy = datetime.datetime.now()\n fecha_actual = hoy.strftime(\"%d-%m-%Y\")\n return fecha_actual", "def _actualizar_servicio(self, servicio):\n\n orden = self.orden\n self.servicio = servicio\n tarifa = orden.plan.tarifa(servicio, orden.institucion)\n self.valor = getattr(tarifa, 'valor', 0)\n self.coopago = getattr(tarifa, 'coopago', 0)", "def getIndiceDeConfianzaDelConsumidor(self):\n #Obtener la url de descarga del cvs\n 
urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-confianza-consumidor\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n longitudResultado = len(resultado)-1\n ultimoResultado = resultado[longitudResultado]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It should create color shapes in the given data directory.
def test_create_shapes(data_dir):
    dataset.create_shapes(10, 10, 1, data_dir=data_dir)
    img_path = os.path.join(data_dir, "ellipse/0.png")
    assert os.path.exists(img_path)
    img = imageio.imread(img_path)
    assert img.shape == (10, 10, 4)
[ "def test_create_shapes_grayscale(data_dir):\n dataset.create_shapes(10, 10, 1, channels=1, data_dir=data_dir)\n img_path = os.path.join(data_dir, \"ellipse/0.png\")\n assert os.path.exists(img_path)\n img = imageio.imread(img_path)\n assert img.shape == (10, 10)", "def __createDataFolderStruct(self):\n if (not os.path.isdir('./data')):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/laz/\")\n os.mkdir(\"./data/tif/\")\n if (not os.path.isdir('./data/laz')):\n os.mkdir(\"./data/laz/\")\n if (not os.path.isdir('./data/tif')):\n os.mkdir(\"./data/tif/\")", "def make_props_files(labels, label_list, dir_path, data,\r\n background_color, label_color, prefs):\r\n cat_connected_num = 0\r\n mapping = data['map']\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n for params in groups_and_colors:\r\n l = params[0]\r\n if l == \"SampleID\" or l == \"Description\":\r\n continue\r\n m = params[2]\r\n c = params[3]\r\n output = open(os.path.join(dir_path, \"props/custom.%s.props\" % l), 'w')\r\n props_str_list = [l] * 5\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 22)\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 16)\r\n props_str_list.append(props_edge % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_edge_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 109)\r\n props_str_list.append(props_node % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_node_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 48)\r\n props_str_list[98] = ','.join(map(str, background_color.toRGB()))\r\n props_str_list[109] = ','.join(map(str, label_color.toRGB()))\r\n props_str_list[132] = ','.join(map(str, label_color.toRGB()))\r\n output.write(props_file_str % tuple(props_str_list))\r\n output.close()", "def make_palettes(self, dtypes: Tuple[str]) -> None:\n\n # If palettes are empty ..\n if not dtypes:\n # .. 
fallback to all available types\n dtypes = [\"acb\", \"gpl\", \"soc\", \"xml\"]\n\n # Build & create path to color sets\n sets_path = self.brand_path / \"sets\"\n sets_path.mkdir(parents=True, exist_ok=True)\n\n # Iterate over (selected) palette types\n for dtype in [dtype.lower() for dtype in dtypes]:\n # Iterate over color sets\n for set_name, data in self.sets.items():\n # Build path to palette file\n palette_file = sets_path / f\"{set_name}.{dtype}\"\n\n # Party time!\n getattr(self, f\"make_{dtype}\")(palette_file, data)", "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def create_dataloaders(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n train_data_transforms = transforms.Compose([transforms.RandomHorizontalFlip(), \\\n transforms.RandomRotation(45), \\\n transforms.RandomResizedCrop(224),\\\n transforms.ToTensor(), \\\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n valid_test_data_transforms = transforms.Compose([transforms.Resize(255), \\\n transforms.CenterCrop(224), \\\n transforms.ToTensor(), \\\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n train_image_datasets = datasets.ImageFolder(train_dir, transform=train_data_transforms)\n valid_image_datasets = datasets.ImageFolder(valid_dir, transform=valid_test_data_transforms)\n test_image_datasets = datasets.ImageFolder(test_dir, transform=valid_test_data_transforms)\n\n train_dataloaders = DataLoader(train_image_datasets, batch_size=32, shuffle=True)\n valid_dataloaders = DataLoader(valid_image_datasets, batch_size=32, shuffle=True)\n test_dataloaders = DataLoader(test_image_datasets, batch_size=32, shuffle=True)\n\n class_to_idx = train_image_datasets.class_to_idx\n \n return train_dataloaders, valid_dataloaders, test_dataloaders, class_to_idx", "def _make_dataset(self, data, base_path, dataset_name):\n dataset_classes = data['dataset_classes']\n images_path = data['images']\n objects_on_images = data['objects']\n self.create_folder_structure(base_path, dataset_name)\n self._create_classes_names(dataset_classes)\n self.create_classes_map(dataset_classes)\n self.save_images_and_labels(objects_on_images)\n return None", "def make_plot_colors(d , / , colors:str | list[str]=None , axis:int = 0, \r\n seed:int =None, chunk:bool 
=... ):\r\n \r\n # get the data size where colors must be fitted. \r\n # note colors should match either the row axis or colurms axis \r\n axis = str(axis).lower() \r\n if 'columns1'.find (axis)>=0: \r\n axis =1 \r\n else: axis =0\r\n \r\n # manage the array \r\n d= is_iterable( d, exclude_string=True, transform=True)\r\n if not hasattr (d, '__array__'): \r\n d = np.array(d, dtype =object ) \r\n \r\n axis_length = len(d) if len(d.shape )==1 else d.shape [axis]\r\n m_cs = make_mpl_properties(axis_length )\r\n \r\n #manage colors \r\n # we assume the first columns is dedicated for \r\n if colors ==...: colors =None \r\n if ( \r\n isinstance (colors, str) and \r\n ( \r\n \"cs4\" in str(colors).lower() \r\n or 'xkcd' in str(colors).lower() \r\n )\r\n ): \r\n #initilize colors infos\r\n c = copy.deepcopy(colors)\r\n if 'cs4' in str(colors).lower() : \r\n DCOLORS = mcolors.CSS4_COLORS\r\n else: \r\n # remake the dcolors my removing the xkcd: in the keys: \r\n DCOLORS = dict(( (k.replace ('xkcd:', ''), c) \r\n for k, c in mcolors.XKCD_COLORS.items())) \r\n \r\n key_colors = list(DCOLORS.keys ())\r\n colors = list(DCOLORS.values() )\r\n \r\n shuffle_cs4=True \r\n \r\n cs4_start= None\r\n #------\r\n if ':' in str(c).lower():\r\n cs4_start = str(c).lower().split(':')[-1]\r\n #try to converert into integer \r\n try: \r\n cs4_start= int (cs4_start)\r\n except : \r\n if str(cs4_start).lower() in key_colors: \r\n cs4_start= key_colors.index (cs4_start)\r\n shuffle_cs4=False\r\n else: \r\n pass \r\n \r\n else: shuffle_cs4=False # keep CS4 and dont shuffle \r\n \r\n cs4_start= cs4_start or 0\r\n \r\n if shuffle_cs4: \r\n np.random.seed (seed )\r\n colors = list(np.random.choice(colors , len(m_cs)))\r\n else: \r\n if cs4_start > len(colors)-1: \r\n cs4_start = 0 \r\n \r\n colors = colors[ cs4_start:]\r\n \r\n if colors is not None: \r\n if not is_iterable(colors): \r\n colors =[colors]\r\n colors += m_cs \r\n else :\r\n colors = m_cs \r\n \r\n # shrunk data to map the exact colors \r\n chunk =True if chunk is ... else False \r\n return colors[:axis_length] if chunk else colors", "def create_dataset(overlayPath, dataCubePath, setNames, colours):\n \n \n annotationsPositionsSet = get_annotations_positions(overlayPath, setNames, colours)\n \n dataCubeFiles = get_cube_name(setNames)\n \n print(\"loading data cube and concatenating each class\")\n \n spectraSet, waveNumbers, setInput, setOutput, setFileNumbers, setPositions = load_each_data_cube_and_catenate_each_class(dataCubeFiles, dataCubePath, annotationsPositionsSet)\n \n return spectraSet, waveNumbers, setInput, setOutput, setFileNumbers, setPositions, annotationsPositionsSet, dataCubeFiles", "def init_color_space(color_path):\n # type: (str) -> None\n color_space = np.zeros((256, 256, 256), dtype=np.uint8)\n if color_path.endswith('.yaml'):\n with open(color_path, 'r') as stream:\n try:\n color_values = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n # TODO: what now??? 
Handle the error?\n pass\n # pickle-file is stored as '.txt'\n elif color_path.endswith('.txt'):\n try:\n with open(color_path, 'rb') as f:\n color_values = pickle.load(f)\n except pickle.PickleError as exc:\n pass\n \n # compatibility with colorpicker\n if 'color_values' in color_values.keys():\n color_values = color_values['color_values']['greenField']\n length = len(color_values['red'])\n if length == len(color_values['green']) and \\\n length == len(color_values['blue']):\n # setting colors from yaml file to True in color space\n for x in range(length):\n color_space[color_values['blue'][x],\n color_values['green'][x],\n color_values['red'][x]] = 1\n print(\"Imported color space\")\n return color_space", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))", "def create_test_dataset(data_folder):\n dataset = datasets.CIFAR100('data/', False, download=True)\n Path(data_folder).mkdir()\n for i in range(100):\n img, label = random.choice(dataset)\n img.save(data_folder + str(i) +\n '_' + dataset.classes[label] + '.jpg')", "def draw_circle_rgb(file_name, mode): #one way of drawing and filling circles\r\n file = open(file_name, mode)\r\n #colors = file.readlines()\r\n whole_thing = file.readlines()\r\n #print(whole_thing)\r\n x = 0\r\n y = 1\r\n z = 2\r\n #print(len(whole_thing)//3)\r\n for i in range(len(whole_thing)//3):\r\n r = int(whole_thing[x][:-1])\r\n g = int(whole_thing[y][:-1])\r\n b = int(whole_thing[z][:-1])\r\n print(\"The color is:\", r, g, b)\r\n turtle.colormode(255)\r\n t = turtle.Turtle()\r\n t.shape(\"turtle\")\r\n t.fillcolor(r, g, b)\r\n t.begin_fill()\r\n t.up()\r\n t.goto(0, -50)\r\n t.down()\r\n t.circle(100)\r\n t.end_fill()\r\n t.up()\r\n t.hideturtle()\r\n time.sleep(1)\r\n t.clear()\r\n x += 3\r\n y += 3\r\n z += 3\r\n file.close()", "def _create_data_folder(path, props):\n if 'data_folder' in props: # will this work?\n # => regenerating from existing data\n props['name'] = props['data_folder'] + '_regen'\n data_folder = props['name']\n else:\n data_folder = props['name'] + '_' + Path(props['templates']).stem\n\n # make unique\n data_folder += '_' + datetime.now().strftime('%y%m%d-%H-%M-%S')\n props['data_folder'] = data_folder\n path_with_dataset = path / data_folder\n os.makedirs(path_with_dataset)\n\n return path_with_dataset", "def generate_dat_files(rspecs, datroot, bands, labels):\n d = ds9.ds9()\n d.set('rgb')\n d.set('rgb red')\n\n # Save plaintext projection data\n # Idea: minimize file (band) loading operations\n for fname, flab in zip(bands, labels):\n d.set('file ' + fname) # Load a band\n for i in xrange(len(rspecs)):\n d.set('regions', rspecs[i]) # Load a region\n d.set('rgb red') # Plot projection data\n dat_fname = '{0}_{1:02d}_band_{2}.dat'.format(datroot, i+1, flab)\n d.set('plot {0} save {1}'.format(d.get('plot'), dat_fname))\n d.set('regions delete all')\n d.set('exit')", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, perc, buff, label_list):\r\n \r\n if perc < 0 or perc > 1:\r\n raise ValueError('Please input a number between 0 and 1 (inclusive) for perc.')\r\n \r\n if buff < 0 or buff > 1:\r\n raise ValueError('Please input a number between 0 and 1 (inclusive) for buff.')\r\n \r\n img_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n polygon_files = 
glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_files)):\r\n with rasterio.open(img_files[file]) as f:\r\n metadata = f.profile\r\n img = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n mask = training_mask_generation(img_files[file], polygon_files[file], labels = label_list)\r\n \r\n if (img.shape[0] % img_height_size != 0) and (img.shape[1] % img_width_size == 0):\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 0, \r\n percentage_overlap = perc, buffer = buff)\r\n elif (img.shape[0] % img_height_size == 0) and (img.shape[1] % img_width_size != 0):\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 1, \r\n percentage_overlap = perc, buffer = buff)\r\n elif (img.shape[0] % img_height_size != 0) and (img.shape[1] % img_width_size != 0):\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 2, \r\n percentage_overlap = perc, buffer = buff)\r\n else:\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 3, \r\n percentage_overlap = perc, buffer = buff)\r\n \r\n img_array_list.append(img_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_full_array = np.concatenate(img_array_list, axis = 0)\r\n mask_full_array = np.concatenate(mask_array_list, axis = 0)\r\n \r\n return img_full_array, mask_full_array", "def create_folder():\n directory = \"data/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n logging.info(\"Data folder created.\")\n else:\n logging.info(\"Data folder already existed.\")", "def test_rgb_list(self):\n\n data_tups = catalogue._data_tuples_from_fnames(input_path=data_path)\n data_storage = data_path + 'test_1.pkl'\n catalogue.rgb_list(data_tups, storage_location=data_storage)\n plot_tups = handling.pickled_data_loader(data_path, 'test_1')\n\n for row in plot_tups:\n assert type(row) is tuple, \"List-of-Tuples has non-tuple?\"\n assert type(row[0]) is str, \"File Name in Tuple is wrong format.\"\n assert type(row[1]) is np.ndarray,\\\n \"List-of-image-Tuples is not in np.ndarray format??\"\n assert type(row[2]) is str, \"Class label is not a string?\"\n pass", "def test_dir_colors(file_name):\n assert (\n color_file(File.from_path(file_name), isdir=True)\n == colorama.Style.BRIGHT\n + colorama.Fore.BLUE\n + file_name\n + colorama.Style.RESET_ALL\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It should create grayscale shapes in the given data directory.
def test_create_shapes_grayscale(data_dir):
    dataset.create_shapes(10, 10, 1, channels=1, data_dir=data_dir)
    img_path = os.path.join(data_dir, "ellipse/0.png")
    assert os.path.exists(img_path)
    img = imageio.imread(img_path)
    assert img.shape == (10, 10)
[ "def test_create_shapes(data_dir):\n dataset.create_shapes(10, 10, 1, data_dir=data_dir)\n img_path = os.path.join(data_dir, \"ellipse/0.png\")\n assert os.path.exists(img_path)\n img = imageio.imread(img_path)\n assert img.shape == (10, 10, 4)", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def create_test_dataset(data_folder):\n dataset = datasets.CIFAR100('data/', False, download=True)\n Path(data_folder).mkdir()\n for i in range(100):\n img, label = random.choice(dataset)\n img.save(data_folder + str(i) +\n '_' + dataset.classes[label] + '.jpg')", "def _make_dataset(self, data, base_path, dataset_name):\n dataset_classes = data['dataset_classes']\n images_path = data['images']\n objects_on_images = data['objects']\n self.create_folder_structure(base_path, dataset_name)\n self._create_classes_names(dataset_classes)\n self.create_classes_map(dataset_classes)\n self.save_images_and_labels(objects_on_images)\n return None", "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = 
img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, perc, buff, label_list):\r\n \r\n if perc < 0 or perc > 1:\r\n raise ValueError('Please input a number between 0 and 1 (inclusive) for perc.')\r\n \r\n if buff < 0 or buff > 1:\r\n raise ValueError('Please input a number between 0 and 1 (inclusive) for buff.')\r\n \r\n img_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_files)):\r\n with rasterio.open(img_files[file]) as f:\r\n metadata = f.profile\r\n img = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n mask = training_mask_generation(img_files[file], polygon_files[file], labels = label_list)\r\n \r\n if (img.shape[0] % img_height_size != 0) and (img.shape[1] % img_width_size == 0):\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 0, \r\n percentage_overlap = perc, buffer = buff)\r\n elif (img.shape[0] % img_height_size == 0) and (img.shape[1] % img_width_size != 0):\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 1, \r\n percentage_overlap = perc, buffer = buff)\r\n elif (img.shape[0] % img_height_size != 0) and (img.shape[1] % img_width_size != 0):\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 2, \r\n percentage_overlap = perc, buffer = buff)\r\n else:\r\n img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 3, \r\n percentage_overlap = perc, buffer = buff)\r\n \r\n img_array_list.append(img_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_full_array = np.concatenate(img_array_list, axis = 0)\r\n mask_full_array = np.concatenate(mask_array_list, axis = 0)\r\n \r\n return img_full_array, mask_full_array", "def preprocess_dir(data_path,\n output_path,\n dataset,\n n_train,\n new_size,\n ):\n img_type_dict = get_class_labels()\n\n print('Preprocessing:', dataset)\n target_data_path = data_path\n disease_dirs = os.listdir(target_data_path)\n disease_dirs = [d for d in disease_dirs if\n os.path.isdir(os.path.join(target_data_path, d))]\n img_stack, target_list = [], []\n img_names = []\n for img_type in disease_dirs:\n class_lbl = img_type_dict[img_type]\n n_class = int(n_train / 
len(disease_dirs))\n print('\\t', img_type)\n img_files_path = os.path.join(target_data_path, img_type)\n if not (os.path.isdir(img_files_path)):\n continue\n img_files = os.listdir(img_files_path)\n img_files = [f for f in img_files if f.endswith('.jpeg')]\n if dataset == 'train':\n img_files = img_files[0:n_class]\n for img_fname in img_files:\n img_path = os.path.join(img_files_path, img_fname)\n img_arr = np.array(Image.open(img_path))\n img_arr = skimage.transform.resize(img_arr, new_size)\n img_arr = (img_arr - img_arr.min()) / img_arr.max()\n img_stack.append(img_arr)\n target_list.append(class_lbl)\n img_names += [n.split('.')[0] for n in img_files]\n # Save preprocessed data\n save_data(output_path, img_stack, target_list,\n new_size, dataset, n_train, img_names)", "def convert_images(img_dir, class_mapping, data_file):\n print 'Data File: ' + str(data_file)\n # the training data\n X = []\n # the taining labels\n Y = []\n # the images directory listing, filtered by jpg format\n fotos_list = glob.glob(os.path.join(img_dir, '') + \"*.jpg\")\n # we shuffle the list to randominze the images in the resulting array\n shuffle(fotos_list)\n # reads each image in the file list\n for filename in fotos_list:\n print \"Processing \" + filename\n # from the first part of the image name gets the image mapping value\n image_class = class_mapping[basename(filename.split(\"_\")[0])]\n # reads the image as array\n array_image = scipy.misc.imread(filename)\n # converts to float32\n array_image = array_image.astype('float32')\n # appends the image data to the training data set X\n X.append(np.array(array_image))\n # appends the image label to the training data set Y\n Y.append(image_class)\n\n # gets 1/4 of the data as test data and labels\n test_data = int(len(X[0:])/4.0)\n X_TEST = X[:test_data]\n Y_TEST = Y[:test_data]\n # resizes the training data and label arrays with the remaining 3/4 of the data\n X = X[test_data:]\n Y = Y[test_data:]\n\n print \"serializing image arrays to \" + data_file\n # opens the pickle file\n f = open(data_file, \"w\")\n # dumps the training data, training labels, test data and test labels\n pickle.dump((np.array(X), np.array(Y), np.array(X_TEST), np.array(Y_TEST)), f)\n\n # reads the pickle file and prints the array shapes\n print \"reading arrays from pickle file \" + data_file\n f = open(data_file, \"rb\")\n A, B, C, D = pickle.load(f)\n\n print A.shape\n print B.shape\n print C.shape\n print D.shape", "def create_train_data(self, wmap):\n\n i = 0\n print('-' * 30)\n print('Creating training images...')\n print('-' * 30)\n\n # original\n imgs = glob.glob(self.data_path + \"/*/*\")\n\n\n imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)\n imglabels = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)\n imgweights = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)\n\n\n for imgname in imgs:\n\n midname = imgname.split(\"/\")[2] + \"/\" + imgname.split(\"/\")[3]\n\n img = cv2.imread(self.data_path + \"/\" + midname, cv2.IMREAD_GRAYSCALE)\n label = cv2.imread(self.label_path + \"/\" + midname, cv2.IMREAD_GRAYSCALE)\n\n img = np.array([img])\n img = img.reshape((width, height, 1))\n\n label = np.array([label])\n label = label.reshape((width, height, 1))\n\n imgdatas[i] = img\n imglabels[i] = label\n\n if wmap==True:\n\n weights = cv2.imread(self.weight_path + \"/\" + midname,cv2.IMREAD_GRAYSCALE)\n\n weights = np.array([weights])\n weights = weights.reshape((width, height, 1))\n\n 
imgweights[i] = weights\n\n\n if i % 100 == 0:\n print('Done: {0}/{1} images'.format(i, len(imgs)))\n i += 1\n\n print('Loading done')\n\n # original\n np.save(self.npy_path + '/imgs_train.npy', imgdatas)\n np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)\n\n if wmap==True:\n np.save(self.npy_path + '/imgs_weights.npy', imgweights)\n\n print('Saving to .npy files done.')", "def __createDataFolderStruct(self):\n if (not os.path.isdir('./data')):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/laz/\")\n os.mkdir(\"./data/tif/\")\n if (not os.path.isdir('./data/laz')):\n os.mkdir(\"./data/laz/\")\n if (not os.path.isdir('./data/tif')):\n os.mkdir(\"./data/tif/\")", "def generate_dummy_miniimagenet_data(dir_path):\n num_dummy_categories = 20\n num_dummy_img_per_category = 10\n img_height, img_width = 84, 84\n data_path = os.path.join(dir_path, \"miniimagent\")\n for set_name in [\"train\", \"valid\", \"test\"]:\n set_path = os.path.join(data_path, set_name)\n for cid in range(num_dummy_categories):\n dummy_category_name = f\"n{cid:05d}\"\n dummy_category_path = os.path.join(set_path, dummy_category_name)\n os.makedirs(dummy_category_path)\n for img_id in range(num_dummy_img_per_category):\n img_array = np.full(\n (img_height, img_width), img_id * 20, dtype=np.int8\n )\n img_path = os.path.join(dummy_category_path, f\"{img_id}.jpg\")\n Image.fromarray(img_array).convert(\"RGB\").save(img_path)", "def load_data(width=224, height=224, shrinking_method = 'nearest', n_classes = 120):\n # If the data hasn't been downloaded yet, download it first.\n if not (os.path.exists('./data/dog_images.tar') and os.path.exists('./data/annotations.tar')):\n download_data()\n else:\n print('./data/dog_images.tar and ./data/annotations.tar already exist. Begin extracting...')\n \n # Check if the package has been unpacked, otherwise unpack the package\n if not os.path.exists('./data/Images/'):\n package = tarfile.open('./data/dog_images.tar')\n package.extractall('./data')\n package.close()\n if not os.path.exists('./data/Annotation/'):\n package = tarfile.open('./data/annotations.tar')\n package.extractall('./data')\n package.close()\n \n print('Standford dogs data and annotations were extracted. 
Begin creating dataset...')\n \n # Go to the location where the files are unpacked\n root_dir = os.getcwd()\n path = './data/Images'\n os.chdir(path)\n #get all folders name (corresponding to all breed of dogs)\n folders = glob.glob('*')\n #to store all images (arrays) and their labels\n data = []\n label = []\n #dictionaries to match labels to breed\n breed_to_label = {}\n label_to_breed = {}\n #counter for classes (first breed is 0 - last breed is 119, there are only 120 classes)\n i=0\n for folder in folders:\n if i<n_classes:\n #get the breed from the name of the folder\n breed = folder.split('-')[1]\n #fill both dictionaries\n breed_to_label[breed] = i\n label_to_breed[i]=breed\n #each folder contains pictures about a specific breed\n #get the names of all those images\n images = glob.glob(folder+'/*')\n #fill data and labels with the images and their label\n #different method to change the shape of an image exists\n if shrinking_method == 'nearest':\n for image in images:\n tree = etree.parse('../Annotation/'+image[:-4])\n xmin = int(tree.find('object').find('bndbox').find('xmin').text)\n xmax = int(tree.find('object').find('bndbox').find('xmax').text)\n ymin = int(tree.find('object').find('bndbox').find('ymin').text)\n ymax = int(tree.find('object').find('bndbox').find('ymax').text)\n data.append(np.asarray(\n Image.open(image).crop(\n (xmin, ymin, xmax, ymax)).resize(\n (width, height), Image.NEAREST)))\n label.append(i)\n if shrinking_method == 'bilinear':\n for image in images:\n tree = etree.parse('../Annotation/'+image[:-4])\n xmin = int(tree.find('object').find('bndbox').find('xmin').text)\n xmax = int(tree.find('object').find('bndbox').find('xmax').text)\n ymin = int(tree.find('object').find('bndbox').find('ymin').text)\n ymax = int(tree.find('object').find('bndbox').find('ymax').text)\n data.append(np.asarray(Image.open(image).crop(\n (xmin, ymin, xmax, ymax)).resize(\n (width, height), Image.BILINEAR)))\n label.append(i)\n if shrinking_method == 'bicubic':\n for image in images:\n tree = etree.parse('../Annotation/'+image[:-4])\n xmin = int(tree.find('object').find('bndbox').find('xmin').text)\n xmax = int(tree.find('object').find('bndbox').find('xmax').text)\n ymin = int(tree.find('object').find('bndbox').find('ymin').text)\n ymax = int(tree.find('object').find('bndbox').find('ymax').text)\n data.append(np.asarray(Image.open(image).crop(\n (xmin, ymin, xmax, ymax)).resize(\n (width, height), Image.BICUBIC)))\n label.append(i)\n if shrinking_method == 'antialias':\n for image in images:\n tree = etree.parse('../Annotation/'+image[:-4])\n xmin = int(tree.find('object').find('bndbox').find('xmin').text)\n xmax = int(tree.find('object').find('bndbox').find('xmax').text)\n ymin = int(tree.find('object').find('bndbox').find('ymin').text)\n ymax = int(tree.find('object').find('bndbox').find('ymax').text)\n data.append(np.asarray(Image.open(image).crop(\n (xmin, ymin, xmax, ymax)).resize(\n (width, height), Image.ANTIALIAS)))\n label.append(i)\n i+=1\n os.chdir(root_dir)\n \n #remove weird image (with depth of 4)\n data = [d for d in data if d.shape[2]==3]\n label = [label[i] for i in range(len(data)) if data[i].shape[2]==3]\n \n #cast to array\n data = np.asarray(data)\n label = np.asarray(label)\n \n data = data/255\n label = to_categorical(label, n_classes)\n \n #shuffle dataset because images are grouped by similar class\n shuffle = np.random.choice(data.shape[0], data.shape[0], replace = False)\n\n data = data[shuffle,:,:,:]\n label = label[shuffle,:]\n \n print('Dataset, 
labels and dictionaries are loaded')\n return data, label, label_to_breed, breed_to_label", "def __init__(self, root_dir, redux, crop_size, resize_size, clean_targets=False,\n noise_dist=('gaussian', 50.), seed=None):\n super(NoisyDataset, self).__init__(root_dir, redux, crop_size, resize_size, clean_targets)\n self.imgs = os.listdir(root_dir)\n if redux:\n self.imgs = self.imgs[:redux]\n # Noise parameters (max std for Gaussian, lambda for Poisson, nb of artifacts for text)\n self.noise_type = noise_dist[0]\n self.noise_param = noise_dist[1]\n self.seed = seed\n if self.seed:\n np.random.seed(self.seed)", "def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)", "def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap", "def convert_crops_to_pytorch_imagefolder_structure(crops_dir):\n files = glob.glob(crops_dir + \"/*.png\")\n obj_id_to_crops = {}\n for f in files:\n obj_id = f.split(\".png\")[0].split(\"-\")[-1]\n if obj_id not in obj_id_to_crops:\n obj_id_to_crops[obj_id] = {f}\n else:\n obj_id_to_crops[obj_id].add(f)\n os.mkdir(crops_dir + \"/train\")\n os.mkdir(crops_dir + \"/test\")\n for obj_id in obj_id_to_crops:\n os.mkdir(crops_dir + \"/train/\" + obj_id)\n os.mkdir(crops_dir + \"/test/\" + obj_id)\n for f in obj_id_to_crops[obj_id]:\n img_name = f.split(\"/\")[-1]\n if np.random.randint(0, 10):\n os.rename(f, crops_dir + \"/train/\" + obj_id + \"/\" + img_name)\n else:\n os.rename(f, crops_dir + \"/test/\" + obj_id + \"/\" + img_name)", "def create_data_dict(data_dir, img_size=[25, 83]):\n print(\"Creating data dictionary\")\n print(\"- Using data at:\", data_dir)\n\n # 
Directories\n imgs_dir = os.path.join(data_dir, \"training/image_2\")\n labels_dir = os.path.join(data_dir, \"training/gt_image_2\")\n\n print(\"- Getting list of files\")\n # Only get the label files for road (not lane)\n label_files = glob.glob(os.path.join(labels_dir, \"*_road_*.png\"))\n\n # Create corresponding list of training image files\n img_files = list(map(lambda f: os.path.basename(f).replace(\"_road\", \"\"), label_files))\n img_files = list(map(lambda f: os.path.join(imgs_dir, f), img_files)) # absolute path\n\n n_samples = len(img_files)\n print(\"- Encountered {} samples\".format(n_samples))\n est_filesize = (n_samples*np.prod(img_size)*(3+1))/1e6\n print(\"- Estimated output filesize: {:0.3f} MB + overhead\".format(est_filesize))\n\n data = {}\n data[\"X_train\"] = np.empty([n_samples]+img_size+[3], dtype=np.uint8)\n data[\"Y_train\"] = np.empty([n_samples]+img_size, dtype=np.uint8)\n\n print(\"- Processing image files\")\n for i in range(n_samples):\n label_img = scipy.misc.imread(label_files[i])\n input_img = scipy.misc.imread(img_files[i])\n\n # PRERPOCESS THE IMAGES\n label_img = scipy.misc.imresize(label_img, img_size)\n input_img = scipy.misc.imresize(input_img, img_size)\n\n # PROCESSING LABEL IMAGE\n # Only one channel, (1=road, 0=not road)\n non_road_class = np.array([255,0,0])\n label_img = (1-np.all(label_img==non_road_class, axis=2, keepdims=False)).astype(np.uint8)\n\n # Place the images into the data arrays\n data[\"X_train\"][i] = input_img\n data[\"Y_train\"][i] = label_img\n\n print(\"- Shuffling the data\")\n np.random.seed(seed=128)\n ids = list(np.random.permutation(n_samples))\n data[\"X_train\"] = data[\"X_train\"][ids]\n data[\"Y_train\"] = data[\"Y_train\"][ids]\n\n print(\"- Done!\")\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate by how many cells the moving window should be moved. If this is nonzero, shift the fields on the interpolation grid, and add new particles.
def move_grids(self, fld, comm, time): # To avoid discrepancies between processors, only the first proc # decides whether to send the data, and broadcasts the information. dz = comm.dz if comm.rank==0: # Move the continuous position of the moving window object self.zmin += self.v * (time - self.t_last_move) # Find the number of cells by which the window should move zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax( local=False, with_damp=False, with_guard=False ) n_move = int( (self.zmin - zmin_global_domain)/dz ) else: n_move = None # Broadcast the information to all proc if comm.size > 1: n_move = comm.mpi_comm.bcast( n_move ) # Move the grids if n_move != 0: # Move the global domain comm.shift_global_domain_positions( n_move*dz ) # Shift the fields Nm = len(fld.interp) for m in range(Nm): # Modify the values of the corresponding z's fld.interp[m].zmin += n_move*fld.interp[m].dz fld.interp[m].zmax += n_move*fld.interp[m].dz # Shift/move fields by n_move cells in spectral space self.shift_spect_grid( fld.spect[m], n_move ) # Because the grids have just been shifted, there is a shift # in the cell indices that are used for the prefix sum. if fld.use_cuda: fld.prefix_sum_shift += n_move # This quantity is reset to 0 whenever prefix_sum is recalculated # Prepare the positions of injection for the particles # (The actual creation of particles is done when the routine # exchange_particles of boundary_communicator.py is called) if comm.rank == comm.size-1: # Move the injection position self.z_inject += self.v * (time - self.t_last_move) # Take into account the motion of the end of the plasma self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move) # Increment the number of particle cells to add nz_new = int( (self.z_inject - self.z_end_plasma)/dz ) self.nz_inject += nz_new # Increment the virtual position of the end of the plasma # (When `generate_particles` is called, then the plasma # is injected between z_end_plasma - nz_inject*dz and z_end_plasma, # and afterwards nz_inject is set to 0.) self.z_end_plasma += nz_new*dz # Change the time of the last move self.t_last_move = time
[ "def move_grids(self, fld, ptcl, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n for species in ptcl:\n if species.use_cuda:\n species.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n for species in ptcl:\n if species.continuous_injection:\n # Increment the positions for the generation of particles\n # (Particles are generated when `generate_particles` is called)\n species.injector.increment_injection_positions(\n self.v, time-self.t_last_move )\n\n # Change the time of the last move\n self.t_last_move = time", "def misplaced_tiles():\n tiles = 0\n for c, f in zip(self.grid, Node.final_grid):\n if c:\n if c - f:\n tiles += 1\n return tiles", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def update_grid(self):\n dx = [-1, -1, -1, 0, 0, 1, 1, 1]\n dy = [-1, 0, 1, -1, 1, -1, 0, 1]\n\n for r in range(self.rows):\n for c in range(self.columns):\n life = dead = 0\n for i in range(8):\n # this was we consider the end of each edge is the start of the opposite edge\n nc = (c + dy[i]) % self.columns\n nr = (r + dx[i]) % self.rows\n\n if self.active_grid[nr][nc]:\n life += 1\n else:\n dead += 1\n if self.active_grid[r][c]:\n # Apply life cell rules\n if life == 2 or life == 3:\n self.inactive_grid[r][c] = 1\n else:\n self.inactive_grid[r][c] = 0\n\n else:\n # Apply dead Cell rules\n if life == 3:\n self.inactive_grid[r][c] = 1\n else:\n self.inactive_grid[r][c] = 0\n\n # deep copy == copy by value not refrence\n self.active_grid = 
copy.deepcopy(self.inactive_grid)", "def count_mines(self):\n for i in range(0, self.field_height):\n for j in range(0, self.field_width):\n if self.field[i][j] != FIELD_MINE:\n for delta_i in range(-1, 2):\n for delta_j in range(-1, 2):\n if (not (delta_i == 0 and delta_j == 0) and\n 0 <= i + delta_i < self.field_height and\n 0 <= j + delta_j < self.field_width and\n self.field[i + delta_i][j + delta_j]\n == -1):\n self.field[i][j] = self.field[i][j] + 1", "def update_shift_count(self, move):\n if len(move) == 2:\n self.shift_count += 1\n else:\n self.shift_count = 0", "def _add_mines(self):\n num = 0\n while num < self._n:\n x = random.randint(0, self._dim - 1)\n y = random.randint(0, self._dim - 1)\n if self._board[x][y] != -1:\n self._board[x][y] = -1\n neighbors = self._get_neighbors((x, y))\n for neighbor in neighbors:\n if self._board[neighbor[0]][neighbor[1]] != -1:\n self._board[neighbor[0]][neighbor[1]] += 1\n num += 1", "def update_move_index(self):\n\t\tself.move_index = (self.move_index + 1) % self.move_max", "def update_positions(self, grid):\r\n self.grid = grid", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_gpu[tpb, bpg]( grid.Ep_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_cpu( grid.Ep_pml, shift, n_move )\n shift_spect_array_cpu( grid.Em_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bp_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def set_adjacent_mine_count(self):\n for position in self.grid_coords:\n x, y = position\n if self.grid[y][x] >= 0:\n grid_value = sum(map(self.is_mine, get_adjacent.get_adjacent(position)))\n self.grid[y][x] = grid_value", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in 
range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def add_num_to_space(self):\n\n\t\tnb_to_add = random.choice([2,4])\n\n\t\t# Add where\n\t\trows_indices, cols_indices = self.find_empty_spaces()\n\t\tindex = random.randint(len(rows_indices))\n\t\trow_chosen,col_chosen = rows_indices[index],cols_indices[index]\n\t\t# add value to self.observation_spacerix\n\t\tself.observation_space[row_chosen, col_chosen] = nb_to_add", "def add_to_dist_moved(self):\n change = ((self.previous_center_x - self.center_x)**2\n + (self.previous_center_y - self.center_y)**2)**(1./2)\n \n self.dist_moved += change", "def update_pop_matrix(self):\n for row in self.unique_rows[1:-1]: # First and last cell is water\n for col in self.unique_cols[1:-1]: # First and last cell is water\n cell = self.landscape[(row, col)]\n if cell.is_mainland:\n # print(cell)\n self.herb_pop_matrix[row - 1][col - 1] = cell.herb_count\n self.carn_pop_matrix[row - 1][col - 1] = cell.carn_count", "def update(self):\n if self.able_to_move:\n self.pix_position += self.direction * self.speed\n if self.move_next_position():\n if self.stored_direction is not None:\n self.direction = self.stored_direction\n self.able_to_move = self.can_move()\n\n # following the player circle on grid - tracking the movement\n self.grid_position[0] = (self.pix_position[0] - TOP_BOTTOM_BUFFER + self.app.cell_width // 2) // self.app.cell_width + 1\n self.grid_position[1] = (self.pix_position[1] - TOP_BOTTOM_BUFFER + self.app.cell_height // 2) // self.app.cell_height + 1\n\n # check the actual grid position, and if there it has a coin, the player \"eat\" that\n if self.on_coin():\n self.eat_coin()", "def move(self):\r\n for index in range(self.size):\r\n self.values[index] = self.values[index] + self.velocities[index]\r\n \r\n # Adjust values to keep particle inside boundaries.\r\n if self.values[index] < Particle.MIN_VALUE:\r\n self.values[index] = (-self.values[index] % Particle.MAX_VALUE)\r\n elif self.values[index] > Particle.MAX_VALUE:\r\n self.values[index] = (self.values[index] % Particle.MAX_VALUE)", "def topple_loc(grid, x, y, grid_x_size, grid_y_size):\n grid[x, y] -= 4\n\n if x - 1 >= 0:\n grid[x - 1, y] += 1\n if x + 1 < grid_x_size:\n grid[x + 1, y] += 1\n if y - 1 >= 0:\n grid[x, y - 
1] += 1\n if y + 1 < grid_y_size:\n grid[x, y + 1] += 1", "def main_pre_displacement(self):\n # Step 1: initialisation of parameters v and D\n \n # advective velocity in each grid element of the soil matrix\n self.v = 1 * self.k\n # diffusivity in each grid element of the soil matrix\n self.D = 1 * self.k / self.c\n \n # finds all grid elements with a soil moisture near to thr (almost dry soil)\n # sets the velocity and diffusivity in this grid elements to 0-> no flux!\n ipres = self.theta < 1.1 * self.thr\n if ipres.any() == True:\n self.v[ipres==True] = 0\n self.D[ipres==True] = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate new particles at the right end of the plasma (i.e. between z_end_plasma - nz_inject*dz and z_end_plasma). Return them in the form of a particle buffer of shape (8, Nptcl)
def generate_particles( self, species, dz, time ) : # Shortcut for the number of integer quantities n_int = species.n_integer_quantities n_float = species.n_float_quantities # Create new particle cells if (self.nz_inject > 0) and (species.continuous_injection == True): # Create a temporary density function that takes into # account the fact that the plasma has moved if species.dens_func is not None: def dens_func( z, r ): return( species.dens_func( z-self.v_end_plasma*time, r ) ) else: dens_func = None # Create the particles that will be added zmax = self.z_end_plasma zmin = self.z_end_plasma - self.nz_inject*dz Npz = self.nz_inject * self.p_nz new_ptcl = Particles( species.q, species.m, species.n, Npz, zmin, zmax, species.Npr, species.rmin, species.rmax, species.Nptheta, species.dt, dens_func=dens_func, ux_m=self.ux_m, uy_m=self.uy_m, uz_m=self.uz_m, ux_th=self.ux_th, uy_th=self.uy_th, uz_th=self.uz_th) # Initialize ionization-relevant arrays if species is ionizable if species.ionizer is not None: new_ptcl.make_ionizable( element=species.ionizer.element, target_species=species.ionizer.target_species, level_start=species.ionizer.level_start, full_initialization=False ) # Convert them to a particle buffer # - Float buffer float_buffer = np.empty( (n_float, new_ptcl.Ntot), dtype=np.float64 ) float_buffer[0,:] = new_ptcl.x float_buffer[1,:] = new_ptcl.y float_buffer[2,:] = new_ptcl.z float_buffer[3,:] = new_ptcl.ux float_buffer[4,:] = new_ptcl.uy float_buffer[5,:] = new_ptcl.uz float_buffer[6,:] = new_ptcl.inv_gamma float_buffer[7,:] = new_ptcl.w if species.ionizer is not None: float_buffer[8,:] = new_ptcl.ionizer.w_times_level # - Integer buffer uint_buffer = np.empty( (n_int, new_ptcl.Ntot), dtype=np.uint64 ) i_int = 0 if species.tracker is not None: uint_buffer[i_int,:] = \ species.tracker.generate_new_ids(new_ptcl.Ntot) i_int += 1 if species.ionizer is not None: uint_buffer[i_int,:] = new_ptcl.ionizer.ionization_level else: # No new particles: initialize empty arrays float_buffer = np.empty( (n_float, 0), dtype=np.float64 ) uint_buffer = np.empty( (n_int, 0), dtype=np.uint64 ) return( float_buffer, uint_buffer )
[ "def createparticles():\n if topography == \"Digital elevation model\": #Selected in GUI\n z = 276 # The elevation in the DEM at the bomb site is 200m\n else:\n z = 76 \n \n for i in range(num_of_particles):\n x = xb\n y = yb\n ws = wind_speed\n particles.append(particlemove.Particle (x, y, z, ws, environment))\n #print(particles[i]) # Used for testing\n #print(particles[0].x)", "def generate_particles(self):\n\n \t#This is called after the particles are trimmed, so we must re-generate the rest of the particles from the number of kept particles to the number of total particles\n keptParticleNumber = len(self.particles)\n for i in range(keptParticleNumber, self.totalParticles):\n\n \t#Every new particle we generate will be a copy of the previously kept particle with some added noise\n \t#This means that each particle that was kept will get 10 'copies' of itself in the next round\n copyParticle = self.particles[i % keptParticleNumber]\n\n #Generate the deviation/noise in each axis\n xdev = random.randrange(0, self.deviationXY*200)/100 - self.deviationXY\n ydev = random.randrange(0, self.deviationXY*200)/100 - self.deviationXY\n tdev = random.randrange(0, self.deviationTheta*200)/100 - self.deviationTheta\n\n #Put the new 'clone' into the master particle list\n self.particles.append(Particle(copyParticle.x + xdev, copyParticle.y + ydev, copyParticle.theta + tdev))", "def gen_neighbor_particles(self):\n #TODO:\n pass", "def generate_continuously_injected_particles( self, time ):\n # This function should only be called if continuous injection is activated\n assert self.continuous_injection == True\n\n # Have the continuous injector generate the new particles\n Ntot, x, y, z, ux, uy, uz, inv_gamma, w = \\\n self.injector.generate_particles( time )\n\n # Convert them to a particle buffer\n # - Float buffer\n float_buffer = np.empty((self.n_float_quantities,Ntot),dtype=np.float64)\n float_buffer[0,:] = x\n float_buffer[1,:] = y\n float_buffer[2,:] = z\n float_buffer[3,:] = ux\n float_buffer[4,:] = uy\n float_buffer[5,:] = uz\n float_buffer[6,:] = inv_gamma\n float_buffer[7,:] = w\n if self.ionizer is not None:\n # All new particles start at the default ionization level\n float_buffer[8,:] = w * self.ionizer.level_start\n # - Integer buffer\n uint_buffer = np.empty((self.n_integer_quantities,Ntot),dtype=np.uint64)\n i_int = 0\n if self.tracker is not None:\n uint_buffer[i_int,:] = self.tracker.generate_new_ids( Ntot )\n i_int += 1\n if self.ionizer is not None:\n # All new particles start at the default ionization level\n uint_buffer[i_int,:] = self.ionizer.level_start\n\n return( float_buffer, uint_buffer )", "def genParticle(self):\n self.vertices = [round(random.uniform(.03, -.03),7), round(random.uniform(.03, -.03),7)\\\n ,0] #Leaving z at 0, only considering 2d space for now\n self.mangle = [round(random.uniform(15000, 4000),2), round(random.uniform(4, 0),5),\\\n round(random.uniform(4, -4),5)]\n self.charge = random.randint(-1, 1)\n\n #particle trajectory estimations\n magfield = 1 # we will assume the magnetic field is uniform\n if self.charge != 0: \n self.p_radius = self.mangle[0] / (self.charge * magfield)\n self.centpt = [self.vertices[0] + (self.p_radius*math.sin(self.mangle[2])),\n self.vertices[1] + (self.p_radius*math.cos(self.mangle[2]))]\n self.p_circ = Circle(Point(self.centpt[0], self.centpt[1]), abs(self.p_radius))\n self.m_ray = Ray(Point(self.vertices[0], self.vertices[1]), #ray headed in direction of momentum (angle phi)\n Point(100, 100*math.tan(self.mangle[2])))", "def 
generate_particle_distribution(self, max_loop = np.inf, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n F_max = np.max(self.DF.f) ; F_min = np.min(self.DF.f)\n\n n_particles = 0\n loop_counter = 0\n \n if self.optimize:\n relative_potential = self._interpolate_relative_potential\n else:\n relative_potential = self.DF.relative_potential\n \n \n \n # Continue until max number of particles chosen, or until max loop counter\n while ((n_particles < self.N_part) and (loop_counter < max_loop)):\n \n # choose random position, eval potential, choose velocity\n r = self._choose_position()\n \n Psi = relative_potential(r) \n v = self._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n\n # interpolate along DF to find f(E) of chosen particle\n f_E = self.DF.interpolate_f(E)\n\n # random number from 0 to F_max for accept reject\n #F = np.random.rand() * F_max\n \n # HOLY CRAP....Fmax - Fmin ... not Fmin - Fmax\n F = 10.0**( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n \n if F <= f_E: # accept particle\n\n \n # convert position to cartesian using random theta and phi\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n # save particle position\n self.pos[n_particles] = r * np.array([x,y,z])\n \n # repeat for velocity using new random numbers\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n # save particle velocity\n self.vel[n_particles] = v * np.array([vx,vy,vz])\n \n \n n_particles = n_particles + 1\n \n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. 
On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n \n if (not outfile == None):\n self.write_pd(outfile)\n \n return self.pos, self.vel", "def generate_fake_particles():\n\n result = list()\n\n # Door wall particles\n y = 0\n while y <= HEIGHT:\n if not HEIGHT / 2 - DIAMETER / 2 <= y <= HEIGHT / 2 + DIAMETER / 2:\n result.append(Particle(DOOR_POSITION, y, radius=0, mass=math.inf, v=0, o=0, is_fake=True))\n y += MIN_PARTICLE_RADIUS\n\n # Corner particles\n result.append(Particle(0, 0, radius=0, mass=math.inf, v=0, o=0, is_fake=True))\n result.append(Particle(WIDTH, 0, radius=0, mass=math.inf, v=0, o=0, is_fake=True))\n result.append(Particle(0, HEIGHT, radius=0, mass=math.inf, v=0, o=0, is_fake=True))\n result.append(Particle(WIDTH, HEIGHT, radius=0, mass=math.inf, v=0, o=0, is_fake=True))\n\n return result", "def generate_particles_6d(self, n_particles):\n me = 9.11e-31\n qe = 1.602e-19\n c = 299792458.0\n\n n_rem = n_particles\n particles = np.zeros((6, 1))\n start = 0\n if self.gen == 'hammersley':\n logging.info('Using Hammersley quasi random distribution')\n random_vec = self._generate_hammersley(n_rem, start, 6, step=7).transpose()\n elif self.gen == 'halton':\n logging.info('Using Halton quasi random distribution')\n random_vec = self._generate_halton(n_rem, 6)\n elif self.gen == 'qr':\n random_vec = self._generate_quasi_random(n_rem, 6)\n else:\n logging.info('Using pseudo random distribution')\n random_vec = np.random.random((6, n_rem))\n logging.info('Random vec generated, shape {0}'.format(random_vec.shape))\n # Generate transverse profile from image\n x = self.sample_x(random_vec[:, 0:2])\n logging.info('Transverse points generated')\n\n t = self.sample_t(random_vec[:, 2]).reshape(-1, 1)\n logging.info('Time points generated')\n\n if self.momentum_source == \"fd\":\n p = self.sample_fermi_dirac(random_vec[:, 3:6])\n logging.info('Fermi-Dirac points generated')\n else:\n pt = self.sample_p(random_vec[:, 3:5])\n E = self.sample_energy(random_vec[:, 5])\n pz = np.sqrt(2 * me * E * qe) * c / qe\n p = np.hstack((pt, pz.reshape((-1, 1))))\n logging.info('Momentum points generated')\n\n # particles = np.hstack((x, t, px, energy))\n particles = np.hstack((x, t, p))\n particles = particles[~np.isnan(particles[:, 1])]\n return particles", "def add_particle_bunch(sim, q, m, gamma0, n, p_zmin, p_zmax, p_rmin, p_rmax,\n p_nr=2, p_nz=2, p_nt=4, dens_func=None, boost=None,\n direction='forward', z_injection_plane=None,\n initialize_self_field=True,\n boost_positions_in_dens_func=False ):\n # Calculate the electron momentum\n uz_m = ( gamma0**2 - 1. 
)**0.5\n if direction == 'backward':\n uz_m *= -1.\n # Create the electron species\n ptcl_bunch = sim.add_new_species( q=q, m=m, n=n,\n p_nz=p_nz, p_nr=p_nr, p_nt=p_nt,\n p_zmin=p_zmin, p_zmax=p_zmax,\n p_rmin=p_rmin, p_rmax=p_rmax,\n continuous_injection=False,\n dens_func=dens_func, uz_m=uz_m,\n boost_positions_in_dens_func=boost_positions_in_dens_func )\n\n # Initialize the injection plane for the particles\n if z_injection_plane is not None:\n assert ptcl_bunch.injector is None #Don't overwrite a previous injector\n ptcl_bunch.injector = BallisticBeforePlane( z_injection_plane, boost )\n\n # Get the corresponding space-charge fields\n if initialize_self_field:\n get_space_charge_fields( sim, ptcl_bunch, direction=direction )\n return ptcl_bunch", "def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def generate(n):\n #Initiate the Matrix position - velocity - acceleration\n particles = np.zeros(shape = (n,10)) \n side = ((n*4.0/3.0*math.pi*(sigma/2.0)**3/volume_fraction))**(1.0/3.0)\n\n #\"\"\"\n #Filling in positions for the matrix\n particles[:,0:3] = np.random.uniform(-.5*side, .5*side,size = (n,3))\n return particles\n #\"\"\"\n \"\"\"\n #Generate Determined Lattice\n read = lattice()\n particles[:,0:3] = read[0:n,0:3]\n print \"Done importing lattice\"\n return particles\n #\"\"\"", "def add_buffers_to_particles( species, float_recv_left, float_recv_right,\n uint_recv_left, uint_recv_right):\n # Copy the buffers to an enlarged array\n if species.use_cuda:\n add_buffers_gpu( species, float_recv_left, float_recv_right,\n uint_recv_left, uint_recv_right )\n else:\n add_buffers_cpu( species, float_recv_left, float_recv_right,\n uint_recv_left, uint_recv_right )\n\n # Reallocate the particles auxiliary arrays. 
This needs to be done,\n # as the total number of particles in this domain has changed.\n if species.use_cuda:\n shape = (species.Ntot,)\n # Reallocate empty field-on-particle arrays on the GPU\n species.Ex = cupy.empty( shape, dtype=np.float64 )\n species.Ex = cupy.empty( shape, dtype=np.float64 )\n species.Ey = cupy.empty( shape, dtype=np.float64 )\n species.Ez = cupy.empty( shape, dtype=np.float64 )\n species.Bx = cupy.empty( shape, dtype=np.float64 )\n species.By = cupy.empty( shape, dtype=np.float64 )\n species.Bz = cupy.empty( shape, dtype=np.float64 )\n # Reallocate empty auxiliary sorting arrays on the GPU\n species.cell_idx = cupy.empty( shape, dtype=np.int32 )\n species.sorted_idx = cupy.empty( shape, dtype=np.intp )\n species.sorting_buffer = cupy.empty( shape, dtype=np.float64 )\n if species.n_integer_quantities > 0:\n species.int_sorting_buffer = \\\n cupy.empty( shape, dtype=np.uint64 )\n else:\n # Reallocate empty field-on-particle arrays on the CPU\n species.Ex = np.empty(species.Ntot, dtype=np.float64)\n species.Ey = np.empty(species.Ntot, dtype=np.float64)\n species.Ez = np.empty(species.Ntot, dtype=np.float64)\n species.Bx = np.empty(species.Ntot, dtype=np.float64)\n species.By = np.empty(species.Ntot, dtype=np.float64)\n species.Bz = np.empty(species.Ntot, dtype=np.float64)\n\n # The particles are unsorted after adding new particles.\n species.sorted = False", "def generate_regular_pyramid_grid(self):\n z = np.arange(0, self.sensor_range+self.resolution, self.resolution)\n points_in_pyramid = np.zeros((0,3))\n\n for zz in z: \n xmax = zz*np.tan(self.cone_angle_x/2); ymax = zz*np.tan(self.cone_angle_y/2)\n NumberOfPointsX = int(2*xmax/self.resolution)+3\n NumberOfPointsY = int(2*ymax/self.resolution)+3\n \n x = np.linspace(-xmax, xmax, NumberOfPointsX)\n y = np.linspace(-ymax, ymax, NumberOfPointsY)\n xx, yy = np.meshgrid(x, y)\n xface = xx.ravel(); yface = yy.ravel()\n zface = np.ones(len(xface))* zz\n \n Pgrid = np.zeros((len(xface),3))\n Pgrid[:,0] = xface\n Pgrid[:,1] = yface\n Pgrid[:,2] = zface\n points_in_pyramid = np.concatenate((points_in_pyramid, Pgrid), 0)\n #for j in range(len(points_in_pyramid)): \n # f7 = open('points_in_cone.txt', 'a')\n # f7.write('%s, %s, %s, %s\\n' %(self.RHP_time, points_in_pyramid[j][0], points_in_pyramid[j][1], points_in_pyramid[j][2]))\n return points_in_pyramid", "def distribute_KV(self):\n\n assert (self.emitx == self.emity), \"For a KV distribution, the planar emittances must be equal\"\n\n #total emittance of the K-V distribution is 4 times the planar emittance\n emit = 4.*self.emitx\n self.emit = emit\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n y0 = np.sqrt(self.emit)\n\n yMax = newton(self.whatsleft, y0)\n xMax = yMax\n\n # Generate particles by creating trials and finding particles with potential less than emittance,\n # then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n\n while ptclsMade < self.npart:\n #Note that the particle coordinates here are distributed in normal coordinates\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n if trialValue < self.emit:\n\n pMag = np.sqrt(2.*(self.emit - trialValue))\n pDir = 2.*np.pi * random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n\n #We want to provide the user with 
standard (non-normal) coordinates\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def distribute_particles(self, n_left: int = 500, n_right: int = 500,\n v_init: float = 0.0):\n particles = []\n particles_right = []\n # Use local variables instead of class properties to speed things up\n box_width = self.box_width\n box_height = self.box_height\n particle_r = self.particle_r\n barrier_x = self.barrier_x\n barrier_width = self.barrier_width\n half_particle_r = self.particle_r / 2\n padding_top = box_height - half_particle_r\n\n # Generate particles @ the left\n padding_barrier_left = barrier_x - barrier_width / 2 - half_particle_r\n padding_barrier_right = barrier_x + barrier_width / 2 + half_particle_r\n padding_right_wall = box_width - half_particle_r\n\n for i in range(n_left):\n touching = True\n particle_id = (i << 1)\n while touching:\n touching = False\n pos_x = random.uniform(half_particle_r, padding_barrier_left)\n pos_y = random.uniform(half_particle_r, padding_top)\n angle = random.vonmisesvariate(0.0, 0.0)\n particle = Particle(particle_id, pos_x, pos_y,\n v_init * cos(angle), v_init * sin(angle))\n for par in particles:\n if particle.overlaps(par, particle_r):\n touching = True\n break\n particles.append(particle)\n\n # Generate particles @ the right\n for i in range(n_left, n_left + n_right):\n touching = True\n particle_id = 1 | (i << 1)\n while touching:\n touching = False\n pos_x = random.uniform(padding_barrier_right,\n padding_right_wall)\n pos_y = random.uniform(half_particle_r, padding_top)\n angle = random.vonmisesvariate(0.0, 0.0)\n particle = Particle(particle_id, pos_x, pos_y,\n v_init * cos(angle), v_init * sin(angle))\n for par in particles_right:\n if particle.overlaps(par, particle_r):\n touching = True\n break\n particles_right.append(particle)\n particles.extend(particles_right)\n return particles", "def pop_box():\n global L, dimensions, N\n\n # initial positions of created particles\n init_positions = []\n # initial velocities of created particles\n init_velocities = []\n\n count = 0\n # loop N times\n while count < N:\n # this will contain the positions and velocities of single particle (x, y, z)\n pos = []\n vel = []\n\n # get a random angle in radians to use as the velocity of the particle\n angle = random.uniform(-math.pi, math.pi)\n\n for i in range(dimensions):\n # create a random position for each dimension\n pos.append(random.uniform(0, L))\n vel.append(angle_to_xy(angle)[i])\n\n # don't put it in the positions if a particle already exists there\n if pos in init_positions:\n continue\n\n # put the particle in init_positions with its corresponding velocity\n init_positions.append(pos)\n init_velocities.append(vel)\n count += 1\n\n #returns the initial positions and velocities of all particles\n return init_positions, init_velocities", "def particleset_potential(particles, smoothing_length_squared = zero, G = constants.G, gravity_code = None, block_size = 0):\n n = len(particles)\n if block_size == 0:\n max = 100000 * 100 #100m floats\n block_size = max // n\n if block_size == 0:\n block_size = 1 #if more than 100m particles, then do 1 by one\n\n mass = particles.mass\n x_vector = 
particles.x\n y_vector = particles.y\n z_vector = particles.z\n\n potentials = VectorQuantity.zeros(len(mass),mass.unit/x_vector.unit) \n inf_len = numpy.inf | x_vector.unit\n offset = 0\n newshape =(n, 1)\n x_vector_r = x_vector.reshape(newshape)\n y_vector_r = y_vector.reshape(newshape)\n z_vector_r = z_vector.reshape(newshape)\n mass_r=mass.reshape(newshape)\n while offset < n:\n if offset + block_size > n:\n block_size = n - offset\n x = x_vector[offset:offset+block_size] \n y = y_vector[offset:offset+block_size] \n z = z_vector[offset:offset+block_size] \n indices = numpy.arange(block_size)\n dx = x_vector_r - x \n dy = y_vector_r - y\n dz = z_vector_r - z\n dr_squared = (dx * dx) + (dy * dy) + (dz * dz)\n dr = (dr_squared+smoothing_length_squared).sqrt()\n index = (indices + offset, indices)\n dr[index] = inf_len\n potentials += (mass[offset:offset+block_size]/dr).sum(axis=1)\n offset += block_size\n\n return -G * potentials", "def addParticles( screen, number, color ):\n\t\n\tparticles = []\n\t\n\tfor i in range( number ):\n\t\n\t\tradius = 5\n\t\tmass = 1\n\t\t\n\t\t#random position and velocity\n\t\tx, y = randint(-WINDOW_X + radius, 1), randint(-WINDOW_Y + radius, WINDOW_Y - radius)\n\t\tvx, vy = randrange(-1, 2, 2) * 100, randrange(-1, 2, 2) * 100\n\t\t\n\t\tparticles.append( Particle( screen, x, y, vx, vy, radius, mass, color ))\n\t\n\treturn particles", "def bound_particles(self, n=100, ptype='star'):\n\n #vrange = 100\n vx = self.star['vx']\n vy = self.star['vy']\n vz = self.star['vz']\n\n vdiff = np.square(vx - self.meta.vxc) + np.square(vy - self.meta.vyc) + np.square(vz - self.meta.vzc)\n npart = len(vdiff)\n\n if npart < 100:\n print(\"Too little particles within velocity window km/s\")\n return False\n\n vdiff = np.zeros(npart, dtype=float)\n\n emin = np.argsort(vdiff)[:min(n, npart)]\n self.most_bound_particles = self.star['id'][emin]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shift the spectral fields by n_move cells (with respect to the spatial grid). Shifting is done either on the CPU or the GPU, if use_cuda is True. (Typically n_move is positive, and the fields are shifted backwards)
def shift_spect_grid( self, grid, n_move, shift_rho=True, shift_currents=True ): if grid.use_cuda: shift = grid.d_field_shift # Get a 2D CUDA grid of the size of the grid tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] ) # Shift all the fields on the GPU shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move ) if shift_rho: shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move ) if shift_currents: shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move ) else: shift = grid.field_shift # Shift all the fields on the CPU shift_spect_array_cpu( grid.Ep, shift, n_move ) shift_spect_array_cpu( grid.Em, shift, n_move ) shift_spect_array_cpu( grid.Ez, shift, n_move ) shift_spect_array_cpu( grid.Bp, shift, n_move ) shift_spect_array_cpu( grid.Bm, shift, n_move ) shift_spect_array_cpu( grid.Bz, shift, n_move ) if shift_rho: shift_spect_array_cpu( grid.rho_prev, shift, n_move ) if shift_currents: shift_spect_array_cpu( grid.Jp, shift, n_move ) shift_spect_array_cpu( grid.Jm, shift, n_move ) shift_spect_array_cpu( grid.Jz, shift, n_move )
[ "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_gpu[tpb, bpg]( grid.Ep_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_cpu( grid.Ep_pml, shift, n_move )\n shift_spect_array_cpu( grid.Em_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bp_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def shift_spect_array_gpu( field_array, shift_factor, n_move ):\n # Get a 2D CUDA grid\n iz, ir = cuda.grid(2)\n\n # Only access values that are actually in the array\n if ir < field_array.shape[1] and iz < field_array.shape[0]:\n power_shift = 1. 
+ 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift fields\n field_array[iz, ir] *= power_shift", "def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time", "def shift_spect_array_cpu( field_array, shift_factor, n_move ):\n Nz, Nr = field_array.shape\n\n # Loop over the 2D array (in parallel over z if threading is enabled)\n for iz in prange( Nz ):\n power_shift = 1. 
+ 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift the fields\n for ir in range( Nr ):\n field_array[iz, ir] *= power_shift", "def move_grids(self, fld, ptcl, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n for species in ptcl:\n if species.use_cuda:\n species.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n for species in ptcl:\n if species.continuous_injection:\n # Increment the positions for the generation of particles\n # (Particles are generated when `generate_particles` is called)\n species.injector.increment_injection_positions(\n self.v, time-self.t_last_move )\n\n # Change the time of the last move\n self.t_last_move = time", "def move(n, source, helper, target):", "def shift(x, N):\n up = N >= 0\n N = abs(N)\n _, _, H, W = x.size()\n head = torch.arange(N)\n tail = torch.arange(H - N)\n if up:\n head = torch.arange(H - N) + N\n tail = torch.arange(N)\n else:\n head = torch.arange(N) + (H - N)\n tail = torch.arange(H - N)\n perm = torch.cat([head, tail])\n out = x[:, :, perm, :]\n return out", "def shift(shape, stride, anchors):\n\n # create a grid starting from half stride from the top left corner.\n # Computing distinct X and Y coords in the image space for all grid cells in the the feature_map\n shift_x = (np.arange(0, shape[1]) + 0.5) * stride\n shift_y = (np.arange(0, shape[0]) + 0.5) * stride\n\n # With mesh grid, we assign distinct X and Y coords to every grid cell\n # shift_x.shape = shift_y.shape = shape\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n\n # Stacking (such that its shape is (shape[0]*shape[1] ,4)) the shift values required to shift x1, y1, x2, y2\n # of the anchor BBox for every grid cell in the feature_map\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),shift_x.ravel(), shift_y.ravel())).transpose()\n\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # 
reshape to (K*A, 4) shifted anchors\n A = anchors.shape[0]\n K = shifts.shape[0]\n all_anchors = ( anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)) )\n all_anchors = all_anchors.reshape((K * A, 4))\n\n return all_anchors", "def shift(self):\n for pair in self.pipes:\n pair[0].move(self.pipe_speed)\n pair[1].move(self.pipe_speed)", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def shift_images(image, shift):\n return ndi.shift(image, shift=shift, order=1)", "def update_move_index(self):\n\t\tself.move_index = (self.move_index + 1) % self.move_max", "def _image_shift(img:np.ndarray, ratio:float, num:int, verbose:int=0) -> np.ndarray:\n l_shifted = []\n nrows,ncols = img.shape[:2]\n _ratio = abs(ratio)\n shift_x,shift_y = int(_ratio*ncols), int(_ratio*nrows)\n c = _generate_coordinates(-shift_x,shift_x,-shift_y,shift_y)\n shifts = []\n for _ in range(num):\n shifts.append(next(c))\n \n if verbose >= 1:\n print(f\"shift_y = {shift_y},shift_x = {shift_x}\")\n print(f\"shifts = {shifts}\")\n \n for x,y in shifts:\n M = np.array([[1,0,x],[0,1,y]], dtype=np.float32)\n l_shifted.append(cv2.warpAffine(img,M,(ncols,nrows)))\n \n return l_shifted", "def cshift(arr1, nx, ny):\n nx*=1.\n ny*=1.\n \n if ((nx % 1. == 0.) and (ny % 1. 
==0)):\n return sp.roll(sp.roll(arr1, int(ny), axis=0),\n int(nx), axis=1 )\n else:\n \n return spf.ifft2(spnf.fourier_shift(spf.fft2(arr1),(ny,nx)))", "def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx_0:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx_0],\n src[in_offset + sub_h_idx_0 * w_size],\n 0, 1, sub_w_block, 0, 0)\n # move in one more block of h\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(\n dst[sub_w_block * data_cnt_one_block * (sub_h_align_block_size + sub_h_idx_1)],\n src[in_offset + (sub_h_idx_1 + sub_h_size - data_cnt_one_block) * w_size],\n 0, 1, sub_w_block, 0, 0)", "def shift(self, n, fill_value=np.nan):\n if self.singular:\n return # Can't roll for singular coordinates\n\n super().shift(n, fill_value=fill_value)\n # Reset the cos_lat, sin_lat attributes\n self.set_x(self.coordinates[0], copy=False)\n self.set_y(self.coordinates[1], copy=False)", "def realign(microscope, new_image, reference_image):\n from autoscript_core.common import ApplicationServerException\n\n shift_in_meters = _calculate_beam_shift(new_image, reference_image)\n try:\n microscope.beams.ion_beam.beam_shift.value += shift_in_meters\n except ApplicationServerException:\n logging.warning(\n \"Cannot move beam shift beyond limits, \"\n \"will continue with no beam shift applied.\"\n )\n return microscope.beams.ion_beam.beam_shift.value", "def shift_x(self, nx):\n if self.data.ndim == 3:\n self.data = self.__shift3D(self.data, nx)\n else:\n self.data = self.__shift2D(self.data, nx)\n self.lat = self.__shift2D(self.lat, nx)\n self.lon = self.__shift2D(self.lon, nx)", "def move_to_cluster(self, n, move_from, move_to):\n x = self.X[n]\n self.clusters[move_from].remove_element(x)\n self.clusters[move_to].insert_element(x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shift the field 'field_array' by n_move cells on CPU. This is done in spectral space and corresponds to multiplying the fields with the factor exp(i*kz_true*dz)**n_move.
def shift_spect_array_cpu( field_array, shift_factor, n_move ): Nz, Nr = field_array.shape # Loop over the 2D array (in parallel over z if threading is enabled) for iz in prange( Nz ): power_shift = 1. + 0.j # Calculate the shift factor (raising to the power n_move ; # for negative n_move, we take the complex conjugate, since # shift_factor is of the form e^{i k dz}) for i in range( abs(n_move) ): power_shift *= shift_factor[iz] if n_move < 0: power_shift = power_shift.conjugate() # Shift the fields for ir in range( Nr ): field_array[iz, ir] *= power_shift
[ "def shift_spect_array_gpu( field_array, shift_factor, n_move ):\n # Get a 2D CUDA grid\n iz, ir = cuda.grid(2)\n\n # Only access values that are actually in the array\n if ir < field_array.shape[1] and iz < field_array.shape[0]:\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift fields\n field_array[iz, ir] *= power_shift", "def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time", "def move_grids(self, fld, ptcl, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n 
if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n for species in ptcl:\n if species.use_cuda:\n species.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n for species in ptcl:\n if species.continuous_injection:\n # Increment the positions for the generation of particles\n # (Particles are generated when `generate_particles` is called)\n species.injector.increment_injection_positions(\n self.v, time-self.t_last_move )\n\n # Change the time of the last move\n self.t_last_move = time", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_gpu[tpb, bpg]( grid.Ep_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em_pml, shift, 
n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_cpu( grid.Ep_pml, shift, n_move )\n shift_spect_array_cpu( grid.Em_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bp_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def ShiftRows(state):\n for i in range(len(state)): #len(state) should be 4\n Nb = len(state[i])\n state[i] = [state[i][(j + i) % Nb] for j in range(Nb)]", "def cshift(arr1, nx, ny):\n nx*=1.\n ny*=1.\n \n if ((nx % 1. == 0.) and (ny % 1. ==0)):\n return sp.roll(sp.roll(arr1, int(ny), axis=0),\n int(nx), axis=1 )\n else:\n \n return spf.ifft2(spnf.fourier_shift(spf.fft2(arr1),(ny,nx)))", "def shift(array, shift_x, shift_y):\r\n\r\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.shift.html\r\n\r\n return ndimage.shift(array, (shift_x, shift_y))", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def shift(x, N):\n up = N >= 0\n N = abs(N)\n _, _, H, W = x.size()\n head = torch.arange(N)\n tail = torch.arange(H - N)\n if up:\n head = torch.arange(H - N) + N\n tail = torch.arange(N)\n else:\n head = torch.arange(N) + (H - N)\n tail = torch.arange(H - N)\n perm = torch.cat([head, tail])\n out = x[:, :, perm, :]\n return out", "def move_6xx(record):\n\t# Functionally the same as move_9xx, but isolating since last-minute change\n\tfield_mapping = {\n\t\t\"692\": \"696\",\n\t\t\"693\": \"697\",\n\t\t\"699\": \"695\",\n\t}\n\tfor old_tag in field_mapping.keys():\n\t\tnew_tag = field_mapping[old_tag]\n\t\tfor fld in record.get_fields(old_tag):\n\t\t\tmove_field_safe(record, fld, new_tag)", "def update_move_index(self):\n\t\tself.move_index = (self.move_index + 1) % self.move_max", "def dynamic_shift(x, offset, dim, wrap):\n if dim not in x.shape.dims:\n raise ValueError(\"dim must be a dimension of x\")\n if dim in offset.shape.dims:\n raise ValueError(\"dim may not appear in offset\")\n for d in offset.shape.dims:\n if d not in x.shape.dims:\n raise ValueError(\"offset.shape %s must be a subset of x.shape %s\"\n % (offset.shape, x.shape))\n tmp_dim = Dimension(\"dynamic_shift_tmp\", dim.size)\n x_reshaped = replace_dimensions(x, dim, tmp_dim)\n dim_range = mtf_range(x.mesh, dim, dtype=tf.int32)\n tmp_dim_range = mtf_range(x.mesh, tmp_dim, 
dtype=tf.int32)\n tmp_dim_range_offset = tmp_dim_range + offset\n if wrap:\n tmp_dim_range_offset = mod(tmp_dim_range_offset, dim.size)\n perm = cast(equal(dim_range, tmp_dim_range_offset), x.dtype)\n return einsum([x_reshaped, perm], output_shape=x.shape)", "def field_to_real_space(self):\n logging.info('Calculating real space field...')\n\n self.field_real_space = np.empty([self.nt,self.nx,self.ny,self.ntheta],\n dtype=float)\n pyfftw.n_byte_align(self.field, 16)\n self.field_real_space = pyfftw.interfaces.numpy_fft.irfft2(self.field,\n axes=[1,2])\n\n if self.analysis == 'par' or self.analysis == 'write_field_full':\n self.field = None\n gc.collect()\n\n self.field_real_space = np.roll(self.field_real_space,\n int(self.nx/2), axis=1)\n\n self.field_real_space = self.field_real_space*self.nx*self.ny\n self.field_real_space = self.field_real_space*self.rho_star\n\n logging.info('Finished calculating real space field.')", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def __expand(self, in_array):\n tmp = np.zeros((self.N-1, self.M*3), dtype = np.float64)\n for j in range(self.M):\n tmp[:, j*3:j*3+3] = in_array[0:-1, [ (j-1)%self.M, j, (j+1)%self.M ]]\n return np.asarray(tmp, dtype = np.float64)", "def _shiftRows(self, state): # returns state matrix\n for rowNumber, row in enumerate(state):\n rowForRotate = collections.deque(row)\n rowForRotate.rotate(-rowNumber)\n state[rowNumber] = list(rowForRotate)\n\n return state", "def _image_shift(img:np.ndarray, ratio:float, num:int, verbose:int=0) -> np.ndarray:\n l_shifted = []\n nrows,ncols = img.shape[:2]\n _ratio = abs(ratio)\n shift_x,shift_y = int(_ratio*ncols), int(_ratio*nrows)\n c = _generate_coordinates(-shift_x,shift_x,-shift_y,shift_y)\n shifts = []\n for _ in range(num):\n shifts.append(next(c))\n \n if verbose >= 1:\n print(f\"shift_y = {shift_y},shift_x = {shift_x}\")\n print(f\"shifts = {shifts}\")\n \n for x,y in shifts:\n M = np.array([[1,0,x],[0,1,y]], dtype=np.float32)\n l_shifted.append(cv2.warpAffine(img,M,(ncols,nrows)))\n \n return l_shifted", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shift the field 'field_array' by n_move cells on the GPU. This is done in spectral space and corresponds to multiplying the fields with the factor exp(i*kz_true*dz)**n_move.
def shift_spect_array_gpu( field_array, shift_factor, n_move ): # Get a 2D CUDA grid iz, ir = cuda.grid(2) # Only access values that are actually in the array if ir < field_array.shape[1] and iz < field_array.shape[0]: power_shift = 1. + 0.j # Calculate the shift factor (raising to the power n_move ; # for negative n_move, we take the complex conjugate, since # shift_factor is of the form e^{i k dz}) for i in range( abs(n_move) ): power_shift *= shift_factor[iz] if n_move < 0: power_shift = power_shift.conjugate() # Shift fields field_array[iz, ir] *= power_shift
[ "def shift_spect_array_cpu( field_array, shift_factor, n_move ):\n Nz, Nr = field_array.shape\n\n # Loop over the 2D array (in parallel over z if threading is enabled)\n for iz in prange( Nz ):\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift the fields\n for ir in range( Nr ):\n field_array[iz, ir] *= power_shift", "def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time", "def move_grids(self, fld, ptcl, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if 
n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n for species in ptcl:\n if species.use_cuda:\n species.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n for species in ptcl:\n if species.continuous_injection:\n # Increment the positions for the generation of particles\n # (Particles are generated when `generate_particles` is called)\n species.injector.increment_injection_positions(\n self.v, time-self.t_last_move )\n\n # Change the time of the last move\n self.t_last_move = time", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_gpu[tpb, bpg]( grid.Ep_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em_pml, shift, 
n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp_pml, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if grid.use_pml:\n shift_spect_array_cpu( grid.Ep_pml, shift, n_move )\n shift_spect_array_cpu( grid.Em_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bp_pml, shift, n_move )\n shift_spect_array_cpu( grid.Bm_pml, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def ShiftRows(state):\n for i in range(len(state)): #len(state) should be 4\n Nb = len(state[i])\n state[i] = [state[i][(j + i) % Nb] for j in range(Nb)]", "def shift(x, N):\n up = N >= 0\n N = abs(N)\n _, _, H, W = x.size()\n head = torch.arange(N)\n tail = torch.arange(H - N)\n if up:\n head = torch.arange(H - N) + N\n tail = torch.arange(N)\n else:\n head = torch.arange(N) + (H - N)\n tail = torch.arange(H - N)\n perm = torch.cat([head, tail])\n out = x[:, :, perm, :]\n return out", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def shift(array, shift_x, shift_y):\r\n\r\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.shift.html\r\n\r\n return ndimage.shift(array, (shift_x, shift_y))", "def cshift(arr1, nx, ny):\n nx*=1.\n ny*=1.\n \n if ((nx % 1. == 0.) and (ny % 1. 
==0)):\n return sp.roll(sp.roll(arr1, int(ny), axis=0),\n int(nx), axis=1 )\n else:\n \n return spf.ifft2(spnf.fourier_shift(spf.fft2(arr1),(ny,nx)))", "def batch_displacement_warp3d(imgs, vector_fields):\n n_batch = tf.shape(imgs)[0]\n xlen = tf.shape(imgs)[1]\n ylen = tf.shape(imgs)[2]\n zlen = tf.shape(imgs)[3]\n\n grids = batch_mgrid(n_batch, xlen, ylen, zlen)\n\n T_g = grids + vector_fields\n output = batch_warp3d(imgs, T_g)\n return output", "def update_move_index(self):\n\t\tself.move_index = (self.move_index + 1) % self.move_max", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def dynamic_shift(x, offset, dim, wrap):\n if dim not in x.shape.dims:\n raise ValueError(\"dim must be a dimension of x\")\n if dim in offset.shape.dims:\n raise ValueError(\"dim may not appear in offset\")\n for d in offset.shape.dims:\n if d not in x.shape.dims:\n raise ValueError(\"offset.shape %s must be a subset of x.shape %s\"\n % (offset.shape, x.shape))\n tmp_dim = Dimension(\"dynamic_shift_tmp\", dim.size)\n x_reshaped = replace_dimensions(x, dim, tmp_dim)\n dim_range = mtf_range(x.mesh, dim, dtype=tf.int32)\n tmp_dim_range = mtf_range(x.mesh, tmp_dim, dtype=tf.int32)\n tmp_dim_range_offset = tmp_dim_range + offset\n if wrap:\n tmp_dim_range_offset = mod(tmp_dim_range_offset, dim.size)\n perm = cast(equal(dim_range, tmp_dim_range_offset), x.dtype)\n return einsum([x_reshaped, perm], output_shape=x.shape)", "def shift(shape, stride, anchors):\n\n # create a grid starting from half stride from the top left corner.\n # Computing distinct X and Y coords in the image space for all grid cells in the the feature_map\n shift_x = (np.arange(0, shape[1]) + 0.5) * stride\n shift_y = (np.arange(0, shape[0]) + 0.5) * stride\n\n # With mesh grid, we assign distinct X and Y coords to every grid cell\n # shift_x.shape = shift_y.shape = shape\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n\n # Stacking (such that its shape is (shape[0]*shape[1] ,4)) the shift values required to shift x1, y1, x2, y2\n # of the anchor BBox for every grid cell in the feature_map\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),shift_x.ravel(), shift_y.ravel())).transpose()\n\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n A = anchors.shape[0]\n K = shifts.shape[0]\n all_anchors = ( anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)) )\n all_anchors = all_anchors.reshape((K * A, 4))\n\n return all_anchors", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def 
_shiftRows(self, state): # returns state matrix\n for rowNumber, row in enumerate(state):\n rowForRotate = collections.deque(row)\n rowForRotate.rotate(-rowNumber)\n state[rowNumber] = list(rowForRotate)\n\n return state", "def ShiftFrame(Frame, PixShift):\n \n import numpy as np\n \n F, R, C = Frame.shape\n \n if F > 1:\n msg = f\"'Frame' must be a 2D frame with shape (1, R, C) but has shape\"\\\n + f\" ({F}, {R}, {C}).\"\n \n raise Exception(msg)\n \n # Initialise ShiftedFrame:\n ShiftedFrame = np.zeros((1, R, C), dtype='uint')\n #ShiftedFrame = np.empty_like(Frame, dtype='uint') # this creates 42,932\n # unique values for some reason!\n \n #unique = UniqueItems(Nda=Frame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in Frame')\n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the initialised',\n # f'ShiftedFrame: {unique[:11]}...')\n \n di, dj, dk = PixShift\n \n ##ShiftedFrame[0, dj:, di:] = Frame[0, :-(1+dj), :-(1+di)]\n ##ShiftedFrame[0, :-(1+dj), :-(1+di)] = Frame[0, dj:, di:]\n #ShiftedFrame[0, :R-dj, :C-di] = Frame[0, dj:, di:]\n \n if di > 0 and dj > 0:\n ShiftedFrame[0, dj:, di:] = Frame[0, :-dj, :-di]\n \n elif di < 0 and dj < 0:\n ShiftedFrame[0, :dj, :di] = Frame[0, -dj:, -di:]\n \n elif di > 0 and dj < 0:\n ShiftedFrame[0, :dj, di:] = Frame[0, -dj:, :-di]\n \n elif di < 0 and dj > 0:\n ShiftedFrame[0, dj:, :di] = Frame[0, :-dj, -di:]\n \n elif di == 0 and dj > 0:\n ShiftedFrame[0, dj:, :] = Frame[0, :-dj, :]\n \n elif di == 0 and dj < 0:\n ShiftedFrame[0, :dj, :] = Frame[0, -dj:, :]\n \n elif di > 0 and dj == 0:\n ShiftedFrame[0, :, di:] = Frame[0, :, :-di]\n \n elif di < 0 and dj == 0:\n ShiftedFrame[0, :, :di] = Frame[0, :, -di:]\n \n elif di == 0 and dj == 0:\n ShiftedFrame[0] = Frame[0]\n \n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the ShiftedFrame',\n # 'after shifting.')\n \n return ShiftedFrame", "def half_advance_forward(self, field_data):\n\n field_data.omega_coords = np.einsum('ijkl, ijl -> ijk',\n self.half_for_rot_mat, field_data.omega_coords)\n\n field_data.dc_coords[:,:,1] += 0.5*field_data.dc_coords[:,:,0]/field_data.mode_mass*self.dt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Note the camelcase name and unused variable. Bad bad bad.
def camelCaseFunc(): unused = 1
[ "def test_unusedVariable(self):\r\n self.flakes('''\r\n def a():\r\n b = 1\r\n ''', m.UnusedVariable)", "def test_magicGlobalsName(self):\r\n self.flakes('__name__')", "def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))", "def VariableName(self) -> str:", "def nice_name():\n\n pass", "def test_instance_vars_have_valid_names(question):\n instance = question[\"instance\"]\n for name in instance.get(\"variables\", {}).keys():\n assert CAMEL_CASE_PATTERN.match(\n name\n ), \"variable {} not slouchingCamelCase\".format(name)", "def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))", "def _NiceNameToPreventCompilerErrors(self, attrname):\n # only emit the rhs of a multi part name e.g. undo.UndoItem will appear only as UndoItem\n if attrname.find(\".\") != -1:\n attrname = attrname.split(\".\")[-1] # take the last\n # Prevent compiler errors on the java side by avoiding the generating of java keywords as attribute names\n if attrname in javakeywords:\n attrname = \"_\" + attrname\n return attrname", "def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')", "def test_unusedImport_underscore(self):\n self.flakes('import fu as _', m.UnusedImport)", "def getNiceName(self, name):\n lname = name.lower()\n if lname.startswith(\"at_\"):\n name = name[3:]\n elif lname.startswith(\"mod_\"):\n name = name[4:]\n return name.capitalize()", "def test_unusedVariableAsLocals(self):\r\n self.flakes('''\r\n def a():\r\n b = 1\r\n return locals()\r\n ''')", "def __getattribute__(self, name):\n if name in ('_special_names', '__dict__'):\n return super().__getattribute__(name)\n if hasattr(self, '_special_names'):\n if name in self._special_names:\n raise AttributeError(\n f\"{name} is a reserved variable name and it cannot be read\")\n return super().__getattribute__(name)", "def verify_naming(self, reserved):\n for w in reserved:\n if w in self.decisions:\n raise ParseError('Duplicate variable/block name \"{}\"'.format(w))", "def verif_unused(sv):\r\n if Unused in sv.Object and sv.Object[Unused].value: # check presence and integrity of unused list\r\n unusedlist=[applied (x, Unused) for x in sv.Object[Unused].value]\r\n for nam in unusedlist: # check each unused declaration\r\n nod=sv.Object[nam]\r\n if sv.Namedpinlist.get(nam)==[nod.effects]: continue # pin is just named\r\n elif applied(nam, Output):\r\n if len(nod.effects)==1: # only effect is output list\r\n if len(nod.causes)<=2: continue\r\n if len(nod.causes)<=4 and Faux in nod.causes and Ewent in nod.causes: continue # allow 'take event'\r\n elif nod.causes or nod.effects: # object should have no cause and no effect\r\n print(Err_unused_obj) \r\n print(str(nam))\r\n sv.Current_clause=None, None, None\r\n raise ReferenceError", "def 
OldName(self) -> str:", "def pythonic(var_name):\r\n first_pass = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', var_name)\r\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', first_pass).lower()", "def test_undeclared_variable_different_function(self):\n input = \"\"\"\n Function: main\n Body:\n Var: a;\n EndBody.\n Function: foo\n Body:\n a = 789;\n EndBody.\n \"\"\"\n expect = str(Undeclared(Identifier(), \"a\"))\n self.assertTrue(TestChecker.test(input, expect, 407))", "def test_deprecated_private_variables(attr):\n with pytest.warns(AstropyDeprecationWarning):\n resolve_name(\"astropy\", \"cosmology\", \"flrw\", attr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether or not the Window supports user resizing
def resizable(self): return self._frame._root.resizable() == '1 1'
[ "def IsResizeable(self):\r\n \r\n return self.HasFlag(self.optionResizable)", "def _check_window_size(self):\n desktop = QtGui.QDesktopWidget()\n screensize = desktop.availableGeometry()\n width = screensize.width()\n height = screensize.height()\n min_pixel_size = 1080\n if (width <= min_pixel_size or height <= min_pixel_size):\n self.showMaximized()", "def OnWindowSetResizable(self, Resizable=sentinel):", "def isMaximized(self):\n return ctypes.windll.user32.IsZoomed(self._hWnd) != 0", "def OnWindowResize(self):", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def isSelectionResizing(self):\n return self.resizing", "def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:", "def ev_windowresized(self, event: WindowResized) -> None:", "def AuiManager_HasLiveResize(manager):\r\n\r\n # With Core Graphics on Mac, it's not possible to show sash feedback,\r\n # so we'll always use live update instead.\r\n \r\n if wx.Platform == \"__WXMAC__\":\r\n return True\r\n else:\r\n return (manager.GetAGWFlags() & AUI_MGR_LIVE_RESIZE) == AUI_MGR_LIVE_RESIZE", "def check_resize(self):\n yx = self.screen.getmaxyx()\n if self.current_yx != yx:\n self.current_yx = yx\n self.resize(yx)", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def IsWindow(*args, **kwargs):\n return _core_.SizerItem_IsWindow(*args, **kwargs)", "def is_wide(self) -> bool:\n return self.width > 40", "def check_window_size():\n \n wight = 870\n height = 519\n \n window = win32gui.FindWindow(MINECRAFT_CLASS_NAME, MINECRAFT_TITLE + MINECRAFT_VERSION)\n x0, y0, x1, y1 = win32gui.GetWindowRect(window)\n # x0 and y0 are initial points, upper left corner and lower left corner\n # then we need the difference between upper left corner and upper right corner to get the wight and\n # the difference between lower left corner and lower right corner to get the height\n \n w = x1 - x0\n h = y1 - y0\n \n if w is not wight or h is not height:\n win32gui.MoveWindow(window, x0, y0, wight, height, True)", "def ev_windowmaximized(self, event: WindowEvent) -> None:", "def ev_windowminimized(self, event: WindowEvent) -> None:", "def windowResized(self, renderWindow):\n pass", "def get_window_size(self):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The list of all turtles attached to this Window. This attribute may not be altered directly
def turtles(self): return self._turtles[:]
[ "def turtles(self):\n return self._turtles", "def turbines(self) -> List[floris.simulation.Turbine]:\n return self._farm.turbines", "def getturtle(self):\n return self", "def thermostats(self):\n\n return self._thermostats", "def lights(self):\n return list(self.GetLights())", "def list_all_thicknesses(self):\n allTP = []\n del allTP[:]\n for i in range(0, self.Plates.__len__()):\n allTP = allTP + [self.Plates[i].tp]\n return allTP", "def terminals(self):\n terminals = []\n # The two following lines are not the same as\n # descendants = self.children\n # which would extend also to self.children!\n descendants = []\n descendants.extend(self.children)\n for child in descendants:\n if child.terminal:\n terminals.append(child)\n else:\n descendants.extend(child.children)\n return terminals", "def thermostats(self):\n return self._devices(\"thermostats\")", "def get_turgrupper_deep(self):\n turgrupper = []\n for child in self.children.all():\n if child.type == 'turgruppe':\n turgrupper.append(child)\n turgrupper += child.get_turgrupper_deep()\n return turgrupper", "def get_light_list(self):\n return self.light_array", "def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")", "def get_tunes(self) -> List[Tune]:\n if self._tune.title is not None:\n self._tunes.append(self._tune)\n self._tune = Tune()\n self._state = self.S_END\n return self._tunes", "def graphicsItems(self):\n return self.ctrl.getGraphicsItems()", "def 龜群(我):\n return 我._turtles", "def getAllTriStimulus(self):\n return self.tristimulus", "def _get_wires(self):\n wires = set()\n for drawable in self._get_drawables():\n for wire in drawable.wires():\n wires.add(wire)\n return wires", "def radials(self) -> List[Radial]:\n return self._radials", "def wires(self):\n return [o.wires for o in self.obs]", "def getListOfAllInstantiatedElements(self):\n return _libsbml.Submodel_getListOfAllInstantiatedElements(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The list of all pens attached to this Window. This attribute may not be altered directly
def pens(self): return self._pencils[:]
[ "def drawables(self):\n\treturn self._Widget__w['drawables']", "def get(self):\n return list(self.pixels.values())", "def getPixels(self):\n\t\treturn self.strip.ledsColorBuffer", "def pixdimWidgets(self):\n return (self.__pixdimx, self.__pixdimy, self.__pixdimz)", "def getPixelsBuffer(self):\n\t\treturn self.leds", "def graphicsItems(self):\n return self.ctrl.getGraphicsItems()", "def list_layers(self):\n llist = []\n for x in range(self.num_panels):\n llist.append((self.length, self.width))\n return llist", "def getArray(self):\n return pygame.PixelArray(self.screen)", "def get_pixel_obs(self):\n self.viewer.get_pixel_obs()", "def gpio_properties(self):\n res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)\n if res < 0:\n raise errors.JLinkException(res)\n\n num_props = res\n buf = (structs.JLinkGPIODescriptor * num_props)()\n res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)\n if res < 0:\n raise errors.JLinkException(res)\n\n return list(buf)", "def colors(self):\r\n\t\treturn self._colors", "def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables", "def list_widgets(self) -> list[str]:\n return list(self.widgets_map.keys())", "def PortHandles(self):\n if self.force_auto_sync:\n self.get('PortHandles')\n return self._PortHandles", "def lst(args, keyboard):\n print(f'Brightness: {keyboard.brightness}')\n zones = keyboard.get_color_zones()\n zone_colors = keyboard.color\n for zone in zones:\n print(f'{zone}: #{zone_colors[zone]}')", "def GetAttributes(self, pane):\r\n\r\n attrs = []\r\n attrs.extend([pane.window, pane.frame, pane.state, pane.dock_direction,\r\n pane.dock_layer, pane.dock_pos, pane.dock_row, pane.dock_proportion,\r\n pane.floating_pos, pane.floating_size, pane.best_size,\r\n pane.min_size, pane.max_size, pane.caption, pane.name,\r\n pane.buttons, pane.rect, pane.icon, pane.notebook_id,\r\n pane.transparent, pane.snapped, pane.minimize_mode])\r\n\r\n return attrs", "def chans(self):\n\n if self.prefs.chans:\n return self.prefs.chans\n else:\n# list(range(sum(self.metadata.spw_nchan)))\n chanlist = []\n nch = np.unique(self.metadata.spw_nchan)[0] # assume 1 nchan/spw\n if hasattr(self.prefs, 'ignore_spwedge'):\n edge = int(self.prefs.ignore_spwedge*nch)\n else:\n edge = 0\n for spw in self.spw:\n spwi = self.metadata.spw_orig.index(spw)\n chanlist += list(range(nch*spwi+edge, nch*(spwi+1)-edge))\n return chanlist", "def get_light_list(self):\n return self.light_array", "def hotkeys(self):\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a turtle to this window.
def _addTurtle(self,turt): assert (type(turt) == Turtle), "Parameter %s is not a valid Turtle object" % `turt` self._turtles.append(turt)
[ "def add(self, turtle):\n\n self.start_turtles.append(turtle.position)\n self.turtles.append(turtle)\n self.items[turtle] = self.canvas.create_polygon(0, 0)\n self.update(turtle)", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def init_turtle():\n turtle.up()\n turtle.home()", "def add_turtle(uri_string_or_file, context_name):", "def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance", "def make_window(colr, ttle):\n w = turtle.Screen()\n w.title(ttle)\n w.bgcolor(colr)\n return w", "def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()", "def _drawturtle(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n titem = self.Myturtle._item\n if self._shown and screen._updatecounter == 0 and screen._tracing > 0:\n self._hidden_from_screen = False\n tshape = shape._data\n if ttype == \"polygon\":\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(titem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n screen._drawimage(titem, self._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n else:\n if self._hidden_from_screen:\n return\n if ttype == \"polygon\":\n screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n screen._drawimage(titem, self._position,\n screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n self._hidden_from_screen = True", "def make_turtle(color, size):\n t = turtle.Turtle()\n t.color(color)\n t.pensize(size)\n return t", "def create(self, playername):\n if playername not in players:\n players[playername] = Turtle()\n\n padde = players[playername]\n padde.color(playername)\n padde.shape(\"turtle\")\n padde.penup()", "def _drawturtle(我):\n 幕 = 我.幕\n 形狀 = 幕._shapes[我.turtle.shapeIndex]\n ttype = 形狀._type\n titem = 我.turtle._item\n if 我._shown and 幕._updatecounter == 0 and 幕._tracing > 
0:\n 我._hidden_from_screen = 假\n tshape = 形狀._data\n if ttype == \"polygon\":\n if 我._resizemode == \"noresize\": w = 1\n elif 我._resizemode == \"auto\": w = 我._pensize\n else: w =我._outlinewidth\n 形狀 = 我._polytrafo(我._getshapepoly(tshape))\n fc, oc = 我._fillcolor, 我._pencolor\n 幕._drawpoly(titem, 形狀, fill=fc, outline=oc,\n 筆寬=w, top=真)\n elif ttype == \"image\":\n 幕._drawimage(titem, 我._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = 我._polytrafo(我._getshapepoly(poly, 真))\n 幕._drawpoly(item, poly, fill=我._cc(fc),\n outline=我._cc(oc), 筆寬=我._outlinewidth, top=真)\n else:\n if 我._hidden_from_screen:\n return\n if ttype == \"polygon\":\n 幕._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n 幕._drawimage(titem, 我._position,\n 幕._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n 幕._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n 我._hidden_from_screen = 真", "def make_turtle(color,fillcolor, size, shape):\n t = turtle.Turtle()\n t.color(color,fillcolor)\n t.pensize(size)\n t.shape(shape)\n t.speed(0)\n return t", "def make_turtle(colr, sz):\n t = turtle.Turtle()\n t.color(colr)\n t.pensize(sz)\n return t", "def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()", "def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer", "def __init__(self):\r\n turtle.setup()\r\n turtle.screensize(100000, 100000)\r\n self.__risi_pot = turtle.Turtle()\r\n self.__risi_prijatelje = turtle.Turtle()\r\n self.__risi_pot.color('red')\r\n self.__risi_pot.pensize(2)\r\n self.__risi_pot.speed('fast')\r\n self.__risi_prijatelje.speed('fast')", "def show_turtle(self):\n self.visible 
= True\n self.update()", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def drawJ(theTurtle):\n theTurtle.pensize(10)\n theTurtle.color(\"green\")\n\n theTurtle.pendown()\n theTurtle.right(90)\n theTurtle.forward(200)\n theTurtle.circle(-60, 180) # draw a semicircle\n theTurtle.penup()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a pen to this window.
def _addPen(self,pen): assert (type(pen) == Pen), "Parameter %s is not a valid graphics pen" % `turt` self._pencils.append(pen)
[ "def addPen(self, name, pen=AUTO_GENERATE_PEN, updateCaption=True):\n if isinstance(pen, AutoPen):\n pen = copy.deepcopy(pen)\n pen.color = Colors.colors[self.colorIndex]\n self.colorIndex += 1\n if self.colorIndex >= len(Colors.colors):\n self.colorIndex = 0\n self.penOrder.append(name)\n self.pens[name] = pen\n self.values[name] = []\n if updateCaption:\n self.redrawCaption()", "def setPen(self, pen):\n \n self.pen = pen", "def pen(self, pen=None, **pendict):\n _pd = {\"shown\" : self._shown,\n \"pendown\" : self._drawing,\n \"pencolor\" : self._pencolor,\n \"fillcolor\" : self._fillcolor,\n \"pensize\" : self._pensize,\n \"speed\" : self._speed,\n \"resizemode\" : self._resizemode,\n \"stretchfactor\" : self._stretchfactor,\n \"outline\" : self._outlinewidth,\n \"tilt\" : self._tilt\n }\n\n if not (pen or pendict):\n return _pd\n\n if isinstance(pen, dict):\n p = pen\n else:\n p = {}\n p.update(pendict)\n\n _p_buf = {}\n for key in p:\n _p_buf[key] = _pd[key]\n\n if self.undobuffer:\n self.undobuffer.push((\"pen\", _p_buf))\n\n newLine = False\n if \"pendown\" in p:\n if self._drawing != p[\"pendown\"]:\n newLine = True\n if \"pencolor\" in p:\n if isinstance(p[\"pencolor\"], tuple):\n p[\"pencolor\"] = self._colorstr((p[\"pencolor\"],))\n if self._pencolor != p[\"pencolor\"]:\n newLine = True\n if \"pensize\" in p:\n if self._pensize != p[\"pensize\"]:\n newLine = True\n if newLine:\n self._newLine()\n if \"pendown\" in p:\n self._drawing = p[\"pendown\"]\n if \"pencolor\" in p:\n self._pencolor = p[\"pencolor\"]\n if \"pensize\" in p:\n self._pensize = p[\"pensize\"]\n if \"fillcolor\" in p:\n if isinstance(p[\"fillcolor\"], tuple):\n p[\"fillcolor\"] = self._colorstr((p[\"fillcolor\"],))\n self._fillcolor = p[\"fillcolor\"]\n if \"speed\" in p:\n self._speed = p[\"speed\"]\n if \"resizemode\" in p:\n self._resizemode = p[\"resizemode\"]\n if \"stretchfactor\" in p:\n sf = p[\"stretchfactor\"]\n if isinstance(sf, (int, float)):\n sf = (sf, sf)\n self._stretchfactor = sf\n # if \"shearfactor\" in p:\n # self._shearfactor = p[\"shearfactor\"]\n if \"outline\" in p:\n self._outlinewidth = p[\"outline\"]\n if \"shown\" in p:\n self._shown = p[\"shown\"]\n if \"tilt\" in p:\n self._tilt = p[\"tilt\"]\n \n self._update()", "def SetConnectionPen(self, pen):\r\n\r\n self._dottedPen = pen\r\n self._dirty = True", "def set_pen(self, pen_type: str):\n if pen_type not in PEN_TYPES:\n raise ValueError(\"Invalid value for pen_type.\")\n self.setPen(PEN_TYPES[pen_type])", "def SetPen(*args):\n return _gdi_.GraphicsContext_SetPen(*args)", "def SetPen(*args, **kwargs):\n return _gdi_.DC_SetPen(*args, **kwargs)", "def setHoverPen(self, *args, **kwargs):\n self.hoverPen = pg.mkPen(*args, **kwargs)\n if self.mouseHovering:\n self.currentPen = self.hoverPen\n self.update()", "def penup(self):\n if not self._drawing:\n return\n self.pen(pendown=False)", "def set_pen_color(self, color: tuple) -> Rectangle:\n self.pen.color = color\n return self", "def pen_down(self):\n self.pen = True", "def set_pen_size(self, pen_size):\n self.pen_size = pen_size", "def test_set_pen(self):\n painter = biotracker.QPainter()\n painter.setPen(100, 50, 30, 33)\n self.assertEqual(\"p(100,50,30,33)\", painter.to_msg())", "def SetBorderPen(self, pen):\r\n\r\n self._borderPen = pen\r\n self.RefreshSelected()", "def pensize(self, width):\n self._penwidth = width", "def setPenWidth(self, penWidth):\n \n self.penWidth = penWidth\n self.pen.setWidthF(self.penWidth)", "def __init__(self, *args, **kwargs):\n 
_gdi_.GraphicsPen_swiginit(self,_gdi_.new_GraphicsPen(*args, **kwargs))", "def draw(self, pen):\r\n lines = []\r\n vertices = self.vertices\r\n print(vertices)\r\n if vertices:\r\n for i in range(len(vertices)-1):\r\n lines.append(Line(vertices[i],vertices[i+1], self.pencolor))\r\n lines.append(Line(vertices[-1],vertices[0]))\r\n pen.color(self.pencolor, self.fillcolor)\r\n if self.fillcolor: pen.begin_fill()\r\n for l in lines:\r\n l.draw(pen)\r\n pen.end_fill()", "def __GSAnchor_drawPoints__(self, pen):\n\tpen.beginPath()\n\tpen.addPoint((self.x, self.y), segmentType=\"move\", smooth=False, name=self.name)\n\tpen.endPath()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a pen from this window.
def _removePen(self,pen): if pen in self._pencils: self._pencils.remove(pen)
[ "def __del__(self):\n self._screen._removePen(self)\n del self._turtle", "def remove(self) -> None:\n self.map.remove_brush(self)", "def remove_brush(self, brush: 'Solid') -> None:\n try:\n self.brushes.remove(brush)\n except ValueError:\n pass # Already removed.", "def pen_down(self):\n self.pen = True", "def removeFrom(self, win):\n \n if self._faceup:\n win.remove(self._face) #removes drawn cards from the\n else: #deck\n win.remove(self._back)\n self._window = None", "def penup(self):\n if not self._drawing:\n return\n self.pen(pendown=False)", "def removeFrom(self, win):\n \n win.remove(self._face)\n win.remove(self._face)", "def __del__(self):\n Window.nr_set.remove(self._nr)", "def remove_brick(self):\n b_upleft = self.window.get_object_at(self.ball.x, self.ball.y)\n b_upright = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y)\n b_lowerleft = self.window.get_object_at(self.ball.x, self.ball.y + self.ball.width)\n b_lowerright = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y + self.ball.width)\n if b_upleft is not None and b_upleft is not self.paddle:\n self.window.remove(b_upleft)\n elif b_upright is not None and b_upright is not self.paddle:\n self.window.remove(b_upright)\n elif b_lowerleft is not None and b_lowerleft is not self.paddle:\n self.window.remove(b_lowerleft)\n elif b_lowerright is not None and b_lowerright is not self.paddle:\n self.window.remove(b_lowerright)", "def RemoveShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_RemoveShape(self, *args)", "def remove(self):\n self.parent.widgets.remove(self)\n self.parent.dirty = 1", "def remove_curve(self, name):\n self._curve_reg.__delitem__(name)", "def remove(self, drawable):\n if drawable not in self._contents:\n raise ValueError('Object not currently on the Canvas')\n _GraphicsContainer.remove(self, drawable)", "def destroy(self):\n if self.widget is not None and self.widget.scene() is not None:\n self.widget.scene().removeItem(self.widget)\n del self.widget\n self.widget = None\n self._teardown_features()\n focus_registry.unregister(self.widget)\n super(QtGraphicsItem, self).destroy()", "def __del__(self):\n\t\tself.text_win.clear()\n\t\tdel self.text_win\t\t\t# Try to delete the text window first\n\t\tself.chooser_win.clear()\n\t\tdel self.chooser_win\t\t\t\t# Try to delete the window\n\t\t\n\t\tself.parent_win.redrawwin()\t\t# Make sure this window's content doesn't get left behind", "def undraw(self):\n with self.screen.blocks:\n while self.history and self.history[-1].action == 'turtle':\n self.screen.draw(self.history.pop().changed)", "def del_gobject(self, gobject):\n self.gobjects.remove(gobject)\n gobject._cell = None\n if gobject.solid:\n self.solid = False", "def remove_drawing_rect(self):\n self.drawing_rect = QPolygonF()\n if self.connecting_rect:\n self.connecting_rect.setVisible(False)\n self.connecting_rect = None\n self.first_draw = True", "def clearPaint(self):\n self.lastPaintPoint = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the maximum size for this window. Any attempt to resize a dimension beyond the maximum size will fail.
def setMaxSize(self,width,height): assert (type(width) == int), "width %s is not an int" % `width` assert (width > 0), "width %s is negative" % `width` assert (type(height) == int), "height %s is not an int" % `height` assert (height > 0), "height %s is negative" % `height` self._frame._root.maxsize(width,height)
[ "def set_maximum_size(self, max_size):\n # QWidget uses 16777215 as the max size\n if -1 in max_size:\n max_size = (16777215, 16777215)\n self.widget.setMaximumSize(QSize(*max_size))", "def SetMaxSize(*args, **kwargs):\n return _core_.Window_SetMaxSize(*args, **kwargs)", "def setmaxsize(self, maxsize):\n self.maxsize = maxsize", "def max_size(self, max_size):\n self._max_size = max_size", "def max_size(self, max_size):\n\n self._max_size = max_size", "def set_max_size(self, val):\n self._maxsize = val\n while self.size() > self._maxsize:\n self.cull()", "def SetSize(*args, **kwargs):\n return _core_.Window_SetSize(*args, **kwargs)", "def max_width(self, max_width):\n\n self._max_width = max_width", "def update_maximum_size(self):\n if self.initialized:\n max_size = self._compute_maximum_size()\n self.set_max_size(max_size)", "def set_progressbar_max(self, max_size: int) -> None:\n self.progress_bar.max = max_size", "def max_height(self, max_height):\n\n self._max_height = max_height", "def set_maxSize(self, maxSize):\n if self.__log:\n self.__logger.info(f\"Setting max size to {maxSize}\")\n self.__maxSize = maxSize # Set max size\n self.__handle_cache_size() # Adapt to new changes", "def max_size(self):\n max_size = self.widget.maximumSize()\n return Size(max_size.width(), max_size.height())", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def set_max(self, max_value):\n self._max = max_value", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def set_maxItemSize(self, maxItemSize):\n if self.__log:\n self.__logger.info(f\"Setting max item size to {maxItemSize}\")\n self.__maxItemSize = maxItemSize\n self.__handle_cache_size()", "def max_size(self, max_size: int):\n if max_size is not None and max_size < 0: # noqa: E501\n raise ValueError(\"Invalid value for `max_size`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._max_size = max_size", "def SetSizeWH(*args, **kwargs):\n return _core_.Window_SetSizeWH(*args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the minimum size for this window. Any attempt to resize a dimension below the minimum size will fail.
def setMinSize(self,width,height): assert (type(width) == int), "width %s is not an int" % `width` assert (width > 0), "width %s is negative" % `width` assert (type(height) == int), "height %s is not an int" % `height` assert (height > 0), "height %s is negative" % `height` self._frame._root.minsize(width,height)
[ "def SetMinimumPaneSize(self, minSize):\n self._minimumPaneSize = minSize", "def set_minimum_size(self, min_size):\n # QWidget uses (0, 0) as the minimum size.\n if -1 in min_size:\n min_size = (0, 0)\n self.widget.setMinimumSize(QSize(*min_size))", "def min_size(self, min_size):\n self._min_size = min_size", "def minimum_size(self, minimum_size):\n\n self._minimum_size = minimum_size", "def setMinimumWidth( self, value ):\n self._minimumWidth = value", "def min_size(self, min_size):\n\n self._min_size = min_size", "def resize_to_minimum(self):\n if self.initialized:\n min_size = self._compute_minimum_size()\n self.set_min_size(min_size)\n self.resize(min_size)", "def update_minimum_size(self):\n if self.initialized:\n min_size = self._compute_minimum_size()\n self.set_min_size(min_size)", "def SetMinSize(*args, **kwargs):\n return _core_.Sizer_SetMinSize(*args, **kwargs)", "def min_width(self, min_width):\n\n self._min_width = min_width", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def SetMinSize(*args, **kwargs):\n return _core_.SizerItem_SetMinSize(*args, **kwargs)", "def SetInitialSize(*args, **kwargs):\n return _core_.Window_SetInitialSize(*args, **kwargs)", "def setMinimumHeight( self, value ):\n self._minimumHeight = value", "def min_size(self):\n min_size = self.widget.minimumSize()\n return Size(min_size.width(), min_size.height())", "def min_size(self, min_size: int):\n if min_size is not None and min_size < 0: # noqa: E501\n raise ValueError(\"Invalid value for `min_size`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._min_size = min_size", "def min_pixels(self, value) -> 'Size':\n raise_not_number(value)\n self.minimum = '{}px'.format(value)\n return self", "def minimumSizeHint(self):\n return QSize(1490, 800)", "def SetMinArea(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveInternalWires_SetMinArea(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The heading of this turtle in degrees. Heading is measured counter clockwise from due east.
def heading(self): return float(self._turtle.heading())
[ "def heading(self):\n current_x, current_y = self._orient\n result = round(math.atan2(current_y, current_x)*180.0/math.pi, 10) % 360.0\n result /= self._degrees_per_au\n return (self._angle_offset + self._angle_orient*result) % self._fullcircle", "def heading(self):\r\n print(self.read_rawData())\r\n x_raw, y_raw, z_raw = self.read_rawData()\r\n \r\n \r\n heading = math.atan2(y_raw, x_raw)\r\n \r\n if(heading > 2.0 * math.pi):\r\n heading = heading - 2.0 * math.pi\r\n \r\n # check for sign\r\n if(heading < 0.0):\r\n heading = heading + 2.0 * math.pi\r\n \r\n # convert into angle\r\n heading_angle = int(heading * 180.0 / math.pi)\r\n print('heading_angle', heading_angle)\r\n \r\n return heading_angle", "def raw_heading(self):\n\n self._heading = math.atan2(self._mag[X], self._mag[Y])\n\n if self._heading < 0:\n self._heading += 2*math.pi\n if self._heading > 2*math.pi:\n self._heading -= 2*math.pi\n\n self._heading_degrees = round(math.degrees(self._heading),2)\n\n return self._heading_degrees", "def heading_idx(self):\n if self.heading > 0:\n idx = self.heading * 180\n else:\n idx = 360 + self.heading * 180\n return int(idx - 1)", "def heading(self):\n\n self.update()\n\n truncate = [0,0,0]\n for i in range(X, Z+1):\n truncate[i] = math.copysign(min(math.fabs(self._accel[i]), 1.0), self._accel[i])\n try:\n pitch = math.asin(-1*truncate[X])\n roll = math.asin(truncate[Y]/math.cos(pitch)) if abs(math.cos(pitch)) >= abs(truncate[Y]) else 0\n # set roll to zero if pitch approaches -1 or 1\n\n self._tiltcomp[X] = self._mag[X] * math.cos(pitch) + self._mag[Z] * math.sin(pitch)\n self._tiltcomp[Y] = self._mag[X] * math.sin(roll) * math.sin(pitch) + \\\n self._mag[Y] * math.cos(roll) - self._mag[Z] * math.sin(roll) * math.cos(pitch)\n self._tiltcomp[Z] = self._mag[X] * math.cos(roll) * math.sin(pitch) + \\\n self._mag[Y] * math.sin(roll) + \\\n self._mag[Z] * math.cos(roll) * math.cos(pitch)\n self._tilt_heading = math.atan2(self._tiltcomp[Y], self._tiltcomp[X])\n\n if self._tilt_heading < 0:\n self._tilt_heading += 2*math.pi\n if self._tilt_heading > 2*math.pi:\n self._heading -= 2*math.pi\n\n self._tilt_heading_degrees = round(math.degrees(self._tilt_heading),2)\n return self._tilt_heading_degrees\n\n except Exception:\n return None", "def wind_angle_to_heading(self, wind_angle):\n return angleSum(self.wind_direction, wind_angle)", "def head_angle_rad(self) -> float:\n return self._head_angle_rad", "def heading_at(self, longitudinal: float) -> float:\n raise NotImplementedError()", "def heading_to_wind_angle(self, heading):\n # TODO: Is wind_direction the heading the wind is coming from, or\n # the heading it's moving towards? 
I assume the former\n res = (heading - self.wind_direction) % 360\n if res > 180:\n res -= 360\n return res", "def getPosHeading(self) :\n\t\treturn (self.avatarNP.getX(), self.avatarNP.getY(), \\\n\t\t\tself.avatarNP.getZ(), (self.avatarNP.getHpr()[0])%360)", "def __get_heading(self, robot_position, robot_yaw):\n abs_heading = math.atan2(self.pinger_loc[1] - robot_position[1],\n self.pinger_loc[0] - robot_position[0])\n return self.normalize(\n abs_heading - robot_yaw + random.gauss(0, self.noise))", "def getHeadingTime(self) -> float:\n return self.timestep_cached_heading_tm", "def update_heading(self):\n if self.waypoint_x == 0:\n self.waypoint_heading = 0 if self.waypoint_y > 0 else 180\n else:\n self.waypoint_heading = round(\n 180 / math.pi * math.atan(self.waypoint_y / self.waypoint_x), 3\n )", "def get_heading(hunter_position, target_position):\n hunter_x, hunter_y = hunter_position\n target_x, target_y = target_position\n heading = atan2(target_y - hunter_y, target_x - hunter_x)\n heading = angle_trunc(heading)\n return heading", "def Get_Heading(x1, y1, x2, y2):\n\n heading = 0\n dx = x2 - x1\n dy = y2 - y1\n\n if dx != 0:\n heading = (90 - math.degrees(math.atan2(dy,dx)) + 360) % 360\n\n elif dy > 0: heading = 0\n\n elif dy < 0: heading = 180\n\n return heading", "def heading_difference(self, other_heading):\n diff = abs(self.heading - other_heading)\n if diff > 180:\n diff = 360 - diff\n return diff", "def heading_diff(desired_heading, current_heading):\n hdiff = desired_heading - current_heading\n # handle 0/360 change at magnetic north\n if abs(hdiff) > 180:\n if hdiff < 0:\n hdiff += 360\n else:\n hdiff -= 360\n return hdiff", "def calculate_heading(self, dOverpass):\n dlat = dOverpass.lat - self.lat\n dlon = dOverpass.long - self.long\n #assert dlat < 0.05 # make sure its small for now!\n #assert dlon < 0.05 # make sure its small for now!\n # incase provided backward\n # the direction is as vector from lat + dlat\n vlat = self.lat + dlat\n vlon = self.long + dlon\n \"\"\"\n calculate heading!\n -- seems geopy does'nt do this yet\n\n https://gist.github.com/jeromer/2005586 has an answer which\n is re-used here...\n\n\n The formulae used is the following:\n θ = atan2(sin(Δlong).cos(lat2),\n cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))\n\n \"\"\"\n diffLong = np.radians(dOverpass.long - self.long)\n lat1 = self.lat\n lat2 = dOverpass.lat\n x = np.sin(diffLong) * np.cos(lat2)\n y = np.cos(lat1) * np.sin(lat2) - (np.sin(lat1)\n * np.cos(lat2) * np.cos(diffLong))\n initial_bearing = np.arctan2(x, y)\n # make into a compass bearing\n initial_bearing = np.degrees(initial_bearing)\n self.true_heading = initial_bearing\n compass_bearing = (initial_bearing + 360) % 360\n self.heading = compass_bearing", "def orientationToHeading(self, orientation):\n res = [0, 0, 0, 0]\n res[0] = orientation.x\n res[1] = orientation.y\n res[2] = orientation.z\n res[3] = orientation.w\n return tf.transformations.euler_from_quaternion(res)[2]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicates whether the turtle's icon is visible. Drawing commands will still work while the turtle icon is hidden. There will just be no indication of the turtle's current location on the screen.
def visible(self): return self._turtle.isvisible()
[ "def is_visible():\n return self.turtle.visible", "def is_visible(sim_info: SimInfo) -> bool:\n return not CommonSimStateUtils.is_hidden(sim_info)", "def is_visible(self):\n return self.container['is_visible']", "def Visible(self) -> bool:", "def is_ruler_visible(self):\n return self.container['is_ruler_visible']", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def isVisible(self):\r\n if self.style.get('display', '').lower() == 'none':\r\n return False\r\n if self.style.get('visibility','').lower() == 'hidden':\r\n return False\r\n return True", "def is_visible(self):\n return self._switcher.isVisible()", "def is_visible(self):\n\n if self._element is None:\n try:\n self._set_element()\n\n except NoSuchUIElement:\n return False\n\n return self._element.is_displayed()", "def is_hidden(self):\r\n if self.status is 'H':\r\n return True\r\n else:\r\n return False", "def is_displayed(self):\n self.wait_element()\n if self.is_element_hidden():\n return False\n else:\n return True", "def visible(visible=True):\n self.turtle.visible = visible\n self.send_report()", "def is_visible(self):\n try:\n return self.element.is_displayed()\n except (NoSuchElementException,\n ElementNotVisibleException,\n StaleElementReferenceException):\n return False", "def is_visible():\n ajustes = Ajustes()\n return ajustes.is_visible", "def is_info_showing(self):\n\t\treturn True if (self.info_span is not None) else False", "def isShown(self):\n return self.shown", "def is_visible(self):\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator = self.locator))\n self.webelement = element\n return bool(self.webelement)", "def inspectedNodeIsVisible(self):\n return self._inspected_node_is_visible", "def visibility(self):\n return self._nfvi_image.visibility" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicates whether the turtle is in draw mode. All drawing calls are active if and only if this mode is True
def drawmode(self): return self._turtle.isdown()
[ "def GetDrawMode(self):\n return self._drawmode", "def isdrawn(self):\n return hasattr(self, 'drawn')", "def isDraw(win):\n return E_DRAW_MIN <= win <= E_DRAW_MAX", "def get_drawing_mode(self) -> int:\n return self._drawing_mode", "def draw(self, canvas) -> bool:\n return False", "def SetDrawMode(self, drawmode):\n self._drawmode = drawmode\n if self.FIRST_PAINT:\n self.PrepareGL()\n self.OnDraw()", "def draw2DOutlineEnabled(self):\n\n opts = self.opts\n overlay = self.overlay\n\n return ((overlay.trimesh is not None) and\n (opts.outline or opts.vertexData is not None))", "def GetDrawOption(self):\n return self._drawoption", "def isDraw(board):\n if (PartB.isWin(board) is False and\n board.count('-') == 0):\n return True\n\n return False", "def can_draw(self):\n return len(self.sub_deck) > 0", "def check_game_draw(self):\n if self._board.get_board_is_full():\n self._game_state = True", "def isdown(self):\n return self.pen == 1", "def setPrimDrawMode(self, primPath, drawMode):\n prim = self._stage.GetPrimAtPath(primPath)\n if not primPath.IsValid():\n return False\n\n if drawMode == self.DrawMode.inherit:\n prim.RemoveProperty(self.drawModeAttribute)\n return True\n if drawMode == self.DrawMode.geometry:\n prim.GetAttribute(self.drawModeAttribute).Clear()\n return True\n if drawMode == self.DrawMode.boundingBox:\n prim.GetAttribute(self.drawModeAttribute).Set(Vt.Token(\"bounds\"))\n return True\n\n return False", "def is_pen_down():\n return self.turtle.pen_down", "def is_visible():\n return self.turtle.visible", "def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode", "def save_drawing_if_necessary(self):\n\n app_doc_data = AppDocData.instance()\n if app_doc_data.activeDrawing and app_doc_data.activeDrawing.modified:\n #if QMessageBox.Yes == QMessageBox.question(self, self.tr(\"Question\"),\n # self.tr(\"Do you want to save drawing?\"),\n # QMessageBox.Yes | QMessageBox.No):\n # self.actionSaveCliked()\n # return True\n if QMessageBox.Ignore == QMessageBox.question(self, self.tr('Continue?'),\n self.tr('Changes may not have been saved.'),\n QMessageBox.Ignore | QMessageBox.Cancel):\n return False\n return True", "def draw (self):\n screen = self.screen\n dirty = False\n for display in self.displays:\n dirty |= display.draw(screen)\n return dirty", "def is_graphic_driver(self):\n if self.class_id == \"0x03\":\n return True\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes this turtle object.
def __del__(self): self.clear() self._screen._removeTurtle(self) del self._turtle
[ "def __del__(self):\n self._screen._removePen(self)\n del self._turtle", "def delete(self):\n del self.shx.atoms[self.index]", "def delete(self):\n self.__parent__.remove(self)", "def delete(self):\n Pet.data.remove(self)", "def delete(self):\n if self.shape is not None:\n self.shape.delete()\n if self in shared.obstacles:\n shared.obstacles.remove(self)", "def destroy(self):\r\n self._obj.destroy()\r\n self._obj = None", "def destroy(self):\n self._obj.destroy()\n self._obj = None", "def destroy(self):\n gameengine.GameEngine().game_objects.remove(self)", "def delete(self):\n with self.locked():\n self.path.delete()", "def delete(self):\r\n with self.locked():\r\n self.path.delete()", "def destroy(self):\n self.game.destroy_object(self.ID)", "def delete(self, obj):\n super(Canvas, self).delete(obj)", "def delete(self):\n for child in self.children:\n child.delete()\n del self", "def delete_current_shape(self):\n print(\"deleting shape!\")\n self.shapes.remove(self.current_shape)\n self.current_shape = None\n self.changed()", "def remove(self):\n traci.vehicle.remove(self.id)", "def remove(self):\n self.sensor.remove()\n del self", "def delete(self):\n self['_deleted'] = True", "def delete_ball(self):\r\n self.movement = \"\"\r\n self.canvas.delete(self.ball)", "def deleteBall(self):\n self._ball = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the turtle forward by the given amount.
def forward(self,distance): assert (type(distance) in [int, float]), "parameter distance:%s is not a valid number" % `distance` self._turtle.forward(distance)
[ "def advance(self, amount=1):\n self._current += amount\n self.redraw()", "def advance_by(self, amount: float):\n if amount < 0:\n raise ValueError(\"cannot retreat time reference: amount {} < 0\"\n .format(amount))\n self.__delta += amount", "def move_forward(self, distance):\r\n return self.move('forward', distance)", "def move_forward_for_angle(self, angle, **kwargs):\n\t\tself._set_speed(kwargs)\n\t\tself.step_direction = FORWARD\n\t\tself._move(self._calculate_steps(angle))", "def move_forward(self):\n self.at(at_pcmd, True, 0, -self.speed, 0, 0)", "def move_forward(self, x, y):\n self.model.grid.move_agent(self, (self.pos[0] + x, self.pos[1] + y))\n self.distance += 1", "def forward(self, amount=1):\n if amount == 0:\n return 0\n assert amount > 0\n amount = int(amount)\n moved = 0\n fetch = self._result.fetch\n while moved != amount:\n new_current = fetch()\n if new_current is None:\n break\n else:\n self._current = new_current\n moved += 1\n return moved", "def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")", "def forward(self, distance):\n self._go(distance)", "def forward(action):\n\t\taction.put(PIN.STPENA, Stepper.rotationStates[Stepper.nextState%4][0])\n\t\taction.put(PIN.STPENB, Stepper.rotationStates[Stepper.nextState%4][1])\n\t\tStepper.nextState += 1", "def move_forward(self, steps, **kwargs):\n\t\tself._set_speed(kwargs)\n\t\tself.step_direction = FORWARD\n\t\tself._move(steps)", "def go_forward() -> None:\n set_motors_speed(FULL_SPEED, FULL_SPEED)", "def move_forward():\n pass", "def move_forward(self, d):\n self.x += d * math.sin(self.theta)\n self.y += d * math.cos(self.theta)", "def move_forward(self,length,draw=True):\r\n new_x = self.x + length * math.cos(math.radians(self.angle))\r\n new_y = self.y + length * math.sin(math.radians(self.angle))\r\n self.draw_tool.line(((self.x,self.y),(new_x,new_y)), fill=(0,0,0),width=2)\r\n self.x = new_x\r\n self.y = new_y", "def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True", "def advanceTime(self, amount):\n self.currentSeconds += amount", "def _step(self):\n self._amount = self._incremental.add(\n self._amount, self._increment_amount)", "def forward(self, dist):\n start = (self.pos_x, self.pos_y)\n self.pos_x += dist * math.cos(math.radians(self.angle))\n self.pos_y += dist * math.sin(math.radians(self.angle))\n self._update_limits()\n end = (self.pos_x, self.pos_y)\n if self.pen_down:\n self.draw.line([start, end], fill=self.colour, width=self.width)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the turtle backward by the given amount.
def backward(self,distance): assert (type(distance) in [int, float]), "parameter distance:%s is not a valid number" % `distance` self._turtle.backward(distance)
[ "def move_backward(self, distance):\r\n return self.move('back', distance)", "def move_backward_for_angle(self, angle, **kwargs):\n\t\tself._set_speed(kwargs)\n\t\tself.step_direction = BACKWARD\n\t\tself._move(self._calculate_steps(angle))", "def move_backward(self):\n self.at(at_pcmd, True, 0, self.speed, 0, 0)", "def moveBackward(self):\r\n\t\t\r\n\t\tself.position -= self.pointer*0.1", "def back(self, distance):\n self._go(-distance)", "def go_backward() -> None:\n set_motors_speed(-FULL_SPEED, -FULL_SPEED)", "def move_down(self, distance):\r\n return self.move('down', distance)", "def move_down(self):\n self.move_step(1)", "def backward(self, speed, seconds=None):\n # Set motor speed and move both backward.\n self._drive_speed(speed)\n self._drive.run(Adafruit_MotorHAT.BACKWARD)\n # If an amount of time is specified, move for that time and then stop.\n if seconds is not None:\n time.sleep(seconds)\n self.stop()", "def down():\n turtle.down()", "def move_down(self):\n self.move_to_position(self.position + 1)", "def move_backward():\n pass", "def move_backward(self, steps, **kwargs):\n\t\tself._set_speed(kwargs)\n\t\tself.step_direction = BACKWARD\n\t\tself._move(steps)", "def lose_weight(self, amount):\n self.weight -= amount", "def go_backward(self):\n command = _build_robovac_command(RobovacModes.GO_BACKWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def withdraw(self, amount):\n self.balance -= amount", "def move_lift_down():\n return _move_lift(0.2)", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def moveDown(self):\r\n\t\t\r\n\t\tself.position -= self.upVector*0.1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The pen color of this pen. The pen color is used for drawing lines and circles. All subsequent draw commands draw using this color. If the color changes, it only affects future draw commands, not past ones. This color is only used for lines and the border of circles. It is not the color used for filling in solid areas (if the ``fill`` attribute is True). See the attribute ``fillcolor`` for solid shapes.
def pencolor(self): return self._pencolor
[ "def penColor( self ):\n return self._penColor", "def get_color(self):\n return(self.pen_color)", "def set_pen_color(self, color: tuple) -> Rectangle:\n self.pen.color = color\n return self", "def set_color(self, color):\n self.pen_color = color", "def linecolor(self):\n return self._linecolor", "def get_color(self):\n return self.color", "def line_color(self) -> String:\r\n from apysc.type import value_util\r\n self._initialize_line_color_if_not_initialized()\r\n line_color: String = value_util.get_copy(value=self._line_color)\r\n return line_color", "def GetDrawColor(self):\n ...", "def markColor(self):\r\n return self.__markColor", "def color(self):\n return self.SUITS[self.suit].get(\"color\")", "def getFillColor(self):\n return self._fillColor", "def color(self) -> curses.color_pair:\n if self.selected:\n return Colors.black_on_white()\n\n if self._path.is_dir():\n return Colors.blue_on_black()\n\n return Colors.default()", "def _get_Color(self):\n return self._currentColor", "def get_color(self, point):\n return self._color.dup()", "def base_color(self):\n return curses.color_pair(3) if self.cycling else curses.color_pair(1)", "def fillcolor(self):\n return self._fillcolor", "def getColor(self):\n return qt.QColor.fromRgbF(*self._color)", "def getColor(self):\n\n return self.pktColor", "def getFontColor(self):\n return self._color" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The fill color of this turtle. The fill color is used for filling in solid shapes. If the ``fill`` attribute is True, all subsequent draw commands fill their insides using this color. If the color changes, it only affects future draw commands, not past ones. This color is only used for filling in the insides of solid shapes. It is not the color used for the shape border. See the attribute ``pencolor`` for the border color.
def fillcolor(self): return self._fillcolor
[ "def fillColor(self, clr=None):\n if clr is None: return self.fill\n self.fill = clr", "def getFillColor(self):\n return getColor() if (fillColor == None) else fillColor", "def fill(self) -> int:\n return self._fill_color", "def sparkline_fill_color(self, sparkline_fill_color):\n\n self._sparkline_fill_color = sparkline_fill_color", "def set_fill_color(self, obj, fill_color):\n try:\n self.itemconfig(obj, fill=fill_color)\n except tkinter.TclError:\n raise tkinter.TclError(\"You can't set the fill color on this object\")", "def setFillColors(self, color):\n self._circle.setFillColor(color)", "def getFillColor(self):\n return self._fillColor", "def setFilled(self, fill):\n isFilled = fill\n repaint()", "def write_fill(self, fill: FillFormat):\n if self.fill_type is not None:\n self._write_fill_type(fill)", "def GetFillAlpha(self):\n return self._attalpha[\"fill\"]", "def setDefaultFill(self, fill):\n defaultFill = fill\n if targetContainer != None:\n targetContainer.invalidate()", "def solid_fill(self, solid_fill):\n\n self.container['solid_fill'] = solid_fill", "def setPointFill(self, fill):\n for point in self.points:\n point.fill = fill", "def get_fill_colour():\n return LevelManager.colour", "def setDefaultFill(self, fill):\n (getLayout()).setDefaultFill(fill)", "def fill_region(self, fill_x, fill_y, color=255, show_key=True):\n self.canvas[fill_x, fill_y] = color\n if show_key:\n self.show_layer(show_key)\n return self", "def solid_fill(self):\n return self.container['solid_fill']", "def gradient_fill(self, gradient_fill):\n\n self.container['gradient_fill'] = gradient_fill", "def position_fill(self, position_fill):\n allowed_values = [\"OPEN_ONLY\", \"REDUCE_FIRST\", \"REDUCE_ONLY\", \"DEFAULT\"] # noqa: E501\n if position_fill not in allowed_values:\n raise ValueError(\n \"Invalid value for `position_fill` ({0}), must be one of {1}\" # noqa: E501\n .format(position_fill, allowed_values)\n )\n\n self._position_fill = position_fill" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicates whether the pen's icon is visible. Drawing commands will still work while the pen icon is hidden. There will just be no indication of the pen's current location on the screen.
def visible(self): return self._turtle.isvisible()
[ "def is_visible():\n return self.turtle.visible", "def is_visible(self):\n return self.container['is_visible']", "def strokeVisible(self):\n return self._strokeVisible", "def is_visible(self):\n return self._switcher.isVisible()", "def IsByPen(self) -> bool:", "def GetGripperVisible(self):\r\n\r\n return self._gripper_visible", "def Visible(self) -> bool:", "def is_visible(self):\n\n if self._element is None:\n try:\n self._set_element()\n\n except NoSuchUIElement:\n return False\n\n return self._element.is_displayed()", "def is_visible(sim_info: SimInfo) -> bool:\n return not CommonSimStateUtils.is_hidden(sim_info)", "def is_ruler_visible(self):\n return self.container['is_ruler_visible']", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def is_visible(self):\n try:\n return self.element.is_displayed()\n except (NoSuchElementException,\n ElementNotVisibleException,\n StaleElementReferenceException):\n return False", "def is_visible(self):\n return self.window.active_panel() == self.full_name", "def isVisible(self):\r\n if self.style.get('display', '').lower() == 'none':\r\n return False\r\n if self.style.get('visibility','').lower() == 'hidden':\r\n return False\r\n return True", "def is_on() -> bool:\n return __display.is_on()", "def is_outline_shown(self):\n return self.container['is_outline_shown']", "def visible(self):\n return ctypes.windll.user32.IsWindowVisible(self.hwnd)", "def isShown(self):\n return self.shown" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes this pen object.
def __del__(self): self._screen._removePen(self) del self._turtle
[ "def delete(self, obj):\n super(Canvas, self).delete(obj)", "def delete(self):\n Pet.data.remove(self)", "def delete(self):\n del self.shx.atoms[self.index]", "def delete(self):\n self.__parent__.remove(self)", "def delX(self):\n del self.components[0]", "def delete_current_shape(self):\n print(\"deleting shape!\")\n self.shapes.remove(self.current_shape)\n self.current_shape = None\n self.changed()", "def __delitem__(self, key):\n self.deleteCurve(key)", "def delete_curve(self, curve):\n self.command(f\"CRVDEL {curve}\")", "def _removePen(self,pen):\n if pen in self._pencils:\n self._pencils.remove(pen)", "def delete(self):\n del self.characters[self.cursor.position]", "def delete(self):\n if self.shape is not None:\n self.shape.delete()\n if self in shared.obstacles:\n shared.obstacles.remove(self)", "def __del__(self):\n self.clear()\n self._screen._removeTurtle(self)\n del self._turtle", "def delete(self):\n where = {self.primary_key: self.obj_id}\n return self.DB.delete(self.table, where=\"%s=%s\" % (self.primary_key, self.obj_id))", "def delete(self):\n if self.parent() is not None:\n self.parent().removeRow( self.id() )\n else:\n self.model().removeRow( self.id() )", "def __del__(self):\n Window.nr_set.remove(self._nr)", "def deleteRigPart(self):\n self.setParent(None)\n self.setVisible(False)\n self.deleteLater()", "def removeFromParentAndDelete(self):\n return _libsbml.KineticLaw_removeFromParentAndDelete(self)", "def remove_object_from_canvas(self, tk_object):\n self.canvas.delete(tk_object)", "def erase(self):\n\t\tpass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws a line segment (dx,dy) from the current pen position
def drawLine(self, dx, dy): assert (type(dx) in [int, float]), "parameter x:%s is not a valid number" % `dx` assert (type(dy) in [int, float]), "parameter y:%s is not a valid number" % `dy` x = self._turtle.xcor() y = self._turtle.ycor() self._turtle.setposition(x+dx, y+dy)
[ "def draw_line(x1, y1, x2, y2):\n penup()\n setposition(x1,y1)\n pendown()\n setposition(x2,y2)\n update()", "def draw(self, pen):\r\n lines = []\r\n vertices = self.vertices\r\n print(vertices)\r\n if vertices:\r\n for i in range(len(vertices)-1):\r\n lines.append(Line(vertices[i],vertices[i+1], self.pencolor))\r\n lines.append(Line(vertices[-1],vertices[0]))\r\n pen.color(self.pencolor, self.fillcolor)\r\n if self.fillcolor: pen.begin_fill()\r\n for l in lines:\r\n l.draw(pen)\r\n pen.end_fill()", "def lineDraw(self):\r\n self.canvas.lineDrawSelect()", "def draw_dashed_line(start_pos, end_pos):\n\n line.color(\"white\")\n xs, ys = start_pos\n xe, ye = end_pos\n line.penup()\n line.goto(xs, ys)\n line.pendown()\n\n cur_x = xs\n while cur_x <= xe:\n cur_x += 40\n line.goto(cur_x, ys)\n line.penup()\n cur_x += 20\n line.goto(cur_x, ys)\n line.pendown()", "def draw_line(self, x):\n self.PDF.setStrokeColor(black01)\n self.PDF.setLineWidth(1)\n self.PDF.line(75, x, 550, x)\n self.PDF.setStrokeColor(\"black\")", "def drawLine(self, line):\n\n line = line.getLineItem()\n line.setPen(self.pen)\n self.addItem(line)", "def Draw_FlightLine( self, dc, selected=False ):\n\n scaling,same_scaling = dc.GetUserScale()\n\n # Set the pen - green\n dc.SetPen(wx.Pen('#00DD00', 1, wx.SOLID))\n\n\n # Decode the start and end points.\n #print self.parent.p2.glue_points.keys()\n if self.startpt in self.parent.p2.glue_points.keys():\n start_point = self.parent.p2.glue_points[self.startpt]\n else:\n print \"Warning \", self.startpt, \"not a gluepoint\"\n start_point = None\n \n if self.endpt in self.parent.p2.glue_points.keys():\n end_point = self.parent.p2.glue_points[self.endpt]\n else:\n print \"Warning \", self.endpt, \"not a gluepoint\"\n end_point = None\n\n if start_point and end_point:\n print \"Draw flightline between\", start_point, end_point\n dc.DrawLinePoint( start_point, end_point + wx.Point(1,1) )", "def doLineDraw(self, sx, sy, ex, ey):\r\n self.parent.addLine(sx, sy, ex, ey, self.set_colour)", "def draw_X(x, y):\n color('blue')\n pensize(10)\n draw_line(x - 40, y - 40, x + 40, y + 40)\n draw_line(x - 40, y + 40, x + 40, y - 40)", "def draw(x,y,x1,y1,d,color=1):\n d.add(dxf.line((x,y),(x1,y1),color=color, layer='LINES',thickness=0.01))", "def draw(self):\n # select pen 1\n self.ser.write('SP 1;')\n for point in self.coordinates:\n # place pen at this points location\n x = self.coordinate_to_plotter(point.get_x())\n y = self.coordinate_to_plotter(point.get_y())\n str_command = \"PA {} {};\".format(x, y)\n self.ser.write(str_command)\n # put pen down\n self.ser.write('PD;')\n\n # check if there is a connection in each direction,\n # think of it like a compass North, North-East etc.\n self.check_move(point, x, y)\n # mark this point as drawn\n point.plotted()\n # at end bring pen back up\n self.ser.write(\"PU;\")\n # select pen 0, disengage pen\n self.ser.write('SP 0;')", "def draw_line(x1, y1, x2, y2):\r\n #global _canvas\r\n #global _current_line_thickness\r\n #global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n path = Path(Point(x1, y1), Point(x2, y2))\r\n path.setBorderWidth(_current_line_thickness)\r\n path.setBorderColor(_current_color)\r\n _canvas.add(path)", "def __GSAnchor_drawPoints__(self, pen):\n\tpen.beginPath()\n\tpen.addPoint((self.x, self.y), segmentType=\"move\", smooth=False, name=self.name)\n\tpen.endPath()", "def draw_line(self, x0, y0, x1, y1, color=Color['white']):\n pygame.draw.line(self.display, color, (x0, y0), (x1, 
y1))", "def dline(x, y):\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(0.0, 0.0, 1.0)\n glPointSize(10.0)\n glBegin(GL_POINTS)\n while (x <= y):\n glVertex2f(x, x)\n x += 0.05\n glEnd()\n glFlush()", "def beginPath(self, identifier=None):\n from fontTools.pens.pointPen import PointToSegmentPen\n self._pointToSegmentPen = PointToSegmentPen(self)\n self._pointToSegmentPen.beginPath()", "def drawSlope(self):\n length = sqrt(1 + self.slope**2) # Length of the line segment over 1 x-unit\n xOffset = (segmentLength / length) / 2 # Figures out how many times the length of the 1 unit length fits into the desired length\n # then divides by 2 becuase half is on the left and half on the right of the center\n\n\n # Left end point\n xLeft = self.x - xOffset\n yLeft = (self.slope * (xLeft - self.x)) + self.y\n\n # Right end point\n xRight = self.x + xOffset\n yRight = (self.slope * (xRight - self.x)) + self.y\n\n\n # Converts the left and right end points from cartesian coordinates to screen coordinates\n left = cartesianToScreen(xLeft , yLeft)\n right = cartesianToScreen(xRight, yRight)\n\n\n pygame.draw.aaline(display, self.color, left, right, 1) # DRAWS THE LINE AHHHHHHHHHHHHHHHHHH :P", "def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')", "def startLineDrawing(self, startPos):\n self.line = LineNodePath(render2d, thickness=2, colorVec=(0.8,0.8,0.8,1))\n self.line.moveTo(startPos)\n t = taskMgr.add(self.drawLineTask, \"drawLineTask\")\n t.startPos = startPos" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a circle of radius r centered on the pen.
def drawCircle(self, r): assert (type(r) in [int, float]), "parameter r:%s is not a valid number" % `r` x = self._turtle.xcor() y = self._turtle.ycor() # Move the pen into position fstate = self._turtle.pendown() if fstate: self._turtle.penup() self._turtle.setposition(x, y-r) if fstate: self._turtle.pendown() # Draw the circle and fill if necessary self._turtle.circle(r) self.flush() self._turtle.forward(0) # Return the pen to the position if fstate: self._turtle.penup() self._turtle.setposition(x, y) if fstate: self._turtle.pendown()
[ "def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)", "def circle(self, x, y, r):\n # Render units to points.\n xpt, ypt, rpt = upt(x, y, r)\n self.b.oval(xpt-rpt, ypt-rpt, 2*rpt, 2*rpt)", "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_circle(self, x, y, radius, color=Color['white']):\n pygame.draw.circle(self.display, color, (x, y), radius)", "def draw_circle(self, color, center, radius, width):\n _c = self.T.itrans(center)\n pg.draw.circle(self.screen, color, _c(), radius, width)", "def circle(self, x, y, r, solid = False):\n px = 0\n py = r\n d = 1 - 2 * r\n err = 0\n while py >= 0:\n if solid:\n for i in range(x - px, x + px + 1):\n self.pixel(i, y + py, 1)\n self.pixel(i, y - py, 1)\n else:\n self.pixel(x + px, y + py, 1)\n self.pixel(x + px, y - py, 1)\n self.pixel(x - px, y + py, 1)\n self.pixel(x - px, y - py, 1)\n err = 2 * (d + py) - 1\n if d < 0 and err <= 0:\n px += 1\n d += 2 *px + 1\n else:\n err = 2 * (d - px) - 1\n if d > 0 and err > 0:\n py -= 1\n d += 1 - 2 * py\n else:\n px += 1\n d += 2 * (px - py)\n py -= 1", "def draw_circle(view, centre_x, centre_y, circle_r):\n view.newCircle(centre_x, centre_y, circle_r)\n view.viewAll()", "def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))", "def plot_circle(self,x,y,r,fc='r'):\n cir = mpl.patches.Circle((x,y), radius=r, fc=fc)\n self.patch = mpl.pyplot.gca().add_patch(cir)", "def _circle(i, r=.05):\n\treturn Circle((i, 0), r, fill=True, color='black')", "def plot_circle(r,**kw):\n try:\n fmt = kw.pop('fmt')\n except:\n fmt='k'\n try:\n label = kw.pop('label')\n except:\n label = None\n x = num.arange(-r,r+0.01,0.01)\n y = num.sqrt(num.fabs(r**2. 
- x**2.))\n pyplot.plot(x,y,fmt,**kw)\n pyplot.plot(x,-y,fmt,label=label,**kw)", "def drawCircle(myturtle, x, y, r):\n\n #Getting new turtle object and draws circle\n myturtle.penup()\n myturtle.setposition(x, y)\n myturtle.pendown()\n myturtle.circle(r)", "def draw_circle(centerx, centery, radius):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n circle = Circle()\r\n circle.move(centerx, centery)\r\n circle.setRadius(radius)\r\n _set_not_filled(circle)\r\n _canvas.add(circle)", "def _create_circle(self, x, y, r, **kwargs):\r\n return self.create_oval(x-r, y-r, x+r, y+r, **kwargs)", "def create_circle(self, x, y, r, **kwargs):\n return self.create_oval(*self.circ_to_oval(x, y, r), **kwargs)", "def draw_rounded(cr, x,y,width,height, radius):\n degrees = math.pi / 180.0\n cr.arc(x + width - radius, y + radius, radius, -90 * degrees, 0 * degrees)\n cr.arc(x + width - radius, y + height - radius, radius, 0 * degrees, 90 * degrees)\n cr.arc(x + radius, y + height - radius, radius, 90 * degrees, 180 * degrees) # ;o)\n cr.arc(x + radius, y + radius, radius, 180 * degrees, 270 * degrees)\n cr.close_path()", "def DrawSolidCircle(self, center_v, radius, axis, color):\n color = self.convertColor(color)\n radius *= self.camera.scale.x\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n center = self.toScreen(center_v)\n pygame.draw.circle(self.surface, color, center, radius, 1)", "def draw_circle_filled(center_x, center_y, radius, color):\n width = radius\n height = radius\n draw_ellipse_filled(center_x, center_y, width, height, color)", "def draw(self):\n x, y = self.circle.get_center() # Unpack center's coordinates\n radius = self.circle.get_radius()\n self.turtle.penup() # Lift pen\n self.turtle.setposition(x, y) # Move pen to (x,y)\n self.turtle.pendown() # Place pen\n self.turtle.dot() # Draw a dot at the circle's center\n self.turtle.penup() # Lift pen\n self.turtle.setposition(x, y - radius) # Position pen to draw rim of circle\n self.turtle.pendown() # Place pen to draw\n self.turtle.circle(radius) # Draw the circle\n self.turtle.penup() # Lift pen" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fills in the current drawing, but retains state. Normally, an object is not filled until you set the state to False. Calling this method executes this fill, without setting the state to False. If fill is False, this method does nothing.
def flush(self): if self.fill: self._turtle.fill(False) self._turtle.fill(True)
[ "def setFilled(self, fill):\n isFilled = fill\n repaint()", "def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)", "def fill():\n # Switch in edit mode\n bpy.ops.object.mode_set(mode = 'EDIT')\n \n # Fill hole\n bpy.ops.mesh.fill()", "def setDefaultFill(self, fill):\n defaultFill = fill\n if targetContainer != None:\n targetContainer.invalidate()", "def _switch(self):\n self.fill= not self.fill", "def fill(self):\n\n self.flush()\n self.target.paste(self.fullFg, (0, 0))", "def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_prices_from_fill(event)\n self.update_holdings_from_fill(event)", "def fill_oval(self, x, y, width, height, color='black'):\n if type(color) == tuple:\n color = DrawCanvas.color_name(color)\n self.canvas.create_oval(x, y, x + width - 1, y + height - 1, outline=color, fill=color)\n if self.auto_update:\n self.canvas.update()", "def setDefaultFill(self, fill):\n (getLayout()).setDefaultFill(fill)", "def solid_fill(self, solid_fill):\n\n self.container['solid_fill'] = solid_fill", "def 結束填(我):\n if 我.是否正在填色():\n if len(我._fillpath) > 2:\n 我.幕._drawpoly(我._fillitem, 我._fillpath,\n fill=我._fillcolor)\n if 我.undobuffer:\n 我.undobuffer.push((\"dofill\", 我._fillitem))\n 我._fillitem = 我._fillpath = 無\n 我._update()", "def _draw(self):\r\n if self.changed or self.alwaysDirty:\r\n self.on_draw()\r\n self.changed = False\r\n return", "def redraw(self):\n self.turtle.clear() # Clear the drawing screen\n self.draw() # Redraw the circle object", "def setPointFill(self, fill):\n for point in self.points:\n point.fill = fill", "def fill_region(self, fill_x, fill_y, color=255, show_key=True):\n self.canvas[fill_x, fill_y] = color\n if show_key:\n self.show_layer(show_key)\n return self", "def write_fill(self, fill: FillFormat):\n if self.fill_type is not None:\n self._write_fill_type(fill)", "def redraw(self):\n self.undraw()\n self.draw()", "def redraw(self):\n for i, j in self.rectangles:\n self.canvas.itemconfig(self.rectangles[(i, j)], fill=self.check_colour((i, j)))\n if self.check_visible(self.coord):\n self.canvas.itemconfig(self.rectangles[self.coord], fill=\"red\")", "def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
hgtStartData is the source data from the NASA JPL topological data
def __init__(self, hgtStartData): self.data = [] for row in hgtStartData: toAdd = [] for height in row: toAdd.append([height, 0]) self.data.append(toAdd) self.maxX = len(hgtStartData[0]) - 1 self.maxY = len(hgtStartData) - 1 self.minFloodHeight = 0
[ "def test_data_source_soaps_id_head(self):\n pass", "def prep_data_JLP(path):\n journeys_all = pd.read_csv(\n path,\n parse_dates=['Start_Date_of_Route', 'Start_Time_of_Route',\n 'End_Time_of_Route'],\n dayfirst=True)\n branches = gv.STORE_SPEC.keys()\n journeys_all['date'] = pd.to_datetime(\n journeys_all['Start_Date_of_Route']).dt.date\n journeys_all['Start_Time_of_Route'] = (\n journeys_all['Start_Time_of_Route'] - dt.datetime(1900, 1, 1))\n journeys_all['Start_Time_of_Route'] = (\n journeys_all['Start_Time_of_Route']\n + journeys_all['Start_Date_of_Route'])\n journeys_all['End_Time_of_Route'] = (\n journeys_all['End_Time_of_Route'] - dt.datetime(1900, 1, 1))\n journeys_all['End_Time_of_Route'] = (\n journeys_all['End_Time_of_Route']\n + journeys_all['Start_Date_of_Route'])\n journeys_all['Route_Time'] = (\n journeys_all['End_Time_of_Route']\n - journeys_all['Start_Time_of_Route']).dt.total_seconds()/3600\n\n jour = {}\n for branch in branches:\n jour[branch] = journeys_all[journeys_all['Branch_ID'] == branch]\n jour[branch] = get_prev_arrival(jour[branch])\n jour[branch].sort_values(by=['date', 'Route_ID'], inplace=True)\n jour[branch].set_index(['date', 'Route_ID'], inplace=True)\n jour[branch]['Energy_Required'] = (\n jour[branch]['Planned_total_Mileage']\n * gv.VSPEC[gv.STORE_SPEC[branch]['V']]['D']\n + jour[branch]['Route_Time']*gv.REF_CONS)\n journeys = pd.concat(jour)\n return journeys", "def extract_head(data):\n tl = data['tls'][data['i']];\n br = data['brs'][data['i']];\n head = extract_area(data,(tl,br));\n return head;", "def compute_head():\n for node, datadict in G.nodes.items():\n if datadict[\"Tank\"] == True:\n start = node\n H = {start: 164}\n visited, queue = set(), [start]\n while queue:\n node = queue.pop(0)\n if node not in visited:\n visited.add(node)\n queue.extend(set(G.neighbors(node)) - visited)\n for neighbour in G.neighbors(node):\n l = G[node][neighbour][\"LENGHT\"]\n K = 10.67 / 120 ** 1.852\n def contain(d, k1, k2):\n if k1 in d:\n if k2 in d[k1]:\n return True\n return False\n if (node, neighbour) in dict(G.nodes):\n q = G[node][neighbour][\"Q\"]\n else:\n q = - G[neighbour][node][\"Q\"]\n d = G[node][neighbour][\"DIAMETER\"]\n H[neighbour] = H[node] - (K * l * math.copysign((abs(q) / 1000) ** 1.852, q) / (d / 1000) ** 4.8704)\n return H", "def getOLAPSource():", "def mdsData(shotno=None,\n\t\t\tdataAddress=['\\HBTEP2::TOP.DEVICES.SOUTH_RACK:CPCI_10:INPUT_94',\n\t\t\t\t\t\t '\\HBTEP2::TOP.DEVICES.SOUTH_RACK:CPCI_10:INPUT_95'],\n\t\t\ttStart=[],tStop=[]):\t\t\t\n\t\t\n\t# convert dataAddress to a list if it not one originally \n\tif type(dataAddress) is not list:\n\t\tdataAddress=[dataAddress];\n\t\t\n#\t# if shotno == -1, use the latest shot number\n#\tif shotno==-1:\n#\t\tshotno=latestShotNumber()\n\t\t\n\t# init arrays\n\ttime = []\n\tdata = []\n\t\t\n\t# check if computer is located locally or remotely. The way it connects to spitzer remotely can only use one method, but locally, either method can be used. 
\n\tif _ON_HBTEP_SERVER==True: # if operating local to the tree\n\t\t# converted from Ian's code\n\t\t\n\t\ttree = _mds.Tree('hbtep2', shotno) \n\t\tfor i in range(0,len(dataAddress)):\n\n\t\t\tnode = tree.getNode(dataAddress[i])\t\t\t#Get the proper node\t\n\t\t\tdata.append(node.data())\t\t\t \t \t#Get the data from this node \n\t\tif type(data[0]) is _np.ndarray: # if node is an array, return data and time\n\t\t\ttime = node.dim_of().data()\t\t\n\n\t\n\telse: # operaeting remotely\n\t\n\t\t# if shotno is specified, this function gets its own mdsConn\n\t\tif type(shotno) is float or type(shotno) is int or type(shotno) is _np.int64:\n\t\t\tmdsConn=_initRemoteMDSConnection(shotno);\n\n\t\tfor i in range(0,len(dataAddress)):\n\t\t\tdata.append(mdsConn.get(dataAddress[i]).data())\n\t\t\n\t\t# if data is an array, also get time\n\t\tif type(data[0]) is _np.ndarray:\n\t\n\t\t\ttime = mdsConn.get('dim_of('+dataAddress[0]+')').data(); # time assocated with data\n \n\tif time != [] and type(tStop)!=list:\n\t\t# trim time and data\n\t\ttime,data= _trimTime(time,data,tStart,tStop)\n\t\t\n\tif time != []:\n\t\treturn data, time\n\telse: \n\t\treturn data", "def get_start_state_data(start_state: int, states: [State]) -> tuple:\n first_node = 0\n for state in states:\n if state.trigs:\n for trig in state.trigs:\n if trig.source == start_state:\n first_node = trig.target\n return (get_state_by_id(states, first_node, \"new\").new_id, get_state_by_id(states, first_node, \"old\").y,\n (get_state_by_id(states, first_node, \"new\").x - 2))", "def starting_nodes(self):\r\n return self.start_node", "def create_hotstart(self,ntracers=2,filename='hotstart.nc',tr_nd=0.0,time=0.0,iths=0,ifile=0,elev=0.0,u=0.0,v=0.0,use_ice=False,ice_ntr=3,ice_free_flag=1):\n\n import netCDF4\n nc = netCDF4.Dataset(filename,'w',format='NETCDF4_CLASSIC')\n nc.createDimension('node',self.nnodes)\n nc.createDimension('elem',self.nelements)\n nc.createDimension('side',self.nsides)\n nc.createDimension('nVert',self.znum)\n nc.createDimension('ntracers',ntracers)\n nc.createDimension('one',1)\n nc.createDimension('three',3)\n\n vv = nc.createVariable('time','f8',('one',))\n vv[:] = time\n vv = nc.createVariable('iths','i',('one',))\n vv[:] = iths \n vv = nc.createVariable('ifile','i',('one',))\n vv[:] = ifile\n vv = nc.createVariable('idry_e','i',('elem',))\n vv[:] = 0\n vv = nc.createVariable('idry_s','i',('side',))\n vv[:] = 0\n vv = nc.createVariable('idry','i',('node',))\n vv[:] = 0\n vv = nc.createVariable('eta2','f8',('node',))\n vv[:] = elev\n vv = nc.createVariable('we','f8',('elem','nVert'))\n vv[:] = 0.0\n vv = nc.createVariable('su2','f8',('side','nVert'))\n vv[:] = u\n vv = nc.createVariable('sv2','f8',('side','nVert'))\n vv[:] = v\n vv = nc.createVariable('q2','f8',('node','nVert'))\n vv[:] = 0.0\n vv = nc.createVariable('xl','f8',('node','nVert'))\n vv[:] = 0.0\n vv = nc.createVariable('dfv','f8',('node','nVert'))\n vv[:] = 0.0\n vv = nc.createVariable('dfh','f8',('node','nVert'))\n vv[:] = 0.0\n vv = nc.createVariable('dfq1','f8',('node','nVert'))\n vv[:] = 0.0\n vv = nc.createVariable('dfq2','f8',('node','nVert'))\n vv[:] = 0.0\n\n if use_ice:\n nc.createDimension('ice_ntr',ice_ntr)\n nc.createDimension('two',2)\n\n vv = nc.createVariable('ice_free_flag','i',('one',))\n vv[:] = ice_free_flag\n vv = nc.createVariable('ice_velocity_x','f8',('node',))\n vv[:] = 0.0\n vv = nc.createVariable('ice_velocity_y','f8',('node',))\n vv[:] = 0.0\n vv = nc.createVariable('ice_water_flux','f8',('node',))\n vv[:] = 0.0\n vv = 
nc.createVariable('ice_heat_flux','f8',('node',))\n vv[:] = 0.0\n vv = nc.createVariable('ice_surface_T','f8',('node',))\n vv[:] = 0.0\n vv = nc.createVariable('ice_ocean_stress','f8',('node','two'))\n vv[:] = 0.0\n vv = nc.createVariable('ice_sigma11','f8',('elem',))\n vv[:] = 0.0\n vv = nc.createVariable('ice_sigma12','f8',('elem',))\n vv[:] = 0.0\n vv = nc.createVariable('ice_sigma22','f8',('elem',))\n vv[:] = 0.0\n vv = nc.createVariable('ice_tracers','f8',('node','ice_ntr'))\n vv[:] = 0.0\n\n nc.sync()\n\n # write tracer concentrations on nodes\n v = nc.createVariable('tr_nd','f8',('node','nVert','ntracers'))\n v[:] = tr_nd\n nc.sync()\n v = nc.createVariable('tr_nd0','f8',('node','nVert','ntracers'))\n v[:] = tr_nd\n nc.sync()\n v = nc.createVariable('tr_el','f8',('elem','nVert','ntracers'))\n nc.sync()\n\n self.hotstart_nc = nc", "def get_fwhm_startend(self) :\n self.fwhm_startobs = self.primary_header['HIERARCH ESO TEL AMBI FWHM START']\n self.fwhm_endobs = self.primary_header['HIERARCH ESO TEL AMBI FWHM END']", "def start_date(dataset):\n epoch = epoch_from_nc(dataset)\n return dt.datetime.fromordinal(epoch.toordinal()+\n int(dataset.variables['time'][0]))", "def get_heads(model_name,workspace):\n headfile = '{}.hds'.format(model_name)\n fname = os.path.join(workspace,headfile) \n hdobj = fp.utils.HeadFile(fname, precision='double') \n head = hdobj.get_data()\n return head", "def get_source_data(\n self, source_key: str, start: datetime, resolution: timedelta, end: Optional[datetime] = None\n ):\n assert start.tzinfo\n assert resolution.total_seconds() == int(resolution.total_seconds())\n params = {\n \"start\": int(start.timestamp()),\n \"end\": int(end.timestamp()) if end else int(datetime.now().timestamp()),\n \"resolution\": f\"P{resolution.days}DT{resolution.seconds}S\",\n }\n return self.get(f\"org/{self.org_id}/sources/{source_key}/data\", params=params)", "def hcgps(data_src, min_supp=MIN_SUPPORT, max_iteration=MAX_ITERATIONS, step_size=STEP_SIZE, return_gps=False):\n # Prepare data set\n d_set = DataGP(data_src, min_supp)\n d_set.init_attributes()\n attr_keys = [GI(x[0], x[1].decode()).as_string() for x in d_set.valid_bins[:, 0]]\n\n if d_set.no_bins:\n return []\n\n # Parameters\n it_count = 0\n var_min = 0\n counter = 0\n var_max = int(''.join(['1'] * len(attr_keys)), 2)\n eval_count = 0\n\n # Empty Individual Template\n best_sol = structure()\n candidate = structure()\n\n # Best Cost of Iteration\n best_costs = np.empty(max_iteration)\n best_patterns = []\n str_best_gps = list()\n str_iter = ''\n str_eval = ''\n repeated = 0\n\n # generate an initial point\n best_sol.position = None\n # candidate.position = None\n if best_sol.position is None:\n best_sol.position = np.random.uniform(var_min, var_max, N_VAR)\n # evaluate the initial point\n apply_bound(best_sol, var_min, var_max)\n best_sol.cost = costfxn(best_sol.position, attr_keys, d_set)\n\n # run the hill climb\n while counter < max_iteration:\n # while eval_count < max_evaluations:\n # take a step\n candidate.position = None\n if candidate.position is None:\n candidate.position = best_sol.position + (random.randrange(var_min, var_max) * step_size)\n apply_bound(candidate, var_min, var_max)\n candidate.cost = costfxn(candidate.position, attr_keys, d_set)\n\n if candidate.cost < best_sol.cost:\n best_sol = candidate.deepcopy()\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, best_sol.cost)\n\n best_gp = validategp(d_set, decodegp(attr_keys, best_sol.position))\n \"\"\":type best_gp: GP\"\"\"\n 
is_present = isduplicate(best_gp, best_patterns)\n is_sub = amcheck(best_patterns, best_gp, subset=True)\n if is_present or is_sub:\n repeated += 1\n else:\n if best_gp.support >= min_supp:\n best_patterns.append(best_gp)\n str_best_gps.append(best_gp.print(d_set.titles))\n\n try:\n # Show Iteration Information\n # Store Best Cost\n best_costs[it_count] = best_sol.cost\n str_iter += \"{}: {} \\n\".format(it_count, best_sol.cost)\n except IndexError:\n pass\n it_count += 1\n\n if max_iteration == 1:\n counter = repeated\n else:\n counter = it_count\n # Output\n out = json.dumps({\"Algorithm\": \"LS-GRAD\", \"Best Patterns\": str_best_gps, \"Iterations\": it_count})\n \"\"\":type out: object\"\"\"\n if return_gps:\n return out, best_patterns\n else:\n return out", "def start_gap( veh_data, tp):\n veh_data['PSN'] = -1\n veh_data['PSN_time_to_end_gap'] = tp + self.random_generator['GapTimeout']\n veh_data['PSN_distance_to_end_of_gap'] = veh_data['total_dist_traveled'] + self.random_generator['GapDistance']\n if self.CF.Control[\"OutputLevel\"] >= 2:\n logger.debug('Vehicle ID %s gap started - time to until gap ends: %s, distance until gap ends: %s' % (veh_data['vehicle_ID'],\\\n veh_data['PSN_time_to_end_gap'],veh_data['PSN_distance_to_end_of_gap']))\n veh_data['privacy_gap_start'] = tp\n veh_data['in_privacy_gap'] = True", "def get_tmin(self):\n tmin = min(sorted(self.srcData.keys()))\n return tmin", "def initialize_trajectory_data(self):\n\n # Variables for one trajectory.\n # Each trajectory may consist of multiple episodes.\n self.trajectory_states = []\n self.trajectory_actions = []\n self.trajectory_values = []\n self.trajectory_returns = []\n self.trajectory_means = []\n self.trajectory_logstds = []\n self.trajectory_neg_logprobs = []\n self.trajectory_per_episode_rewards = []\n self.trajectory_per_episode_lengths = []\n self.trajectory_per_step_rewards = []\n self.trajectory_dones = []\n self.env_states = []", "def getDetectedSource(self):\n return self.ioc.getCollection('SourceData').find_one(\n {\n 'grease_data.scheduling.server': ObjectId(self.ioc.getConfig().NodeIdentity),\n 'grease_data.scheduling.start': None,\n 'grease_data.scheduling.end': None\n },\n sort=[('grease_data.createTime', pymongo.DESCENDING)]\n )", "def test_readHeadData(self):\n # create a unloaded river unit to just check the readHeadData() method.\n s = spillunit.SpillUnit()\n # Put the test data into the method\n s._readHeadData(self.spill_unitdata, 0) \n \n self.assertEqual(s.unit_type, 'spill')\n self.assertEqual(s.unit_category, 'spill')\n \n self.assertEqual(s._name, '1.056_SU')\n self.assertEqual(s._name_ds, '1.056_SD')\n self.assertEqual(s.head_data['comment'].value, 'A spill comment')\n self.assertEqual(s.head_data['weir_coef'].value, 1.200)\n self.assertEqual(s.head_data['modular_limit'].value, 0.900)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the topological height at (x, y)
def getHeight(self, x, y): if x > self.maxX or y > self.maxY or x < 0 or y < 0: return 10000000 # effectively infinity return self.data[y][x][0]
[ "def find_height(self, x, z):\n x_index = bisect_left(self.x_terrain, x)\n z_index = bisect_left(self.z_terrain, z)\n if x_index < len(self.x_terrain)-2:\n x_finded = self.x_terrain[x_index+1]\n else :\n x_finded = self.x_terrain[-1]\n if z_index < len(self.z_terrain)-2:\n z_finded = self.z_terrain[z_index+1]\n else :\n z_finded = self.z_terrain[-1]\n return self.height_for_2d_position.get((x_finded, z_finded))", "def height(self) -> float:\n return self.y2 - self.y1", "def get_height(self):\n def _get_height(node, height=None):\n if not height:\n height = self._get_level(node) + 1\n if node.left:\n height = _get_height(node.left, height+1)\n if node.right:\n height = max(height, _get_height(node.right, height+1))\n if not node.left and not node.right:\n height = self._get_level(node)\n return height\n return _get_height(self.root)", "def height(self, p=None):\r\n if p is None:\r\n \tp = self.root()\r\n return self._height2(p) # start height2 recursion\r", "def height(t):\n if t.is_empty:\n return 0\n else:\n left = height(t.left)\n right = height(t.right)\n \n return 1 + max([left, right])", "def node_y_dimensionality(self) -> int:\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)", "def get_height(self, obj):\n if len(self.coords(obj)) == 2: # two-dimensional coords\n return self.bbox(obj)[3] - self.bbox(obj)[1]\n return self.coords(obj)[3] - self.coords(obj)[1]", "def getHeight(self, x, z):\n return self.get_ground_level(x, z)", "def height(self):\n\n # if tree is empty\n if self.node is None:\n\n return -1\n\n # else\n return max(self.node.node_height(), self.node.node_height())", "def height(self, p=None):\n if p is None:\n p = self.root()\n return self._height2(p)", "def height(poly):\n num = len(poly) - 1\n if abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]):\n return dist(poly[num], poly[0])\n elif abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]):\n return dist(poly[1], poly[0])\n else:\n return min(dist(poly[num], poly[0]), dist(poly[1], poly[0]))", "def _height(node):\n\n if not node:\n return 0\n\n return 1 + max(_height(node.left), _height(node.right))", "def height(self):\n return self.z", "def getMaxY(self, n: LNode) -> float:\n # node size + margins + inside shift etc\n rootNode = self.root[n]\n return (self.y[rootNode]\n + self.innerShift[n]\n + n.getSize().y\n + n.getMargin().bottom)", "def get_height(self, treenode=self):\n\t\treturn self.__get_height(treenode)", "def getHeightMap(self, x: int, z: int) -> int:\n\t\treturn self.heightMap[(z << 4) | x]", "def _node_height(self, node):\n if node is None:\n height = 0\n else:\n height = node._height\n return height", "def height(self) -> int:\n return self._obj[self.y_dim].size", "def height(self):\n if self.is_leaf():\n return 0\n else:\n return max(c.height() + c.length for c in self.children)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the level of water at the point (x, y)
def getWater(self, x, y): if x > self.maxX or y > self.maxY or x < 0 or y < 0: raise Exception("accessed an invalid position in method getWater") return self.data[y][x][1]
[ "def water_depth(self,x,y):\n bed=self.bed_topo(x,y)\n D = -bed*(bed<0.0)\n \n return D", "def get_water_level(self):\n return self.water_level", "def water_depth(self,x):\n bed=self.bed_topo(x)\n D = -bed*(bed<0.0)\n return D", "def get_water_depth_at(x: float, z: float, surface_level: int=0) -> float:\n from terrain import get_water_depth\n return get_water_depth(x, z, level=surface_level)", "def get_current_water_level(self):\n \n url = f'http://waterservices.usgs.gov/nwis/iv/?format=json&sites={self.site_number}&parameterCd=00060,00065&siteStatus=all'\n\n response = requests.request(\"GET\", url)\n data = json.loads(response.text)\n \n #parses json response to get only value of current water level for given river\n current_water_level = data['value']['timeSeries'][0]['values'][0]['value'][0]['value']\n \n return current_water_level", "def get_water_level(df):\n\n water,lat = [],[]\n #gets just ocean photons\n df = df.loc[df.Conf_ocean == 4]\n if len(df) == 0:\n return None\n #getting photons +- 2 of the median height of photons\n df = df.loc[(df.Height > df.Height.median() - 2) & (df.Height < df.Height.median() + 2)]\n\n #creating a df with just the latitude and height\n sea_level = pd.DataFrame([df.Height,df.Latitude]).T.dropna()\n sea_level.columns = ['water','latitude']\n\n #getting photons +- 1.25 of the median height of photons\n sea_level = sea_level.loc[(sea_level.water > sea_level.water.median() -1.25) & (sea_level.water < sea_level.water.median() +1.25)]\n\n #fitting linear line to remaining points\n z = np.polyfit(sea_level.latitude, sea_level.water,1)\n f = np.poly1d(z)\n\n #getting points with <2m abs error\n sea_level['abs_diff'] = np.abs(sea_level.water - f(sea_level.latitude))\n sea_level = sea_level.loc[sea_level.abs_diff < 2]\n #fitting a parabolic function to the remaining points\n z2 = np.polyfit(sea_level.latitude, sea_level.water,2)\n f2 = np.poly1d(z2)\n\n return f2", "def get_geologic_level(self, point: Point) -> int:\n if point == self.target:\n return 0\n if point.y == 0:\n return point.x * 16807\n if point.x == 0:\n return point.y * 48271\n return self.get_erosion_level(to_above(point)) * self.get_erosion_level(to_left(point))", "def power_level(x, y, serial):\n rack_id = x + 10\n return (rack_id * y + serial) * rack_id // 100 % 10 - 5", "def relative_water_level(self):\n\n if not self.typical_range_consistent() or self.latest_level is None or isinstance(self.latest_level,list):\n return None\n else:\n return (self.latest_level - self.typical_range[0])/(self.typical_range[1]-self.typical_range[0])", "def getHeight(self, x, z):\n return self.get_ground_level(x, z)", "def compute_level(self):\n if self.y > 0.5: # use properties to guarantee first values populated\n self.levels.append(1)\n self._x *= 2\n self._y = (self._y - 0.5) * 2\n elif self.y < 0.5 - self.x:\n self.levels.append(2)\n self._x *= 2\n self._y *= 2\n elif self.x >= 0.5:\n self.levels.append(3)\n self._x = (self._x - 0.5) * 2\n self._y *= 2\n else:\n # And this is an inverse triangle\n self.levels.append(0)\n # self._x = 2*x + 2*y - 1\n self._x = 1 - self._x * 2\n self._y = 1 - self._y * 2", "def level(hexagon): \r\n lev = 0\r\n for coord in hexagon:\r\n if abs(coord) > lev:\r\n lev = abs(coord)\r\n return lev", "def supply_region(self, x, y):\n if x>350 and x<450 and y>0 and y<100:\n return 1\n elif x>350 and x<450 and y>400 and y<500:\n return 2\n return 0", "def getBlockSkyLight(self, x: int, y: int, z: int) -> int:\n\t\treturn self.getSubChunk(y >> 4).getBlockSkyLight(x, y & 0x0f, z)", "def 
isWater(x, y):\n if interface.pix[x, y][:3] == (0,0,255):\n return True\n return False", "def calc_level(self):\n if self.xp < 3:\n xp_potential = 1\n if self.xp >= 3 and self.xp < 6:\n xp_potential = 2\n if self.xp >= 6 and self.xp < 12:\n xp_potential = 3\n if self.xp >= 12 and self.xp < 24:\n xp_potential = 4\n if self.xp >= 24 and self.xp < 48:\n xp_potential = 5\n if self.xp >= 48 and self.xp < 72:\n xp_potential = 6\n if self.xp >= 72 and self.xp < 96:\n xp_potential = 7\n if self.xp >= 96 and self.xp < 130:\n xp_potential = 8\n if self.xp >= 130 and self.xp < 170:\n xp_potential = 9\n if self.xp >= 170:\n xp_potential = 10\n if self.dominion < 2:\n dom_potential = 1\n if self.dominion >= 2 and self.dominion < 4:\n dom_potential = 2\n if self.dominion >= 4 and self.dominion < 10:\n dom_potential = 3\n if self.dominion >= 10 and self.dominion < 22:\n dom_potential = 4\n if self.dominion >= 22 and self.dominion < 38:\n dom_potential = 5\n if self.dominion >= 38 and self.dominion < 57:\n dom_potential = 6\n if self.dominion >= 57 and self.dominion < 76:\n dom_potential = 7\n if self.dominion >= 76 and self.dominion < 95:\n dom_potential = 8\n if self.dominion >= 95 and self.dominion < 124:\n dom_potential = 9\n if self.dominion >= 124:\n dom_potential = 10\n self.level = min(xp_potential, dom_potential)", "def groundwater_level_offset(self) -> float | None:\n return depth_to_offset(\n self.groundwater_level, offset=self.delivered_vertical_position_offset\n )", "def elevation_level():\n return F.udf(lambda x: str(int(x/1000)*1000))", "def elevation(self):\n return self.altitude - self.heightAboveGround" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to check if a number is very small.
def isSmall(number): return abs(number) < epsilon
[ "def is_small(a:int, b:int) -> bool:\n return a <= b", "def is_int(num):\n\n return abs(num - round(num)) < 1e-09", "def checkImgSize(img_size, number=4):\n if all([x >= 2**number for x in img_size]):\n return number\n else:\n return int(np.min(np.floor(np.log2(img_size))))", "def has_small_digits(n,maxdigit):\n digits = [int(num) for num in str(n)]\n return all([num <= maxdigit for num in digits])", "def verySmall(a, delta=1e-30):\n # used when we really want the values to be 0, but this\n # can't be guaranteed due to floating errors.\n return np.average(a*a) <= delta", "def check_file_size(num):\n if num <=104857600:\n return False\n else:\n return True", "def largest_scaled_float_not_above(val, scale):\n assert val >= 0\n assert scale >= 0\n float_val = float(val) / 10**scale\n if float_val * 10**scale > val:\n # Take the float just below... it *should* satisfy\n float_val = np.nextafter(float_val, 0.0)\n if float_val * 10**scale > val:\n float_val = np.nextafter(float_val, 0.0)\n assert float_val * 10**scale <= val\n return float_val", "def isSmallInt(op):\n return op == opcode.OP_0 or (op >= opcode.OP_1 and op <= opcode.OP_16)", "def validate_low_integer(number):\n if number < 2:\n raise MaxLimitTooLowError()", "def is_scientific(number):\n if convert_to_scientific_notation(float(number)) == number:\n return True\n return False", "def is_deficient_number(x):\n return sum(proper_divisors(x)) < x", "def largest_square_under(number):\n return floor(sqrt(number))", "def should_be_small(pos_tag):\n\n return is_noun(pos_tag)", "def check_is_less_than(number1, number2):\n if number1<number2:\n return True\n else:\n return False\n pass", "def isLowPrime(num):\n lowPrimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, \n 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, \n 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, \n 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,317, 331, 337, 347, 349, \n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, \n 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, \n 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, \n 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, \n 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, \n 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]\n for divisor in lowPrimes:\n if num % divisor == 0 and divisor**2 <= num:\n return False\n else: return True", "def is_valid_significant_digits(\n value: Decimal,\n max_significant_digits: int\n) -> bool:\n return round(value, max_significant_digits) == value", "def is_number(n):\n try:\n float(n)\n return True\n except ValueError:\n return False", "def _is_pos_int(number: int) -> bool:\n return type(number) == int and number >= 0", "def isPerfectSqare(number: int):\n\n return intSqrt(number) ** 2 == number" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the crossProduct of the vectors p2 p1 and p3 p1.
def crossProduct(p1, p2, p3): return ( -(p1[1]*p2[0]) + p1[0]*p2[1] + p1[1]*p3[0] - p2[1]*p3[0] - p1[0]*p3[1] + p2[0]*p3[1] )
[ "def cross_product(p0,p1,p2):\n\treturn (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))", "def crossProduct(self, *args):\n return _almathswig.Position3D_crossProduct(self, *args)", "def cross(p1, p2):\n return p1[0]*p2[1] - p1[1]*p2[0]", "def cross_product(v1, v2):\r\n x3 = v1[1] * v2[2] - v2[1] * v1[2]\r\n y3 = -(v1[0] * v2[2] - v2[0] * v1[2])\r\n z3 = v1[0] * v2[1] - v2[0] * v1[1]\r\n return [x3, y3, z3]", "def crossProduct(self, p1, p2):\n return (p1.x * p2.y - p1.y * p2.x)", "def cross_pts_triangle(p1, p2, p3):\n return (p1[:, 0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[:, 1] - p3[1])", "def cross_product(v1, v2):\r\n\r\n return v1.x * v2.y - v2.x * v1.y", "def plane_3p(p1, p2, p3):\n v1 = p3 - p1\n v2 = p2 - p1\n cp = np.cross(v1, v2)\n a, b, c = cp\n d = - np.dot(cp, p3)\n return np.array([a, b, c, d])", "def tripleProd (self,vec0,vec1):\n return self.dotProd ( vec0.crossProd ( vec1 ) )", "def crossProduct( set1, set2):\n set1 = asarray( set1, _aformat(set1))\n set1 = reshape( set1, (-1, 3))\n set2 = asarray( set2, _aformat(set2))\n set2 = reshape( set2, (-1, 3))\n return cross( set1, set2 )", "def cross(self, v):\n\n if len(self) > 3 | len(v) > 3:\n raise Exception(\"only 3-vectors have defined cross product\")\n\n a = self\n b = v\n\n cz = a[0]*b[1] - a[1]*b[0]\n cy = a[2]*b[0] - a[0]*b[2]\n cx = a[1]*b[2] - a[2]*b[1]\n\n return Vector([cx, cy, cz])", "def cross(vec1, vec2):\n a1, a2, a3 = vec1[0], vec1[1], vec1[2]\n b1, b2, b3 = vec2[0], vec2[1], vec2[2]\n return np.array([a2 * b3 - a3 * b2, a3 * b1 - a1 * b3, a1 * b2 - a2 * b1], dtype=nb.float32)", "def crossProduct(self, *args):\n return _almathswig.Position2D_crossProduct(self, *args)", "def cross_product(vec_a: Vector, vec_b: Vector) -> Vector:\n a_x, a_y, a_z = vec_a.data\n b_x, b_y, b_z = vec_b.data\n\n return Vector((\n (a_y * b_z) - (a_z * b_y),\n (a_z * b_x) - (a_x * b_z),\n (a_x * b_y) - (a_y * b_x),\n ))", "def _compute_cross_product(self, vector1, vector2):\n # compute cross product: x1y2 - x2y1\n cross_product = vector1[1] * vector2[0] - vector1[0] * vector2[1]\n\n return cross_product", "def cross_product(u, v):\n ux, uy, uz = u\n vx, vy, vz = v\n return (uy * vz - uz * vy, uz * vx - ux * vz, ux * vy - uy * vx)", "def cross(vec1, vec2):\n return ((vec1[1]*vec2[2]-vec1[2]*vec2[1]),(vec1[0]*vec2[2]-vec1[2]*vec2[0]),\n (vec1[0]*vec2[1]-vec1[1]*vec2[0]))", "def cross_multiply(x):\n return (x[0][0] * x[1][1]) - (x[0][1] * x[1][0])", "def cross_product(vector_a, vector_b):\n return vector_a[0] * vector_b[1] - vector_a[1] * vector_b[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update employment history for given profile id.
def update_work_history(work_history_list, profile_id): saved_work_history_ids = set() for work_history in work_history_list: work_history_id = work_history.get("id") work_history_instance = None if work_history_id: try: work_history_instance = Employment.objects.get( profile_id=profile_id, id=work_history_id ) except Employment.DoesNotExist: raise ValidationError("Work history {} does not exist".format(work_history_id)) work_history_serializer = EmploymentSerializer(instance=work_history_instance, data=work_history) work_history_serializer.is_valid(raise_exception=True) work_history_serializer.save(profile_id=profile_id) saved_work_history_ids.add(work_history_serializer.instance.id) Employment.objects.filter(profile_id=profile_id).exclude(id__in=saved_work_history_ids).delete()
[ "def update(self,employment=None,access=None):\n\n if employment != None:\n self.employment.value = employment\n if access != None:\n self.access.value = access\n self.save.click()", "def update_profits(self, next_profit):\n self.profit = next_profit\n self.profit_history.append(next_profit)", "def test_employment_update(self):\n program_enrollment = ProgramEnrollmentFactory.create()\n assert es.search()['total'] == 1\n employment = EmploymentFactory.create(profile=program_enrollment.user.profile)\n employment.city = 'city'\n employment.save()\n assert_search(es.search(), [program_enrollment])", "def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)", "def api_profiles_update(profile_id):\n if request.method == 'PUT':\n existing_profile = profiles.get_profile(profile_id)\n # Update existing profile with new profile object.\n if not existing_profile:\n # Profile does not exist\n return abort(404)\n\n profile = request.get_json()\n name = profile.get('name', existing_profile['name'])\n ami = profile.get('ami', existing_profile['ami'])\n userdata = profile.get('userdata', existing_profile['userdata'])\n profiles.update_profile(profile_id, name, ami, userdata)\n return jsonify({'status': 'ok', 'id': profile_id})", "def profile_update():\n db = flask.current_app.container.get('db')\n gh_api = flask.current_app.container.get(\n 'gh_api', token=flask.session['github_token']\n )\n\n user_data = gh_api.get('/user').data\n gh_user = flask_login.current_user.github_user\n gh_user.update_from_dict(user_data)\n db.session.commit()\n return flask.redirect(flask.url_for('manage.dashboard', tab='profile'))", "def UpdateProfile(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def updateFilmPrizeHistory(self, filmID, prizeHistory=None):\n if self.role != 'admin':\n raise RuntimeError('only administrators can modify film data')\n\n try:\n self._update(FILM_TABLE.table, (self.__columns[-2], ),\n (prizeHistory, ), self.__idCondition, (filmID, ))\n except Exception:\n print('failed to update film prize history')\n raise", "def profile_id(self, profile_id):\n\n self._profile_id = profile_id", "def update_followers(request, profile_id):\n user = request.user\n profile = User.objects.get(id=profile_id)\n\n if profile in user.following.all():\n user.following.remove(profile.id)\n user.save()\n else:\n user.following.add(profile.id)\n user.save()\n\n return HttpResponseRedirect(reverse(\"profile\", kwargs={\n \"username\": profile.username\n }))", "def update_experience(uid, rid, increment):\n errmsg = []\n\n experience = Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).first()\n if not experience:\n errmsg.append(\"Experience entry does not exist for the given user ID and restaurant ID.\")\n elif increment < 0:\n errmsg.append(\"Experience cannot be incremented by a negative number.\")\n\n if not errmsg:\n old_level = convert_experience_to_level(experience.experience)\n milestone = get_milestone(uid, rid)\n Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).update(dict(experience=experience.experience + increment))\n db.session.commit()\n if milestone:\n new_level = convert_experience_to_level(experience.experience)\n if old_level < new_level and new_level == int(milestone[\"level\"]):\n update_points(uid, rid, milestone[\"reward\"])\n return None\n\n return errmsg", "def profileid(self, profileid):\n self._profileid = 
profileid", "def test_profile_update(self):\n program_enrollment = ProgramEnrollmentFactory.create()\n assert es.search()['total'] == 1\n profile = program_enrollment.user.profile\n profile.first_name = 'updated'\n profile.save()\n assert_search(es.search(), [program_enrollment])", "def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def update_air_profile(id):\n air = Air.query.get_or_404(id)\n if request.method == 'POST':\n air.name = request.form['name']\n air.min_temp = request.form['min_temp']\n air.max_temp = request.form['max_temp']\n air.min_humid = request.form['min_humid']\n air.max_humid = request.form['max_humid']\n\n try:\n db.session.commit()\n return redirect('/air')\n except:\n return 'There was an issue updating the air profile'\n else:\n return render_template('air/update_air.html', air=air)", "def update(self,\n ipfix_l2_profile_id,\n i_pfix_l2_profile,\n ):\n return self._invoke('update',\n {\n 'ipfix_l2_profile_id': ipfix_l2_profile_id,\n 'i_pfix_l2_profile': i_pfix_l2_profile,\n })", "def put(self, id):\n\n adm = Administration()\n learningprofilegroup = LearningProfileGroup.from_dict(api.payload)\n print('main aufruf')\n\n if learningprofilegroup is not None:\n \"\"\"Hierdurch wird die id des zu überschreibenden (vgl. Update) Person-Objekts gesetzt.\"\"\"\n\n learningprofilegroup.set_id(id)\n adm.save_learningprofile_group(learningprofilegroup)\n return '', 200\n\n else:\n return '', 500", "def _update_state(self, job_id):\n self.logger.info(\"updating 'timestamp' in profile state\")\n # get current state ...\n with open(self.state_file, \"r\") as json_current:\n state = json.load(json_current)\n json_current.close()\n # ... and write new timestamp\n with open(self.state_file, \"w\") as json_new:\n state[\"timestamp\"] = job_id\n json.dump(state, json_new, indent=4)\n json_new.close()", "def do_user_baseline_update():\n targetUsers = User.query.filter_by(id=request.form['id']).all()\n if not any(targetUsers):\n return user_list(\"Unknown user.\")\n\n targetUser = targetUsers[0]\n\n targetUser.baseline = request.form['baseline']\n\n db.session.commit()\n return Response(render_template('employee/user/list.html',\n users=targetUsers,\n message=f\"Updated baseline for {targetUser.name}\"),\n mimetype='text/html')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate through fields in serializer and set all to required except ignore_fields
def set_fields_to_required(serializer, ignore_fields=None): if ignore_fields is None: ignore_fields = [] for field in serializer.fields.values(): if field.field_name not in ignore_fields: field.required = True field.allow_null = False field.allow_blank = False
[ "def filter_required_fields(self, document):\n document.json = {\n field: value for field, value in document.json.items() if field in required_fields\n }\n return document", "def _required_fields(self, *fields):\n missing_fields = []\n for field in fields:\n if field not in self.raw_element:\n missing_fields.append(field)\n\n if missing_fields:\n raise SpecError(\n \"Missing required fields: {}\".format(\", \".join(missing_fields)), path=self.path, element=self\n )", "def get_required_fields(self) -> Iterable[fields.Field]:\n for model_field in self.get_fields():\n if model_field.required:\n yield model_field", "def test_create_risk_type_fields_is_required(self):\n copied_data = copy.deepcopy(data)\n copied_data.pop('risk_fields')\n serializer = RiskTypeSerializer(data=copied_data)\n self.assertEqual(serializer.is_valid(), False)", "def null_required_fields(self):\n self._null_required_fields = {key for key in self.required_keys if self.get(key) is None}\n return self._null_required_fields", "def test_extra_field_kwargs_required(self):\n class TestSerializer(serializers.ModelSerializer):\n class Meta:\n model = RegularFieldsModel\n fields = ('auto_field', 'char_field')\n extra_kwargs = {'auto_field': {'required': False, 'read_only': False}}\n\n expected = dedent(\"\"\"\n TestSerializer():\n auto_field = IntegerField(read_only=False, required=False)\n char_field = CharField(max_length=100)\n \"\"\")\n self.assertEqual(repr(TestSerializer()), expected)", "def required_fields(*fields):\n\n def wrapper(fn):\n @wraps(fn)\n def decorated_view(*args, **kwargs):\n errors = {}\n for field in fields:\n if field not in request.json:\n if field not in errors:\n errors[field] = []\n errors[field].append(field + ' is required')\n\n if len(errors) > 0:\n raise FieldError(error=errors, description=\"Missing or invalid information\")\n\n return fn(*args, **kwargs)\n return decorated_view\n return wrapper", "def enforce_required_fields(self, attrs):\n if self.instance is not None:\n return\n # missing_items = {\n # field_name: self.missing_message\n # for field_name in self.fields\n # if field_name not in attrs\n # }\n # if missing_items:\n # raise ValidationError(missing_items, code='required')", "def forbid_properties(schema: Dict[str, Any], forbidden: List[str]) -> None:\n not_schema = schema.setdefault(\"not\", {})\n already_forbidden = not_schema.setdefault(\"required\", [])\n already_forbidden.extend(forbidden)\n not_schema[\"required\"] = list(set(chain(already_forbidden, forbidden)))", "def test_required_fields(self):\n collection = Collection.objects.create(\n name=\"Test collection\", contributor=self.contributor\n )\n self.assertCountEqual(collection.annotation_fields.all(), [])\n self.annotation_field1.required = True\n self.annotation_field1.save()\n collection = Collection.objects.create(\n name=\"Test collection\", contributor=self.contributor\n )\n self.assertCountEqual(\n collection.annotation_fields.all(), [self.annotation_field1]\n )", "def test_schema_field_not_required(self):\n fields = self.backend.get_schema_fields(self.view)\n fields = [f.required for f in fields]\n for field in fields:\n self.assertFalse(field)", "def test_create_risk_type_fields_field_name_is_required(self):\n copied_data = copy.deepcopy(data)\n copied_data['risk_fields'][0].pop('field_name')\n serializer = RiskTypeSerializer(data=copied_data)\n self.assertEqual(serializer.is_valid(), False)", "def check_required_fields(self, ignore_fields: List[str] = list()) -> None:\n self.check_url_filetoupload()\n 
self._check_required_fields(\"resource\", ignore_fields)", "def setRequiredValues(self, instance):\n for key in instance.__slots__:\n if key in instance.requiredFields:\n value = self.getTypicalValue(type(instance), key)\n setattr(instance, key, value)", "def required_fields(model, values):\n if values:\n for k in list(values):\n if k not in model.__table__.columns.keys():\n values.pop(k)\n return values", "def filter_required_fields(self, data):\r\n filtered_data = {name: value for req in self.required\r\n for name, value in data._asdict().items()\r\n if req == name}\r\n return filtered_data", "def validate_fields(class_name: str, required_fields: Iterable[str], dic: Mapping[str, Any]) -> None:\n missing_fields = []\n for f in required_fields:\n if f not in dic:\n missing_fields.append(f)\n\n if len(missing_fields) > 0:\n raise DeserializationError(class_name, \"missing values for fields \" + \", \".join(missing_fields))", "def test_init_sets_required_all_fields_to_false(self, super_init_mock):\n for kwargs in ({}, {'require_all_fields': True}):\n with self.subTest(kwargs=kwargs):\n PartialDateFormField(**kwargs)\n super_init_mock.assert_called()\n _args, kwargs = super_init_mock.call_args\n self.assertFalse(kwargs['require_all_fields'])\n super_init_mock.reset_mock()", "def get_empty_required_fields(self):\n empty_fields = self.get_empty_fields()\n return [f for f in empty_fields if f in self.REQUIRED_FIELDS]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an instance for a given Jenkins URL. The returned instance is usually an instance of a PlatformJenkins subclass (this allows switching to a different Jenkins API).
def get_jenkins(cls, url, template_dir=None): return PlatformJenkinsJavaCLI(template_dir, url)
[ "def get_object(session, path_or_url):\n parsed = urlparse.urlparse(path_or_url)\n url = urlparse.urljoin(session.jenkins_url, '%s/api/json' % parsed.path)\n resp = session.get(url)\n resp.raise_for_status()\n return resp.json()", "def get_from_url(url, base_path, api_version, credentials):\n # pylint: disable=unused-variable\n (scheme, netloc, path, params, query, fragment) = urlparse(url)\n\n return APIManager(\n scheme=scheme,\n region=None,\n netlocation=netloc,\n base_path=base_path,\n api_version=api_version,\n credentials=credentials)", "def find_jenkins(self):\n\n ec2 = boto3.resource(\n \"ec2\", region_name=\"eu-west-1\", endpoint_url=\"http://localhost:5000\"\n )\n\n return ec2.instances.filter(\n Filters=[{\"Name\": \"tag:Project\", \"Values\": [\"Jenkins\"]}]\n )", "def connect_to_jenkins(self):\n\n url = self.cli.jenkins_host\n remote = self.cli.run_remote\n pysid = self.cli.pysid\n self.cli.LOG.debug('Connecting to jenkins @ {0} remote={1}'.format(url,\n remote))\n\n if pysid:\n self.cli.LOG.info(\"Using pysid for jenkins cookie: %s\" % pysid)\n self.cookie = {'pysid': pysid}\n elif remote:\n self.cli.LOG.info(\"Fetching cookies for %s\" % url)\n self.cookie = pycookiecheat.chrome_cookies(url)\n try:\n try:\n requester = Requester(baseurl=url, cookies=self.cookie,\n ssl_verify=self.cli.verify,\n netloc=self.netloc)\n except TypeError:\n requester = Requester(baseurl=url, ssl_verify=self.cli.verify)\n self.jenkins_api = JenkinsAPI(baseurl=url, requester=requester)\n except JenkinsAPIException:\n self.cli.LOG.exception('Failed to connect to Jenkins')", "def create_addon_instance(url):\n parsed_url = urlparse(url)\n \n # Search for class that can handle URL and create new instance of it\n match = next((s for s in addon_sites if parsed_url.netloc in s.HandleURLs()), None)\n new_type = type(match)\n return new_type(url, parsed_url, WOW_PATH, WOW_VERSION)", "def get_robot_instance():\n return BuiltIn()", "def _get_jenkins_url(self):\n return 'http://127.0.0.1:8080'", "def from_url(self, url: str, params: Optional[Dict[str, Any]] = None) -> Any:\n result = self.perform_api_call(self.REST_READ, url, params=params)\n return self.get_resource_object(result)", "def getjiraobject():\n\n # type: () -> JIRA\n\n options = {\n 'server': jira_url}\n jira = JIRA(options, basic_auth=(jira_username,jira_password))\n return jira", "def _get_instance(cls, type):\n try:\n return cls.__components[type]\n except KeyError as e:\n raise NotImplementedError('Component type \"%s\" is not implemented' % type)", "async def from_url(cls) -> \"AocPrivateLeaderboard\":\n api_json = await cls.json_from_url()\n return cls.from_json(api_json)", "def instance(self):\n return plugins.get(self.plugin.slug)", "def get_galaxy_instance(url=None, api_key=None):\n if not (url and api_key):\n tl = load_input_file()\n url = tl['galaxy_instance']\n api_key = tl['api_key']\n return GalaxyInstance(url, api_key)", "def get_instance(cls, project, parameters):\n\n\t\treturn Servers()", "def get_jenkins_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_jenkins\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n 
header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['jenkins_auth']\n\n return self.api_client.call_api('/api/json', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Hudson',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def get_by_url(self, url, repository=None):\n if not repository:\n from .models import Repository\n repository = Repository.objects.get_by_url(url)\n if not repository:\n return None\n number = self.get_number_from_url(url)\n return self.get_by_repository_and_number(repository, number)", "def model_get_from_url(self, url):\n s = j.data.schema.get_from_url_latest(url=url)\n return self.model_get_from_schema(s)", "def getinstance() :\n\t\treturn Jikji.instance", "def get_instance(cls, project, parameters):\n\n\t\tparameters = project.process_node_parameters(\n\t\t\tparameters,\n\t\t\t[\"id\"],\n\t\t\t{\"root\": None},\n\t\t\t{\"id\": \"variable_name\", \"root\": \"string\"}\n\t\t\t)\n\n\t\treturn LocalServer(parameters[\"id\"], parameters[\"root\"])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if a given view exists.
def view_exists(self, view): with open("/dev/null", "w") as devnull: call = subprocess.Popen(self.cli + [PlatformJenkinsJavaCLI.GET_VIEW, view], stdout=devnull, stderr=devnull) call.wait() return call.returncode == 0
[ "def has_view(view_type, meta_type):", "def viewExists(self, objType, viewname):\n sql = \"select distinct schemaname, tablename from pg_tables where schemaname = '%s' and tablename = '%s'\"%(objType, viewname)\n result = self.__executeQuery(sql)\n if result:\n return True\n else:\n return False", "def viewObjectExists(self, objType, viewName, guid, version):\n if not version:\n sql = \"select * from only %s.%s where guid='%s'\"%(objType, viewName, guid)\n else:\n sql =\"select * from %s.%s where guid='%s' and version='%s'\"%(objType, viewName, guid, version)\n\n result = self.__executeQuery(sql)\n if result:\n return result[0] > 0\n return False", "def has_debug_view(name=None):\r\n for view in sublime.active_window().views():\r\n if is_debug_view(view):\r\n if name is not None:\r\n if view.name() == name:\r\n return True\r\n else:\r\n return True\r\n return False", "def is_view_active(self, key, raise_if_not_found=False):\n views = self._cache_customize_show_views()\n view = key in views and views[key]\n if view is None and raise_if_not_found:\n raise ValueError('No view of type customize_show found for key %s' % key)\n return view", "def has_view_permission(self, request, obj=None):\n return True\n opts = self.opts\n codename = get_permission_codename('view', opts)\n return any([\n request.user.has_perm(\"%s.%s\" % (opts.app_label, codename)),\n request.user.has_perm(\"%s.%s\" % (opts.app_label, codename), obj)])", "def is_view(self):\n return self._base is not None", "def active(request, view_prefix):\n try:\n resolved = resolve(request.path)\n except Exception: # pragma: no cover\n return False\n if not (resolved.view_name + u':').startswith(view_prefix + u':'):\n return False\n return True", "def is_view(self):\r\n return self._is_view", "def get_is_view_of_type(view, typ):\n return not not view.settings().get(\"git_savvy.{}_view\".format(typ))", "def active(request, view_prefixes):\n try:\n resolved = resolve(request.path)\n except Exception: # pragma: no cover\n return False\n for view_prefix in view_prefixes.split(u','):\n if (resolved.view_name + u':').startswith(view_prefix + u':'):\n return True\n return False", "def HasPerspective(self, name):\n return name in self._viewset", "def HasViewpointId(cls, viewpoint_lock_id):\r\n lock_tracker = ViewpointLockTracker._GetInstance()\r\n return viewpoint_lock_id in lock_tracker.viewpoint_lock_ids", "def _check_view_names_exist():\n for view_name in settings.REVIEWER_CAN_ACCESS_VIEW_NAMES:\n try:\n reverse(view_name)\n except NoReverseMatch as e:\n assert 'with no arguments not found' in str(e), f'view_name: \"{view_name}\" needs to exist'", "def check_unique_name(context, request, name):\n if name in context:\n return False\n if get_view(context, request, view_name = name) is None:\n return True\n return False", "def is_viewed(self):\n return self.has_label(VIEWED_LABEL)", "def exists(self, url):\n return (self.base_path / url).exists()", "def pv_exists(pv):\n return True if g.get_pv(pv) is not None else False", "def verify_view_access(view):\n for access in current_user.role.access:\n if access.views.name == view:\n return\n\n abort(403)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a View, defined by XML in view_xml_filename. If the view already exists, it will be updated using the provided definition.
def set_view(self, view, view_xml_filename): if self.view_exists(view): command = PlatformJenkinsJavaCLI.UPDATE_VIEW else: command = PlatformJenkinsJavaCLI.CREATE_VIEW with open(view_xml_filename) as view_xml_file: view_xml = view_xml_file.read() call = subprocess.Popen(self.cli + [command, view], stdin=subprocess.PIPE) call.communicate(view_xml) call.wait()
[ "def _view(template, filename):\n\n if \".\" in template:\n template = os.path.join(*(template.split(\".\")))\n\n view = os.path.join(current.request.folder, \"modules\", \"templates\",\n template, \"views\", filename)\n\n try:\n # Pass view as file not str to work in compiled mode\n current.response.view = open(view, \"rb\")\n except IOError:\n msg = \"Unable to open Custom View: %s\" % view\n current.log.error(\"%s (%s)\" % (msg, sys.exc_info()[1]))\n raise HTTP(404, msg)", "def create_and_add_view(self, view_name):\n assert isinstance(view_name, str)\n fn = self.view_creator.get(view_name, None)\n if fn is None:\n return\n # Create the view with the view creation function.\n view = fn()\n if view is None: # pragma: no cover\n logger.warning(\"Could not create view %s.\", view_name)\n return\n # Attach the view to the GUI if it has an attach(gui) method,\n # otherwise add the view.\n if hasattr(view, 'attach'):\n view.attach(self)\n else:\n self.add_view(view)\n return view", "def create_view(name, fields=''):\n if '/' in name:\n blueprint_name, model_name = name.split('/')\n output_file = 'blueprints/%s/views.py' % blueprint_name\n else:\n model_name = name\n output_file = 'views.py'\n file_exists = os.path.exists(output_file)\n form_data = []\n for f in fields.split():\n form_data.append('form.%s.data' % f.split(':')[0])\n views = create_view.views_scaffold % dict(name=model_name.lower(),\n model_name=model_name.capitalize(),\n form_data=', '.join(form_data))\n with open(output_file, 'a') as out_file:\n if not file_exists:\n views = '''%(imports)s\\n%(rest)s''' % dict(imports=create_view.imports,\n rest=views)\n out_file.write(views)\n create_templates(name, fields)", "def setUpView(self, filename):\n # Open the view.\n file_path = path.join(path.dirname(__file__), filename)\n self.view = sublime.active_window().open_file(file_path)\n\n # Ensure it's loaded.\n while self.view.is_loading():\n time.sleep(0.1)", "def create(self,\n view,\n ):\n return self._invoke('create',\n {\n 'view': view,\n })", "def create_view(self, view_name='', description='', fields=None, order=None, filters=''):\n res, _ = self.clients.resource_registry.find_resources(name=view_name, id_only=True)\n if len(res) > 0:\n raise BadRequest('The view resource with name: %s, already exists.' 
% view_name)\n\n #======================\n # Arg Validations\n #======================\n validate_is_instance(fields,list, 'Specified fields must be a list.')\n validate_true(len(fields)>0, 'Specfied fields must be a list.')\n if order is not None:\n validate_is_instance(order,list, 'Specified order must be a list of fields')\n for field in order:\n if not field in fields:\n raise BadRequest('The specified ordering field was not part of the search fields.')\n\n fields = set(fields) # Convert fields to a set for aggregation across the catalogs\n #======================================================================================================\n # Priorty Queue Index Matching\n #======================================================================================================\n\n pq = [] # Priority queue for matching\n catalog_id = None\n catalogs, _ = self.clients.resource_registry.find_resources(restype=RT.Catalog, id_only=False)\n for catalog in catalogs:\n if set(catalog.catalog_fields).issubset(fields):\n index_num = len(self.clients.catalog_management.list_indexes(catalog._id))\n heapq.heappush(pq, (index_num,catalog))\n if pq:\n weight, catalog = heapq.heappop(pq)\n if weight < self.heuristic_cutoff:\n catalog_id = catalog._id\n\n \n if catalog_id is None:\n catalog_id = self.clients.catalog_management.create_catalog('%s_catalog'% view_name, keywords=list(fields))\n\n view_res = View(name=view_name, description=description)\n view_res.order = order\n view_res.filters = filters\n view_id, _ = self.clients.resource_registry.create(view_res)\n self.clients.resource_registry.create_association(subject=view_id, predicate=PRED.hasCatalog,object=catalog_id)\n return view_id", "def create_view(view_dir, repository_dir):\n if os.path.exists(view_dir):\n exit(\"Directory '%s' already exists.\" % view_dir)\n if not repository_exists(repository_dir):\n exit(\"Repository '%s' does not exist\" % repository_dir)\n os.makedirs(view_dir)\n with open(os.path.join(view_dir, 'repository.txt'), 'w') as fh:\n fh.write(\"%s\\n\" % repository_dir)\n read_only(os.path.join(view_dir, 'repository.txt'))\n view = View(view_dir)\n os.makedirs(view.index_dir)", "def newEditorView(self, fn, caller, filetype=\"\", indexes=None):\n editor, assembly = self.cloneEditor(caller, filetype, fn)\n \n self._addView(assembly, fn, caller.getNoName(), indexes=indexes)\n self._modificationStatusChanged(editor.isModified(), editor)\n self._checkActions(editor)\n \n return editor", "def saveView( self, viewName, viewDescription ):\n if 'stacked' not in viewDescription :\n viewDescription[ 'stacked' ] = False\n if 'label' not in viewDescription:\n viewDescription[ 'label' ] = \"\"\n if 'variable' in viewDescription:\n for varField in viewDescription[ 'variable' ]:\n if varField in viewDescription[ 'definition' ]:\n del( viewDescription[ 'definition' ][ varField ] )\n else:\n viewDescription[ 'variable' ] = []\n acCatalog = self.__createCatalog()\n return acCatalog.registerView( viewName, DEncode.encode( viewDescription ), viewDescription[ 'variable' ] )", "def create_view(self, repo, view, sql):\n return self.user_con.create_view(\n repo=repo, view=view, sql=sql)", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def create_view(\n request_id: str,\n context_id: str,\n command_id: str,\n table_name: str,\n columns: List[str],\n filters: dict,\n) -> str:\n view_name = create_table_name(\n TableType.VIEW,\n node_config.identifier,\n 
context_id,\n command_id,\n )\n return views.create_view(\n view_name=view_name,\n table_name=table_name,\n columns=columns,\n filters=filters,\n minimum_row_count=MINIMUM_ROW_COUNT,\n ).json()", "def add_view(self, view):\n params = {\"view_id\": view.id, \"view_name\": view.name}\n change = SKETCH_CHANGE(\"STORY_ADD\", \"view\", params)\n self._analyzer.updates.append(change)", "def get_view_definition(self, view):\n inspector = inspect(self.engine)\n return inspector.get_view_definition(view, schema=self.schema)", "def view(view_name, db=None, empty_ok=True, **kwargs):\n db_name = DEFAULT_DATABASE if db is None else db\n db = dbs[db_name]\n name = '%s%s/%s' % (db_name, DESIGN_DOC_SUFFIX, view_name)\n if empty_ok:\n return _view_empty_ok(db, name, kwargs)\n return db.view(name, **kwargs)", "def view(self, view_id):\r\n return resources.View(self, view_id)", "def loadfile(file):\n with open(file, 'r') as f:\n viewdata = f.readlines()\n return buildview(\"\".join(viewdata))", "def create_view_from_corpus(corpus_dir, view_dir, repository_dir):\n if not os.path.exists(corpus_dir):\n exit(\"WARNING: corpus directory does not exist\")\n create_view(view_dir, repository_dir)\n view = View(view_dir)\n view.add_corpus_data(corpus_dir)\n view.create_index_from_corpus_data()", "def createDesignDoc(self, design='myview', language='javascript'):\n view = Document('_design/%s' % design)\n view['language'] = language\n view['views'] = {}\n return view" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the given job exists.
def job_exists(self, job): with open(os.devnull, 'w') as devnull: result = subprocess.call(self.cli + [PlatformJenkinsJavaCLI.GET_JOB, job.name], stdout=devnull) return result == 0
[ "def job_exists(self, job_id):\n\n return True if self.get_status(job_id) else False", "def job_exists(self, job_name):\n return os.path.exists(self.jobs_path + '/' + job_name)", "def has_job(self, job_name) -> bool:\n self.log.info(\"Checking if job already exists: %s\", job_name)\n\n try:\n self.conn.get_job(JobName=job_name)\n return True\n except self.conn.exceptions.EntityNotFoundException:\n return False", "def exists(cls, job_id: str, connection: Optional['Redis'] = None) -> bool:\n if not connection:\n connection = resolve_connection()\n job_key = cls.key_for(job_id)\n job_exists = connection.exists(job_key)\n return bool(job_exists)", "def job_exists(self, prov):\n with self.lock:\n self.cur.execute('select * from \"jobs\" where \"prov\" = ?;', (prov,))\n rec = self.cur.fetchone()\n return rec is not None", "def test_job_exists():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.add_job(STATUS_DIR, 'generation', 'test1',\n job_attrs={'job_status': 'submitted'})\n exists = Status.job_exists(STATUS_DIR, 'test1')\n assert exists", "def exists(self):\n if self[\"id\"] is not None:\n action = self.daofactory(classname=\"Jobs.ExistsByID\")\n result = action.execute(id=self[\"id\"], conn=self.getDBConn(),\n transaction=self.existingTransaction())\n else:\n action = self.daofactory(classname=\"Jobs.Exists\")\n result = action.execute(name=self[\"name\"], conn=self.getDBConn(),\n transaction=self.existingTransaction())\n\n if result:\n self[\"id\"] = result\n\n return result", "def __contains__(self, item: Union[str, 'Job']) -> bool:\n job_id = item\n if isinstance(item, self.job_class):\n job_id = item.id\n return self.connection.zscore(self.key, job_id) is not None", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def _check_queryinfo_existence(self, hostname: str, job: str) -> bool:\n with self.lock:\n hosts = self.host_query_info.all()\n for host in hosts:\n if host['hostname'] == hostname and host['job'] == job:\n return True\n return False", "def batch_job_running(self, name):\n if name not in self.batch_jobs:\n raise ValueError(\"job {} doesn't exists\".format(name))\n return name in self.jobs", "def pid_exists(self, job_id):\n ## class QueuePage()\n pid = mysql.job_get_pid(job_id)\n if pid == None:\n ## job PID somehow did not get stored in the database, so return\n ## False => state='syserror'; job may still be running!\n return False\n else:\n pid = int(pid)\n try:\n #os.kill(pid, 0) ## This does not work, 2009-05-27\n ## NOTE: Three possible results:\n ## (1): os.kill(pid, 0) -> None: process exists, and you are process\n ## owner or root\n ## (2): os.kill(pid, 0) -> OSError, Operation not permitted:\n ## process exists, you are not owner or root\n ## (3): os.kill(pid, 0) -> OSError, No such process:\n ## process does not exist\n if os.path.exists(\"/proc/%s\" % pid):\n return True ## process is still 
running\n return False\n except:\n return False", "def job_is_ok(self, job: Job) -> Tuple[bool, List[str]]:\n return self._evaluate(\"OK\", self._ok_criteria, job)", "def isThisJobFinished(self, identifier):\n identifier = identifier.strip()\n with self.__queueLock:\n # Look through the finished jobs and attempt to find a matching\n # identifier. If the job exists here, it is finished\n for run in self.__finished:\n if run.identifier == identifier:\n return True\n\n # Look through the pending jobs and attempt to find a matching identifier\n # If the job exists here, it is not finished\n for queue in [self.__queue, self.__clientQueue]:\n for run in queue:\n if run.identifier == identifier:\n return False\n\n # Look through the running jobs and attempt to find a matching identifier\n # If the job exists here, it is not finished\n for run in self.__running+self.__clientRunning:\n if run is not None and run.identifier == identifier:\n return False\n\n # If you made it here and we still have not found anything, we have got\n # problems.\n self.raiseAnError(RuntimeError,\"Job \"+identifier+\" is unknown!\")", "def remove_job_if_exists(name: str, context: CallbackContext) -> bool:\n current_jobs = context.job_queue.get_jobs_by_name(name)\n if not current_jobs:\n return False\n for job in current_jobs:\n job.schedule_removal()\n return True", "def exists(self):\n return os.path.exists(self.filename)", "def verify_job(cls, auth_key, job_id):\n key = ObjectId(job_id)\n user_id = ObjectId(auth_key)\n db = cls.mongo_cli.get_database(collection=Job.collection_name)\n if db.count({\"_id\": key, \"user_id\": user_id}) > 0:\n return True\n return False", "def remove_job_if_exists(self, name: str, context: CallbackContext) -> bool:\n current_jobs = context.job_queue.get_jobs_by_name(name)\n if not current_jobs:\n return False\n for job in current_jobs:\n job.schedule_removal()\n return True", "def queue_exists(name: str) -> bool:\n try:\n batch = aws.client_with_default_region(\"batch\")\n\n return bool(\n batch.describe_job_queues(jobQueues = [name]) \\\n .get(\"jobQueues\"))\n except:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes a given job from Jenkins.
def delete_job(self, job): subprocess.call(self.cli + [PlatformJenkinsJavaCLI.DELETE_JOB, job.name])
[ "def delete_job(session, name):\n url = urlparse.urljoin(session.jenkins_url, '/job/%s/doDelete' % name)\n resp = session.post(url)\n resp.raise_for_status()", "def _delete_job(self, job):\n if self.jenkins.job_exists(job):\n self.jenkins.delete_job(job)", "def delete(job_id):\n\t_jobs.delete(jobs.get_or_404(job_id))\n\treturn None, 204", "def delete_job(jobId=None, force=None):\n pass", "def delete_job(context, job_id=None):\n endpoint = job_endpoint(context, job_id)\n context.response = requests.delete(endpoint)", "def delete_job(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.delete_job',\n [job], self._service_ver, context)", "def delete_job(self, job):\n self.cron.remove(job)\n self.write_changes_to_cron()", "def delete(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error':'Job Not Found'}, 404)\n\n JobModel.query.filter(JobModel.job_id == job_id).delete()\n\n return custom_response({'Message': 'Deleted'}, 204)", "def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)", "def delete_job(self,jobid=None,squash=None):\n\n self.check_all_jobs()\n\n if jobid is None:\n if hasattr(self,'current_job'):\n jobid = self.current_job\n\n if jobid:\n if hasattr(self,'current_job'):\n if jobid == self.current_job:\n del self.current_job\n\n if self.job_dict[jobid] in ['COMPLETED','ERROR','ABORTED','PENDING']:\n result = self.session.delete(CosmoSim.QUERY_URL+\"/{}\".format(jobid),\n auth=(self.username, self.password),\n data={'follow':''})\n else:\n warnings.warn(\"Can only delete a job with phase: 'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.\")\n return\n\n if not result.ok:\n result.raise_for_status()\n if squash is None:\n warnings.warn('Deleted job: {}'.format(jobid))\n\n return result", "def delete_job(self, jobid=None, squash=None):\n\n self.check_all_jobs()\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n\n if jobid:\n if hasattr(self, 'current_job'):\n if jobid == self.current_job:\n del self.current_job\n\n if self.job_dict[jobid] in ['COMPLETED', 'ERROR',\n 'ABORTED', 'PENDING']:\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), data={'follow': ''})\n\n else:\n warnings.warn(\"Can only delete a job with phase: \"\n \"'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.\")\n return\n\n if not result.ok:\n result.raise_for_status()\n if squash is None:\n warnings.warn('Deleted job: {}'.format(jobid))\n\n return result", "def cmd_DeleteJob(self):\n\n self._create_contextual_parser(\"DeleteJob\", \"Delete Jenkins Jobs\")\n self._add_common_arg_parsers()\n\n self.parser.add_argument(\n metavar=\"<Task Name>\", help=\"Task to Delete\", action=\"store\", dest=\"task_name\"\n )\n\n args = self.parser.parse_args()\n\n self._validate_server_url(args)\n\n return_data = self._handle_authentication(args)\n\n return return_data", "def delete_job_by_id(self, job_id: str) -> None:\n self.log.info(\"Deleting Job with ID '%s'\", job_id)\n endpoint = ModelManagerPaths.format_job_endpoint_by_id(job_id)\n self.session.delete_from_endpoint(endpoint)", "def _delete_job(self, job):", "def delete(self):\n self._server.delete_job(self.name)", "def deleteJob(self, jobId):\n params = {'id': jobId}\n try:\n return self.gc.delete(JobUtils.JOB_ID_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. 
invalid job id:', jobId)\n return {}\n raise", "def delete(session, jobid):\n #\n url = '/'.join([session.base_url(), 'grid/job', urlencode(jobid)])\n r = requests.delete(url)\n return fulfill202(session, r)", "def delete(self, job_id):\n # Only admin can delete any job\n if not current_user.is_admin():\n return get_message_json('删除任务需要管理员权限'), HTTPStatus.FORBIDDEN\n\n try:\n result = jobs.delete_job_by_id(job_id)\n if result == 1:\n return get_message_json('已删除该任务'), HTTPStatus.OK\n else:\n if jobs.find_job_by_id(job_id) is None:\n return get_message_json('任务不存在'), HTTPStatus.NOT_FOUND\n return get_message_json('未知的任务删除失败'), HTTPStatus.BAD_REQUEST\n except Exception as err:\n return handle_internal_error(str(err))", "def delete_job(self, filename):\n job = Jobs.get(Jobs.filename == filename)\n job.delete_instance()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Triggers the given job, providing a set of parameters to it.
def trigger_job(self, job, parameters=None): parameters = parameters or {} parameter_list = [] for key in parameters: parameter_list.append("-p") parameter_list.append("%s=%s" % (key, parameters[key])) if subprocess.call(self.cli + [PlatformJenkinsJavaCLI.BUILD_JOB, job.name] + parameter_list) != 0: raise PlatformJenkinsException("Triggering job failed: " + job.name)
[ "def trigger_job(self, job_id, auth_token=RUNDECK_AUTH_TOKEN, argString=None):\n self.headers['X-Rundeck-Auth-Token'] = auth_token\n if argString:\n payload = {\"argString\": argString}\n self.job_response = requests.post(json=payload,\n url= \"{}/job/{}/run\".format(self.api,job_id),\n headers = self.headers)\n\n else:\n self.job_response = requests.post(url=\"{}/job/{}/run\".format(self.api, job_id),\n headers=self.headers)", "def modify_job(job, parameter):\n raise NotImplementedError(\"Implement in derived class\")", "def execute(self, job):\n raise NotImplementedError", "def trigger_arbitrary_job(repo_name, builder, revision, files=[], dry_run=False,\n extra_properites=None):\n url = _builders_api_url(repo_name, builder, revision)\n payload = _payload(repo_name, revision, files, extra_properites)\n\n if dry_run:\n LOG.info(\"Dry-run: We were going to request a job for '%s'\" % builder)\n LOG.info(\" with this payload: %s\" % str(payload))\n return None\n\n # NOTE: A good response returns json with request_id as one of the keys\n req = requests.post(\n url,\n headers={'Accept': 'application/json'},\n data=payload,\n auth=get_credentials()\n )\n assert req.status_code != 401, req.reason\n content = req.json()\n LOG.debug(\"Status of the request: %s\" %\n _jobs_api_url(content[\"request_id\"]))\n\n return req", "def trigger(self, state, updated_vars):\n raise NotImplementedError()", "def task(self, job: Job):\n self.connection.send(job)", "def modify_job(self, job, parameter):\n job.set_encut(parameter[0])\n job.set_kpoints(parameter[1])\n return job", "def trigger(self, trigger):\n\n self._trigger = trigger", "def _call_trigger(self, transition, **kwargs):\n trigger_name = transition.trigger.__name__\n handler_trigger = getattr(self, '_' + trigger_name)\n handler_trigger(**kwargs)", "def triggerHook(self, *args, **kwargs):\n\n return self._makeApiCall(self.funcinfo[\"triggerHook\"], *args, **kwargs)", "def build_trigger(ctx, build_type_id, branch, comment, parameter, agent_id,\n open_build_log, wait_for_run):\n parameters = dict([p.split('=', 1) for p in parameter])\n data = ctx.obj.trigger_build(\n build_type_id=build_type_id,\n branch=branch,\n comment=comment,\n parameters=parameters,\n agent_id=agent_id)\n build_id = data['id']\n ctx.invoke(build_queue_show, args=[build_id])\n if open_build_log:\n url = data['webUrl'] + '&tab=buildLog'\n webbrowser.open(url)\n if not wait_for_run:\n return\n while data['state'] == 'queued':\n data = ctx.obj.get_queued_build_by_build_id(build_id)\n click.echo('state: %s' % data['state'])\n time.sleep(1)\n ctx.invoke(build_queue_show, args=[build_id])", "def fix_trigger(self, kwargs):\n trigger_kwargs = self.get_trigger_kwargs(**kwargs)\n if kwargs[\"trigger\"] == \"interval\":\n kwargs[\"trigger\"] = apscheduler.triggers.interval.IntervalTrigger(**trigger_kwargs)\n elif kwargs[\"trigger\"] == \"date\":\n kwargs[\"trigger\"] = apscheduler.triggers.date.DateTrigger(**trigger_kwargs)\n elif kwargs[\"trigger\"] == \"cron\":\n kwargs[\"trigger\"] = apscheduler.triggers.cron.CronTrigger(**trigger_kwargs)\n return kwargs", "def task_trigger(self, args):\n h, tmp = tempfile.mkstemp(\n dir=self._tmpdir, prefix='trigger_raw', suffix='.json')\n os.close(h)\n cmd = [\n '-user',\n 'joe@localhost',\n '-d',\n 'pool=default',\n '-dump-json',\n tmp,\n ]\n cmd.extend(args)\n assert not self._run_swarming('trigger',\n cmd), 'Failed to trigger a task. 
cmd=%s' % cmd\n with open(tmp, 'rb') as f:\n data = json.load(f)\n task_id = data['tasks'][0]['task_id']\n logging.debug('task_id = %s', task_id)\n return task_id", "def submitJob(self, job):\n raise NotImplementedError()", "def _trigger_tryjobs(changelist, jobs, options, patchset):\n print('Scheduling jobs on:')\n for project, bucket, builder in jobs:\n print(' %s/%s: %s' % (project, bucket, builder))\n print('To see results here, run: git cl try-results')\n print('To see results in browser, run: git cl web')\n\n requests = _make_tryjob_schedule_requests(changelist, jobs, options, patchset)\n if not requests:\n return\n\n http = auth.Authenticator().authorize(httplib2.Http())\n http.force_exception_to_status_code = True\n\n batch_request = {'requests': requests}\n batch_response = _call_buildbucket(http, DEFAULT_BUILDBUCKET_HOST, 'Batch',\n batch_request)\n\n errors = [\n ' ' + response['error']['message']\n for response in batch_response.get('responses', [])\n if 'error' in response\n ]\n if errors:\n raise BuildbucketResponseException(\n 'Failed to schedule builds for some bots:\\n%s' % '\\n'.join(errors))", "def do_build(self):\n # In our business scenarios, job is always built with parameters.\n # According to jenkins API, to build with parameters, the second\n # parameter of build_job() is needed, or error will occur.\n with self.get_handler() as handler:\n handler.build_job(self.name,\n parameters={'delay': '0sec'})", "def fire_trigger(self, trigger):\n if not self.exists():\n return\n if trigger in self.events:\n for action in self.events[trigger]:\n action(requestor=self)", "def trigger(self, event):\n self._ok = event._ok\n self._value = event._value\n self.env.schedule(self, agent=self.agent)", "def cancel_job_execution(jobId=None, thingName=None, force=None, expectedVersion=None, statusDetails=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables a given job on Jenkins.
def enable_job(self, job): if subprocess.call(self.cli + [PlatformJenkinsJavaCLI.ENABLE_JOB, job.name]) != 0: raise PlatformJenkinsException("Enabling job failed: " + job.name)
[ "def _enable_job(self, job):\n if self.jenkins.job_exists(job):\n self.jenkins.update_job(job)\n self.jenkins.enable_job(job)\n else:\n self.jenkins.create_job(job)", "def toggle_job(session, job_name, enable):\n url = urlparse.urljoin(session.jenkins_url,\n '/job/%s/%s' %\n (job_name, 'enable' if enable else 'disable'))\n resp = session.post(url)\n resp.raise_for_status()", "def EnableJob(self, job_urn, token=None):\n cron_job = aff4.FACTORY.Open(job_urn, mode=\"rw\", aff4_type=\"CronJob\",\n token=token)\n cron_job.Set(cron_job.Schema.DISABLED(0))\n cron_job.Close()", "def disable_job(self, job):\n if subprocess.call(self.cli + [PlatformJenkinsJavaCLI.DISABLE_JOB, job.name]) != 0:\n raise PlatformJenkinsException(\"Disabling job failed: \" + job.name)", "def _disable_job(self, job):\n if self.jenkins.job_exists(job):\n self.jenkins.disable_job(job)", "def post_job_enable_with_http_info(self, name, **kwargs):\n\n all_params = ['name', 'jenkins_crumb']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method post_job_enable\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `post_job_enable`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = []\n\n header_params = {}\n if 'jenkins_crumb' in params:\n header_params['Jenkins-Crumb'] = params['jenkins_crumb']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # Authentication setting\n auth_settings = ['jenkins_auth']\n\n return self.api_client.call_api('/job/{name}/enable', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def enableJobs(self):\n\t\tfor item in self.ui.jobs_listWidget.selectedItems():\n\t\t\titem.setCheckState(QtCore.Qt.Checked)\n\t\t\t# self.j.enableJob(item.text(), True) # Already called via signals/slots", "def enable(self, subsystem=False):\n self.__dict__[\"enabled\"] = True\n\n if subsystem:\n self.subsystem.enable()", "def cli_enable_plugin(self, args) -> str:\n plugin_name = args.plugin_name\n if plugin_name not in self.name_to_plugin_class:\n return error(\"Plugin {} DNE\".format(plugin_name))\n\n self.name_to_enabled[plugin_name] = True\n return ok(\"Plugin {} enabled\".format(plugin_name))", "def enable(self, **kw):\n kw_copy = deepcopy(kw)\n pool_spec = kw_copy.pop(\"pool-spec\", \"\")\n mode = kw_copy.pop(\"mode\", \"\")\n cmd = f\"{self.base_cmd} enable {pool_spec} {mode} \\\n {build_cmd_from_args(**kw_copy)}\"\n\n return self.execute_as_sudo(cmd=cmd)", "def _enable_plugin(env, module):\n if env.is_component_enabled(module) is None:\n env.enable_component(module)", "def enable_plugin(self, plugin_name: str):\n self._set_plugin_enabled(plugin_name, True)", "def plugin_enable(self, 
plugin_enable):\n self._plugin_enable = plugin_enable", "def post_job_enable(self, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.post_job_enable_with_http_info(name, **kwargs)\n else:\n (data) = self.post_job_enable_with_http_info(name, **kwargs)\n return data", "def enable_on_stage(self):\n self.stage_sigs[\"enable\"] = 1", "def agent_enable(self, agent_enable):\n self._agent_enable = agent_enable", "def enable(self):\n self.post(\"enable\")\n return self", "async def enable(self, ctx):\n self.bot.db.execute(\"UPDATE starboards SET enabled = 1 WHERE channel_id = ?\", (ctx.channel.id,))\n await ctx.say(\"star.enabled\")", "def mark(self, job, status='succeeded'):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disables a given job on Jenkins.
def disable_job(self, job): if subprocess.call(self.cli + [PlatformJenkinsJavaCLI.DISABLE_JOB, job.name]) != 0: raise PlatformJenkinsException("Disabling job failed: " + job.name)
[ "def _disable_job(self, job):\n if self.jenkins.job_exists(job):\n self.jenkins.disable_job(job)", "def DisableJob(self, job_urn, token=None):\n cron_job = aff4.FACTORY.Open(job_urn, mode=\"rw\", aff4_type=\"CronJob\",\n token=token)\n cron_job.Set(cron_job.Schema.DISABLED(1))\n cron_job.Close()", "def disable_job_and_wait(self, jobName):\n self.disable_job(jobName)\n return self.wait_until_jobs_finished(jobName)", "def deactivate_job(job_name):\n job = Job.from_name(job_name)\n job.set_active(False)\n return redirect(url_for('active_jobs_for_client', ClientID=job.ClientID))", "def cancel_simulation_job(job=None):\n pass", "def toggle_job(session, job_name, enable):\n url = urlparse.urljoin(session.jenkins_url,\n '/job/%s/%s' %\n (job_name, 'enable' if enable else 'disable'))\n resp = session.post(url)\n resp.raise_for_status()", "def abandon(self, job, who):", "def cancel_deployment_job(job=None):\n pass", "def disableJobs(self):\n\t\tfor item in self.ui.jobs_listWidget.selectedItems():\n\t\t\titem.setCheckState(QtCore.Qt.Unchecked)\n\t\t\t# self.j.enableJob(item.text(), False) # Already called via signals/slots", "def cli(ctx, job_id):\n return ctx.gi.jobs.cancel_job(job_id)", "def kill_job(self , index):\n job = self.jobs.__getitem__( index )\n if job:\n job.kill()", "def _enable_job(self, job):\n if self.jenkins.job_exists(job):\n self.jenkins.update_job(job)\n self.jenkins.enable_job(job)\n else:\n self.jenkins.create_job(job)", "def _delete_job(self, job):\n if self.jenkins.job_exists(job):\n self.jenkins.delete_job(job)", "def cancel_job(self, job_number):\n raise NotImplementedError", "def cancel_job(self):\n r = self.s.post(self.base_address + '/api/job', json={'command': 'cancel'})\n if r.status_code != 204:\n raise Exception(\"Error: {code} - {content}\".format(code=r.status_code, content=r.content.decode('utf-8')))", "def disable(self, **kw):\n kw_copy = deepcopy(kw)\n pool_spec = kw_copy.pop(\"pool-spec\", \"\")\n cmd = f\"{self.base_cmd} disable {pool_spec} {build_cmd_from_args(**kw_copy)}\"\n\n return self.execute_as_sudo(cmd=cmd)", "def stop_labeling_job(LabelingJobName=None):\n pass", "def on_disable(self) -> None:\n self._cancel_automation()", "def cancel(self):\n log.debug('({}) Cancel job'.format(self.name))\n os.system('condor_rm {}'.format(\" \".join(self._job_id.keys())))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a given job on Jenkins.
def create_job(self, job): call = subprocess.Popen(self.cli + [PlatformJenkinsJavaCLI.CREATE_JOB, job.name], stdin=subprocess.PIPE) out, err = call.communicate(input=platform_ci.jjb.get_job_as_xml(job, self.template_dir)) call.wait() if call.returncode != 0: logging.info(out) logging.error(err) raise PlatformJenkinsException("Creating job failed: " + job.name)
[ "def createJob(self, name):\n self._server.create_job(name, jenkins.EMPTY_CONFIG_XML)\n return self.job(name)", "def create(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.createJobs(server, jobs)", "def create_job():\n data = request.json\n job = {\n \"repository_url\": data.pop(\"repo_url\", None),\n \"commit_hash\": data.pop(\"commit_hash\", None),\n \"branch\": data.pop(\"branch\", None),\n \"keep_data\": data.pop(\"keep_data\", False),\n \"attributes\": data\n }\n workers.queue.put(job)\n return jsonify({\"status\": True})", "def create_job(session, name, conf):\n url = urlparse.urljoin(session.jenkins_url, '/createItem?name=%s' %\n urlparse.quote_plus(name))\n resp = _post_data(session, url, conf, 'application/xml')\n resp.raise_for_status()", "def create_job(jobtype, server):\n name = generate_job_name(jobtype)\n job = Job.objects.create(jobtype=jobtype, server=server, name=name)\n return job", "def create_job(self, name: str) -> Slurm:\n LOG.info(\"Create a slurm job with name %s\", name)\n job = Slurm(\n name,\n {\"account\": self.account, \"time\": self.time,},\n scripts_dir=str(self.scripts_dir),\n log_dir=str(self.log_dir),\n )\n return job", "def create_job(context):\n\n job_name = f'{time.time():.0f}'\n\n job_json = get_config_map()\n job_json[\"metadata\"][\"name\"] = job_name\n\n # Replace the Jinja2 templates.\n if context:\n job_json = json.loads(Template(json.dumps(job_json)).render(context))\n\n job = kubernetes.client.V1Job(\n api_version=job_json[\"apiVersion\"],\n kind=job_json[\"kind\"],\n metadata=job_json[\"metadata\"],\n spec=job_json[\"spec\"])\n\n api = kubernetes.client.BatchV1Api()\n\n try:\n res = api.create_namespaced_job(namespace, job)\n logger.info(res)\n return res\n except ApiException as e:\n logger.error(\"Exception when calling BatchV1Api->create_namespaced_job: %s\\n\" % e)", "def create_job(self, job_id=None,\n job_name=None,\n verbatim=None,\n timestamp=None,\n duration=None,\n link=None,\n description=None):\n\n created_job = Jobs(job_id=job_id,\n job_name=job_name,\n verbatim=verbatim,\n timestamp=timestamp,\n duration=duration,\n download_link=link,\n description=description)\n self.session.add(created_job)\n self.session.commit()\n\n return created_job.job_id", "def create_job(self, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_job',\n [], self._service_ver, context)", "def create_knitting_job():\n plugin_id = request.form['plugin_id']\n port_str = request.form['port']\n plugin_class = knitlib.machine_handler.get_machine_plugin_by_id(plugin_id)\n job = KnittingJob(plugin_class, port_str, callbacks_dict={\n \"blocking_user_action\": emit_blocking_message,\n \"progress\": emit_progress,\n \"message\": emit_nonblocking_message\n })\n job_string_id = str(job.id)\n job_dict[job_string_id] = job\n return jsonify({\"job_id\": job_string_id})", "def create_job(self, search_job: SearchJob = None, query_params: Dict[str, object] = None) -> SearchJob:\n if query_params is None:\n query_params = {}\n\n path_params = {\n }\n\n path = Template(\"/search/v3alpha1/jobs\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = search_job.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, SearchJob)", "def _create_job(self):\n LOGGER.debug(\"Creating Solve Engine job...\")\n pb_data = self.model.build_str_model().encode('ascii')\n\n pb_data = b64.b64encode(pb_data).decode('utf-8')\n\n dict_data = 
dict(problems=[dict(name=self.model.file_name, data=pb_data)])\n resp = self._send(\"post\", with_job_id=False, json=dict_data)\n\n solution = ObjResponse(resp, SERequests.CREATE_JOB)\n if solution.unusual_answer:\n raise ValueError(solution.build_err_msg)\n self._id = solution.job_id\n \n LOGGER.debug(\"Job created {}\".format(self._id))", "def test_create_job(job_payload):\n response = requests.post(MANAGER_URL + '/submissions', json=job_payload)\n response_payload = response.json()\n assert response.ok\n assert response_payload\n stop_job(manager_url=MANAGER_URL, job_id=response_payload.get('job_id'))", "def job_create(self, sender, name=None):\n self._require_running()\n name = name or self.DEFAULT_JOB_NAME\n job_id = uuid.uuid4().hex\n assert job_id not in self._jobs\n assert sender is not None\n assert sender.connection\n job = Job(\n job_id,\n name,\n self._session_root.joinpath(job_id),\n sender,\n self._loop\n )\n self._jobs[job_id] = job\n self._jobs_by_connection[sender.connection][job_id] = job\n self._log.debug('Created job %s', job)\n return job_id", "def create_job(project, description):\n randomnames = open(os.path.join(\"Anemone\", \"templates\", \"namegen.html\")).readlines()\n jobname = (\"Quick.\" +\n random.choice(randomnames)[:-1] + # for some reason choice gives extra space\n random.choice(randomnames)[:-1]) # for some reason choice gives extra space\n\n newjob = Job.create(project=project, name=jobname, description=description)\n newjob.name = newjob.name + \".{0:0=3d}\".format(newjob.id)\n newjob.save()\n return newjob", "def build_job(session, job_name, parameters=None):\n if parameters:\n url = urlparse.urljoin(session.jenkins_url,\n '/job/%s/buildWithParameters' % job_name)\n else:\n url = urlparse.urljoin(session.jenkins_url, '/job/%s/build' % job_name)\n resp = _post_data(session, url, parameters)\n if resp.status_code == 400:\n raise exceptions.MissingParametrizedBuildParameters(job_name)\n if resp.status_code == 500:\n raise exceptions.BuildIsNotParametrized(job_name)\n resp.raise_for_status()\n if resp.status_code != 201:\n raise exceptions.BuildNotQueued(job_name)\n return resp.headers['location']", "def test_job_create(client): # pylint: disable=unused-argument\n\n task_id = str(uuid4())\n\n task = Task.create_task(task_id)\n job = task.create_job()\n\n expect(job.created_at).not_to_be_null()\n expect(job.last_modified_at).not_to_be_null()\n expect(job.executions).to_be_empty()", "def send_job(self):\n graph = self.processgraphEdit.toPlainText()\n # info(self.iface, graph)\n response = self.connection.job_create(json.loads(graph))\n if response.status_code == 201:\n info(self.iface, \"Successfully created new job, Response: {}\".format(response.status_code))\n else:\n warning(self.iface, \"Not able to created new job, Response: {}\".format(str(response.json())))", "def create_new_project_for_job(\n solver: Solver, job: Job, inputs: JobInputs\n) -> ProjectCreateNew:\n project_id = job.id\n solver_id = get_node_id(project_id, solver.id)\n\n # map Job inputs with solveri nputs\n # TODO: ArgumentType -> InputTypes dispatcher and reversed\n solver_inputs: dict[InputID, InputTypes] = create_node_inputs_from_job_inputs(\n inputs\n )\n\n solver_service = Node(\n key=solver.id,\n version=solver.version,\n label=solver.title,\n inputs=solver_inputs,\n inputsUnits={},\n )\n\n # Ensembles project model so it can be used as input for create_project\n job_info = job.json(\n include={\"id\", \"name\", \"inputs_checksum\", \"created_at\"}, indent=2\n )\n\n return 
ProjectCreateNew(\n uuid=project_id,\n name=job.name, # NOTE: this IS an identifier as well. MUST NOT be changed in the case of project APIs!\n description=f\"Study associated to solver job:\\n{job_info}\",\n thumbnail=\"https://via.placeholder.com/170x120.png\",\n workbench={solver_id: solver_service},\n ui=StudyUI(\n workbench={\n f\"{solver_id}\": {\"position\": {\"x\": 633, \"y\": 229}},\n },\n slideshow={},\n currentNodeId=solver_id,\n annotations={},\n ),\n accessRights={},\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update a given job on Jenkins.
def update_job(self, job): call = subprocess.Popen(self.cli + [PlatformJenkinsJavaCLI.UPDATE_JOB, job.name], stdin=subprocess.PIPE) call.communicate(input=platform_ci.jjb.get_job_as_xml(job, self.template_dir)) call.wait() if call.returncode != 0: raise PlatformJenkinsException("Updating job failed: " + job.name)
[ "def update(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.updateJobs(server, jobs)", "def update(self, job_name, param_name, value, description=None):\n if job_name in self._jobs:\n getattr(self._jobs[job_name], param_name).update(value, description)\n else:\n self.log.error(\"Invalid job name: %s\", job_name)", "def update(self):\n self._log.debug(\"About to update job {0}\".format(self.id))\n resp = self._api.get_job(self.id)\n\n if resp.success:\n self.submission = self._format_submission(resp.result)\n return True\n\n else:\n raise resp.result", "def update_job(self, sid: str, update_job: UpdateJob = None, query_params: Dict[str, object] = None) -> SearchJob:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"sid\": sid,\n }\n\n path = Template(\"/search/v3alpha1/jobs/${sid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = update_job.to_dict()\n response = self.base_client.patch(url, json=data, params=query_params)\n return handle_response(response, SearchJob)", "def _update_job(self, job):\n self.queues[job['envo']].save(job)", "def on_job_update(_job):\n nonlocal job\n job = _job", "def put(self, job_id):\n form = request.get_json()\n try:\n the_job = jobs.find_job_by_id(job_id)\n if the_job is None:\n return get_message_json('任务不存在'), HTTPStatus.NOT_FOUND\n\n if the_job.account_id != current_user.account_id:\n return get_message_json('用户无法修改他人任务'), HTTPStatus.FORBIDDEN\n\n # The job state must be valid and can not go back\n form_job_state = form.get('job_state')\n if not(validate_job_state_code(form_job_state) and form_job_state >= the_job.job_state):\n return get_message_json('任务状态不合法'), HTTPStatus.BAD_REQUEST\n\n # Client can edit label id if and only if the job is 'unlabeled'\n form_label_id = form.get('label_id')\n if the_job.job_state == ConstantCodes.Unlabeled:\n if not form_label_id:\n return get_message_json('必须为该任务提供对应的标注'), HTTPStatus.BAD_REQUEST\n elif the_job.job_state == ConstantCodes.Labeling:\n # Can NOT change the label id\n if form_label_id is not None and form_label_id != the_job.label_id:\n return get_message_json('用户无法替换任务的标注'), HTTPStatus.FORBIDDEN\n elif the_job.job_state == ConstantCodes.Finished:\n return get_message_json('用户无法修改已完成的任务'), HTTPStatus.FORBIDDEN\n\n # Update finished date automatically when the job is updated to be finished\n finished_date = None\n if form_job_state == ConstantCodes.Finished:\n finished_date = datetime.date.today()\n\n if not form_label_id:\n form_label_id = the_job.label_id\n\n result = jobs.update_job_by_id(\n job_id,\n form_label_id,\n finished_date,\n form_job_state,\n the_job.image_id,\n the_job.account_id\n )\n if result == 1:\n json_res = form.copy()\n json_res['message'] = '成功编辑任务'\n\n return json_res, HTTPStatus.OK\n else:\n return get_message_json('未知的任务更新失败'), HTTPStatus.BAD_REQUEST\n\n except IntegrityError as err:\n if err.orig.args[0] == DBErrorCodes.FOREIGN_KEY_FAILURE:\n return get_message_json('指定的用户或标注不存在'), HTTPStatus.BAD_REQUEST\n else:\n return handle_internal_error(err.orig.args[1])\n except Exception as err:\n return handle_internal_error(str(err))", "def update_job_item(self, job_item_id, new_job):\n\n updated_job = None\n jobs = self.get_item_by_id(job_item_id)\n job = None\n if len(jobs) is not 1:\n return updated_job\n else:\n job = jobs[0]\n\n if job:\n job.date_completed = new_job[\"date_completed\"]\n job.competed = new_job[\"competed\"]\n job.duration = new_job[\"duration\"]\n job.description = new_job[\"description\"]\n 
job.user = new_job[\"user\"]\n job.paid = new_job[\"paid\"]\n self.session.add(job)\n self.session.commit()\n updated_job = self.get_item_by_id(job_item_id)[0]\n\n return updated_job.serialize()", "def update(self) -> None:\n self.previous_status = self.status\n\n jobs = self._client.describe_jobs(jobs = [ self.id ])[\"jobs\"]\n\n try:\n self.state = jobs[0]\n except IndexError:\n raise ValueError(\"Invalid or unknown job id %s\" % self.id) from None", "def rename_job(session, name, new_name):\n url = urlparse.urljoin(session.jenkins_url, '/job/%s/doRename?newName=%s' %\n (name, new_name))\n resp = session.post(url)\n resp.raise_for_status()", "def editJob(self):\n\t\titem = self.ui.jobs_listWidget.selectedItems()[0]\n\t\tjobName = item.text()\n\n\t\teditJobDialog = edit_job.dialog(parent=self)\n\t\tif editJobDialog.display(jobName, self.j.getPath(jobName), self.j.getVersion(jobName), self.j.getEnabled(jobName)):\n\t\t\tself.j.enableJob(jobName, editJobDialog.jobActive)\n\t\t\tself.j.setVersion(jobName, editJobDialog.jobVersion)\n\t\t\tself.j.setPath(jobName, editJobDialog.jobPath)\n\t\t\tif self.j.renameJob(jobName, editJobDialog.jobName): # Do this last as jobs are referenced by name\n\t\t\t\tself.reloadJobs(reloadDatabase=False, selectItem=editJobDialog.jobName)\n\t\t\telse:\n\t\t\t\terrorMsg = \"Could not rename job as a job with the name '%s' already exists.\" % editJobDialog.jobName\n\t\t\t\tdialogMsg = errorMsg + \"\\nWould you still like to edit the job '%s'?\" % jobName\n\t\t\t\tverbose.error(errorMsg)\n\n\t\t\t\t# Confirmation dialog\n\t\t\t\tdialogTitle = 'Job Not Created'\n\t\t\t\tdialog = prompt.dialog()\n\t\t\t\tif dialog.display(dialogMsg, dialogTitle):\n\t\t\t\t\tself.editJob()", "def run_job(playerID, rF2root, job, config):\n _j = Job(job, config)\n # read the file to be edited\n try:\n _j.read_json_file_to_be_edited()\n # do the edits\n try:\n _edit_count = _j.run_edits()\n if _edit_count:\n # if successful:\n # backup 'filepath'\n # save new contents to 'filepath\n _report = _j.backup_file()\n _j.write()\n else:\n _report = ''\n return _report\n except (KeyError, ValueError, EmptyJsonError) as e:\n raise JobFailedError\n except JsonContentError:\n raise FileNotFoundError", "def update_job_metrics(self, job_id:int)->None:\n with connection.cursor() as cursor:\n cursor.execute(f\"SELECT update_job_metrics({job_id})\")\n ##TODO: this should return something ", "def test_update_part_job():\n job = job_module.Job(\"24371655\", \"24371655\", None)\n job.update(\n {\n \"JobID\": \"24371655.batch\",\n \"State\": \"COMPLETED\",\n \"AllocCPUS\": \"1\",\n \"REQMEM\": \"1Gn\",\n \"TotalCPU\": \"00:09:00\",\n \"Elapsed\": \"00:10:00\",\n \"MaxRSS\": \"495644K\",\n \"NNodes\": \"1\",\n \"NTasks\": \"\",\n }\n )\n assert job.state is None\n assert job.time == \"---\"\n assert job.cpu == \"---\"\n assert job.totalmem is None\n assert job.stepmem == 495644", "def modify_job(job, parameter):\n raise NotImplementedError(\"Implement in derived class\")", "def update_job_status(job):\n if (job.status != 3 and job.status != 4):\n logger.info('Getting status from crawler-manager')\n jobs = CrawlRequest.objects.filter(user=job.user)\n last_job = True\n if (len(jobs) > 0):\n for j in jobs:\n if (j.id > job.id):\n logger.info('Marking it failed because it doesnt have finished status yet.')\n job.status = 4\n job.save()\n last_job = False\n if (last_job == True):\n try:\n if (job.crawler_manager_endpoint != \"\"):\n resp = requests.get(job.crawler_manager_endpoint+'/status')\n 
logger.info('Received status response')\n logger.info('response content:{}'.format(resp.text))\n payload = json.loads(resp.text)\n logger.info('job id: {}'.format(payload[\"job_id\"]))\n if (payload[\"job_id\"] == job.id):\n job.docs_collected = payload[\"processed_count\"]\n job.docs_uploaded = payload[\"uploaded_pages\"]\n job.save()\n except Exception as ex:\n logger.info('Exception in getting status')", "def update_job(request):\n req_data = request.POST.copy()\n job_id = req_data['job_id']\n db_result = job_detail.objects.filter(id=job_id)\n template_values = {'MEDIA_URL': media_url, 'db_result': db_result}\n return render_to_response('update_job_page.html', template_values)", "def update_job_status(jid, new_status):\n jid, status, start, end = rd.hmget(_generate_job_key(jid), 'id', 'status', 'start', 'end')\n job = _instantiate_job(jid, status, start, end)\n if job:\n \tjob['status'] = new_status\n _save_job(_generate_job_key(job['id']), job)\n else:\n raise Exception()", "def job(self, job: str):\n\n self._job = job" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates a job build description.
def set_build_description(self, job_name, build, description): try: subprocess.check_call(self.cli + [PlatformJenkinsJavaCLI.SET_DESCRIPTION, job_name, build, description]) except subprocess.CalledProcessError: message = "Setting build description failed (job={0}, build={1}, description='{2}')".format(job_name, build, description) raise PlatformJenkinsException(message)
[ "def set_current_build_description(self, description):\n job_name = os.environ.get(\"JOB_NAME\", None)\n build_id = os.environ.get(\"BUILD_NUMBER\", None)\n if job_name is not None and build_id is not None:\n self.set_build_description(job_name, build_id, description)", "def job_description(self, job_description):\n self._job_description = job_description", "def updateDescription(self, descr):\n self.description = descr", "def update(self, job_name, param_name, value, description=None):\n if job_name in self._jobs:\n getattr(self._jobs[job_name], param_name).update(value, description)\n else:\n self.log.error(\"Invalid job name: %s\", job_name)", "def update_info(self, *args, **kwargs):\n # Create the layout with the information\n self.info_widgets_list = [\n urwid.Text('ID: {}'.format(self.job.id)),\n urwid.Divider('='),\n urwid.Text('Command: {}'.format(self.job.command)),\n urwid.Text('Status: {}'.format(self.job.status))\n ]\n\n if self.job.status == JobStatus.FAILED: # If job has failed add error reason (if available)\n if 'Error reason' in self.job.metadata:\n self.info_widgets_list.append(urwid.Text('Possible error reason: {}'.format(self.job.metadata['Error reason'])))\n\n # Add button with the option available depending on the job status\n if self.job.status in [JobStatus.DONE, JobStatus.FAILED]:\n self.info_widgets_list.append(urwid.Padding(JobResubmitWidget(self.job, callback=self.resubmit), align='center', left=4, right=2))\n self.info_widgets_list.append(urwid.Divider('-'))\n elif self.job.status != JobStatus.UNSUBMITTED:\n self.info_widgets_list.append(create_button('Kill', self.terminate))\n self.info_widgets_list.append(urwid.Divider('-'))\n\n self.metadata_widgets_list = []\n self.metadata_widgets_list.append(urwid.Text('Retries: {}'.format(self.job.retries)))\n self.metadata_widgets_list.append(urwid.Divider())\n # Add resources requested by the job\n requested_resources = 'Specific requested resources:\\n'\n requested_resources += ' '+str(self.job.params).replace('\\n', '\\n ')\n self.metadata_widgets_list.append(urwid.Text(requested_resources))\n\n # If usage information is available, display it\n if 'usage' in self.job.metadata:\n self.metadata_widgets_list.append(urwid.Divider())\n used_resources = 'Used resources:\\n'\n used_resources += \"\\n\".join([\" {} = {}\".format(k, v) for k, v in self.job.metadata['usage'].items()])\n self.metadata_widgets_list.append(urwid.Text(used_resources))\n\n self.file_widgets_list = [] # Reset files widget\n # Create widget with the files if the job has failed\n if self.job.status == JobStatus.FAILED:\n # Generate wigets with stdout and stderr if available. 
Done here because Failed state is \"absolute\"=\n stdout_widget = self._load_file_as_widget(self.job.f_stdout, 'stdout')\n if stdout_widget is not None:\n self.file_widgets_list.append(stdout_widget)\n self.file_widgets_list.append(urwid.Divider('*'))\n stderr_widget = self._load_file_as_widget(self.job.f_stderr, 'stderr')\n if stderr_widget is not None:\n self.file_widgets_list.append(stderr_widget)\n self.file_widgets_list.append(urwid.Divider('*'))", "def request_description_update():\n global should_update_description\n should_update_description = True", "def update_job(self, job):\n call = subprocess.Popen(self.cli + [PlatformJenkinsJavaCLI.UPDATE_JOB, job.name], stdin=subprocess.PIPE)\n call.communicate(input=platform_ci.jjb.get_job_as_xml(job, self.template_dir))\n call.wait()\n if call.returncode != 0:\n raise PlatformJenkinsException(\"Updating job failed: \" + job.name)", "async def slashtag_edit_description(\n self, ctx: commands.Context, tag: GuildTagConverter, *, description: str\n ):\n await ctx.send(await tag.edit_description(description))", "def job_description(self):\n return self._job_description", "def updateJobData(self, jobName):\n self.jobRow.setText(jobName)\n self.updateSelectedLayer()", "def submission_update_description(request, submission_pk):\n try:\n submission = models.CompetitionSubmission.objects.get(pk=submission_pk)\n if submission.participant.user != request.user:\n raise Http404()\n submission.description = request.POST.get('updated_description')\n submission.save()\n return HttpResponse()\n except models.CompetitionSubmission.DoesNotExist:\n raise Http404()", "def _setDescription(self,newDescription):\n\t\tself._description = newDescription", "def description(self, description):\n self._label_data['description'] = description", "def update(self):\n self._log.debug(\"About to update job {0}\".format(self.id))\n resp = self._api.get_job(self.id)\n\n if resp.success:\n self.submission = self._format_submission(resp.result)\n return True\n\n else:\n raise resp.result", "def update_htmldesc(self):\n self.htmldesc = self.title = '(item)'", "def update_description(self):\n with self.supervisor:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(\"Thread blocking function called\")\n element = threads.blockingCallFromThread(reactor,\n self.control.query,\n SERVICE_FULL_DESCRIPTION,\n self.peerid)\n self.update(element)", "def set_longdescription(self, longdesc):\n self.longdescription(longdesc)", "def update(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.updateJobs(server, jobs)", "def update_description(self, option, desc):\n _, command = self.__options[option]\n self.__options[option] = (desc, command)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the job build description for the current build. This method is intended to be run in an environment where JOB_NAME and BUILD_NUMBER are set, such as from within the job build itself. If either environment variable is not set, setting the description is not attempted.
def set_current_build_description(self, description): job_name = os.environ.get("JOB_NAME", None) build_id = os.environ.get("BUILD_NUMBER", None) if job_name is not None and build_id is not None: self.set_build_description(job_name, build_id, description)
[ "def set_build_description(self, job_name, build, description):\n try:\n subprocess.check_call(self.cli + [PlatformJenkinsJavaCLI.SET_DESCRIPTION, job_name, build, description])\n except subprocess.CalledProcessError:\n message = \"Setting build description failed (job={0}, build={1}, description='{2}')\".format(job_name,\n build,\n description)\n raise PlatformJenkinsException(message)", "def job_description(self, job_description):\n self._job_description = job_description", "def request_description_update():\n global should_update_description\n should_update_description = True", "def updateDescription(self, descr):\n self.description = descr", "def set_repo_description(self):\n desc_path = os.path.join(self.git_path, 'description')\n if self.title and os.path.exists(self.git_path) and os.access(desc_path, os.W_OK):\n repo = dvcs.repository(self.path)\n repo.description = self.title", "def format_description(self, project_name, description):\n description = description if description else ''\n return \"%s %s\" % (project_name, '- ' + description)", "def set_description(desc):\n global last_description\n last_description = desc", "def update(self, job_name, param_name, value, description=None):\n if job_name in self._jobs:\n getattr(self._jobs[job_name], param_name).update(value, description)\n else:\n self.log.error(\"Invalid job name: %s\", job_name)", "def update_setup_description(self, setup_name: str, description: str) -> None:\n setup_now = self.get_current_setup()\n if setup_name != \".\" and setup_name != setup_now:\n path_to_the_setup = os.path.join(dp.DAF_CONFIGS, setup_name)\n dict_args = self.io.read(filepath=path_to_the_setup)\n dict_args[\"setup_desc\"] = description\n du.write(dict_args, filepath=path_to_the_setup)\n else:\n self.experiment_file_dict[\"setup_desc\"] = description\n self.write_flag = True", "def set_description(self, room_description):\n self.description = room_description", "def set_description(self, room_description):\r\n self.description = room_description", "def save_environment_description(session_id, environment, inner=True):\n unit = db_session.get_session()\n session = unit.query(models.Session).get(session_id)\n if inner:\n data = session.description.copy()\n data['Objects'] = environment\n session.description = data\n else:\n session.description = environment\n session.save(unit)", "def get_job_description(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_description',\n [job], self._service_ver, context)", "def _get_ci_job_display_name(self):\n return '{} build'.format(self.pipeline_name)", "async def set_description(self, payload):\n await self.message.edit(content=\"Send the message you want to add as a description\")\n msg = await self.wait_for_message()\n self.embed.description = msg.content", "def channels_set_description(self, room_id, description, **kwargs):\n return self.__call_api_post('channels.setDescription', roomId=room_id, description=description, kwargs=kwargs)", "def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc", "def update_model_description(\n self, content: Dict[str, Any], model_name: str, is_already_documented: bool = False\n ) -> Dict[str, Any]:\n message = f\"Do you want to write a description for {model_name}\"\n if is_already_documented:\n message = f\"Do you want to change the model description of {model_name}\"\n model_doc_payload: List[Mapping[str, Any]] = [\n {\n \"type\": \"confirm\",\n \"name\": \"wants_to_document_model\",\n \"message\": 
message,\n \"default\": True,\n },\n {\n \"type\": \"text\",\n \"name\": \"model_description\",\n \"message\": \"Please write down your description:\",\n },\n ]\n user_input = UserInputCollector(\"model\", model_doc_payload).collect()\n if user_input.get(\"model_description\", None):\n for model in content.get(\"models\", []):\n if model[\"name\"] == model_name:\n model[\"description\"] = user_input[\"model_description\"]\n return content", "def buildname(self, env_prop=None):\n if self._buildname is not None:\n return self._buildname\n try:\n platform, build = env_prop['chipName'], env_prop['switchppVersion']\n except (KeyError, TypeError):\n message = 'Cannot determine build name'\n self.class_logger.warning(message)\n self._buildname = self.UNDEFINED_BUILD\n else:\n self.platform = platform\n self.build = build\n name_iter = (MODULES[_var].ReportingServerConfig._get_build_name(self._opts) for _var in # pylint: disable=protected-access\n MODULES if 'reports_conf.' in _var)\n with suppress(StopIteration): # retain build name from env_prop\n build = next(name for name in name_iter if name is not None)\n self._buildname = '{0}-{1}'.format(build, platform)\n\n # WORKAROUND to add 'sanity' suffix to buildname\n if 'sanity' in self._opts.markexpr and self._buildname is not None:\n self._buildname += \"-sanity\"\n # WORKAROUND END\n return self._buildname" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loop through the exposure list and construct an observation table.
def _buildtable(self): tabrows = [] for i, (expid, exfiles) in enumerate(self._exposure_files.items()): specflux_b, specflux_r, specflux_z = [], [], [] tab = None if len(exfiles) == 0: continue print(expid) for exfile in exfiles: print(exfile) hdu = fits.open(exfile) # The following tables are present in the redux sframes and the # nightwatch qcframes. wave = hdu['WAVELENGTH'].data # However, in the nightwatch files the wavelength data are a # table of size nfiber x nwavelength. if self._filetype == 'nightwatch': if wave.ndim > 1: wave = wave[0] fluxhead = hdu['FLUX'].header fluxdata = hdu['FLUX'].data ivardata = hdu['IVAR'].data fibermap = hdu['FIBERMAP'].data exptime = fluxhead['EXPTIME'] if not np.all(self._unditherfa['FIBER'] == np.arange(len(self._unditherfa))): raise ValueError('weird fiberassign file format!') fibermap = self._unditherfa[fibermap['FIBER']] target_id = fibermap['TARGETID'] target_ra = fibermap['TARGET_RA'] target_dec = fibermap['TARGET_DEC'] fiber = fibermap['FIBER'] objtype = fibermap['OBJTYPE'] flux_g = fibermap['FLUX_G'] flux_r = fibermap['FLUX_R'] flux_z = fibermap['FLUX_Z'] x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')] camera = fluxhead['CAMERA'][0].upper() if getattr(self, '_deltara', None) is not None: dra = self._deltara[i]*np.ones(len(fiber)) ddec = self._deltadec[i]*np.ones(len(fiber)) elif self._dithertype == 'telescope': dithra = self._ditherfa['target_ra'] dithdec = self._ditherfa['target_dec'] udithra = self._unditherfa['target_ra'] udithdec = self._unditherfa['target_dec'] ontarget = ((self._ditherfa['targetid'] == self._unditherfa['targetid']) & (self._ditherfa['objtype'] == 'TGT')) dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60 dfiberdec = (dithdec-udithdec)*60*60 if not np.all(self._ditherfa['FIBER'] == np.arange(len(self._ditherfa))): raise ValueError('unexpected shape of dither file') dfiberra[~ontarget] = np.nan dfiberdec[~ontarget] = np.nan dfiberra = dfiberra[fiber] dfiberdec = dfiberdec[fiber] wcs = self.lookup_wcs(fluxhead['MJD-OBS']) centralwcs = self._central_wcs if (~np.isfinite(centralwcs['cenra'][1]) or ~np.isfinite(centralwcs['cendec'][1])): raise ValueError('central pointing ra/dec is NaN!') dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1]) dtelra *= np.cos(np.radians(centralwcs['cendec'][1])) dteldec = wcs['cendec'][1]-centralwcs['cendec'][1] dra = dfiberra + dtelra*60*60 ddec = dfiberdec + dteldec*60*60 if np.all(~np.isfinite(dra)): print('warning: no good telescope offset for %s' % exfile) else: raise ValueError('not implemented') for j, fiber_id in enumerate(fiber): flux = fluxdata[j] ivar = ivardata[j] if not np.any(ivar > 0): specflux = 0 specflux_ivar = 0 else: meanivar = np.mean(ivar[ivar > 0]) mask = ivar > meanivar / 100 specflux = np.trapz(flux*mask, wave) specflux_ivar = 1./np.sum(ivar[mask]**-1) # Schlegel: sum over correct wavelengths, all three # filters, plus 11 pixel median filter to reject # cosmics. # will require being better about reading in # the spectrographs together. tabrows.append((expid, exptime, target_id[j], target_ra[j], target_dec[j], fiber[j], objtype[j], flux_g[j], flux_r[j], flux_z[j], specflux, specflux_ivar, camera, dra[j], ddec[j], x[j], y[j])) tab = Table(rows=tabrows, names=('EXPID', 'EXPTIME', 'TARGETID', 'TARGET_RA', 'TARGET_DEC', 'FIBER', 'OBJTYPE', 'FLUX_G', 'FLUX_R', 'FLUX_Z', 'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA', 'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC', 'XFOCAL', 'YFOCAL'), meta={'EXTNAME' : 'DITHER', 'TILEID' : '{}'.format(self._tileid)}) return tab
[ "def buildExposureTable(exposures, fields, instruments):\n name = []\n ra = []\n dec= []\n field= []\n inst = []\n airmass = []\n mjd = []\n exptime = []\n epoch = []\n apcorr = []\n index = 0\n for k,e in exposures.items():\n name.append(e.name)\n ra.append(getDegree(e.coords.ra))\n dec.append(getDegree(e.coords.dec))\n field.append(fields[e.field].index)\n if e.instrument in specialInstruments:\n inst.append(specialInstruments[e.instrument])\n else:\n inst.append(instruments[e.instrument].index)\n e.index = index\n index += 1\n\n airmass.append(e.airmass)\n mjd.append(e.mjd)\n exptime.append(e.exptime)\n epoch.append(e.epoch)\n apcorr.append(e.apcorr)\n hdu = pf.BinTableHDU.from_columns(\\\n pf.ColDefs( [pf.Column(name='NAME',format=py_to_fits(name),array=name),\n pf.Column(name='RA',format=py_to_fits(ra),array=ra),\n pf.Column(name='DEC',format=py_to_fits(dec),array=dec),\n pf.Column(name='FIELDNUMBER',format=py_to_fits(field),array=field),\n pf.Column(name='INSTRUMENTNUMBER',format=py_to_fits(inst),\\\n array=inst),\n pf.Column(name=\"MJD\",format=py_to_fits(mjd),array=mjd),\n pf.Column(name=\"AIRMASS\",format=py_to_fits(airmass),array=airmass),\n pf.Column(name=\"EXPTIME\",format=py_to_fits(exptime),array=exptime),\n pf.Column(name=\"EPOCH\",format=py_to_fits(epoch),array=epoch),\n pf.Column(name=\"APCORR\",format=py_to_fits(apcorr),array=apcorr)] ),\n name = 'Exposures')\n # hdu.header['EXTNAME'] = 'Exposures'\n return hdu", "def make_test_observation_table(observatory_name='HESS', n_obs=10,\n az_range=Angle([0, 360], 'deg'),\n alt_range=Angle([45, 90], 'deg'),\n date_range=(Time('2010-01-01'),\n Time('2015-01-01')),\n use_abs_time=False,\n n_tels_range=(3, 4),\n random_state='random-seed'):\n from ..data import ObservationTable, observatory_locations\n random_state = get_random_state(random_state)\n\n n_obs_start = 1\n\n obs_table = ObservationTable()\n\n # build a time reference as the start of 2010\n dateref = Time('2010-01-01T00:00:00')\n dateref_mjd_fra, dateref_mjd_int = np.modf(dateref.mjd)\n\n # define table header\n obs_table.meta['OBSERVATORY_NAME'] = observatory_name\n obs_table.meta['MJDREFI'] = dateref_mjd_int\n obs_table.meta['MJDREFF'] = dateref_mjd_fra\n if use_abs_time:\n # show the observation times in UTC\n obs_table.meta['TIME_FORMAT'] = 'absolute'\n else:\n # show the observation times in seconds after the reference\n obs_table.meta['TIME_FORMAT'] = 'relative'\n header = obs_table.meta\n\n # obs id\n obs_id = np.arange(n_obs_start, n_obs_start + n_obs)\n obs_table['OBS_ID'] = obs_id\n\n # obs time: 30 min\n ontime = Quantity(30. * np.ones_like(obs_id), 'minute').to('second')\n obs_table['ONTIME'] = ontime\n\n # livetime: 25 min\n time_live = Quantity(25. 
* np.ones_like(obs_id), 'minute').to('second')\n obs_table['LIVETIME'] = time_live\n\n # start time\n # - random points between the start of 2010 and the end of 2014 (unless\n # otherwise specified)\n # - using the start of 2010 as a reference time for the header of the table\n # - observations restrict to night time (only if specified time interval is\n # more than 1 day)\n # - considering start of astronomical day at midday: implicit in setting\n # the start of the night, when generating random night hours\n datestart = date_range[0]\n dateend = date_range[1]\n time_start = random_state.uniform(datestart.mjd, dateend.mjd, len(obs_id))\n time_start = Time(time_start, format='mjd', scale='utc')\n\n # check if time interval selected is more than 1 day\n if (dateend - datestart).jd > 1.:\n # keep only the integer part (i.e. the day, not the fraction of the day)\n time_start_f, time_start_i = np.modf(time_start.mjd)\n time_start = Time(time_start_i, format='mjd', scale='utc')\n\n # random generation of night hours: 6 h (from 22 h to 4 h), leaving 1/2 h\n # time for the last run to finish\n night_start = Quantity(22., 'hour')\n night_duration = Quantity(5.5, 'hour')\n hour_start = random_state.uniform(night_start.value,\n night_start.value + night_duration.value,\n len(obs_id))\n hour_start = Quantity(hour_start, 'hour')\n\n # add night hour to integer part of MJD\n time_start += hour_start\n\n if use_abs_time:\n # show the observation times in UTC\n time_start = Time(time_start.isot)\n else:\n # show the observation times in seconds after the reference\n time_start = time_relative_to_ref(time_start, header)\n # converting to quantity (better treatment of units)\n time_start = Quantity(time_start.sec, 'second')\n\n obs_table['TSTART'] = time_start\n\n # stop time\n # calculated as TSTART + ONTIME\n if use_abs_time:\n time_stop = Time(obs_table['TSTART'])\n time_stop += TimeDelta(obs_table['ONTIME'])\n else:\n time_stop = TimeDelta(obs_table['TSTART'])\n time_stop += TimeDelta(obs_table['ONTIME'])\n # converting to quantity (better treatment of units)\n time_stop = Quantity(time_stop.sec, 'second')\n\n obs_table['TSTOP'] = time_stop\n\n # az, alt\n # random points in a portion of sphere; default: above 45 deg altitude\n az, alt = sample_sphere(size=len(obs_id),\n lon_range=az_range,\n lat_range=alt_range,\n random_state=random_state)\n az = Angle(az, 'deg')\n alt = Angle(alt, 'deg')\n obs_table['AZ'] = az\n obs_table['ALT'] = alt\n\n # RA, dec\n # derive from az, alt taking into account that alt, az represent the values\n # at the middle of the observation, i.e. 
at time_ref + (TIME_START + TIME_STOP)/2\n # (or better: time_ref + TIME_START + (TIME_OBSERVATION/2))\n # in use_abs_time mode, the time_ref should not be added, since it's already included\n # in TIME_START and TIME_STOP\n az = Angle(obs_table['AZ'])\n alt = Angle(obs_table['ALT'])\n if use_abs_time:\n obstime = Time(obs_table['TSTART'])\n obstime += TimeDelta(obs_table['ONTIME']) / 2.\n else:\n obstime = time_ref_from_dict(obs_table.meta)\n obstime += TimeDelta(obs_table['TSTART'])\n obstime += TimeDelta(obs_table['ONTIME']) / 2.\n location = observatory_locations[observatory_name]\n altaz_frame = AltAz(obstime=obstime, location=location)\n alt_az_coord = SkyCoord(az, alt, frame=altaz_frame)\n sky_coord = alt_az_coord.transform_to('icrs')\n obs_table['RA'] = sky_coord.ra\n obs_table['DEC'] = sky_coord.dec\n\n # positions\n\n # number of telescopes\n # random integers in a specified range; default: between 3 and 4\n n_tels = random_state.randint(n_tels_range[0], n_tels_range[1] + 1, len(obs_id))\n obs_table['N_TELS'] = n_tels\n\n # muon efficiency\n # random between 0.6 and 1.0\n muoneff = random_state.uniform(low=0.6, high=1.0, size=len(obs_id))\n obs_table['MUONEFF'] = muoneff\n\n return obs_table", "def make_hdu_index_table_from_obs_table(obs_table):\n rows = []\n for obs_table_row in obs_table:\n obs_def = ObservationDefinition(data=dict(\n events_tag='gc_baseline',\n obs_id=obs_table_row['OBS_ID']\n ))\n\n rows.extend(obs_def.make_hdu_index_rows())\n\n # names = list(rows[0].keys())\n names = ['OBS_ID', 'HDU_TYPE', 'HDU_CLASS', 'FILE_DIR', 'FILE_NAME', 'HDU_NAME']\n\n table = Table(rows=rows, names=names)\n table.meta['dataset'] = 'CTA 1DC test data'\n return table", "def info_for_all_observations(self):\n # Get all combinations of instrument, detector, filter, exp_type,\n all_combinations = []\n for i in range(len(self.info['Instrument'])):\n # Get instrument information for the exposure\n instrument = self.info['Instrument'][i]\n detector = self.info['detector'][i]\n if instrument == 'NIRCAM':\n detector = 'NRC{}'.format(detector)\n if '5' in detector:\n filtername = self.info['LongFilter'][i]\n pupilname = self.info['LongPupil'][i]\n detector = detector.replace('5', 'LONG')\n else:\n filtername = self.info['ShortFilter'][i]\n pupilname = self.info['ShortPupil'][i]\n elif instrument == 'NIRISS':\n filtername = self.info['ShortFilter'][i]\n pupilname = self.info['ShortPupil'][i]\n elif instrument == 'FGS':\n filtername = 'N/A'\n pupilname = 'N/A'\n readpattern = self.info['ReadoutPattern'][i]\n\n if instrument == 'NIRCAM':\n exptype = 'NRC_IMAGE'\n elif instrument == 'NIRISS':\n exptype = 'NIS_IMAGE'\n elif instrument == 'FGS':\n exptype = 'FGS_IMAGE'\n\n entry = (instrument, detector, filtername, pupilname, readpattern, exptype)\n all_combinations.append(entry)\n unique_combinations = list(set(all_combinations))\n return all_combinations, unique_combinations", "def fill_obs(self, observation_table, data_store):\n for obs in observation_table:\n events = data_store.obs(obs_id=obs['OBS_ID']).events\n\n # TODO: filter out (mask) possible sources in the data\n # for now, the observation table should not contain any\n # run at or near an existing source\n\n self.counts_cube.fill_events([events])\n self.livetime_cube.data += events.observation_live_time_duration", "def _generate_exposure(self, expstart, number):\n\n index_number = number - 1 # for zero indexing\n\n filename = '{:04d}_raw.fits'.format(number)\n\n exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n 
self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, expstart)\n\n if not self.spatial_scan:\n self.sample_rate = 1 * u.year # high number reverts to read times\n\n _, sample_mid_points, sample_durations, read_index = \\\n exp_gen._gen_scanning_sample_times(self.sample_rate)\n\n time_array = (sample_mid_points + expstart).to(u.day)\n\n if self.transmission_spectroscopy:\n star_norm_flux = self.generate_lightcurves(time_array)\n planet_depths = 1 - star_norm_flux\n else:\n planet_depths = None\n\n # x shifts - linear shift with exposure, second exposure shifted by\n # x_shifts, direct image and first exp will match.\n x_ref = self._try_index(self.x_ref, index_number)\n y_ref = self._try_index(self.y_ref, index_number)\n sky_background = self._try_index(self.sky_background, index_number)\n\n # X and Y Shifts\n x_ref += self.x_shifts * index_number\n y_ref += self.y_shifts * index_number\n x_jitter = self.x_jitter\n y_jitter = self.y_jitter\n\n if self._visit_trend:\n scale_factor = self._visit_trend.get_scale_factor(index_number)\n else:\n scale_factor = None\n\n if self.spatial_scan:\n exp_frame = exp_gen.scanning_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n self.scan_speed, self.sample_rate, sample_mid_points,\n sample_durations, read_index, ssv_generator=self.ssv_gen,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n else:\n exp_frame = exp_gen.staring_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n sample_mid_points, sample_durations, read_index,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n\n exp_frame.generate_fits(self.outdir, filename, ldcoeffs=self.ldcoeffs)\n\n return exp_frame", "def generate_exptime_table(self, ):\n\n # Perform calculation for all stars in biased sample\n Ndraw = self.NBIAS\n\n np.random.seed(seed=None)\n\n # Allocate memory for exposure times\n t_tots = np.zeros(Ndraw)\n tpbpcs = []\n pct_obs_iwas = []\n lammax_obs_iwas = []\n specs = []\n\n \"\"\"\n Calculate the exposure times and spectra in each bandpass for each\n star in biased sample\n \"\"\"\n\n # Loop over stars in this sample\n for i in range(Ndraw):\n #print(\"HIP %i, %.2f pc, %s \" %(hip[i], dist[i], stype[i]))\n\n # Set system parameters for this star\n self.prep_ith_star(i)\n\n # Calculate the time to observe the complete spectrum\n t_tots[i], tpbpc, spectrum, iwa = self.complete_spectrum_time()\n\n tpbpcs.append(tpbpc)\n pct_obs_iwas.append(iwa[0])\n specs.append(spectrum)\n\n # Calculate channel widths\n deltas = []\n for channel in CHANNELS:\n l = default_luvoir(channel=channel)\n 
deltas.append(l.lammax - l.lammin)\n self.deltas = np.array(deltas)\n\n # Calculate channel fractional completeness\n self.channel_weights = (self.deltas / np.sum(self.deltas))\n\n # Calculate completeness for each star in sample\n self.completeness = np.sum(np.array(pct_obs_iwas) * self.channel_weights, axis = 1)\n\n \"\"\"\n Make a Lookup Table of Exposure times for each star in sample\n \"\"\"\n\n tpbpcs_rect = [] # Time per bandpass\n tpcs_rect = [] # Time per channel\n\n # Loop over all the stars in sample\n for idrew in range(self.NBIAS):\n\n tpbpcs_rect.append([])\n tpcs_rect.append([])\n bp_names = []\n bp_chan = []\n\n # Loop over all the LUVOIR channels\n for ichan in range(len(CHANNELS)):\n\n tpcs_rect[idrew].append(0.0)\n\n # Loop over all the bands in this channel\n for iband in range(len(tpbpcs[0][ichan])):\n\n bp_names.append(\"%s %i\" %(CHANNELS[ichan], iband+1))\n bp_chan.append(ichan)\n tpbpcs_rect[idrew].append(tpbpcs[idrew][ichan][iband])\n tpcs_rect[idrew][ichan] += tpbpcs[idrew][ichan][iband]\n\n # Make np arrays\n tpbpcs_rect = np.array(tpbpcs_rect)\n tpcs_rect = np.array(tpcs_rect)\n bp_names = np.array(bp_names)\n bp_chan = np.array(bp_chan)\n\n # Make infs --> nans\n infmask = ~np.isfinite(tpbpcs_rect)\n tpbpcs_rect[infmask] = np.nan\n infmask = ~np.isfinite(tpcs_rect)\n tpcs_rect[infmask] = np.nan\n\n # Set attributes\n self.tpbpcs_rect = tpbpcs_rect\n self.tpcs_rect = tpcs_rect\n self.bp_names = bp_names\n self.bp_chan = bp_chan\n\n \"\"\"\n New completeness calculations\n \"\"\"\n\n bandpasses = []\n\n # Loop over telescope channels\n for j, channel in enumerate(CHANNELS):\n\n # Channel dependent bandwidth?\n if type(self.bandwidth) is float:\n bandwidth = self.bandwidth\n else:\n assert len(self.bandwidth) == len(CHANNELS)\n bandwidth = self.bandwidth[j]\n\n # Get the channel specific telescope parameters\n luvoir = default_luvoir(channel=channel)\n self.cn.telescope = luvoir\n\n # Calculate the bandpass edges\n edges = calculate_bandpass_edges(luvoir.lammin, luvoir.lammax, bandwidth = bandwidth)\n\n # Calculate the number of bandpasses\n Nbands = len(edges) - 1\n\n # Loop over bandpasses\n for i in range(Nbands):\n\n # Get the max, min, and middle wavelenths for this bandpass\n lammin = edges[i]\n lammax = edges[i+1]\n\n bandpasses.append([lammin, lammax])\n\n bandpasses = np.array(bandpasses)\n lmin, lmax = np.min(np.hstack(bandpasses)), np.max(np.hstack(bandpasses))\n\n # Fractional completeness of each bandpass\n bp_frac = ((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin)) / np.sum((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin))\n\n # Completeness by target\n tot_completeness = np.sum(np.isfinite(self.tpbpcs_rect) * bp_frac, axis=1)\n\n # Fraction of stars in biased sample that can completely observe each bandpass\n frac_bias_bp = np.sum(np.isfinite(tpbpcs_rect)*1.0, axis=0) / self.NBIAS\n\n # Set attributes\n self.bandpasses = bandpasses\n self.bp_frac = bp_frac\n self.tot_completeness = tot_completeness\n self.frac_bias_bp = frac_bias_bp\n\n self._make_pandas_table()\n\n return", "def Table(self: Any, accessories: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n\n table: List[Dict[str, Any]] = Utility.ReadCSV(\n self, f\"{self.iXAssets}/mp/accessorytable.csv\", AccessoryTable\n )\n\n if table is None:\n return accessories\n\n for accessory in accessories:\n for entry in table:\n if accessory.get(\"altId\") != entry.get(\"ref\"):\n continue\n\n accessory[\"name\"] = self.localize.get(entry.get(\"name\"))\n accessory[\"description\"] = 
self.localize.get(entry.get(\"description\"))\n accessory[\"image\"] = entry.get(\"lootImage\")\n accessory[\"hidden\"] = bool(entry.get(\"hideInUI\"))\n\n return accessories", "def make_meta_table(observation):\n meta_table = Table()\n meta_table[\"TELESCOP\"] = [observation.aeff.meta[\"TELESCOP\"]]\n meta_table[\"INSTRUME\"] = [observation.aeff.meta[\"INSTRUME\"]]\n# meta_table[\"NAME\"] = [observation.aeff.meta[\"CBD10001\"][5:-1]]\n meta_table[\"OBS_ID\"] = [observation.obs_id]\n # NOT WORK YET\n # info_table['AZ'] = [observation.pointing_altaz.az]\n # info_table['ALT'] = [observation.pointing_altaz.alt]\n\n meta_table[\"RA_PNT\"] = [observation.pointing_radec.icrs.ra.deg] * u.deg\n meta_table[\"DEC_PNT\"] = [observation.pointing_radec.icrs.dec.deg] * u.deg\n\n return meta_table", "def iterate_observations(self, exp: HealpixFrame) -> Iterator[Observation]:\n f = self.data_files[exp.data_uri]\n f.seek(exp.data_offset)\n data_layout = struct.Struct(DATA_LAYOUT)\n datagram_size = struct.calcsize(DATA_LAYOUT)\n bytes_read = 0\n while bytes_read < exp.data_length:\n raw = f.read(datagram_size)\n (\n mjd,\n ra,\n dec,\n ra_sigma,\n dec_sigma,\n mag,\n mag_sigma,\n id_size,\n ) = data_layout.unpack(raw)\n id = f.read(id_size)\n bytes_read += datagram_size + id_size\n yield Observation(mjd, ra, dec, ra_sigma, dec_sigma, mag, mag_sigma, id)", "def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict", "def exp_filters(self):\n exp_filters = gui.HBox()\n for idx, exp in enumerate(self.exp_names):\n if (idx % 15) == 0: # starts an new table after each 15 experiments. 
todo: make all tables be of the same length (by adding empty rows)\n exp_table = gui.Table()\n exp_table.style['margin-right'] = '10px'\n # creating the titles:\n row = gui.TableRow()\n item = gui.TableTitle()\n item.add_child(str(id(item)),'Include')\n row.add_child(str(id(item)),item)\n item = gui.TableTitle()\n item.add_child(str(id(item)),'Exclude')\n row.add_child(str(id(item)),item)\n item = gui.TableTitle()\n item.add_child(str(id(item)),'Experiment')\n row.add_child(str(id(item)),item)\n exp_table.add_child(str(id(row)), row)\n # creating a row for each experiment:\n row = gui.TableRow()\n item = gui.TableItem()\n cb_yes = gui.CheckBox()\n item.add_child(str(id(item)),cb_yes)\n row.add_child(str(id(item)),item)\n item = gui.TableItem()\n cb_no = gui.CheckBox()\n item.add_child(str(id(item)),cb_no)\n row.add_child(str(id(item)),item)\n item = gui.TableItem()\n exp_name = gui.Label(exp)\n item.add_child(str(id(item)),exp_name)\n row.add_child(str(id(item)),item)\n exp_table.add_child(str(id(row)), row)\n self.filter_exp_yes_widgets.append(cb_yes)\n self.filter_exp_no_widgets.append(cb_no)\n if (idx % 15) == 0:\n exp_filters.append(exp_table)\n return exp_filters", "def abstract_obs_table(self):\n\n self.S = self.observation_table.S\n self.S_dot_A = self.observation_table.S_dot_A\n self.E = self.observation_table.E\n\n update_S = self.S + self.S_dot_A\n update_E = self.E\n\n for s in update_S:\n for e in update_E:\n observed_outputs = self.observation_table.T[s][e]\n for o_tup in observed_outputs:\n abstracted_outputs = []\n if len(e) == 1:\n o_tup = tuple([o_tup])\n for o in o_tup:\n abstract_output = self.get_abstraction(o)\n abstracted_outputs.append(abstract_output)\n self.add_to_T(s, e, tuple(abstracted_outputs))", "def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all", "def exp_scan(self, exposure_time_list):\n self.generic_scan(self.exp, exposure_time_list)", "def run_observation(self):\n\n self._generate_direct_image() # to calibrate x_ref 
and y_ref\n\n num_frames = len(self.exp_start_times)\n progress = Progress(num_frames)\n self.progess = progress\n\n progress_line = 'Generating frames 0/{} done'.format(num_frames)\n progress.print_status_line(progress_line)\n progress.progress_line = progress_line\n\n for i, start_time in enumerate(self.exp_start_times):\n filenum = i + 1\n self._generate_exposure(start_time, filenum)\n\n progress.increment()\n progress_line = 'Generating frames {}/{} done'.format(filenum,\n num_frames)\n progress.print_status_line(progress_line)\n\n # so it can be retreived by exposure_generator\n progress.progress_line = progress_line", "def __dict_to_table(self):\n\n # need to setup all numpy arrays first, then put into table, adding columns is apparently slow\n file_ext_list = self.final_key_dict.keys()\n num_rows = len(file_ext_list)\n final_keyword_list = list(self.final_key_set)\n\n array_list = [[elem.split('-')[0] for elem in file_ext_list],\n [int(elem.split('-')[1]) for elem in file_ext_list]]\n col_names = ['Filename', 'ExtNumber']\n data_types = ['S80', int]\n # the bool mask array list will be off by index of 2\n mask_list = []\n dumb_indx_dict = {}\n\n # make initial empty data and bool arrays\n for indx, key_tuple in enumerate(final_keyword_list):\n # make dumb keyword index dict, probably a better way to do this\n dumb_indx_dict[key_tuple[0]] = indx\n if key_tuple[1] == str:\n data_type = 'S68'\n else:\n data_type = float\n\n array_list.append(np.zeros(num_rows, dtype=data_type))\n mask_list.append(np.ones(num_rows, dtype=bool))\n col_names.append(key_tuple[0])\n data_types.append(data_type)\n\n # now fill in arrays and mask arrays\n for indx, efile in enumerate(file_ext_list):\n for (ikeyword, value) in iteritems(self.final_key_dict[efile]):\n ikeyword_indx = dumb_indx_dict[ikeyword]\n array_list[ikeyword_indx+2][indx] = value\n mask_list[ikeyword_indx][indx] = False\n\n # put arrays and masks together into one masked array\n for indx, mask_arr in enumerate(mask_list):\n array_list[indx+2] = np.ma.array(array_list[indx+2], mask=mask_arr)\n\n # put everything into an astropy table\n self.table = Table(array_list, names=tuple(col_names), dtype=tuple(data_types))", "def build_table(line_lists=None):\n names = ['Intensity', 'Wavelength', 'Element', 'Reference']\n # Use packaging directory instead of relative path in the future.\n if line_lists is None:\n nist_dir = os.path.join(\"datasets\", \"line_lists\", \"NIST\")\n line_lists = []\n for list_file in pkg_resources.resource_listdir(\"specreduce\", nist_dir):\n if \".txt\" in list_file:\n list_path = pkg_resources.resource_filename(\n \"specreduce\",\n os.path.join(nist_dir, list_file)\n )\n line_lists.append(list_path)\n\n tabs_to_stack = []\n for line_list in line_lists:\n try:\n t = Table.read(line_list, format='ascii', names=names)\n tabs_to_stack.append(t)\n except Exception as e:\n warnings.warn(\n f\"Astropy Table reading failed for {line_list}. Using raw numpy reader... {e}\",\n UserWarning\n )\n # Use numpy to parse table that arent comma delimited.\n data = np.genfromtxt(\n line_list,\n delimiter=(13, 14, 13, 16),\n dtype=str\n )\n t = Table(\n data,\n names=names,\n dtype=('S10', 'f8', 'S15', 'S15')\n )\n tabs_to_stack.append(t)\n\n # Stack all of the tables.\n master_table = vstack(tabs_to_stack)\n\n # Add on switch for users. 
Use line if True, don't if False\n # Set to True by default.\n on_off_column = Column([True] * len(master_table))\n master_table.add_column(on_off_column, name='On')\n\n # Strip the numeric characters off of the intensities and add the letters\n # that denote intensities to their own column\n intensity = master_table['Intensity']\n strength = [re.sub('[0-9]+', '', value).strip() for value in intensity]\n master_table.add_column(Column(strength), name='Strength')\n\n # Find and strip all alphabetic + special characters\n intensity_wo_strength = [re.sub('[a-zA-Z!@#$%^&*]', '', value).strip()\n for value in intensity]\n\n # Delete old column\n master_table.remove_column('Intensity')\n\n # Add new Intensity column that only has intensity as an integer.\n master_table.add_column(Column(intensity_wo_strength,\n dtype=int,\n name='Intensity'))\n\n # Reorder table columns\n neworder = ('Element', 'Wavelength', 'Intensity', 'Strength', 'On', 'Reference')\n master_table = master_table[neworder]\n\n return master_table", "def summarize_observing_conditions(fitsFiles):\n count = len(fitsFiles)\n\n # Here is the data we are going to collect from the fits headers\n year = np.zeros(count, dtype=int)\n month = np.zeros(count, dtype=int)\n day = np.zeros(count, dtype=int)\n hour = np.zeros(count, dtype=int)\n minute = np.zeros(count, dtype=int)\n airmass = np.zeros(count, dtype=float)\n water_column = np.zeros(count, dtype=float)\n \n for ii in range(len(fitsFiles)):\n # Get header info\n hdr = pyfits.getheader(fitsFiles[ii])\n\n airmass[ii] = float(hdr['AIRMASS'])\n\n date = hdr['DATE-OBS'].split('-')\n _year = int(date[0])\n _month = int(date[1])\n _day = int(date[2])\n\n utc = hdr['UTC'].split(':')\n _hour = int(utc[0])\n _minute = int(utc[1])\n _second = int(math.floor(float(utc[2])))\n\n utc = datetime.datetime(_year, _month, _day, _hour, _minute, _second)\n utc2hst = datetime.timedelta(hours=-10)\n hst = utc + utc2hst\n\n year[ii] = hst.year\n month[ii] = hst.month\n day[ii] = hst.day\n hour[ii] = hst.hour\n minute[ii] = hst.minute\n\n # Get the water column in mm of H2O\n water_column[ii] = weather.cso_water_column(_year, _month, _day, \n _hour, _minute)\n\n # Now lets fetch the CFHT weather data\n (temperature, pressure, humidity, wind_speed, wind_dir) = \\\n weather.cfht_weather_data(year, month, day, hour, minute)\n\n # Print out a nicely formatted table\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('Filename', 'Year', 'M', 'D', 'h', 'm', 'AirM', 'H2O', 'Temp', \n 'Press', 'Humi', 'Wind', 'Dir'))\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('HST', '', '', '', '', '', '', 'mm', 'C', 'mbar', '%', 'km/h', 'deg'))\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('--------', '----', '--', '--', '--', '--', '----', '----', '-----', \n '-----', '----', '----', '----'))\n\n for ii in range(len(fitsFiles)):\n print('%-20s %4d %2d %2d %2d %2d ' % \\\n (fitsFiles[ii], year[ii], month[ii], day[ii], hour[ii], minute[ii]),)\n print('%4.2f %4.2f %5.1f %5.1f %4.1f %4.1f %4d' % \\\n (airmass[ii], water_column[ii], temperature[ii], pressure[ii],\n humidity[ii], wind_speed[ii], wind_dir[ii]))\n\n # Print out the average values\n print('%-20s %4s %2s %2s %2s %2s %4s %4s %5s %5s %4s %4s %4s' % \\\n ('--------', '----', '--', '--', '--', '--', '----', '----', '-----', \n '-----', '----', '----', '----'))\n print('%-20s %4d %2d %2d %2d %2d ' % \\\n ('Average', year.mean(), month.mean(), day.mean(), hour.mean(), \n minute.mean()),)\n 
print('%4.2f %4.2f %5.1f %5.1f %4.1f %4.1f %4d' % \\\n (airmass.mean(), water_column.mean(), temperature.mean(), \n pressure.mean(), humidity.mean(), wind_speed.mean(), wind_dir.mean()))\n print('%-20s %4d %2d %2d %2d %2d ' % \\\n ('Std. Dev.', year.std(), month.std(), day.std(), hour.std(), \n minute.std()),)\n print('%4.2f %4.2f %5.1f %5.1f %4.1f %4.1f %4d' % \\\n (airmass.std(), water_column.std(), temperature.std(), \n pressure.std(), humidity.std(), wind_speed.std(), wind_dir.std()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
String representation of the exposure sequence.
def __str__(self):
    output = ['Tile ID {}'.format(self._tileid)]
    for ex, files in self._exposure_files.items():
        filenames = '- exposure {:08d}\n'.format(ex)
        for f in files:
            filenames = '{} + {}\n'.format(filenames, f)
        output.append(filenames)
    return '\n'.join(output)
[ "def to_string(self):\n return self.sequence", "def get_sequence_str(self):\n return self.sequence.get_sequence()", "def __str__(self):\n return str(self._seq)", "def __str__(self):\n return '{},{},{},{},{},{},{},{},{}'.format(self.video_filename, self.driver_id, self.which_map, self.seq_str,\n self.start_frame_str, self.end_frame_str, self.is_acting,\n self.sequence_area_str, self.count_acting_str)", "def __str__(self):\n return str(self.get_gapped_seq())", "def __str__(self):\n\n pose_str = \"[Pos: (\"\n\n for coord in self.position:\n pose_str += str(coord) + \", \"\n\n pose_str = pose_str[0:-2] + \") Orient: (\"\n\n for coord in self.orientation:\n pose_str += str(coord) + \", \"\n\n pose_str = pose_str[0:-2] + \")]\"\n\n return pose_str", "def __str__(self):\n return self._seq", "def __str__(self):\n \n temp = \"Experiment(\"\n return temp+str(self.value)+\", \"+str(self.tpr)+\", \"+str(self.fpr)+\")\"", "def dump(self):\n outputs = [\"Sequence : %s\" % self.name]\n if self.curr_value:\n outputs.append(\" start : %d\" % self.curr_value)\n outputs.append(\" minimum : %d\" % self.min_value)\n outputs.append(\" maximum : %d\" % self.max_value)\n if self.increment_by > 1:\n outputs.append(\" increment : %d\" % self.increment_by)\n return \"\\n\".join(outputs)", "def __str__(self):\n # self._examples.values\n string = \"\"\n for e in self._examples:\n for i, v in enumerate(e.values):\n if self._attributes[i].type == 'Nominal':\n string = string + self._attributes[i].domain[v]\n else:\n string = string + v\n if i == len(e.values) - 1:\n string = string + \"\\n\"\n else:\n string = string + \" \"\n return string", "def __str__(self):\n\n nframes = len(self.frames)\n if nframes == 0:\n return \"\"\n elif nframes == 1:\n frame, = self.frames\n return str(frame)\n else:\n frames = sorted(self.frames)\n start = prev = frames[0] # First frame.\n step = None\n subranges = []\n for end in frames[1:]: # Frame starting from the second in the list.\n\n if step is None: # Step is still none.\n step = end - prev # Find and set step.\n\n if prev + step != end: # If the sequence is broken.\n subranges.append((start, prev, step)) # Create a subrange.\n step = None # Reset step.\n start = end # Re-start start.\n prev = end # The next previous.\n\n else:\n subranges.append((start, end, step))\n\n return \", \".join(format_subrange(start, end, step) for (start, end, step) in subranges)", "def __str__(self):\n\n operations = re.sub(\n '^',\n ' ' * 4,\n '\\n\\n'.join([str(a) for a in self._sequence]),\n flags=re.MULTILINE)\n operations = re.sub('^\\\\s+$', '', operations, flags=re.MULTILINE)\n\n return ('LUT Sequence\\n'\n '------------\\n\\n'\n 'Overview\\n\\n'\n ' {0}\\n\\n'\n 'Operations\\n\\n'\n '{1}').format(\n ' ---> '.join(\n [a.__class__.__name__ for a in self._sequence]),\n operations)", "def excitation_seq(self) -> str:\n return self.frame_selector.excitation_seq", "def __str__(self):\r\n\r\n data = [self.seq_f,\r\n self.seq_r,\r\n self.tf,\r\n self.df,\r\n len(self.taxons),\r\n len(self.families),\r\n ]\r\n\r\n return \"%s\\n\" % \"\\t\".join([str(x) for x in data])", "def __str__(self):\n\n out_string = \"Pose ID: \" + str(self.id)\n out_string += \"\\nDetection confidence: \" + str(self.confidence) + \"\\nKeypoints name-position:\\n\"\n # noinspection PyUnresolvedReferences\n for name, kpt in zip(Pose.kpt_names, self.data.tolist()):\n out_string += name + \": \" + str(kpt) + \"\\n\"\n return out_string", "def get_seq_info(self):\n return f\"[Label]: {self.label}\\n[Sequence]: 
{self.seq}\\n[Biotype]: {self.seq_type}\\n[Length]: {len(self.seq)}\"", "def __str__(self):\n return str(''.join(self.write([], format='fasta')))", "def to_string(self) -> str:\n dc_dict = self.to_dict()\n descriptors = _expand_decay_modes(dc_dict, top=True)\n assert len(descriptors) == 1\n return descriptors[0]", "def __repr__ ( self ):\n\t\treturn \"RNA sequence: %s\" % str ( self . data )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the source that matches the user-provided source_id or display_name.
def ExtractMatchingSourceFromResponse(response, args):
    for source in response:
        if ((args.source and source.name.endswith(args.source)) or
                (args.source_display_name and
                 source.displayName == args.source_display_name)):
            return source
    raise core_exceptions.Error(
        "Source: %s not found." %
        (args.source if args.source is not None else args.source_display_name))
[ "def find_source(self, name):\n t = filter( lambda x: x.name==name, self.point_sources+self.extended_sources)\n return t[0] if len(t)==1 else None", "def getSourceForId(context, identifier):\n nearest = getattr(context, identifier, None)\n if IExternalSource.providedBy(nearest):\n return nearest\n return None", "def get_source_name(self, source_id: str) -> str:\n if not self._source_list_map:\n return \"\"\n if source_id.upper() == DIGITAL_TV.upper():\n source_id = \"dtv\"\n for map_value in self._source_list_map:\n map_id = map_value.get(\"id\")\n if map_id and map_id == source_id:\n return map_value.get(\"name\", \"\")\n return \"\"", "def by_source(self, source):\n return self.filter(source_object=source)", "def get_source_link(self, source):\n for link in self.sources:\n if link.source == source:\n return link", "def get_source(self, name):\n return self._sources[name]", "def _get_source(self, uri: str) -> Optional[_Source]:\n\n for source in self._sources:\n if uri == source.uri:\n return source\n\n return None", "def get_source(self, source_name: str) -> Optional[Tuple[int, str, str]]:\n prefix = \"Source\"\n self.get_main_source()\n regex = re.compile(r\"^Source\\s*:.+$\")\n spec_section = self.spec_content.section(SPEC_PACKAGE_SECTION)\n for idx, line in enumerate(spec_section):\n # we are looking for Source lines\n if line.startswith(prefix):\n # it's a Source line!\n if line.startswith(source_name):\n # it even matches the specific Source\\d+\n full_name = source_name\n elif regex.match(line):\n # okay, let's try the other very common default\n # https://github.com/packit/packit/issues/536#issuecomment-534074925\n full_name = prefix\n else:\n # nope, let's continue the search\n continue\n return idx, full_name, line\n return None", "def get_data_source(DataSourceId=None, Verbose=None):\n pass", "def get_source(self, credential_source_type, credential_source_field_name):\n pass", "def get_media_source_id(self, source_name):\n\t\tvalidation.required(source_name, 'source_name')\n\n\t\treturn self.media_sources.get(source_name, 1)", "def _get_source(self, source_id):\n logging.debug(\"Getting entity for source_id %s\", source_id)\n if self.trace.has_item(source_id):\n return self.trace.get_item(source_id)\n\n source_components = source_id.split('/')\n if len(source_components) not in [2, 4]:\n logging.error(\n \"Expecting source with either 2 or 4 components, got %s\",\n source_id)\n return None\n\n if not re.match(\"[0-9]+\", source_components[0]):\n logging.error(\n \"Expecting source beginning with item ID, got %s\",\n source_components[0])\n return None\n\n if len(source_components) == 2:\n source_item_id = source_components[0]\n well = source_components[1]\n\n pattern = r\"\\[\\[([0-9]+),[ \\t]*([0-9]+)\\]\\]\"\n match = re.match(pattern, source_components[1])\n if match:\n well = well_coordinates(int(match[1]), int(match[2]))\n\n elif len(source_components) == 4:\n source_item_id = source_components[1]\n well = source_components[3]\n\n source_item_entity = self.factory.get_item(item_id=source_item_id)\n\n if not source_item_entity.is_collection():\n msg = \"Ignoring source part %s from non-collection %s\"\n logging.info(msg, well, source_item_id)\n return source_item_entity\n\n source_part_entity = self.factory.get_part(\n collection=source_item_entity,\n well=well\n )\n\n return source_part_entity", "def get_source_id(self):\n return self.source_id", "def get_source(slug):\n from .models import SOURCE\n for cls in SOURCE:\n if cls.slug == slug:\n return cls", "def 
data_source_display_name(self, data_source_display_name):\n\n self._data_source_display_name = data_source_display_name", "def get_account_source_infos(self, account_source):\r\n for source, source_infos in self.account_sources.items():\r\n if account_source.lower() == source.lower():\r\n return (source, source_infos)\r\n if source_infos[\"aliases\"]:\r\n for alias in source_infos[\"aliases\"]:\r\n if account_source.lower() == alias.lower():\r\n return (source, source_infos)\r\n raise exceptions.NoAccountSourceInfosException()", "def by_display_name(cls, host, display_name):\n if host is None or display_name is None:\n return None\n return DBSession.query(cls).filter(\n cls.host == host).filter(cls.display_name == display_name).first()", "def source_id(self) -> str:\n return self._source_id", "def grab_external_id(stix_object, source_name):\n for external_reference in stix_object.get(\"external_references\", []):\n if external_reference.get(\"source_name\") == source_name:\n return external_reference[\"external_id\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure that the specified node is visible.
def ensure_visible(self, node):
    try:
        components = node.namespace_name.split('/')

        # Make sure that the tree is expanded down to the context that
        # contains the node.
        binding = self.root
        for atom in components[:-1]:
            binding = binding.obj.lookup_binding(atom)
            self.expand(binding)

        # The context is expanded so we know that the node will be in the
        # node to Id map.
        wxid = self._node_to_id_map.get(self.model.get_key(node), None)
        self.control.EnsureVisible(wxid)

    # We need 'namespace_name' to make this work. If we don't have it
    # then we simply cannot do this!
    except OperationNotSupportedError:
        binding = None

    return binding
[ "def assertVisible(self, element):\n return self.assertTrue(element.is_displayed())", "def Visible(self) -> bool:", "def ensure_visible(self):\n self.widget.setVisible(True)\n action = self._widget_action\n if action is not None:\n action.setVisible(True)", "def setVisible( self, state ):\n self._visible = state\n \n super(XNode, self).setVisible(self.isVisible())\n \n self.dispatch.visibilityChanged.emit(state)\n self.setDirty()", "def inspectedNodeIsVisible(self):\n return self._inspected_node_is_visible", "def invisible_visit(self, node: Node) -> None:\n pass", "def ensure_visible(self):\n self.widget.setVisible(True)", "def has_visible_parents(self, node):\n parent = node\n if parent is None:\n return True \n else:\n style = self.get_style(parent)\n if style is not None and style.replace(\" \",\"\").find(\"display:none\") >= 0:\n return False\n return self.has_visible_parents(parent.getparent())", "def set_visible(self, visible): \n self.visible = visible", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def assertVisible(self, element_locator, msg=None, wait_timeout=None):\n wait_timeout = wait_timeout or self.DEFAULT_ASSERTION_TIMEOUT\n if not test.visibility_change_test(self.driver, element_locator, wait_timeout=wait_timeout):\n failure_message = 'Element is not visible'\n msg = self._formatMessage(msg, failure_message)\n raise self.failureException(msg)", "def wait_for_visible(self, timeout=None):\n wait_until(lambda: self.is_displayed(),\n \"Element '%s' not visible after <TIMEOUT>.\" % self._locator,\n timeout)", "def is_visible(self, timeout=None):\n try:\n self.visibility_of_element_located(timeout)\n except TimeoutException:\n return False\n return True", "def visible(visible=True):\n self.turtle.visible = visible\n self.send_report()", "def assert_visible(self, locator, msg=None):\r\n e = driver.find_elements_by_locator(locator)\r\n if len(e) == 0:\r\n raise AssertionError(\"Element at %s was not found\" % locator)\r\n assert e.is_displayed()", "def has_visibility_of(self, n=None):\n raise NotImplementedError(\n 'operation has_visibility_of(...) not yet implemented')", "def unmute_and_show_node(node):\n\n # we query connections and hide from the mute node\n if cmds.listConnections(\"{}.visibility\".format(node)):\n cmds.mute(\"{}.visibility\".format(node), disable=True, force=True)\n\n # we simply hide\n else:\n cmds.setAttr(\"{}.visibility\".format(node), 1)", "def wait_for_invisible(self, timeout=None):\n wait_until(lambda: not self.is_displayed(),\n \"Element '%s' still visible after <TIMEOUT>.\" % self._locator,\n timeout)", "def EnsureVisible(self, item):\r\n\r\n # first expand all parent branches\r\n parent = item.GetParent()\r\n\r\n if self.HasAGWFlag(TR_HIDE_ROOT):\r\n while parent and parent != self._anchor:\r\n self.Expand(parent)\r\n parent = parent.GetParent()\r\n else:\r\n while parent:\r\n self.Expand(parent)\r\n parent = parent.GetParent()\r\n \r\n self.ScrollTo(item)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method tests the endpoint for getting bucketlist items
def test_get_bucketlist_items(self):
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, id=1).first()
    items_no = len(bucketlist.bucketlist_items)
    headers = self.authentication_headers(email=email, password=_pword)
    response = self.client.get(
        '/api/v1/bucketlist/1/items/',
        content_type="application/json",
        headers=headers,
        follow_redirects=True
    )
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(len(result), items_no)
[ "def test_get_bucketlist_items(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n\n self.assertEqual(resp_item.status_code, 200)\n resp_item = self.client.get('/bucketlistitems/1/items', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp_item.status_code, 200)", "def test_get_all_bucketitems(self):\n\n resp = self.client().post('/auth/register', data = self.user)\n\n resp_login = self.client().post('/auth/login', data = self.form_data, ) ## Login the user.\n token = json.loads(resp_login.data.decode())['auth_token'] ## Get the authentication token.\n\n resp = self.client().post('/bucketlists/', data = self.bucket, headers=dict(Authorization=token))\n resp = self.client().post('/bucketlists/1/items/', data = self.bucketitems, headers=dict(Authorization=token))\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client().get('/bucketlists/1/items/', headers=dict(Authorization=token))\n self.assertEqual(resp.status_code, 200) ## Test if the response is successfully loaded.\n self.assertIn('Climbing', str(resp.data))", "def test_get_bucketlist(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n test_bucketlist = self.test_store.get_single_bucketlist(3)\n self.assertEquals(\n test_bucketlist,\n {\n \"id\": 3,\n \"title\": \"travel\",\n \"description\": \"visit london\",\n \"items\": []\n }, 'bucketlist not found')", "def test_user_can_get_list_of_buckets(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list)\n self.assertEqual(len(data['buckets']), 0)\n self.assertEqual(data['count'], 0)\n self.assertIsInstance(data['count'], int)\n self.assertEqual(data['previous'], None)\n self.assertEqual(data['next'], None)", "def bucketlist_get():\n pass", "def test_api_get_all_bucketlists(self):\n res = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/bucketlist')\n self.assertEqual(res.status_code, 200)\n self.assertIn('Go to vacation', str(res.data))", "def test_get_request_on_bucketlist_resource(self):\n\n response = self.client.get(\"/bucketlists/\")\n self.assertEqual(response.status_code, 401)", "def test_read_bucket(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_method.data))", "def test_list(self):\n responses.add(\n responses.Response(\n method='GET',\n url='https://connection.keboola.com/v2/storage/buckets',\n json=list_response\n )\n )\n buckets_list = self.buckets.list()\n assert isinstance(buckets_list, list)", "def test_get_one_bucketlist(self):\n bucketlist = {'title': 'Swimming'}\n # post first time\n self.client.post(\n '/bucketlists/', data=json.dumps(bucketlist), 
headers=self.get_header())\n response = self.client.get('/bucketlists/1', headers=self.get_header())\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Swimming\", response.get_data(as_text=True))", "def test_creating_and_getting_a_bucketlist_for_authenticated_user(self):\n\n # test all bucketlists\n response = self.client.post(\n \"/bucketlists/\",\n data=dict(name='test_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n bucketlist = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bucketlist[\"name\"], 'test_bucketlist')\n\n # test single bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n single_bucketlist = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n one_bucketlist = json.loads(single_bucketlist.data)\n\n self.assertEqual(single_bucketlist.status_code, 200)\n self.assertEqual(one_bucketlist[\"name\"], 'test_bucketlist')\n\n # test all items in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n self.assertEqual(item.status_code, 200)\n self.assertEqual(one_item[\"name\"], 'test_item')\n\n # test single item in bucketlist\n self.item_id = one_item[\"item_id\"]\n single_item = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\" + str(self.item_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n created_item = json.loads(single_item.data)\n\n self.assertEqual(single_item.status_code, 200)\n self.assertEqual(created_item[\"name\"], 'test_item')\n\n # test for deletion of bucketlist\n second_bucketlist = self.client.post(\n \"/bucketlists/\",\n data=dict(name='second_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n\n bucketlist_two = json.loads(second_bucketlist.data)\n\n self.assertEqual(second_bucketlist.status_code, 200)\n self.assertEqual(bucketlist_two[\"name\"], 'second_bucketlist')\n\n delete_response = self.client.delete(\n \"/bucketlists/\" + str(bucketlist_two[\"bucketlist_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n deletion = json.loads(delete_response.data)\n\n self.assertEqual(delete_response.status_code, 200)\n self.assertEqual(deletion[\"message\"], \"Deleted\")\n\n # test for deletion of an item in bucketlist\n delete_item = self.client.delete(\n \"/bucketlists/\" + str(bucketlist[\"bucketlist_id\"]) + \"/items/\" + str(one_item[\"item_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n item_deletion = json.loads(delete_item.data)\n\n self.assertEqual(delete_item.status_code, 200)\n self.assertEqual(item_deletion[\"message\"], \"Deleted\")\n\n # test for updating of bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n bucketlist_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n data=dict(name='bucketlist_test'),\n headers={'Authorization': self.user_token}\n )\n\n updated_bucketlist = json.loads(bucketlist_update.data)\n\n self.assertEqual(bucketlist_update.status_code, 200)\n self.assertEqual(updated_bucketlist[\"name\"], 'bucketlist_test')\n\n # test update of item in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n item_update = 
self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\"+ str(one_item[\"item_id\"]) + \"\",\n data=dict(name=\"item_test\"),\n headers={'Authorization': self.user_token}\n )\n\n updated_item = json.loads(item_update.data)\n\n self.assertEqual(item_update.status_code, 200)\n self.assertEqual(updated_item[\"name\"], 'item_test')", "def test_bucket_by_id_is_returned_on_get_request(self):\n with self.client:\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['bucket']['name'] == 'travel')\n self.assertIsInstance(data['bucket'], dict)\n self.assertTrue(response.content_type == 'application/json')", "def test_buckets_returned_when_searched(self):\n with self.client:\n token = self.get_user_token()\n self.create_buckets(token)\n response = self.client.get(\n '/bucketlists/?q=T',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list, 'Items must be a list')\n self.assertEqual(len(data['buckets']), 3)\n self.assertEqual(data['buckets'][0]['id'], 1)\n self.assertEqual(data['count'], 6)\n self.assertEqual(data['next'], 'http://localhost/bucketlists/?page=2')\n self.assertEqual(data['previous'], None)\n self.assertEqual(response.status_code, 200)", "def test_bucketlist_item_creation(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n self.client.post(\n \"/bucketlists/\",\n headers=dict(Authorization=access_token),\n data=self.bucketlist)\n # result = json.loads(bucketlist.data.decode())\n\n resp = self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n self.assertEqual(resp.status_code, 201)\n self.assertIn(\"Standard chartered marathon\", str(resp.data))", "def test_buckets_returned_when_searched_2(self):\n with self.client:\n token = self.get_user_token()\n self.create_buckets(token)\n response = self.client.get(\n '/bucketlists/?q=T&page=2',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list, 'Items must be a list')\n self.assertEqual(len(data['buckets']), 3)\n self.assertEqual(data['buckets'][0]['id'], 4)\n self.assertEqual(data['count'], 6)\n self.assertEqual(data['next'], None)\n self.assertEqual(data['previous'], 'http://localhost/bucketlists/?page=1')\n self.assertEqual(response.status_code, 200)", "def test_api_get_bucketlist_by_id(self):\n res_post = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res_post.status_code, 201)\n res_in_json = 
json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n res = self.client().get(f\"/bucketlist/{res_in_json['id']}\")\n self.assertEqual(res.status_code, 200)\n self.assertIn('Go to vacation', str(res.data))", "def test_create_bucket_list_return(self):\n bucket = BucketList(\"\", \"\")\n bucket = bucket.create_bucket_list(\"Name\", \"Completed\")\n self.assertIsInstance(bucket, BucketList)", "def test_no_bucket_returned_by_given_id(self):\n with self.client:\n token = self.get_user_token()\n\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['bucket'], list)\n self.assertTrue(response.content_type == 'application/json')", "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method tests the endpoint for adding a bucketlist item
def test_add_bucketlist_items(self):
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()
    response = self.add_bucketlist_item(email, _pword, bucketlist.id, "bucketlist item name")
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '201 CREATED')
    self.assertEqual(result['message'], 'Bucket list item added')
    new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()
    self.assertLess(item_no, new_item_no)
[ "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)", "def test_add_bucketlist_item(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n test_bucketlist = self.test_store.get_single_bucketlist(1)\n # import pdb; pdb.set_trace()\n initial_bucketlist_items = len(test_bucketlist['items'])\n self.test_store.add_bucketlist_item(1, \"Tour Big Ben\", \"12 Nov 2017\")\n final_bucketlist_items = len(test_bucketlist['items'])\n self.assertEquals(\n 1, final_bucketlist_items-initial_bucketlist_items,\n 'Bucketlist item not created properly')", "def test_bucketlist_item_creation(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n self.client.post(\n \"/bucketlists/\",\n headers=dict(Authorization=access_token),\n data=self.bucketlist)\n # result = json.loads(bucketlist.data.decode())\n\n resp = self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n self.assertEqual(resp.status_code, 201)\n self.assertIn(\"Standard chartered marathon\", str(resp.data))", "def test_bucketlist_item_creation_with_Existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n self.client.post(\n \"/bucketlists/\",\n headers=dict(Authorization=access_token),\n data=self.bucketlist)\n # result = json.loads(bucketlist.data.decode())\n self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n\n resp = self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n self.assertEqual(resp.status_code, 205)\n self.assertIn(\"Item exists\", str(resp.data))", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item 
name\")\r\n self.assertTrue(item2.completed)", "def test_create_bucketlist(self):\n bucketlist = {'title': 'Swimming'}\n response = self.client.post(\n '/bucketlists/', data=json.dumps(bucketlist), headers=self.get_header())\n self.assertEqual(response.status_code, 201)\n self.assertIn(\"Swimming - bucketlist has been added\",\n response.get_data(as_text=True))", "def test_bucketlist_item_creation_without_bucketlist(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n resp = self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n self.assertEqual(resp.status_code, 404)", "def test_bucketitems_creation(self):\n\n resp = self.client().post('/auth/register', data = self.user)\n\n resp_login = self.client().post('/auth/login', data = self.form_data) ## Login the user.\n token = json.loads(resp_login.data.decode())['auth_token'] ## Get the authentication token.\n\n resp = self.client().post('/bucketlists/', data = self.bucket, headers=dict(Authorization=token))\n resp = self.client().post('/bucketlists/1/items/', data = self.bucketitems, headers=dict(Authorization=token))\n self.assertEqual(resp.status_code, 200)\n self.assertIn('Climbing', str(resp.data)) ## Searches for climbing in the users string.", "def test_post_bucketlists_items_list(self):\n\n new_bucketlist_item = {\"title\": \"Title 3\"}\n\n # Asserting no access without token\n response = self.client.post(url, new_bucketlist_item)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.post(url, new_bucketlist_item)\n self.assertEqual(auth_response.status_code, 201)\n self.assertEqual(BucketlistItem.objects.count(), 3)", "def test_bucketlist_create(self):\n res = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Go to vacation', str(res.data))", "def test_get_bucketlist_items(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n\n self.assertEqual(resp_item.status_code, 200)\n resp_item = self.client.get('/bucketlistitems/1/items', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp_item.status_code, 200)", "def test_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n resp 
= self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = self.client.get(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Stan Chart\", str(result.data))", "def test_put_bucketlists_detail(self):\n\n update_bucketlist_item = {\"title\": \"Item Three\"}\n\n # Asserting no access without token\n response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(auth_response.status_code, 200)\n self.assertEqual(Bucketlist.objects.count(), 2)\n self.assertEqual(auth_response.data.get('message'), \"Item '2' updated successfully\")", "def test_create_bucketlist(self):\n initial_bucketlists = len(self.test_store.bucketlists)\n self.test_store.add_bucketlist('travel', 'visit london')\n final_bucketlists = len(self.test_store.bucketlists)\n self.assertEquals(\n 1, final_bucketlists-initial_bucketlists, 'User not created')", "def test_get_bucketlist(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n test_bucketlist = self.test_store.get_single_bucketlist(3)\n self.assertEquals(\n test_bucketlist,\n {\n \"id\": 3,\n \"title\": \"travel\",\n \"description\": \"visit london\",\n \"items\": []\n }, 'bucketlist not found')", "def test_add_item_at_using_put(self):\n pass", "def test_creating_and_getting_a_bucketlist_for_authenticated_user(self):\n\n # test all bucketlists\n response = self.client.post(\n \"/bucketlists/\",\n data=dict(name='test_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n bucketlist = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bucketlist[\"name\"], 'test_bucketlist')\n\n # test single bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n single_bucketlist = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n one_bucketlist = json.loads(single_bucketlist.data)\n\n self.assertEqual(single_bucketlist.status_code, 200)\n self.assertEqual(one_bucketlist[\"name\"], 'test_bucketlist')\n\n # test all items in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n self.assertEqual(item.status_code, 200)\n self.assertEqual(one_item[\"name\"], 'test_item')\n\n # test single item in bucketlist\n self.item_id = one_item[\"item_id\"]\n single_item = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\" + str(self.item_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n created_item = json.loads(single_item.data)\n\n self.assertEqual(single_item.status_code, 200)\n self.assertEqual(created_item[\"name\"], 'test_item')\n\n # test for deletion of bucketlist\n second_bucketlist = self.client.post(\n \"/bucketlists/\",\n data=dict(name='second_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n\n bucketlist_two = json.loads(second_bucketlist.data)\n\n self.assertEqual(second_bucketlist.status_code, 200)\n self.assertEqual(bucketlist_two[\"name\"], 
'second_bucketlist')\n\n delete_response = self.client.delete(\n \"/bucketlists/\" + str(bucketlist_two[\"bucketlist_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n deletion = json.loads(delete_response.data)\n\n self.assertEqual(delete_response.status_code, 200)\n self.assertEqual(deletion[\"message\"], \"Deleted\")\n\n # test for deletion of an item in bucketlist\n delete_item = self.client.delete(\n \"/bucketlists/\" + str(bucketlist[\"bucketlist_id\"]) + \"/items/\" + str(one_item[\"item_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n item_deletion = json.loads(delete_item.data)\n\n self.assertEqual(delete_item.status_code, 200)\n self.assertEqual(item_deletion[\"message\"], \"Deleted\")\n\n # test for updating of bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n bucketlist_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n data=dict(name='bucketlist_test'),\n headers={'Authorization': self.user_token}\n )\n\n updated_bucketlist = json.loads(bucketlist_update.data)\n\n self.assertEqual(bucketlist_update.status_code, 200)\n self.assertEqual(updated_bucketlist[\"name\"], 'bucketlist_test')\n\n # test update of item in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n item_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\"+ str(one_item[\"item_id\"]) + \"\",\n data=dict(name=\"item_test\"),\n headers={'Authorization': self.user_token}\n )\n\n updated_item = json.loads(item_update.data)\n\n self.assertEqual(item_update.status_code, 200)\n self.assertEqual(updated_item[\"name\"], 'item_test')", "def test_bucketlist_item_edit_with_existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 409)\n self.assertIn(\"Name exists, enter another\", str(resp.data))", "def post_bucketlist():\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method tests that there cannot be more than one bucketlist item added with the same name. We will use one of the already existing bucketlist item names, 'test item'.
def test_fail_repeated_buckelist_item(self):
    user = User.query.filter_by(email="test@test.com").first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()
    response = self.add_bucketlist_item("test@test.com", "test", bucketlist.id, "test item")
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '409 CONFLICT')
    self.assertEqual(result['message'], 'Bucketlist Item Exists')
    new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()
    self.assertEqual(item_no, new_item_no)
[ "def test_add_bucketlist_item(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n test_bucketlist = self.test_store.get_single_bucketlist(1)\n # import pdb; pdb.set_trace()\n initial_bucketlist_items = len(test_bucketlist['items'])\n self.test_store.add_bucketlist_item(1, \"Tour Big Ben\", \"12 Nov 2017\")\n final_bucketlist_items = len(test_bucketlist['items'])\n self.assertEquals(\n 1, final_bucketlist_items-initial_bucketlist_items,\n 'Bucketlist item not created properly')", "def test_bucketlist_item_creation_with_Existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n self.client.post(\n \"/bucketlists/\",\n headers=dict(Authorization=access_token),\n data=self.bucketlist)\n # result = json.loads(bucketlist.data.decode())\n self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n\n resp = self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n self.assertEqual(resp.status_code, 205)\n self.assertIn(\"Item exists\", str(resp.data))", "def test_bucketlist_item_edit_with_existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 409)\n self.assertIn(\"Name exists, enter another\", str(resp.data))", "def test_bucketlist_item_edit_blank_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n result = self.client.get(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n self.assertEqual(resp.status_code, 400)\n self.assertIn(\"Enter a Valid Name\", str(resp.data))", "def test_add_bucketlist_items(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = 
BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)", "def test_bucketlist_item_creation_without_bucketlist(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n resp = self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n self.assertEqual(resp.status_code, 404)", "def test_bucketlist_item_creation(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n self.client.post(\n \"/bucketlists/\",\n headers=dict(Authorization=access_token),\n data=self.bucketlist)\n # result = json.loads(bucketlist.data.decode())\n\n resp = self.client.post(\"/bucketlists/1/items/\", headers=dict(\n Authorization=access_token), data=self.item)\n self.assertEqual(resp.status_code, 201)\n self.assertIn(\"Standard chartered marathon\", str(resp.data))", "def test_create_bucketlist(self):\n initial_bucketlists = len(self.test_store.bucketlists)\n self.test_store.add_bucketlist('travel', 'visit london')\n final_bucketlists = len(self.test_store.bucketlists)\n self.assertEquals(\n 1, final_bucketlists-initial_bucketlists, 'User not created')", "def test_delete_bucketlist_item(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n self.test_store.add_bucketlist_item(2, \"Tour Big Ben\", \"12 Nov 2017\")\n test_bucketlist = self.test_store.get_single_bucketlist(2)\n initial_bucketlist_items = len(test_bucketlist['items'])\n self.test_store.remove_bucketlist_item(2, 1)\n final_bucketlist_items = len(test_bucketlist['items'])\n self.assertEquals(\n 1,\n initial_bucketlist_items-final_bucketlist_items,\n 'Items not removed'\n )", "def test_nonexistence_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n 
headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 404)", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def test_get_bucketlist_items(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n\n self.assertEqual(resp_item.status_code, 200)\n resp_item = self.client.get('/bucketlistitems/1/items', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp_item.status_code, 200)", "def test_post_bucketlists_items_list(self):\n\n new_bucketlist_item = {\"title\": \"Title 3\"}\n\n # Asserting no access without token\n response = self.client.post(url, new_bucketlist_item)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.post(url, new_bucketlist_item)\n self.assertEqual(auth_response.status_code, 201)\n self.assertEqual(BucketlistItem.objects.count(), 3)", "def test_bucketitems_creation(self):\n\n resp = self.client().post('/auth/register', data = self.user)\n\n resp_login = self.client().post('/auth/login', data = self.form_data) ## Login the user.\n token = json.loads(resp_login.data.decode())['auth_token'] ## Get the authentication token.\n\n resp = self.client().post('/bucketlists/', data = self.bucket, headers=dict(Authorization=token))\n resp = self.client().post('/bucketlists/1/items/', data = self.bucketitems, headers=dict(Authorization=token))\n self.assertEqual(resp.status_code, 200)\n self.assertIn('Climbing', str(resp.data)) ## Searches for climbing in the users string.", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist 
Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_create_bucket_list_name(self):\n bucket = BucketList(\"\", \"\")\n bucket = bucket.create_bucket_list(\"\")\n self.assertEqual(bucket, \"Please provide a name for your bucket list\", )", "def test_get_bucketlist(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n test_bucketlist = self.test_store.get_single_bucketlist(3)\n self.assertEquals(\n test_bucketlist,\n {\n \"id\": 3,\n \"title\": \"travel\",\n \"description\": \"visit london\",\n \"items\": []\n }, 'bucketlist not found')", "def test_get_one_bucketlist(self):\n bucketlist = {'title': 'Swimming'}\n # post first time\n self.client.post(\n '/bucketlists/', data=json.dumps(bucketlist), headers=self.get_header())\n response = self.client.get('/bucketlists/1', headers=self.get_header())\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Swimming\", response.get_data(as_text=True))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method tests the endpoint for updating a bucket list item using PUT
def test_put_bucketlist_item(self):
    data = {"name": "bucketlist item name", "completed": "true"}
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    self.assertNotEqual(item.name, "bucketlist item name")
    self.assertFalse(item.completed)

    response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)
    result = json.loads(response.data.decode('utf-8'))
    item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    self.assertEqual(response.status, '201 CREATED')
    self.assertEqual(item2.name, "bucketlist item name")
    self.assertTrue(item2.completed)
[ "def test_put_bucketlists_detail(self):\n\n update_bucketlist_item = {\"title\": \"Item Three\"}\n\n # Asserting no access without token\n response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(auth_response.status_code, 200)\n self.assertEqual(Bucketlist.objects.count(), 2)\n self.assertEqual(auth_response.data.get('message'), \"Item '2' updated successfully\")", "def test_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = self.client.get(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Stan Chart\", str(result.data))", "def test_edit_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))", "def test_bucketitems_can_be_edited(self):\n\n resp = self.client().post('/auth/register', data = self.user)\n\n resp_login = self.client().post('/auth/login', data = self.form_data) ## Login the user.\n token = json.loads(resp_login.data.decode())['auth_token'] ## Get the authentication token.\n\n resp = self.client().post('/bucketlists/', data = self.bucket, headers=dict(Authorization=token))\n resp = self.client().post('/bucketlists/1/items/', data = self.bucketitems, headers=dict(Authorization=token)) ## Create the item.\n\n form_data = {'name': 'walking on the moon', 'description': 'Go by the space craft'}\n resp = self.client().put('/bucketlists/1/items/1', data = form_data, headers=dict(Authorization=token))\n self.assertEqual(resp.status_code, 200)\n\n self.assertIn('true', str(resp.data))", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 
'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def test_api_edit_bucketlist(self):\n res_post = self.client().post('/bucketlist', data={'name': 'Wake up, Eat, Code, Sleep & Repeat'})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n id = res_post_in_json['id']\n res_put = self.client().put(\n f'bucketlist/{id}',\n data={\n 'name': \"Don't forget to exercise\"\n }\n )\n self.assertEqual(res_put.status_code, 200)\n res = self.client().get(f'/bucketlist/{id}')\n self.assertIn(\"exercise\", str(res.data))", "def test_item_put_update(self):\n\n data = {\n \"code\": \"TSLA\",\n \"name\": \"Tesla\",\n \"details\": \"Stocks of Tesla Inc.\",\n }\n response = self.post_item(data)\n\n url = reverse(\"item-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"code\": \"AAPL\",\n \"name\": \"Apple\",\n \"details\": \"Stocks of Apple Inc.\",\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_200_OK\n assert put_response.data[\"name\"] == new_data[\"name\"]\n assert put_response.data[\"code\"] == new_data[\"code\"]\n assert put_response.data[\"details\"] == new_data[\"details\"]", "def test_bucketlist_item_edit_with_existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 409)\n self.assertIn(\"Name exists, enter another\", str(resp.data))", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)", "def test_update_bucket(self):\n pass", "def 
test_bucketlist_can_be_edited(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['token']\n res = self.create_bucketlist(self.bucketlist, access_token)\n self.assertEqual(res.status_code, 201)\n results = json.loads(res.data.decode())\n\n res = self.client.put(\n '/lists/{}'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Dont just eat, but also pray and love :-)\"\n })\n self.assertEqual(res.status_code, 200)\n\n results = self.client.get(\n '/lists/{}'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertIn('Dont just eat', str(results.data))", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_nonexistence_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 404)", "def bucketlist_item_update():\n pass", "def test_put_item(client):\n\n response = client.put(\n \"/items/id/9/?name=Sulfuras Hand of Ragnaros&sell_in=3&quality=6\"\n )\n\n assert json.loads(response.data) == {\"message\": \"Item content updated successfully\"}\n assert response.status_code == 201", "def test_add_item_at_using_put(self):\n pass", "def test_bucketlist_item_edit_blank_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n result = self.client.get(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n self.assertEqual(resp.status_code, 400)\n self.assertIn(\"Enter a Valid Name\", str(resp.data))", "def edit_item(**kwargs):\n db.session.add(kwargs[\"item\"])\n db.session.commit()\n if 
kwargs[\"is_bucket\"]:\n item_type = \"bucket list\"\n elif kwargs[\"is_item\"]:\n item_type = \"bucket list item\"\n\n message = {\"message\": \"Successfully updated \" + item_type + \".\"}\n response = marshal(kwargs[\"item\"], kwargs[\"serializer\"])\n response.update(message)\n return response", "def test_api_can_update_shoppingitem(self):\n change_shoppinglist = {'name': 'new item name'}\n response = self.client.put(\n reverse('shoppingitemdetails', args=[1, 1]),\n change_shoppinglist, format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method tests the error raised when the endpoint for updating a bucket list item via PUT is given the wrong id
def test_put_item_wrong_id(self):
    data = {"name": "bucketlist item name", "completed": "true"}
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()
    self.assertFalse(item)

    response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '404 NOT FOUND')
    self.assertEqual(
        result['message'],
        'Bucketlist Item with ID {} not found in the database. You have requested this URI '
        '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'
        ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)
    )
[ "def test_id_of_bucket_to_be_edited_is_invalid(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/bucketid',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 400)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def test_id_of_bucket_to_be_edited_does_not_exist(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 404)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'The Bucket with Id 1 does not exist')", "def test_nonexistence_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 404)", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)", "def test_put_bucketlists_detail(self):\n\n update_bucketlist_item = {\"title\": \"Item Three\"}\n\n # Asserting no access without token\n response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(auth_response.status_code, 200)\n self.assertEqual(Bucketlist.objects.count(), 2)\n self.assertEqual(auth_response.data.get('message'), \"Item '2' updated successfully\")", "def test_api_edit_bucketlist(self):\n res_post = self.client().post('/bucketlist', data={'name': 'Wake 
up, Eat, Code, Sleep & Repeat'})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n id = res_post_in_json['id']\n res_put = self.client().put(\n f'bucketlist/{id}',\n data={\n 'name': \"Don't forget to exercise\"\n }\n )\n self.assertEqual(res_put.status_code, 200)\n res = self.client().get(f'/bucketlist/{id}')\n self.assertIn(\"exercise\", str(res.data))", "def test_delete_item_wrong_id(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def test_bucketlist_item_edit_with_existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 409)\n self.assertIn(\"Name exists, enter another\", str(resp.data))", "def test_put_fail2(client):\n # test that viewing the page renders without template errors\n response = client.put(\"/api/authors?author=48627\", data={'rating':4.3})\n # print(response)\n assert response.status_code == 400", "def test_item_put_update(self):\n\n data = {\n \"code\": \"TSLA\",\n \"name\": \"Tesla\",\n 
\"details\": \"Stocks of Tesla Inc.\",\n }\n response = self.post_item(data)\n\n url = reverse(\"item-detail\", None, {response.data[\"id\"]})\n new_data = {\n \"code\": \"AAPL\",\n \"name\": \"Apple\",\n \"details\": \"Stocks of Apple Inc.\",\n }\n put_response = self.client.put(url, new_data, format=\"json\")\n\n assert put_response.status_code == status.HTTP_200_OK\n assert put_response.data[\"name\"] == new_data[\"name\"]\n assert put_response.data[\"code\"] == new_data[\"code\"]\n assert put_response.data[\"details\"] == new_data[\"details\"]", "def test_bucketitems_can_be_edited(self):\n\n resp = self.client().post('/auth/register', data = self.user)\n\n resp_login = self.client().post('/auth/login', data = self.form_data) ## Login the user.\n token = json.loads(resp_login.data.decode())['auth_token'] ## Get the authentication token.\n\n resp = self.client().post('/bucketlists/', data = self.bucket, headers=dict(Authorization=token))\n resp = self.client().post('/bucketlists/1/items/', data = self.bucketitems, headers=dict(Authorization=token)) ## Create the item.\n\n form_data = {'name': 'walking on the moon', 'description': 'Go by the space craft'}\n resp = self.client().put('/bucketlists/1/items/1', data = form_data, headers=dict(Authorization=token))\n self.assertEqual(resp.status_code, 200)\n\n self.assertIn('true', str(resp.data))", "def test_update_bucket(self):\n pass", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"test@test.com\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"test@test.com\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def test_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = 
self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = self.client.get(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Stan Chart\", str(result.data))", "def test_edit_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))", "def test_put_item(client):\n\n response = client.put(\n \"/items/id/9/?name=Sulfuras Hand of Ragnaros&sell_in=3&quality=6\"\n )\n\n assert json.loads(response.data) == {\"message\": \"Item content updated successfully\"}\n assert response.status_code == 201" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method tests the request to delete a bucketlist item
def test_delete_bucketlist_item(self):
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    self.assertTrue(item)

    response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(
        result['message'],
        'Bucketlist Item with ID {} deleted'.format(item.id)
    )
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    self.assertFalse(item)
[ "def bucketlist_item_delete():\n pass", "def test_bucketlist_item_deletion(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n # delete created bucketlist item\n resp = self.client.delete(\"/bucketlists/1/items/1/\",\n headers=dict(\n Authorization=access_token))\n self.assertEqual(resp.status_code, 200)\n # confirm item has been deleted by accessing it\n resp = self.client.get(\n \"/bucketlists/1/items/1/\", headers=dict(\n Authorization=access_token))\n self.assertEqual(resp.status_code, 404)", "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n delete_item = self.client.delete('/bucketlistitems/1/items/1',\n headers={\n \"Authorization\": self.token\n })\n self.assertEqual(delete_item.status_code, 204)", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def test_api_delete_bucketlist(self):\n\n res_post = self.client().post('/bucketlist', data={'name': \"Don't forget to exercise\"})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8'))\n id = res_post_in_json['id']\n res_delete = self.client().delete(f\"/bucketlist/{id}\")\n self.assertEqual(res_delete.status_code, 200)\n\n # should return 404 after delete the data\n res = self.client().get(f'/bucketlist/{id}')\n self.assertEqual(res.status_code, 404)", "def test_delete_bucketlist_item(self):\n self.test_store.add_bucketlist('travel', 'visit london')\n self.test_store.add_bucketlist_item(2, \"Tour Big Ben\", \"12 Nov 2017\")\n test_bucketlist = self.test_store.get_single_bucketlist(2)\n initial_bucketlist_items = len(test_bucketlist['items'])\n self.test_store.remove_bucketlist_item(2, 1)\n final_bucketlist_items = len(test_bucketlist['items'])\n self.assertEquals(\n 1,\n initial_bucketlist_items-final_bucketlist_items,\n 'Items not removed'\n )", "def test_bucketlist_deletion(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['token']\n res = self.create_bucketlist(self.bucketlist, access_token)\n self.assertEqual(res.status_code, 201)\n results = json.loads(res.data.decode())\n\n res = self.client.delete(\n 
'/lists/{}'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),)\n self.assertEqual(res.status_code, 200)\n\n result = self.client.get(\n '/lists/1',\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def test_delete_bucketlists_detail(self):\n\n # Asserting no access without token\n response = self.client.delete(url_one)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.delete(url_one)\n self.assertEqual(auth_response.status_code, 204)\n self.assertEqual(BucketlistItem.objects.count(), 1)", "def test_bucket_is_deleted(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Delete the created Bucket\n res = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Bucket Deleted successfully')\n self.assertTrue(res.content_type == 'application/json')", "def bucketlist_delete():\n pass", "def test_delete_item_wrong_id(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_deletion_handles_no_bucket_found_by_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Bucket resource cannot be found')\n self.assertTrue(response.content_type == 'application/json')", "def test_api_can_delete_item(self):\n # create an item\n self.test_shoppingitem()\n # delete item\n res2 = self.client().delete(\"/shoppinglists/1/items/1\",\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertIn(\"Bread\", str(res2.data))", "def test_request_for_deleting_bucket_has_integer_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def delete_bucketlist():\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_delete_bucket(self):\n pass", "def delete_item(item, name, **kwargs):\n if item:\n db.session.delete(item)\n db.session.commit()\n if kwargs[\"is_bucket\"]:\n item_type = \"bucket list\"\n elif kwargs[\"is_item\"]:\n item_type = \"bucket list item\"\n return {\"message\": \"Successfully deleted \" + item_type + \": '\" +\n name + \"'.\"}\n else:\n return {\"message\": \"Delete was unsuccessful. Please try again!\"}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method tests the error raised when the endpoint for deleting a bucket list item is given the wrong id
def test_delete_item_wrong_id(self):
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()
    self.assertFalse(item)

    response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '404 NOT FOUND')
    self.assertEqual(
        result['message'],
        'Bucketlist Item with ID {} not found in the database. You have requested this URI '
        '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'
        ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)
    )
[ "def test_deletion_handles_no_bucket_found_by_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Bucket resource cannot be found')\n self.assertTrue(response.content_type == 'application/json')", "def test_request_for_deleting_bucket_has_integer_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def test_api_delete_bucketlist(self):\n\n res_post = self.client().post('/bucketlist', data={'name': \"Don't forget to exercise\"})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8'))\n id = res_post_in_json['id']\n res_delete = self.client().delete(f\"/bucketlist/{id}\")\n self.assertEqual(res_delete.status_code, 200)\n\n # should return 404 after delete the data\n res = self.client().get(f'/bucketlist/{id}')\n self.assertEqual(res.status_code, 404)", "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n delete_item = self.client.delete('/bucketlistitems/1/items/1',\n headers={\n \"Authorization\": self.token\n })\n self.assertEqual(delete_item.status_code, 204)", "def test_delete_bucketlist_item(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertTrue(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '200 OK')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} deleted'.format(item.id)\r\n )\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertFalse(item)", "def test_delete_bucketlists_detail(self):\n\n # Asserting no access without token\n response = self.client.delete(url_one)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.delete(url_one)\n self.assertEqual(auth_response.status_code, 204)\n self.assertEqual(BucketlistItem.objects.count(), 1)", "def test_delete_bad_obj_id(self):\r\n res = self.post({\"action-delete\": \"element-1\"})\r\n\r\n self.assertEqual(res.json, {\"messages\": [], \"no_replace\": True})", "def test_bucketlist_item_deletion(self):\n 
resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n # delete created bucketlist item\n resp = self.client.delete(\"/bucketlists/1/items/1/\",\n headers=dict(\n Authorization=access_token))\n self.assertEqual(resp.status_code, 200)\n # confirm item has been deleted by accessing it\n resp = self.client.get(\n \"/bucketlists/1/items/1/\", headers=dict(\n Authorization=access_token))\n self.assertEqual(resp.status_code, 404)", "def test_delete_car_invalid_id():\n response = client.delete(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND", "def test_put_item_wrong_id(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()\r\n self.assertFalse(item)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 0, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '404 NOT FOUND')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} not found in the database. 
You have requested this URI '\\\r\n '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\\\r\n ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)\r\n )", "def test_AlgorithmsIdHandler_DELETE_MalformedRequest(self):\n searchedId='xyz' + ' ' + '1'\n response = self.testapp.delete('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(400, response.status_int, msg='Wrong answer code')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding='UTF-8'))", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def test_bucketlist_deletion(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['token']\n res = self.create_bucketlist(self.bucketlist, access_token)\n self.assertEqual(res.status_code, 201)\n results = json.loads(res.data.decode())\n\n res = self.client.delete(\n '/lists/{}'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),)\n self.assertEqual(res.status_code, 200)\n\n result = self.client.get(\n '/lists/1',\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def bucketlist_item_delete():\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_bucket_is_deleted(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Delete the created Bucket\n res = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Bucket Deleted successfully')\n self.assertTrue(res.content_type == 'application/json')", "def test_api_can_delete_item(self):\n # create an item\n self.test_shoppingitem()\n # delete item\n res2 = self.client().delete(\"/shoppinglists/1/items/1\",\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertIn(\"Bread\", str(res2.data))", "def test_delete_for_deleted_image_fails(self):\n with self.assertRaises(ItemNotFound):\n self.images_client.delete_image(self.image.id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method used to send a PUT request for a bucketlist item to the API
def put_bucketlist_item(self, email, password, bucketlist_id, item_id, data):
    headers = self.authentication_headers(email=email, password=password)
    return self.client.put(
        '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id),
        content_type="application/json",
        data=json.dumps(data),
        headers=headers,
        follow_redirects=True
    )
[ "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def test_put_bucketlists_detail(self):\n\n update_bucketlist_item = {\"title\": \"Item Three\"}\n\n # Asserting no access without token\n response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data, message)\n\n # set authentication token in header\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n\n # Asserting access upon auth by token\n auth_response = self.client.put(url_one, update_bucketlist_item)\n self.assertEqual(auth_response.status_code, 200)\n self.assertEqual(Bucketlist.objects.count(), 2)\n self.assertEqual(auth_response.data.get('message'), \"Item '2' updated successfully\")", "def bucketlist_item_update():\n pass", "def put(self, id, item_id):\n\n args = validate_args({'task': False, 'done': False})\n user_id = get_user_id_from_token(token)\n bucketlist = BucketListModel.query.filter_by(\n id=id, created_by=int(user_id)).first()\n if not bucketlist:\n abort(404, message=\"Bucketlist not found\")\n\n item = BucketListItemModel.query.filter_by(\n bucketlist_id=bucketlist.id, id=int(item_id)).first()\n if not item:\n abort(400, message=\"Bucketlist Item does not exist\")\n\n if args.get('task'):\n item.task = args.get('task')\n\n if args.get('done'):\n item.done = True if args.get('done') == 'true' else False\n\n if not save(item):\n abort(409, message=\"Unable to update record\")\n\n return item.get(), 201", "def test_edit_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", 
headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)", "def update_bucketlist():\n pass", "def test_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = self.client.get(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Stan Chart\", str(result.data))", "def test_api_edit_bucketlist(self):\n res_post = self.client().post('/bucketlist', data={'name': 'Wake up, Eat, Code, Sleep & Repeat'})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n id = res_post_in_json['id']\n res_put = self.client().put(\n f'bucketlist/{id}',\n data={\n 'name': \"Don't forget to exercise\"\n }\n )\n self.assertEqual(res_put.status_code, 200)\n res = self.client().get(f'/bucketlist/{id}')\n self.assertIn(\"exercise\", str(res.data))", "def edit_item(**kwargs):\n db.session.add(kwargs[\"item\"])\n db.session.commit()\n if kwargs[\"is_bucket\"]:\n item_type = \"bucket list\"\n elif kwargs[\"is_item\"]:\n item_type = \"bucket list item\"\n\n message = {\"message\": \"Successfully updated \" + item_type + \".\"}\n response = marshal(kwargs[\"item\"], kwargs[\"serializer\"])\n response.update(message)\n return response", "def bucketlist_update():\n pass", "def test_bucketlist_can_be_edited(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['token']\n res = self.create_bucketlist(self.bucketlist, access_token)\n self.assertEqual(res.status_code, 201)\n results = json.loads(res.data.decode())\n\n res = self.client.put(\n '/lists/{}'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Dont just eat, but also pray and love :-)\"\n })\n self.assertEqual(res.status_code, 200)\n\n results = self.client.get(\n '/lists/{}'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertIn('Dont just eat', str(results.data))", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n 
self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def test_bucketitems_can_be_edited(self):\n\n resp = self.client().post('/auth/register', data = self.user)\n\n resp_login = self.client().post('/auth/login', data = self.form_data) ## Login the user.\n token = json.loads(resp_login.data.decode())['auth_token'] ## Get the authentication token.\n\n resp = self.client().post('/bucketlists/', data = self.bucket, headers=dict(Authorization=token))\n resp = self.client().post('/bucketlists/1/items/', data = self.bucketitems, headers=dict(Authorization=token)) ## Create the item.\n\n form_data = {'name': 'walking on the moon', 'description': 'Go by the space craft'}\n resp = self.client().put('/bucketlists/1/items/1', data = form_data, headers=dict(Authorization=token))\n self.assertEqual(resp.status_code, 200)\n\n self.assertIn('true', str(resp.data))", "def editBucketlistsbyid():", "def test_bucketlist_item_edit_with_existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 409)\n self.assertIn(\"Name exists, enter another\", str(resp.data))", "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)", "def test_nonexistence_bucketlist_item_edit(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 404)", "def _put(self, item, data):\n return super(Consul, self)._put(self._std_headers(), item, data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if model uses submodules
def defined_submodule(arr):
    return any([el.endswith('_module]') for el in arr])
[ "def model_from_timm(model: Module) -> bool:\n if \"timm\" in model.__module__.split(\".\"):\n return True\n\n is_fisrt = True\n for sub_module in model.modules():\n if is_fisrt: # First module is the module itself.\n is_fisrt = False\n continue\n\n if model_from_timm(sub_module):\n return True\n\n return False", "def test_submodule(submodule):\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n # Ensure submodule is defined within the package we are inspecting (and not 'base')\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_module", "def _submodule_existance (self, module_path):\n modules = Module.objects.filter (path=module_path). \\\n filter (parent__isnull=False)\n self.assertTrue (len(modules) > 0,\n \"Module with path %s does not exist in fixture.\" %\n module_path)\n\n parent_module = modules[0].parent\n self.assertFalse (parent_module is None,\n \"Module with path %s is not a submodule.\" %\n module_path)", "def has_subobjects_of_module(obj, module_name):\n if not is_module_available(module_name):\n return False\n for sub_object in get_object_hierarchy(obj):\n if object_belongs_to_module(sub_object, module_name):\n return True\n return False", "def check_model_contains_quantized_modules(model: nn.Module) -> bool:\n try:\n from pytorch_quantization.nn.modules._utils import QuantMixin\n except ImportError:\n # If pytorch_quantization is not installed then by definition the model cannot contain any quantized modules\n return False\n\n from super_gradients.training.utils.quantization.core import SGQuantMixin\n\n model = unwrap_model(model)\n for m in model.modules():\n if isinstance(m, (QuantMixin, SGQuantMixin)):\n return True\n\n return False", "def can_use_model(self, model):\n pass", "def __subclasscheck__(cls, sub):\n cls_mod_name = (cls.__module__, cls.__name__)\n for b in sub.__mro__:\n b_mod_name = (b.__module__, b.__name__)\n if cls_mod_name == b_mod_name:\n return True", "def check_submodules():\n if not os.path.exists('.git'):\n return\n with open('.gitmodules') as f:\n for l in f:\n if 'path' in l:\n p = l.split('=')[-1].strip()\n if not os.path.exists(p):\n raise ValueError('Submodule %s missing' % p)\n\n\n proc = subprocess.Popen(['git', 'submodule', 'status'],\n stdout=subprocess.PIPE)\n status, _ = proc.communicate()\n status = status.decode(\"ascii\", \"replace\")\n for line in status.splitlines():\n if line.startswith('-') or line.startswith('+'):\n raise ValueError('Submodule not clean: %s' % line)", "def has_model(self, directory):\n return os.path.isfile(directory + '/models.py')", "def is_model(model_name: str) -> bool:\n arch_name = get_arch_name(model_name)\n return arch_name in _model_entrypoints", "def check_submodules():\n if not os.path.exists('.git'):\n return\n with open('.gitmodules') as f:\n for l in f:\n if 'path' in l:\n p = l.split('=')[-1].strip()\n if not os.path.exists(p):\n raise ValueError('Submodule %s missing' % p)\n\n proc = subprocess.Popen(['git', 'submodule', 'status'],\n stdout=subprocess.PIPE)\n status, _ = proc.communicate()\n status = status.decode(\"ascii\", \"replace\")\n for line in status.splitlines():\n if line.startswith('-') or line.startswith('+'):\n raise ValueError('Submodule not clean: %s' % line)", "def object_belongs_to_module(obj, module_name):\n return any(module_name == x for x in type(obj).__module__.split('.'))", "def check_models_are_in_init():\n models_not_in_init = []\n dir_transformers = dir(transformers)\n for module in get_model_modules():\n 
models_not_in_init += [\n model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers\n ]\n\n # Remove private models\n models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)]\n if len(models_not_in_init) > 0:\n raise Exception(f\"The following models should be in the main init: {','.join(models_not_in_init)}.\")", "def isModellingFramework(*args):\n return _libsbml.SBO_isModellingFramework(*args)", "def hasModel(self, model):\n if model in self.models:\n return S_OK()\n else:\n return S_ERROR(\"Model %s is not defined, use any of %s\" % (model, self.models.keys()))", "def is_model_library(self):\n raise NotImplementedError(\n 'operation is_model_library(...) not yet implemented')", "def requires_model_loading(self):\n return self.requires_loaded_models", "def models_compatible(model_a: ModuleModel, model_b: ModuleModel) -> bool:\n if model_a == model_b:\n return True\n return model_b.value in _load_v2_module_def(model_a)['compatibleWith']", "def has_single_module(self) -> bool:\n return len(self._modules) == 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fills up the ReplayBuffer memory with PRETRAIN_LENGTH number of experiences before training begins.
def initialize_memory(self, pretrain_length, env):
    if self.memlen >= pretrain_length:
        print("Memory already filled, length: {}".format(len(self.memory)))
        return
    interval = max(10, int(pretrain_length/25))
    print("Initializing memory buffer.")
    obs = env.states
    while self.memlen < pretrain_length:
        actions = np.random.uniform(-1, 1, (self.agent_count, self.action_size))
        next_obs, rewards, dones = env.step(actions)
        self.store((obs, next_obs, actions, rewards, dones))
        obs = next_obs
        if np.any(dones):
            env.reset()
            obs = env.states
            self.memory.init_n_step()
        if self.memlen % interval == 1 or self.memlen >= pretrain_length:
            print("...memory filled: {}/{}".format(self.memlen, pretrain_length))
    print("Done!")
[ "def __initialize_buffers(self):\n obs = self.env.reset()\n\n bar = (range(MIN_REPLAY_SIZE))\n for _ in bar:\n action = self.env.sample()\n\n new_obs, rew, done, _ = self.env.step(action)\n transition = (obs, action, rew, done, new_obs)\n obs = new_obs\n\n self.__replay_buffer.append(transition)\n # this following isn't actually correct but it doesnt affect the training.\n self.__rew_buffer.append(self.env.fidelity())\n\n if done:\n obs = self.env.reset()", "def overwrite_replay_buffer_and_pre_train_agent(self, replay_buffer, training_iterations, only_train_final_layer,\n only_train_new_actions):\n assert replay_buffer is not None\n self.agent.memory = replay_buffer\n if only_train_final_layer:\n print(\"Only training the final layer\")\n self.freeze_all_but_output_layers(self.agent.q_network_local)\n for _ in range(training_iterations):\n if only_train_new_actions: new_actions = self.new_actions_just_added\n else: new_actions = []\n output = self.agent.learn(print_loss=False, only_these_actions=new_actions)\n if output == \"BREAK\": break\n if only_train_final_layer: self.unfreeze_all_layers(self.agent.q_network_local)", "def collect_samples(self):\n self.replay_buffer = self.collect_initial_batch(\n self.replay_buffer, self.acm_pre_train_samples\n )", "def prepare_data(self, train_data, **kwargs):\n data_len = len(train_data[\"done\"])\n for index in range(data_len):\n if self.multi_step == 1:\n self.buff.add(train_data[\"cur_state\"][index],\n train_data[\"action\"][index],\n train_data[\"reward\"][index],\n train_data[\"next_state\"][index],\n float(train_data[\"done\"][index])) # Add replay buffer", "def _preallocate_samples(self):\n self.prealloc_samples_ = []\n for _ in range(self.num_prealloc_samples_):\n self.prealloc_samples_.append(self.sample())", "def _replay(self):\n\n losses = []\n\n train_data = utils.memory_dataset(self._Memory._samples, utils.transform())\n\n params = {\n 'batch_size': self._Model.batch_size,\n 'sampler': torch.utils.data.RandomSampler(train_data),\n # 'shuffle': True,\n 'num_workers': 2,\n 'drop_last': True,\n 'pin_memory': True\n }\n\n train_data_gen = torch.utils.data.DataLoader(train_data, **params)\n\n for i in range(self._training_epochs):\n loss = 0\n\n for local_batch in train_data_gen:\n # local_bath[0][0] is full_s, a list of length 6, each of which is tensor of 64\n\n loss += self._Model.train(local_batch, True)\n \n losses.append(loss)\n\n # if i * self._Model.batch_size <= self._Memory._size_now() - self._Model.batch_size:\n # batch = self._Memory._samples[i*self._Model.batch_size: (i+1)*self._Model.batch_size]\n\n # for _, b in enumerate(batch):\n # self._Model.train(b)", "def _add_to_replay_buffer(self, sample_buffer):\n n = 0\n for episode in sample_buffer.episodes():\n v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards\n\n m = len(r)\n\n mask = np.ones((m,))\n terminal = np.zeros((m,), dtype=np.bool)\n if episode.terminal:\n terminal[-1] = True\n else:\n mask[-1] = 0\n\n self.replay_buf_view.put(v)\n self.replay_buf_feature.put(f)\n self.replay_buf_action.put(a)\n self.replay_buf_reward.put(r)\n self.replay_buf_terminal.put(terminal)\n self.replay_buf_mask.put(mask)\n\n n += m\n\n self.replay_buf_len = min(self.memory_size, self.replay_buf_len + n)\n return n", "def __init__(self, buffer_size):\n self.num_experiences = 0\n self.buffer = deque(maxlen=buffer_size)", "def _build_replay_buffer(self):\n return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(\n 
observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype)", "def __init__(self, env, preprocess, sess):\n self.D = deque(maxlen=self.REPLAY_MEMORY_SIZE)\n self.populate_memory(env, preprocess, sess)", "def __init__(self, replay_buffer_max_size=50):\n\n # replay buffer 사이즈 1보다 작을 경우 에러 메세지 출력\n assert replay_buffer_max_size > 0, print(const_var_cyclegan.MSG_REPLAY_BUFFER_ERROR)\n\n # replay buffer 사이즈\n self.replay_buffer_max_size = replay_buffer_max_size\n # replay buffer 리스트\n self.replay_buffer = []", "def reset(self):\n self.prev_char = ''\n self.unmerged = []\n self.data_layer = AudioBuffersDataLayer()\n self.data_loader = DataLoader(self.data_layer, batch_size=self.batch_size, collate_fn=speech_collate_fn)\n self.all_logits = []\n self.all_preds = []\n self.toks_unmerged = []\n self.frame_buffers = []\n self.frame_bufferer.reset()", "def set_recurrent_buffers(self, buf_size):\n self.feat_enc_net.reset_lstm(buf_size=buf_size)", "def pretrain_v2(env: ConnectFourEnv, params: Dict, player: DeepQPlayer):\n\n # If the player is not deepQ player, it's meaningless to pre-train.\n if not isinstance(player, DeepQPlayer):\n return\n\n print(f\"Pre-train {player!r}\")\n print(\"-\" * 30)\n memory_size = params[\"REPLAY_BUFFER_MAX_LENGTH\"]\n replay_memory = ReplayMemory(memory_size)\n\n # Setup random players generating the memories.\n player1: Player = PretrainRandomPlayerOnlyReward(env, replay_memory,\n seed=3359)\n player2: Player = PretrainRandomPlayerOnlyReward(env, replay_memory,\n seed=4904)\n players = {1: player1, 2: player2}\n\n total_step = 0\n for i in range(memory_size // 2):\n _, total_step = train_one_episode(env, params, players, total_step)\n print(f\"\\rPreparing Pre-train memory: {2 * (i + 1)} / {memory_size}\",\n end=\"\")\n print()\n\n # Pre-train starts here.\n # Pass in generated memories to player to be pre-trained.\n player.model.memory.memory = replay_memory.memory\n\n # Prepare for experience replay.\n batch_size = params[\"BATCH_SIZE\"]\n utilisation_rate = params[\"PRETRAIN_UTILISATION_RATE\"]\n utilisation_rate = min(1, max(0, utilisation_rate)) # clip within [0,1].\n utilisation_rate = utilisation_rate if utilisation_rate < 1 else 0.9999\n\n # Solving 1 - utilisation_rate = (1 - batch_size / memory)**n for n.\n n_episode = int(\n log(1 - utilisation_rate) / log(1 - batch_size / memory_size))\n print(f\"Pre-train {player!r} for {n_episode} episodes \"\n f\"to achieve {utilisation_rate:.0%} utilisation of prepared memory\")\n\n # Experience replay.\n epochs = params[\"EPOCHS_PER_PRETRAIN_LEARNING\"]\n for episode in range(int(n_episode)):\n print(f\"\\rPre-training {player!r} for episode {episode + 1}\", end=\"\")\n player.model.experience_replay(epochs=epochs)\n print()\n\n print(f\"Pre-trained {player!r}\")\n print(\"=\" * 30)", "def reset(self):\n self.buffer.clear()\n self.num_replay_samples = 0\n return", "def initialize_replay_memory(self, size):\n if self.replay_memory:\n self.train_logger.info('INFO: Replay memory already initialized')\n return\n\n assert size >= self.batch_size, \"Initialize with size >= batch size\"\n\n old_state = self.env.reset()\n for i in range(size):\n action = random.choice(self.valid_actions)\n new_state, reward, done, _ = self.env.step(action)\n reward = np.clip(self.scale * reward, -self.clip, self.clip)\n self.replay_memory.append(transition(old_state, action,\n reward, new_state, done))\n\n if done:\n old_state = 
self.env.reset()\n else:\n old_state = new_state\n\n self.train_logger.info(f'INFO: Replay memory initialized with {size} experiences')", "def train(self, sample_buffer, print_every=1000):\n add_num = self._add_to_replay_buffer(sample_buffer)\n batch_size = self.batch_size\n total_loss = 0\n\n n_batches = int(self.train_freq * add_num / batch_size)\n if n_batches == 0:\n return 0, 0\n\n print(\"batch number: %d add: %d replay_len: %d/%d\" %\n (n_batches, add_num, self.replay_buf_len, self.memory_size))\n\n start_time = time.time()\n ct = 0\n for i in range(n_batches):\n # fetch a batch\n index = np.random.choice(self.replay_buf_len - 1, batch_size)\n\n batch_view = self.replay_buf_view.get(index)\n batch_feature = self.replay_buf_feature.get(index)\n batch_action = self.replay_buf_action.get(index)\n batch_reward = self.replay_buf_reward.get(index)\n batch_terminal = self.replay_buf_terminal.get(index)\n batch_mask = self.replay_buf_mask.get(index)\n\n batch_next_view = self.replay_buf_view.get(index+1)\n batch_next_feature = self.replay_buf_feature.get(index+1)\n\n batch_target = self._calc_target(batch_next_view, batch_next_feature,\n batch_reward, batch_terminal)\n\n self._reset_bind_size(batch_size)\n batch = mx.io.DataBatch(data=[mx.nd.array(batch_view),\n mx.nd.array(batch_feature)],\n label=[mx.nd.array(batch_action),\n mx.nd.array(batch_target),\n mx.nd.array(batch_mask)])\n self.model.forward(batch, is_train=True)\n self.model.backward()\n self.model.update()\n loss = np.mean(self.model.get_outputs()[1].asnumpy())\n total_loss += loss\n\n if ct % self.target_update == 0:\n self._copy_network(self.target_model, self.model)\n\n if ct % print_every == 0:\n print(\"batch %5d, loss %.6f, eval %.6f\" % (ct, loss, self._eval(batch_target)))\n ct += 1\n self.train_ct += 1\n\n total_time = time.time() - start_time\n step_average = total_time / max(1.0, (ct / 1000.0))\n print(\"batches: %d, total time: %.2f, 1k average: %.2f\" % (ct, total_time, step_average))\n\n return total_loss / ct if ct != 0 else 0, self._eval(batch_target)", "def update_replay_sample_priority(self) -> None:\n num_samples_trained_this_itr = 0\n for _ in range(self.learner_thread.outqueue.qsize()):\n if self.learner_thread.is_alive():\n (\n replay_actor_id,\n priority_dict,\n env_steps,\n agent_steps,\n ) = self.learner_thread.outqueue.get(timeout=0.001)\n if self.config.replay_buffer_config.get(\"prioritized_replay_alpha\") > 0:\n self._replay_actor_manager.foreach_actor(\n func=lambda actor: actor.update_priorities(priority_dict),\n remote_actor_ids=[replay_actor_id],\n timeout_seconds=0, # Do not wait for results.\n )\n num_samples_trained_this_itr += env_steps\n self.update_target_networks(env_steps)\n self._counters[NUM_ENV_STEPS_TRAINED] += env_steps\n self._counters[NUM_AGENT_STEPS_TRAINED] += agent_steps\n self.workers.local_worker().set_global_vars(\n {\"timestep\": self._counters[NUM_ENV_STEPS_TRAINED]}\n )\n else:\n raise RuntimeError(\"The learner thread died while training\")\n\n self._timers[\"learner_dequeue\"] = self.learner_thread.queue_timer\n self._timers[\"learner_grad\"] = self.learner_thread.grad_timer\n self._timers[\"learner_overall\"] = self.learner_thread.overall_timer", "def _build_replay_buffer(self, use_staging):\n return replay_buffer.SAILWrappedReplayBuffer(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n use_staging=use_staging,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype.as_numpy_dtype)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }